Merge with stable
Martin Geisler
r10049:5b9709f8 merge default
# mq.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

'''manage a stack of patches

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use "hg help command" for more details)::

  prepare repository to work with patches   qinit
  create new patch                          qnew
  import existing patch                     qimport

  print patch series                        qseries
  print applied patches                     qapplied

  add known patch to applied stack          qpush
  remove patch from applied stack           qpop
  refresh contents of top applied patch     qrefresh
'''

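# Example of the common tasks listed above (the patch name is only
# illustrative):
#
#   hg qinit             # prepare repository to work with patches
#   hg qnew fix-foo      # create new patch on top of the applied stack
#   hg qrefresh          # refresh contents of top applied patch
#   hg qpop -a           # remove all patches from the applied stack
#   hg qpush -a          # re-apply the whole series
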
from mercurial.i18n import _
from mercurial.node import bin, hex, short, nullid, nullrev
from mercurial.lock import release
from mercurial import commands, cmdutil, hg, patch, util
from mercurial import repair, extensions, url, error
import os, sys, re, errno

commands.norepo += " qclone"

# Patch names look like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath

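# A statusentry records one applied patch: the changeset hash and the patch
# name, serialized as a single "rev:name" line in the status file.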
class statusentry(object):
    def __init__(self, rev, name=None):
        if not name:
            fields = rev.split(':', 1)
            if len(fields) == 2:
                self.rev, self.name = fields
            else:
                self.rev, self.name = None, None
        else:
            self.rev, self.name = rev, name

    def __str__(self):
        return self.rev + ':' + self.name

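# patchheader parses a patch file's header, extracting the user, date and
# commit message and noting whether a diff body follows. It understands both
# "hg export" style headers ("# HG changeset patch" blocks) and mail-style
# "From:"/"Subject:" tags.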
class patchheader(object):
    def __init__(self, pf):
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        format = None
        subject = None
        diffstart = 0

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.haspatch = diffstart > 1

    def setuser(self, user):
        if not self.updateheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                if self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                if self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setmessage(self, message):
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]

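# The queue object is the in-memory view of a patch queue directory
# (.hg/patches by default): the series, status and guards files, plus dirty
# flags telling save_dirty() which of them need to be written back.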
class queue(object):
    def __init__(self, ui, path, patchdir=None):
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied_dirty = 0
        self.series_dirty = 0
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None
        self.guards_dirty = False
        self._diffopts = None

    @util.propertycache
    def applied(self):
        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            return [statusentry(l) for l in lines]
        return []

    @util.propertycache
    def full_series(self):
        if os.path.exists(self.join(self.series_path)):
            return self.opener(self.series_path).read().splitlines()
        return []

    @util.propertycache
    def series(self):
        self.parse_series()
        return self.series

    @util.propertycache
    def series_guards(self):
        self.parse_series()
        return self.series_guards

    def invalidate(self):
        for a in 'applied full_series series series_guards'.split():
            if a in self.__dict__:
                delattr(self, a)
        self.applied_dirty = 0
        self.series_dirty = 0
        self.guards_dirty = False
        self.active_guards = None

    def diffopts(self):
        if self._diffopts is None:
            self._diffopts = patch.diffopts(self.ui)
        return self._diffopts

    def join(self, *p):
        return os.path.join(self.path, *p)

    def find_series(self, patch):
        pre = re.compile("(\s*)([^#]+)")
        index = 0
        for l in self.full_series:
            m = pre.match(l)
            if m:
                s = m.group(2)
                s = s.rstrip()
                if s == patch:
                    return index
            index += 1
        return None

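    # Guards are recorded in the series file after a patch name as "#+guard"
    # or "#-guard" annotations; guard_re extracts them from the comment part
    # of a series line.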
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parse_series(self):
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))

    def check_guard(self, guard):
        if not guard:
            return _('guard cannot be an empty string')
        bad_chars = '# \t\r\n\f'
        first = guard[0]
        if first in '-+':
            return (_('guard %r starts with invalid character: %r') %
                    (guard, first))
        for c in bad_chars:
            if c in guard:
                return _('invalid character in guard %r: %r') % (guard, c)

    def set_active(self, guards):
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        guards = sorted(set(guards))
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True

    def active(self):
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards

    def set_guards(self, idx, guards):
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True

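    # A patch is pushable only if none of its negative guards is active and,
    # if it has positive guards, at least one of them is active.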
    def pushable(self, idx):
        if isinstance(idx, str):
            idx = self.series.index(idx)
        patchguards = self.series_guards[idx]
        if not patchguards:
            return True, None
        guards = self.active()
        exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
        if exactneg:
            return False, exactneg[0]
        pos = [g for g in patchguards if g[0] == '+']
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, exactpos[0]
            return False, pos
        return True, ''

    def explain_pushable(self, idx, all_patches=False):
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])

    def save_dirty(self):
        def write_list(items, path):
            fp = self.opener(path, 'w')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
        if self.series_dirty: write_list(self.full_series, self.series_path)
        if self.guards_dirty: write_list(self.active_guards, self.guards_path)

    def removeundo(self, repo):
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn(_('error removing undo: %s\n') % str(inst))

    def printdiff(self, repo, node1, node2=None, files=None,
                  fp=None, changes=None, opts={}):
        stat = opts.get('stat')
        if stat:
            opts['unified'] = '0'

        m = cmdutil.match(repo, files, opts)
        chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
        write = fp is None and repo.ui.write or fp.write
        if stat:
            width = self.ui.interactive() and util.termwidth() or 80
            write(patch.diffstat(util.iterlines(chunks), width=width,
                                 git=self.diffopts().git))
        else:
            for chunk in chunks:
                write(chunk)

    def mergeone(self, repo, mergeq, head, patch, rev):
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, n, update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(ctx.description(), ctx.user(), force=True)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch))
        except:
            raise util.Abort(_("unable to read %s") % patch)

        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)

    def qparents(self, repo, rev=None):
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != nullid:
            arevs = [ x.rev for x in self.applied ]
            p0 = hex(pp[0])
            p1 = hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]

    def mergepatch(self, repo, mergeq, series):
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit('[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)

    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files, eolmode=None)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            return (False, files, False)

        return (True, files, fuzz)

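    # apply() is the locked, transactional wrapper around _apply(): it takes
    # the working directory and repository locks, runs _apply() inside a
    # transaction, and aborts the transaction and invalidates the dirstate
    # if anything goes wrong.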
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files={}):
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction()
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files)
                tr.close()
                self.save_dirty()
                return ret
            except:
                try:
                    tr.abort()
                finally:
                    repo.invalidate()
                    repo.dirstate.invalidate()
                raise
        finally:
            del tr
            release(lock, wlock)
            self.removeundo(repo)

    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files={}):
        '''returns (error, hash)
        error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname))
            except:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                message = _("imported patch %s\n") % patchname
            else:
                if list:
                    message.append(_("\nimported patch %s") % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                (patcherr, files, fuzz) = self.patch(repo, pf)
                all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.exists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)

            files = patch.updatedir(self.ui, repo, files)
            match = cmdutil.matchfiles(repo, files or [])
            n = repo.commit(message, ph.user, ph.date, match=match, force=True)

            if n is None:
                raise util.Abort(_("repo commit failed"))

            if update_status:
                self.applied.append(statusentry(hex(n), patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working dir\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)

    def _cleanup(self, patches, numrevs, keep=False):
        if not keep:
            r = self.qrepo()
            if r:
                r.remove(patches, True)
            else:
                for p in patches:
                    os.unlink(self.join(p))

        if numrevs:
            del self.applied[:numrevs]
            self.applied_dirty = 1

        for i in sorted([self.find_series(p) for p in patches], reverse=True):
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1

    def _revpatches(self, repo, revs):
        firstrev = repo[self.applied[0].rev].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise util.Abort(_('revision %d is not managed') % rev)

            ctx = repo[rev]
            base = bin(self.applied[i].rev)
            if ctx.node() != base:
                msg = _('cannot delete revision %d above applied patches')
                raise util.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches

    def finish(self, repo, revs):
        patches = self._revpatches(repo, sorted(revs))
        self._cleanup(patches, len(patches))

    def delete(self, repo, patches, opts):
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        numrevs = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get('keep'))

    def check_toppatch(self, repo):
        if len(self.applied) > 0:
            top = bin(self.applied[-1].rev)
            pp = repo.dirstate.parents()
            if top not in pp:
                raise util.Abort(_("working directory revision is not qtip"))
            return top
        return None
    def check_localchanges(self, repo, force=False, refresh=True):
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            if not force:
                if refresh:
                    raise util.Abort(_("local changes found, refresh first"))
                else:
                    raise util.Abort(_("local changes found"))
        return m, a, r, d

    _reserved = ('series', 'status', 'guards')
    def check_reserved_name(self, name):
        if (name in self._reserved or name.startswith('.hg')
            or name.startswith('.mq')):
            raise util.Abort(_('"%s" cannot be used as the name of a patch')
                             % name)

    def new(self, repo, patchfn, *pats, **opts):
        """options:
        msg: a string or a no-argument function returning a string
        """
        msg = opts.get('msg')
        force = opts.get('force')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        self.check_reserved_name(patchfn)
        if os.path.exists(self.join(patchfn)):
            raise util.Abort(_('patch "%s" already exists') % patchfn)
        if opts.get('include') or opts.get('exclude') or pats:
            match = cmdutil.match(repo, pats, opts)
            # detect missing files in pats
            def badfn(f, msg):
                raise util.Abort('%s: %s' % (f, msg))
            match.bad = badfn
            m, a, r, d = repo.status(match=match)[:4]
        else:
            m, a, r, d = self.check_localchanges(repo, force)
            match = cmdutil.matchfiles(repo, m + a + r)
        commitfiles = m + a + r
        self.check_toppatch(repo)
        insert = self.full_series_end()
        wlock = repo.wlock()
        try:
            # if patch file write fails, abort early
            p = self.opener(patchfn, "w")
            try:
                if date:
                    p.write("# HG changeset patch\n")
                    if user:
                        p.write("# User " + user + "\n")
                    p.write("# Date %d %d\n\n" % date)
                elif user:
                    p.write("From: " + user + "\n\n")

                if hasattr(msg, '__call__'):
                    msg = msg()
                commitmsg = msg and msg or ("[mq]: %s" % patchfn)
                n = repo.commit(commitmsg, user, date, match=match, force=True)
                if n is None:
                    raise util.Abort(_("repo commit failed"))
                try:
                    self.full_series[insert:insert] = [patchfn]
                    self.applied.append(statusentry(hex(n), patchfn))
                    self.parse_series()
                    self.series_dirty = 1
                    self.applied_dirty = 1
                    if msg:
                        msg = msg + "\n\n"
                        p.write(msg)
                    if commitfiles:
                        diffopts = self.diffopts()
                        if opts.get('git'): diffopts.git = True
                        parent = self.qparents(repo, n)
                        chunks = patch.diff(repo, node1=parent, node2=n,
                                            match=match, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    wlock.release()
                    wlock = None
                    r = self.qrepo()
                    if r: r.add([patchfn])
                except:
                    repo.rollback()
                    raise
            except Exception:
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)

    def strip(self, repo, rev, update=True, backup="all", force=None):
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, rev)
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            release(lock, wlock)

    def isapplied(self, patch):
        """returns (index, rev, patch)"""
        for i, a in enumerate(self.applied):
            if a.name == patch:
                return (i, a.rev, a.name)
        return None

    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        patch = patch and str(patch)

        def partial_name(s):
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn('  %s\n' % m)
                return None
            if matches:
                return matches[0]
            if len(self.series) > 0 and len(self.applied) > 0:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch is None:
            return None
        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except(ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

            if not strict:
                res = partial_name(patch)
                if res:
                    return res
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partial_name(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partial_name(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)

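    # push (qpush): apply unapplied patches from the series. With a patch
    # name, apply up to and including that patch; with all=True, apply the
    # rest of the series; otherwise apply just the next patch.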
929 def push(self, repo, patch=None, force=False, list=False,
929 def push(self, repo, patch=None, force=False, list=False,
930 mergeq=None, all=False):
930 mergeq=None, all=False):
931 wlock = repo.wlock()
931 wlock = repo.wlock()
932 try:
932 try:
933 if repo.dirstate.parents()[0] not in repo.heads():
933 if repo.dirstate.parents()[0] not in repo.heads():
934 self.ui.status(_("(working directory not at a head)\n"))
934 self.ui.status(_("(working directory not at a head)\n"))
935
935
936 if not self.series:
936 if not self.series:
937 self.ui.warn(_('no patches in series\n'))
937 self.ui.warn(_('no patches in series\n'))
938 return 0
938 return 0
939
939
940 patch = self.lookup(patch)
940 patch = self.lookup(patch)
941 # Suppose our series file is: A B C and the current 'top'
941 # Suppose our series file is: A B C and the current 'top'
942 # patch is B. qpush C should be performed (moving forward)
942 # patch is B. qpush C should be performed (moving forward)
943 # qpush B is a NOP (no change) qpush A is an error (can't
943 # qpush B is a NOP (no change) qpush A is an error (can't
944 # go backwards with qpush)
944 # go backwards with qpush)
945 if patch:
945 if patch:
946 info = self.isapplied(patch)
946 info = self.isapplied(patch)
947 if info:
947 if info:
948 if info[0] < len(self.applied) - 1:
948 if info[0] < len(self.applied) - 1:
949 raise util.Abort(
949 raise util.Abort(
950 _("cannot push to a previous patch: %s") % patch)
950 _("cannot push to a previous patch: %s") % patch)
951 self.ui.warn(
951 self.ui.warn(
952 _('qpush: %s is already at the top\n') % patch)
952 _('qpush: %s is already at the top\n') % patch)
953 return
953 return
954 pushable, reason = self.pushable(patch)
954 pushable, reason = self.pushable(patch)
955 if not pushable:
955 if not pushable:
956 if reason:
956 if reason:
957 reason = _('guarded by %r') % reason
957 reason = _('guarded by %r') % reason
958 else:
958 else:
959 reason = _('no matching guards')
959 reason = _('no matching guards')
960 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
960 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
961 return 1
961 return 1
962 elif all:
962 elif all:
963 patch = self.series[-1]
963 patch = self.series[-1]
964 if self.isapplied(patch):
964 if self.isapplied(patch):
965 self.ui.warn(_('all patches are currently applied\n'))
965 self.ui.warn(_('all patches are currently applied\n'))
966 return 0
966 return 0
967
967
968 # Following the above example, starting at 'top' of B:
968 # Following the above example, starting at 'top' of B:
969 # qpush should be performed (pushes C), but a subsequent
969 # qpush should be performed (pushes C), but a subsequent
970 # qpush without an argument is an error (nothing to
970 # qpush without an argument is an error (nothing to
971 # apply). This allows a loop of "...while hg qpush..." to
971 # apply). This allows a loop of "...while hg qpush..." to
972 # work as it detects an error when done
972 # work as it detects an error when done
973 start = self.series_end()
973 start = self.series_end()
974 if start == len(self.series):
974 if start == len(self.series):
975 self.ui.warn(_('patch series already fully applied\n'))
975 self.ui.warn(_('patch series already fully applied\n'))
976 return 1
976 return 1
977 if not force:
977 if not force:
978 self.check_localchanges(repo)
978 self.check_localchanges(repo)
979
979
980 self.applied_dirty = 1
980 self.applied_dirty = 1
981 if start > 0:
981 if start > 0:
982 self.check_toppatch(repo)
982 self.check_toppatch(repo)
983 if not patch:
983 if not patch:
984 patch = self.series[start]
984 patch = self.series[start]
985 end = start + 1
985 end = start + 1
986 else:
986 else:
987 end = self.series.index(patch, start) + 1
987 end = self.series.index(patch, start) + 1
988
988
989 s = self.series[start:end]
989 s = self.series[start:end]
990 all_files = {}
990 all_files = {}
991 try:
991 try:
992 if mergeq:
992 if mergeq:
993 ret = self.mergepatch(repo, mergeq, s)
993 ret = self.mergepatch(repo, mergeq, s)
994 else:
994 else:
995 ret = self.apply(repo, s, list, all_files=all_files)
995 ret = self.apply(repo, s, list, all_files=all_files)
996 except:
996 except:
997 self.ui.warn(_('cleaning up working directory...'))
997 self.ui.warn(_('cleaning up working directory...'))
998 node = repo.dirstate.parents()[0]
998 node = repo.dirstate.parents()[0]
999 hg.revert(repo, node, None)
999 hg.revert(repo, node, None)
1000 unknown = repo.status(unknown=True)[4]
1000 unknown = repo.status(unknown=True)[4]
1001 # only remove unknown files that we know we touched or
1001 # only remove unknown files that we know we touched or
1002 # created while patching
1002 # created while patching
1003 for f in unknown:
1003 for f in unknown:
1004 if f in all_files:
1004 if f in all_files:
1005 util.unlink(repo.wjoin(f))
1005 util.unlink(repo.wjoin(f))
1006 self.ui.warn(_('done\n'))
1006 self.ui.warn(_('done\n'))
1007 raise
1007 raise
1008
1008
1009 if not self.applied:
1009 if not self.applied:
1010 return ret[0]
1010 return ret[0]
1011 top = self.applied[-1].name
1011 top = self.applied[-1].name
1012 if ret[0] and ret[0] > 1:
1012 if ret[0] and ret[0] > 1:
1013 msg = _("errors during apply, please fix and refresh %s\n")
1013 msg = _("errors during apply, please fix and refresh %s\n")
1014 self.ui.write(msg % top)
1014 self.ui.write(msg % top)
1015 else:
1015 else:
1016 self.ui.write(_("now at: %s\n") % top)
1016 self.ui.write(_("now at: %s\n") % top)
1017 return ret[0]
1017 return ret[0]
1018
1018
1019 finally:
1019 finally:
1020 wlock.release()
1020 wlock.release()
1021
1021
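# --- editor's illustrative sketch (not part of mq.py) ------------------------
# push() only ever moves forward through the series: pushing a patch that is
# applied but not at the top is an error, pushing the current top is a no-op,
# and pushing a later patch applies everything up to and including it.  A
# minimal stand-alone model of that rule, using plain lists instead of the
# real queue object (function and variable names here are hypothetical):

def qpush_targets(series, applied, patch):
    """Return the slice of series that pushing `patch` would apply."""
    if patch in applied:
        if applied.index(patch) < len(applied) - 1:
            raise ValueError("cannot push to a previous patch: %s" % patch)
        return []                          # already at the top: nothing to do
    start = len(applied)                   # index of the next unapplied patch
    end = series.index(patch, start) + 1
    return series[start:end]

assert qpush_targets(['A', 'B', 'C'], ['A', 'B'], 'C') == ['C']
assert qpush_targets(['A', 'B', 'C'], ['A', 'B'], 'B') == []
# qpush_targets(['A', 'B', 'C'], ['A', 'B'], 'A') raises ValueError
# ------------------------------------------------------------------------------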
1022 def pop(self, repo, patch=None, force=False, update=True, all=False):
1022 def pop(self, repo, patch=None, force=False, update=True, all=False):
1023 def getfile(f, rev, flags):
1023 def getfile(f, rev, flags):
1024 t = repo.file(f).read(rev)
1024 t = repo.file(f).read(rev)
1025 repo.wwrite(f, t, flags)
1025 repo.wwrite(f, t, flags)
1026
1026
1027 wlock = repo.wlock()
1027 wlock = repo.wlock()
1028 try:
1028 try:
1029 if patch:
1029 if patch:
1030 # index, rev, patch
1030 # index, rev, patch
1031 info = self.isapplied(patch)
1031 info = self.isapplied(patch)
1032 if not info:
1032 if not info:
1033 patch = self.lookup(patch)
1033 patch = self.lookup(patch)
1034 info = self.isapplied(patch)
1034 info = self.isapplied(patch)
1035 if not info:
1035 if not info:
1036 raise util.Abort(_("patch %s is not applied") % patch)
1036 raise util.Abort(_("patch %s is not applied") % patch)
1037
1037
1038 if len(self.applied) == 0:
1038 if len(self.applied) == 0:
1039 # Allow qpop -a to work repeatedly,
1039 # Allow qpop -a to work repeatedly,
1040 # but not qpop without an argument
1040 # but not qpop without an argument
1041 self.ui.warn(_("no patches applied\n"))
1041 self.ui.warn(_("no patches applied\n"))
1042 return not all
1042 return not all
1043
1043
1044 if all:
1044 if all:
1045 start = 0
1045 start = 0
1046 elif patch:
1046 elif patch:
1047 start = info[0] + 1
1047 start = info[0] + 1
1048 else:
1048 else:
1049 start = len(self.applied) - 1
1049 start = len(self.applied) - 1
1050
1050
1051 if start >= len(self.applied):
1051 if start >= len(self.applied):
1052 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1052 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1053 return
1053 return
1054
1054
1055 if not update:
1055 if not update:
1056 parents = repo.dirstate.parents()
1056 parents = repo.dirstate.parents()
1057 rr = [ bin(x.rev) for x in self.applied ]
1057 rr = [ bin(x.rev) for x in self.applied ]
1058 for p in parents:
1058 for p in parents:
1059 if p in rr:
1059 if p in rr:
1060 self.ui.warn(_("qpop: forcing dirstate update\n"))
1060 self.ui.warn(_("qpop: forcing dirstate update\n"))
1061 update = True
1061 update = True
1062 else:
1062 else:
1063 parents = [p.hex() for p in repo[None].parents()]
1063 parents = [p.hex() for p in repo[None].parents()]
1064 needupdate = False
1064 needupdate = False
1065 for entry in self.applied[start:]:
1065 for entry in self.applied[start:]:
1066 if entry.rev in parents:
1066 if entry.rev in parents:
1067 needupdate = True
1067 needupdate = True
1068 break
1068 break
1069 update = needupdate
1069 update = needupdate
1070
1070
1071 if not force and update:
1071 if not force and update:
1072 self.check_localchanges(repo)
1072 self.check_localchanges(repo)
1073
1073
1074 self.applied_dirty = 1
1074 self.applied_dirty = 1
1075 end = len(self.applied)
1075 end = len(self.applied)
1076 rev = bin(self.applied[start].rev)
1076 rev = bin(self.applied[start].rev)
1077 if update:
1077 if update:
1078 top = self.check_toppatch(repo)
1078 top = self.check_toppatch(repo)
1079
1079
1080 try:
1080 try:
1081 heads = repo.changelog.heads(rev)
1081 heads = repo.changelog.heads(rev)
1082 except error.LookupError:
1082 except error.LookupError:
1083 node = short(rev)
1083 node = short(rev)
1084 raise util.Abort(_('trying to pop unknown node %s') % node)
1084 raise util.Abort(_('trying to pop unknown node %s') % node)
1085
1085
1086 if heads != [bin(self.applied[-1].rev)]:
1086 if heads != [bin(self.applied[-1].rev)]:
1087 raise util.Abort(_("popping would remove a revision not "
1087 raise util.Abort(_("popping would remove a revision not "
1088 "managed by this patch queue"))
1088 "managed by this patch queue"))
1089
1089
1090 # we know there are no local changes, so we can make a simplified
1090 # we know there are no local changes, so we can make a simplified
1091 # form of hg.update.
1091 # form of hg.update.
1092 if update:
1092 if update:
1093 qp = self.qparents(repo, rev)
1093 qp = self.qparents(repo, rev)
1094 changes = repo.changelog.read(qp)
1094 changes = repo.changelog.read(qp)
1095 mmap = repo.manifest.read(changes[0])
1095 mmap = repo.manifest.read(changes[0])
1096 m, a, r, d = repo.status(qp, top)[:4]
1096 m, a, r, d = repo.status(qp, top)[:4]
1097 if d:
1097 if d:
1098 raise util.Abort(_("deletions found between repo revs"))
1098 raise util.Abort(_("deletions found between repo revs"))
1099 for f in m:
1100 getfile(f, mmap[f], mmap.flags(f))
1101 for f in r:
1102 getfile(f, mmap[f], mmap.flags(f))
1103 for f in m + r:
1104 repo.dirstate.normal(f)
1105 for f in a:
1099 for f in a:
1106 try:
1100 try:
1107 os.unlink(repo.wjoin(f))
1101 os.unlink(repo.wjoin(f))
1108 except OSError, e:
1102 except OSError, e:
1109 if e.errno != errno.ENOENT:
1103 if e.errno != errno.ENOENT:
1110 raise
1104 raise
1111 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1105 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1112 except: pass
1106 except: pass
1113 repo.dirstate.forget(f)
1107 repo.dirstate.forget(f)
1108 for f in m:
1109 getfile(f, mmap[f], mmap.flags(f))
1110 for f in r:
1111 getfile(f, mmap[f], mmap.flags(f))
1112 for f in m + r:
1113 repo.dirstate.normal(f)
1114 repo.dirstate.setparents(qp, nullid)
1114 repo.dirstate.setparents(qp, nullid)
1115 for patch in reversed(self.applied[start:end]):
1115 for patch in reversed(self.applied[start:end]):
1116 self.ui.status(_("popping %s\n") % patch.name)
1116 self.ui.status(_("popping %s\n") % patch.name)
1117 del self.applied[start:end]
1117 del self.applied[start:end]
1118 self.strip(repo, rev, update=False, backup='strip')
1118 self.strip(repo, rev, update=False, backup='strip')
1119 if len(self.applied):
1119 if len(self.applied):
1120 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1120 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1121 else:
1121 else:
1122 self.ui.write(_("patch queue now empty\n"))
1122 self.ui.write(_("patch queue now empty\n"))
1123 finally:
1123 finally:
1124 wlock.release()
1124 wlock.release()
1125
1125
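# --- editor's illustrative sketch (not part of mq.py) ------------------------
# pop() decides how far to unwind the applied stack: --all pops everything,
# naming a patch pops whatever sits above it so that the named patch becomes
# the new top, and the default pops just the current top.  The same selection
# with plain lists (names hypothetical):

def qpop_range(applied, patch=None, all=False):
    """Return the patch names a qpop would remove, bottom-most first."""
    if not applied:
        return []
    if all:
        start = 0
    elif patch is not None:
        start = applied.index(patch) + 1   # keep `patch` applied as new top
    else:
        start = len(applied) - 1
    return applied[start:]

assert qpop_range(['A', 'B', 'C']) == ['C']
assert qpop_range(['A', 'B', 'C'], patch='A') == ['B', 'C']
assert qpop_range(['A', 'B', 'C'], all=True) == ['A', 'B', 'C']
# ------------------------------------------------------------------------------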
1126 def diff(self, repo, pats, opts):
1126 def diff(self, repo, pats, opts):
1127 top = self.check_toppatch(repo)
1127 top = self.check_toppatch(repo)
1128 if not top:
1128 if not top:
1129 self.ui.write(_("no patches applied\n"))
1129 self.ui.write(_("no patches applied\n"))
1130 return
1130 return
1131 qp = self.qparents(repo, top)
1131 qp = self.qparents(repo, top)
1132 if opts.get('reverse'):
1132 if opts.get('reverse'):
1133 node1, node2 = None, qp
1133 node1, node2 = None, qp
1134 else:
1134 else:
1135 node1, node2 = qp, None
1135 node1, node2 = qp, None
1136 self._diffopts = patch.diffopts(self.ui, opts)
1136 self._diffopts = patch.diffopts(self.ui, opts)
1137 self.printdiff(repo, node1, node2, files=pats, opts=opts)
1137 self.printdiff(repo, node1, node2, files=pats, opts=opts)
1138
1138
1139 def refresh(self, repo, pats=None, **opts):
1139 def refresh(self, repo, pats=None, **opts):
1140 if len(self.applied) == 0:
1140 if len(self.applied) == 0:
1141 self.ui.write(_("no patches applied\n"))
1141 self.ui.write(_("no patches applied\n"))
1142 return 1
1142 return 1
1143 msg = opts.get('msg', '').rstrip()
1143 msg = opts.get('msg', '').rstrip()
1144 newuser = opts.get('user')
1144 newuser = opts.get('user')
1145 newdate = opts.get('date')
1145 newdate = opts.get('date')
1146 if newdate:
1146 if newdate:
1147 newdate = '%d %d' % util.parsedate(newdate)
1147 newdate = '%d %d' % util.parsedate(newdate)
1148 wlock = repo.wlock()
1148 wlock = repo.wlock()
1149 try:
1149 try:
1150 self.check_toppatch(repo)
1150 self.check_toppatch(repo)
1151 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1151 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1152 top = bin(top)
1152 top = bin(top)
1153 if repo.changelog.heads(top) != [top]:
1153 if repo.changelog.heads(top) != [top]:
1154 raise util.Abort(_("cannot refresh a revision with children"))
1154 raise util.Abort(_("cannot refresh a revision with children"))
1155 cparents = repo.changelog.parents(top)
1155 cparents = repo.changelog.parents(top)
1156 patchparent = self.qparents(repo, top)
1156 patchparent = self.qparents(repo, top)
1157 ph = patchheader(self.join(patchfn))
1157 ph = patchheader(self.join(patchfn))
1158
1158
1159 patchf = self.opener(patchfn, 'r')
1159 patchf = self.opener(patchfn, 'r')
1160
1160
1161 # if the patch was a git patch, refresh it as a git patch
1161 # if the patch was a git patch, refresh it as a git patch
1162 for line in patchf:
1162 for line in patchf:
1163 if line.startswith('diff --git'):
1163 if line.startswith('diff --git'):
1164 self.diffopts().git = True
1164 self.diffopts().git = True
1165 break
1165 break
1166
1166
1167 if msg:
1167 if msg:
1168 ph.setmessage(msg)
1168 ph.setmessage(msg)
1169 if newuser:
1169 if newuser:
1170 ph.setuser(newuser)
1170 ph.setuser(newuser)
1171 if newdate:
1171 if newdate:
1172 ph.setdate(newdate)
1172 ph.setdate(newdate)
1173
1173
1174 # only commit new patch when write is complete
1174 # only commit new patch when write is complete
1175 patchf = self.opener(patchfn, 'w', atomictemp=True)
1175 patchf = self.opener(patchfn, 'w', atomictemp=True)
1176
1176
1177 patchf.seek(0)
1177 patchf.seek(0)
1178 patchf.truncate()
1178 patchf.truncate()
1179
1179
1180 comments = str(ph)
1180 comments = str(ph)
1181 if comments:
1181 if comments:
1182 patchf.write(comments)
1182 patchf.write(comments)
1183
1183
1184 if opts.get('git'):
1184 if opts.get('git'):
1185 self.diffopts().git = True
1185 self.diffopts().git = True
1186 tip = repo.changelog.tip()
1186 tip = repo.changelog.tip()
1187 if top == tip:
1187 if top == tip:
1188 # if the top of our patch queue is also the tip, there is an
1188 # if the top of our patch queue is also the tip, there is an
1189 # optimization here. We update the dirstate in place and strip
1189 # optimization here. We update the dirstate in place and strip
1190 # off the tip commit. Then just commit the current directory
1190 # off the tip commit. Then just commit the current directory
1191 # tree. We can also send repo.commit the list of files
1191 # tree. We can also send repo.commit the list of files
1192 # changed to speed up the diff
1192 # changed to speed up the diff
1193 #
1193 #
1194 # in short mode, we only diff the files included in the
1194 # in short mode, we only diff the files included in the
1195 # patch already plus specified files
1195 # patch already plus specified files
1196 #
1196 #
1197 # this should really read:
1197 # this should really read:
1198 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1198 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1199 # but we do it backwards to take advantage of manifest/chlog
1199 # but we do it backwards to take advantage of manifest/chlog
1200 # caching against the next repo.status call
1200 # caching against the next repo.status call
1201 #
1201 #
1202 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
1202 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
1203 changes = repo.changelog.read(tip)
1203 changes = repo.changelog.read(tip)
1204 man = repo.manifest.read(changes[0])
1204 man = repo.manifest.read(changes[0])
1205 aaa = aa[:]
1205 aaa = aa[:]
1206 matchfn = cmdutil.match(repo, pats, opts)
1206 matchfn = cmdutil.match(repo, pats, opts)
1207 if opts.get('short'):
1207 if opts.get('short'):
1208 # if amending a patch, we start with existing
1208 # if amending a patch, we start with existing
1209 # files plus specified files - unfiltered
1209 # files plus specified files - unfiltered
1210 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1210 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1211 # filter with include/exclude options
1211 # filter with include/exclude options
1212 matchfn = cmdutil.match(repo, opts=opts)
1212 matchfn = cmdutil.match(repo, opts=opts)
1213 else:
1213 else:
1214 match = cmdutil.matchall(repo)
1214 match = cmdutil.matchall(repo)
1215 m, a, r, d = repo.status(match=match)[:4]
1215 m, a, r, d = repo.status(match=match)[:4]
1216
1216
1217 # we might end up with files that were added between
1217 # we might end up with files that were added between
1218 # tip and the dirstate parent, but then changed in the
1218 # tip and the dirstate parent, but then changed in the
1219 # local dirstate. in this case, we want them to only
1219 # local dirstate. in this case, we want them to only
1220 # show up in the added section
1220 # show up in the added section
1221 for x in m:
1221 for x in m:
1222 if x not in aa:
1222 if x not in aa:
1223 mm.append(x)
1223 mm.append(x)
1224 # we might end up with files added by the local dirstate that
1224 # we might end up with files added by the local dirstate that
1225 # were deleted by the patch. In this case, they should only
1225 # were deleted by the patch. In this case, they should only
1226 # show up in the changed section.
1226 # show up in the changed section.
1227 for x in a:
1227 for x in a:
1228 if x in dd:
1228 if x in dd:
1229 del dd[dd.index(x)]
1229 del dd[dd.index(x)]
1230 mm.append(x)
1230 mm.append(x)
1231 else:
1231 else:
1232 aa.append(x)
1232 aa.append(x)
1233 # make sure any files deleted in the local dirstate
1233 # make sure any files deleted in the local dirstate
1234 # are not in the add or change column of the patch
1234 # are not in the add or change column of the patch
1235 forget = []
1235 forget = []
1236 for x in d + r:
1236 for x in d + r:
1237 if x in aa:
1237 if x in aa:
1238 del aa[aa.index(x)]
1238 del aa[aa.index(x)]
1239 forget.append(x)
1239 forget.append(x)
1240 continue
1240 continue
1241 elif x in mm:
1241 elif x in mm:
1242 del mm[mm.index(x)]
1242 del mm[mm.index(x)]
1243 dd.append(x)
1243 dd.append(x)
1244
1244
1245 m = list(set(mm))
1245 m = list(set(mm))
1246 r = list(set(dd))
1246 r = list(set(dd))
1247 a = list(set(aa))
1247 a = list(set(aa))
1248 c = [filter(matchfn, l) for l in (m, a, r)]
1248 c = [filter(matchfn, l) for l in (m, a, r)]
1249 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1249 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1250 chunks = patch.diff(repo, patchparent, match=match,
1250 chunks = patch.diff(repo, patchparent, match=match,
1251 changes=c, opts=self.diffopts())
1251 changes=c, opts=self.diffopts())
1252 for chunk in chunks:
1252 for chunk in chunks:
1253 patchf.write(chunk)
1253 patchf.write(chunk)
1254
1254
1255 try:
1255 try:
1256 if self.diffopts().git:
1256 if self.diffopts().git:
1257 copies = {}
1257 copies = {}
1258 for dst in a:
1258 for dst in a:
1259 src = repo.dirstate.copied(dst)
1259 src = repo.dirstate.copied(dst)
1260 # during qfold, the source file for copies may
1260 # during qfold, the source file for copies may
1261 # be removed. Treat this as a simple add.
1261 # be removed. Treat this as a simple add.
1262 if src is not None and src in repo.dirstate:
1262 if src is not None and src in repo.dirstate:
1263 copies.setdefault(src, []).append(dst)
1263 copies.setdefault(src, []).append(dst)
1264 repo.dirstate.add(dst)
1264 repo.dirstate.add(dst)
1265 # remember the copies between patchparent and tip
1265 # remember the copies between patchparent and tip
1266 for dst in aaa:
1266 for dst in aaa:
1267 f = repo.file(dst)
1267 f = repo.file(dst)
1268 src = f.renamed(man[dst])
1268 src = f.renamed(man[dst])
1269 if src:
1269 if src:
1270 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1270 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1271 if dst in a:
1271 if dst in a:
1272 copies[src[0]].append(dst)
1272 copies[src[0]].append(dst)
1273 # we can't copy a file created by the patch itself
1273 # we can't copy a file created by the patch itself
1274 if dst in copies:
1274 if dst in copies:
1275 del copies[dst]
1275 del copies[dst]
1276 for src, dsts in copies.iteritems():
1276 for src, dsts in copies.iteritems():
1277 for dst in dsts:
1277 for dst in dsts:
1278 repo.dirstate.copy(src, dst)
1278 repo.dirstate.copy(src, dst)
1279 else:
1279 else:
1280 for dst in a:
1280 for dst in a:
1281 repo.dirstate.add(dst)
1281 repo.dirstate.add(dst)
1282 # Drop useless copy information
1282 # Drop useless copy information
1283 for f in list(repo.dirstate.copies()):
1283 for f in list(repo.dirstate.copies()):
1284 repo.dirstate.copy(None, f)
1284 repo.dirstate.copy(None, f)
1285 for f in r:
1285 for f in r:
1286 repo.dirstate.remove(f)
1286 repo.dirstate.remove(f)
1287 # if the patch excludes a modified file, mark that
1287 # if the patch excludes a modified file, mark that
1288 # file with mtime=0 so status can see it.
1288 # file with mtime=0 so status can see it.
1289 mm = []
1289 mm = []
1290 for i in xrange(len(m)-1, -1, -1):
1290 for i in xrange(len(m)-1, -1, -1):
1291 if not matchfn(m[i]):
1291 if not matchfn(m[i]):
1292 mm.append(m[i])
1292 mm.append(m[i])
1293 del m[i]
1293 del m[i]
1294 for f in m:
1294 for f in m:
1295 repo.dirstate.normal(f)
1295 repo.dirstate.normal(f)
1296 for f in mm:
1296 for f in mm:
1297 repo.dirstate.normallookup(f)
1297 repo.dirstate.normallookup(f)
1298 for f in forget:
1298 for f in forget:
1299 repo.dirstate.forget(f)
1299 repo.dirstate.forget(f)
1300
1300
1301 if not msg:
1301 if not msg:
1302 if not ph.message:
1302 if not ph.message:
1303 message = "[mq]: %s\n" % patchfn
1303 message = "[mq]: %s\n" % patchfn
1304 else:
1304 else:
1305 message = "\n".join(ph.message)
1305 message = "\n".join(ph.message)
1306 else:
1306 else:
1307 message = msg
1307 message = msg
1308
1308
1309 user = ph.user or changes[1]
1309 user = ph.user or changes[1]
1310
1310
1311 # assumes strip can roll itself back if interrupted
1311 # assumes strip can roll itself back if interrupted
1312 repo.dirstate.setparents(*cparents)
1312 repo.dirstate.setparents(*cparents)
1313 self.applied.pop()
1313 self.applied.pop()
1314 self.applied_dirty = 1
1314 self.applied_dirty = 1
1315 self.strip(repo, top, update=False,
1315 self.strip(repo, top, update=False,
1316 backup='strip')
1316 backup='strip')
1317 except:
1317 except:
1318 repo.dirstate.invalidate()
1318 repo.dirstate.invalidate()
1319 raise
1319 raise
1320
1320
1321 try:
1321 try:
1322 # might be nice to attempt to roll back strip after this
1322 # might be nice to attempt to roll back strip after this
1323 patchf.rename()
1323 patchf.rename()
1324 n = repo.commit(message, user, ph.date, match=match,
1324 n = repo.commit(message, user, ph.date, match=match,
1325 force=True)
1325 force=True)
1326 self.applied.append(statusentry(hex(n), patchfn))
1326 self.applied.append(statusentry(hex(n), patchfn))
1327 except:
1327 except:
1328 ctx = repo[cparents[0]]
1328 ctx = repo[cparents[0]]
1329 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1329 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1330 self.save_dirty()
1330 self.save_dirty()
1331 self.ui.warn(_('refresh interrupted while patch was popped! '
1331 self.ui.warn(_('refresh interrupted while patch was popped! '
1332 '(revert --all, qpush to recover)\n'))
1332 '(revert --all, qpush to recover)\n'))
1333 raise
1333 raise
1334 else:
1334 else:
1335 self.printdiff(repo, patchparent, fp=patchf)
1335 self.printdiff(repo, patchparent, fp=patchf)
1336 patchf.rename()
1336 patchf.rename()
1337 added = repo.status()[1]
1337 added = repo.status()[1]
1338 for a in added:
1338 for a in added:
1339 f = repo.wjoin(a)
1339 f = repo.wjoin(a)
1340 try:
1340 try:
1341 os.unlink(f)
1341 os.unlink(f)
1342 except OSError, e:
1342 except OSError, e:
1343 if e.errno != errno.ENOENT:
1343 if e.errno != errno.ENOENT:
1344 raise
1344 raise
1345 try: os.removedirs(os.path.dirname(f))
1345 try: os.removedirs(os.path.dirname(f))
1346 except: pass
1346 except: pass
1347 # forget the file copies in the dirstate
1347 # forget the file copies in the dirstate
1348 # push should re-add the files later on
1348 # push should re-add the files later on
1349 repo.dirstate.forget(a)
1349 repo.dirstate.forget(a)
1350 self.pop(repo, force=True)
1350 self.pop(repo, force=True)
1351 self.push(repo, force=True)
1351 self.push(repo, force=True)
1352 finally:
1352 finally:
1353 wlock.release()
1353 wlock.release()
1354 self.removeundo(repo)
1354 self.removeundo(repo)
1355
1355
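# --- editor's illustrative sketch (not part of mq.py) ------------------------
# Much of refresh() above is bookkeeping that re-categorizes files before the
# patch is rewritten: a file the patch added and that was then modified
# locally stays "added", a locally added file that the patch had deleted moves
# to "modified", and locally deleted files drop out of the added/modified
# columns.  A simplified, list-only version of that shuffle (mm/aa/dd are the
# patch's modified/added/deleted files, m/a/r/d the working-copy status;
# names hypothetical):

def recategorize(mm, aa, dd, m, a, r, d):
    mm, aa, dd = list(mm), list(aa), list(dd)
    for x in m:                  # locally modified
        if x not in aa:          # ...and not created by the patch
            mm.append(x)
    for x in a:                  # locally added
        if x in dd:              # ...but deleted by the patch
            dd.remove(x)
            mm.append(x)
        else:
            aa.append(x)
    for x in d + r:              # locally deleted or removed
        if x in aa:
            aa.remove(x)         # the real code also "forgets" these files
        elif x in mm:
            mm.remove(x)
            dd.append(x)
    return sorted(set(mm)), sorted(set(aa)), sorted(set(dd))

assert recategorize([], ['new.c'], [], ['new.c'], [], [], []) == ([], ['new.c'], [])
# ------------------------------------------------------------------------------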
1356 def init(self, repo, create=False):
1356 def init(self, repo, create=False):
1357 if not create and os.path.isdir(self.path):
1357 if not create and os.path.isdir(self.path):
1358 raise util.Abort(_("patch queue directory already exists"))
1358 raise util.Abort(_("patch queue directory already exists"))
1359 try:
1359 try:
1360 os.mkdir(self.path)
1360 os.mkdir(self.path)
1361 except OSError, inst:
1361 except OSError, inst:
1362 if inst.errno != errno.EEXIST or not create:
1362 if inst.errno != errno.EEXIST or not create:
1363 raise
1363 raise
1364 if create:
1364 if create:
1365 return self.qrepo(create=True)
1365 return self.qrepo(create=True)
1366
1366
1367 def unapplied(self, repo, patch=None):
1367 def unapplied(self, repo, patch=None):
1368 if patch and patch not in self.series:
1368 if patch and patch not in self.series:
1369 raise util.Abort(_("patch %s is not in series file") % patch)
1369 raise util.Abort(_("patch %s is not in series file") % patch)
1370 if not patch:
1370 if not patch:
1371 start = self.series_end()
1371 start = self.series_end()
1372 else:
1372 else:
1373 start = self.series.index(patch) + 1
1373 start = self.series.index(patch) + 1
1374 unapplied = []
1374 unapplied = []
1375 for i in xrange(start, len(self.series)):
1375 for i in xrange(start, len(self.series)):
1376 pushable, reason = self.pushable(i)
1376 pushable, reason = self.pushable(i)
1377 if pushable:
1377 if pushable:
1378 unapplied.append((i, self.series[i]))
1378 unapplied.append((i, self.series[i]))
1379 self.explain_pushable(i)
1379 self.explain_pushable(i)
1380 return unapplied
1380 return unapplied
1381
1381
1382 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1382 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1383 summary=False):
1383 summary=False):
1384 def displayname(pfx, patchname):
1384 def displayname(pfx, patchname):
1385 if summary:
1385 if summary:
1386 ph = patchheader(self.join(patchname))
1386 ph = patchheader(self.join(patchname))
1387 msg = ph.message and ph.message[0] or ''
1387 msg = ph.message and ph.message[0] or ''
1388 if self.ui.interactive():
1388 if self.ui.interactive():
1389 width = util.termwidth() - len(pfx) - len(patchname) - 2
1389 width = util.termwidth() - len(pfx) - len(patchname) - 2
1390 if width > 0:
1390 if width > 0:
1391 msg = util.ellipsis(msg, width)
1391 msg = util.ellipsis(msg, width)
1392 else:
1392 else:
1393 msg = ''
1393 msg = ''
1394 msg = "%s%s: %s" % (pfx, patchname, msg)
1394 msg = "%s%s: %s" % (pfx, patchname, msg)
1395 else:
1395 else:
1396 msg = pfx + patchname
1396 msg = pfx + patchname
1397 self.ui.write(msg + '\n')
1397 self.ui.write(msg + '\n')
1398
1398
1399 applied = set([p.name for p in self.applied])
1399 applied = set([p.name for p in self.applied])
1400 if length is None:
1400 if length is None:
1401 length = len(self.series) - start
1401 length = len(self.series) - start
1402 if not missing:
1402 if not missing:
1403 if self.ui.verbose:
1403 if self.ui.verbose:
1404 idxwidth = len(str(start+length - 1))
1404 idxwidth = len(str(start+length - 1))
1405 for i in xrange(start, start+length):
1405 for i in xrange(start, start+length):
1406 patch = self.series[i]
1406 patch = self.series[i]
1407 if patch in applied:
1407 if patch in applied:
1408 stat = 'A'
1408 stat = 'A'
1409 elif self.pushable(i)[0]:
1409 elif self.pushable(i)[0]:
1410 stat = 'U'
1410 stat = 'U'
1411 else:
1411 else:
1412 stat = 'G'
1412 stat = 'G'
1413 pfx = ''
1413 pfx = ''
1414 if self.ui.verbose:
1414 if self.ui.verbose:
1415 pfx = '%*d %s ' % (idxwidth, i, stat)
1415 pfx = '%*d %s ' % (idxwidth, i, stat)
1416 elif status and status != stat:
1416 elif status and status != stat:
1417 continue
1417 continue
1418 displayname(pfx, patch)
1418 displayname(pfx, patch)
1419 else:
1419 else:
1420 msng_list = []
1420 msng_list = []
1421 for root, dirs, files in os.walk(self.path):
1421 for root, dirs, files in os.walk(self.path):
1422 d = root[len(self.path) + 1:]
1422 d = root[len(self.path) + 1:]
1423 for f in files:
1423 for f in files:
1424 fl = os.path.join(d, f)
1424 fl = os.path.join(d, f)
1425 if (fl not in self.series and
1425 if (fl not in self.series and
1426 fl not in (self.status_path, self.series_path,
1426 fl not in (self.status_path, self.series_path,
1427 self.guards_path)
1427 self.guards_path)
1428 and not fl.startswith('.')):
1428 and not fl.startswith('.')):
1429 msng_list.append(fl)
1429 msng_list.append(fl)
1430 for x in sorted(msng_list):
1430 for x in sorted(msng_list):
1431 pfx = self.ui.verbose and ('D ') or ''
1431 pfx = self.ui.verbose and ('D ') or ''
1432 displayname(pfx, x)
1432 displayname(pfx, x)
1433
1433
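# --- editor's illustrative sketch (not part of mq.py) ------------------------
# qseries() prints one status letter per patch: 'A' for applied, 'U' for
# unapplied but pushable, 'G' for patches skipped by guards (plus a 'D'
# prefix, in verbose mode, for files present in the patch directory but
# missing from the series file).  The letter selection on its own:

def series_status(patch, applied, pushable):
    """applied: set of applied names; pushable: callable name -> bool."""
    if patch in applied:
        return 'A'
    if pushable(patch):
        return 'U'
    return 'G'

assert series_status('a.diff', set(['a.diff']), lambda p: True) == 'A'
assert series_status('b.diff', set(['a.diff']), lambda p: False) == 'G'
# ------------------------------------------------------------------------------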
1434 def issaveline(self, l):
1434 def issaveline(self, l):
1435 if l.name == '.hg.patches.save.line':
1435 if l.name == '.hg.patches.save.line':
1436 return True
1436 return True
1437
1437
1438 def qrepo(self, create=False):
1438 def qrepo(self, create=False):
1439 if create or os.path.isdir(self.join(".hg")):
1439 if create or os.path.isdir(self.join(".hg")):
1440 return hg.repository(self.ui, path=self.path, create=create)
1440 return hg.repository(self.ui, path=self.path, create=create)
1441
1441
1442 def restore(self, repo, rev, delete=None, qupdate=None):
1442 def restore(self, repo, rev, delete=None, qupdate=None):
1443 c = repo.changelog.read(rev)
1443 c = repo.changelog.read(rev)
1444 desc = c[4].strip()
1444 desc = c[4].strip()
1445 lines = desc.splitlines()
1445 lines = desc.splitlines()
1446 i = 0
1446 i = 0
1447 datastart = None
1447 datastart = None
1448 series = []
1448 series = []
1449 applied = []
1449 applied = []
1450 qpp = None
1450 qpp = None
1451 for i, line in enumerate(lines):
1451 for i, line in enumerate(lines):
1452 if line == 'Patch Data:':
1452 if line == 'Patch Data:':
1453 datastart = i + 1
1453 datastart = i + 1
1454 elif line.startswith('Dirstate:'):
1454 elif line.startswith('Dirstate:'):
1455 l = line.rstrip()
1455 l = line.rstrip()
1456 l = l[10:].split(' ')
1456 l = l[10:].split(' ')
1457 qpp = [ bin(x) for x in l ]
1457 qpp = [ bin(x) for x in l ]
1458 elif datastart is not None:
1458 elif datastart is not None:
1459 l = line.rstrip()
1459 l = line.rstrip()
1460 se = statusentry(l)
1460 se = statusentry(l)
1461 file_ = se.name
1461 file_ = se.name
1462 if se.rev:
1462 if se.rev:
1463 applied.append(se)
1463 applied.append(se)
1464 else:
1464 else:
1465 series.append(file_)
1465 series.append(file_)
1466 if datastart is None:
1466 if datastart is None:
1467 self.ui.warn(_("No saved patch data found\n"))
1467 self.ui.warn(_("No saved patch data found\n"))
1468 return 1
1468 return 1
1469 self.ui.warn(_("restoring status: %s\n") % lines[0])
1469 self.ui.warn(_("restoring status: %s\n") % lines[0])
1470 self.full_series = series
1470 self.full_series = series
1471 self.applied = applied
1471 self.applied = applied
1472 self.parse_series()
1472 self.parse_series()
1473 self.series_dirty = 1
1473 self.series_dirty = 1
1474 self.applied_dirty = 1
1474 self.applied_dirty = 1
1475 heads = repo.changelog.heads()
1475 heads = repo.changelog.heads()
1476 if delete:
1476 if delete:
1477 if rev not in heads:
1477 if rev not in heads:
1478 self.ui.warn(_("save entry has children, leaving it alone\n"))
1478 self.ui.warn(_("save entry has children, leaving it alone\n"))
1479 else:
1479 else:
1480 self.ui.warn(_("removing save entry %s\n") % short(rev))
1480 self.ui.warn(_("removing save entry %s\n") % short(rev))
1481 pp = repo.dirstate.parents()
1481 pp = repo.dirstate.parents()
1482 if rev in pp:
1482 if rev in pp:
1483 update = True
1483 update = True
1484 else:
1484 else:
1485 update = False
1485 update = False
1486 self.strip(repo, rev, update=update, backup='strip')
1486 self.strip(repo, rev, update=update, backup='strip')
1487 if qpp:
1487 if qpp:
1488 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1488 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1489 (short(qpp[0]), short(qpp[1])))
1489 (short(qpp[0]), short(qpp[1])))
1490 if qupdate:
1490 if qupdate:
1491 self.ui.status(_("queue directory updating\n"))
1491 self.ui.status(_("queue directory updating\n"))
1492 r = self.qrepo()
1492 r = self.qrepo()
1493 if not r:
1493 if not r:
1494 self.ui.warn(_("Unable to load queue repository\n"))
1494 self.ui.warn(_("Unable to load queue repository\n"))
1495 return 1
1495 return 1
1496 hg.clean(r, qpp[0])
1496 hg.clean(r, qpp[0])
1497
1497
1498 def save(self, repo, msg=None):
1498 def save(self, repo, msg=None):
1499 if len(self.applied) == 0:
1499 if len(self.applied) == 0:
1500 self.ui.warn(_("save: no patches applied, exiting\n"))
1500 self.ui.warn(_("save: no patches applied, exiting\n"))
1501 return 1
1501 return 1
1502 if self.issaveline(self.applied[-1]):
1502 if self.issaveline(self.applied[-1]):
1503 self.ui.warn(_("status is already saved\n"))
1503 self.ui.warn(_("status is already saved\n"))
1504 return 1
1504 return 1
1505
1505
1506 ar = [ ':' + x for x in self.full_series ]
1506 ar = [ ':' + x for x in self.full_series ]
1507 if not msg:
1507 if not msg:
1508 msg = _("hg patches saved state")
1508 msg = _("hg patches saved state")
1509 else:
1509 else:
1510 msg = "hg patches: " + msg.rstrip('\r\n')
1510 msg = "hg patches: " + msg.rstrip('\r\n')
1511 r = self.qrepo()
1511 r = self.qrepo()
1512 if r:
1512 if r:
1513 pp = r.dirstate.parents()
1513 pp = r.dirstate.parents()
1514 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1514 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1515 msg += "\n\nPatch Data:\n"
1515 msg += "\n\nPatch Data:\n"
1516 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1516 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1517 "\n".join(ar) + '\n' or "")
1517 "\n".join(ar) + '\n' or "")
1518 n = repo.commit(text, force=True)
1518 n = repo.commit(text, force=True)
1519 if not n:
1519 if not n:
1520 self.ui.warn(_("repo commit failed\n"))
1520 self.ui.warn(_("repo commit failed\n"))
1521 return 1
1521 return 1
1522 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1522 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1523 self.applied_dirty = 1
1523 self.applied_dirty = 1
1524 self.removeundo(repo)
1524 self.removeundo(repo)
1525
1525
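# --- editor's illustrative sketch (not part of mq.py) ------------------------
# save() serializes the queue state into an ordinary commit message that
# restore() later parses back.  Judging from the code above, the message looks
# roughly like this (hashes and names hypothetical):
#
#   hg patches saved state
#   Dirstate: <p1 hex> <p2 hex>
#
#   Patch Data:
#   <node hex>:first.patch      <- applied patches, one "rev:name" per line
#   :second.patch               <- unapplied series entries, ":" + name
#
# A minimal parser for the "Patch Data:" section, mirroring restore():

def parse_patch_data(desc):
    applied, series = [], []
    lines = desc.splitlines()
    if 'Patch Data:' not in lines:
        return applied, series
    for line in lines[lines.index('Patch Data:') + 1:]:
        if ':' not in line:
            continue
        rev, name = line.rstrip().split(':', 1)
        if rev:
            applied.append((rev, name))
        else:
            series.append(name)
    return applied, series

assert parse_patch_data("msg\n\nPatch Data:\nabc123:one.patch\n:two.patch\n") \
    == ([('abc123', 'one.patch')], ['two.patch'])
# ------------------------------------------------------------------------------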
1526 def full_series_end(self):
1526 def full_series_end(self):
1527 if len(self.applied) > 0:
1527 if len(self.applied) > 0:
1528 p = self.applied[-1].name
1528 p = self.applied[-1].name
1529 end = self.find_series(p)
1529 end = self.find_series(p)
1530 if end is None:
1530 if end is None:
1531 return len(self.full_series)
1531 return len(self.full_series)
1532 return end + 1
1532 return end + 1
1533 return 0
1533 return 0
1534
1534
1535 def series_end(self, all_patches=False):
1535 def series_end(self, all_patches=False):
1536 """If all_patches is False, return the index of the next pushable patch
1536 """If all_patches is False, return the index of the next pushable patch
1537 in the series, or the series length. If all_patches is True, return the
1537 in the series, or the series length. If all_patches is True, return the
1538 index of the first patch past the last applied one.
1538 index of the first patch past the last applied one.
1539 """
1539 """
1540 end = 0
1540 end = 0
1541 def next(start):
1541 def next(start):
1542 if all_patches:
1542 if all_patches:
1543 return start
1543 return start
1544 i = start
1544 i = start
1545 while i < len(self.series):
1545 while i < len(self.series):
1546 p, reason = self.pushable(i)
1546 p, reason = self.pushable(i)
1547 if p:
1547 if p:
1548 break
1548 break
1549 self.explain_pushable(i)
1549 self.explain_pushable(i)
1550 i += 1
1550 i += 1
1551 return i
1551 return i
1552 if len(self.applied) > 0:
1552 if len(self.applied) > 0:
1553 p = self.applied[-1].name
1553 p = self.applied[-1].name
1554 try:
1554 try:
1555 end = self.series.index(p)
1555 end = self.series.index(p)
1556 except ValueError:
1556 except ValueError:
1557 return 0
1557 return 0
1558 return next(end + 1)
1558 return next(end + 1)
1559 return next(end)
1559 return next(end)
1560
1560
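# --- editor's illustrative sketch (not part of mq.py) ------------------------
# With all_patches=False, series_end() above returns the index of the next
# patch qpush would apply, skipping over guarded entries.  The same scan with
# plain lists (names hypothetical):

def next_pushable(series, applied, pushable):
    """Index of the next pushable patch, or len(series) if none remain."""
    if applied:
        i = series.index(applied[-1]) + 1
    else:
        i = 0
    while i < len(series) and not pushable(series[i]):
        i += 1                              # skip guarded patches
    return i

assert next_pushable(['A', 'B', 'C'], ['A'], lambda p: p != 'B') == 2
assert next_pushable(['A', 'B'], ['A', 'B'], lambda p: True) == 2
# ------------------------------------------------------------------------------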
1561 def appliedname(self, index):
1561 def appliedname(self, index):
1562 pname = self.applied[index].name
1562 pname = self.applied[index].name
1563 if not self.ui.verbose:
1563 if not self.ui.verbose:
1564 p = pname
1564 p = pname
1565 else:
1565 else:
1566 p = str(self.series.index(pname)) + " " + pname
1566 p = str(self.series.index(pname)) + " " + pname
1567 return p
1567 return p
1568
1568
1569 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1569 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1570 force=None, git=False):
1570 force=None, git=False):
1571 def checkseries(patchname):
1571 def checkseries(patchname):
1572 if patchname in self.series:
1572 if patchname in self.series:
1573 raise util.Abort(_('patch %s is already in the series file')
1573 raise util.Abort(_('patch %s is already in the series file')
1574 % patchname)
1574 % patchname)
1575 def checkfile(patchname):
1575 def checkfile(patchname):
1576 if not force and os.path.exists(self.join(patchname)):
1576 if not force and os.path.exists(self.join(patchname)):
1577 raise util.Abort(_('patch "%s" already exists')
1577 raise util.Abort(_('patch "%s" already exists')
1578 % patchname)
1578 % patchname)
1579
1579
1580 if rev:
1580 if rev:
1581 if files:
1581 if files:
1582 raise util.Abort(_('option "-r" not valid when importing '
1582 raise util.Abort(_('option "-r" not valid when importing '
1583 'files'))
1583 'files'))
1584 rev = cmdutil.revrange(repo, rev)
1584 rev = cmdutil.revrange(repo, rev)
1585 rev.sort(reverse=True)
1585 rev.sort(reverse=True)
1586 if (len(files) > 1 or len(rev) > 1) and patchname:
1586 if (len(files) > 1 or len(rev) > 1) and patchname:
1587 raise util.Abort(_('option "-n" not valid when importing multiple '
1587 raise util.Abort(_('option "-n" not valid when importing multiple '
1588 'patches'))
1588 'patches'))
1589 i = 0
1589 i = 0
1590 added = []
1590 added = []
1591 if rev:
1591 if rev:
1592 # If mq patches are applied, we can only import revisions
1592 # If mq patches are applied, we can only import revisions
1593 # that form a linear path to qbase.
1593 # that form a linear path to qbase.
1594 # Otherwise, they should form a linear path to a head.
1594 # Otherwise, they should form a linear path to a head.
1595 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1595 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1596 if len(heads) > 1:
1596 if len(heads) > 1:
1597 raise util.Abort(_('revision %d is the root of more than one '
1597 raise util.Abort(_('revision %d is the root of more than one '
1598 'branch') % rev[-1])
1598 'branch') % rev[-1])
1599 if self.applied:
1599 if self.applied:
1600 base = hex(repo.changelog.node(rev[0]))
1600 base = hex(repo.changelog.node(rev[0]))
1601 if base in [n.rev for n in self.applied]:
1601 if base in [n.rev for n in self.applied]:
1602 raise util.Abort(_('revision %d is already managed')
1602 raise util.Abort(_('revision %d is already managed')
1603 % rev[0])
1603 % rev[0])
1604 if heads != [bin(self.applied[-1].rev)]:
1604 if heads != [bin(self.applied[-1].rev)]:
1605 raise util.Abort(_('revision %d is not the parent of '
1605 raise util.Abort(_('revision %d is not the parent of '
1606 'the queue') % rev[0])
1606 'the queue') % rev[0])
1607 base = repo.changelog.rev(bin(self.applied[0].rev))
1607 base = repo.changelog.rev(bin(self.applied[0].rev))
1608 lastparent = repo.changelog.parentrevs(base)[0]
1608 lastparent = repo.changelog.parentrevs(base)[0]
1609 else:
1609 else:
1610 if heads != [repo.changelog.node(rev[0])]:
1610 if heads != [repo.changelog.node(rev[0])]:
1611 raise util.Abort(_('revision %d has unmanaged children')
1611 raise util.Abort(_('revision %d has unmanaged children')
1612 % rev[0])
1612 % rev[0])
1613 lastparent = None
1613 lastparent = None
1614
1614
1615 if git:
1615 if git:
1616 self.diffopts().git = True
1616 self.diffopts().git = True
1617
1617
1618 for r in rev:
1618 for r in rev:
1619 p1, p2 = repo.changelog.parentrevs(r)
1619 p1, p2 = repo.changelog.parentrevs(r)
1620 n = repo.changelog.node(r)
1620 n = repo.changelog.node(r)
1621 if p2 != nullrev:
1621 if p2 != nullrev:
1622 raise util.Abort(_('cannot import merge revision %d') % r)
1622 raise util.Abort(_('cannot import merge revision %d') % r)
1623 if lastparent and lastparent != r:
1623 if lastparent and lastparent != r:
1624 raise util.Abort(_('revision %d is not the parent of %d')
1624 raise util.Abort(_('revision %d is not the parent of %d')
1625 % (r, lastparent))
1625 % (r, lastparent))
1626 lastparent = p1
1626 lastparent = p1
1627
1627
1628 if not patchname:
1628 if not patchname:
1629 patchname = normname('%d.diff' % r)
1629 patchname = normname('%d.diff' % r)
1630 self.check_reserved_name(patchname)
1630 self.check_reserved_name(patchname)
1631 checkseries(patchname)
1631 checkseries(patchname)
1632 checkfile(patchname)
1632 checkfile(patchname)
1633 self.full_series.insert(0, patchname)
1633 self.full_series.insert(0, patchname)
1634
1634
1635 patchf = self.opener(patchname, "w")
1635 patchf = self.opener(patchname, "w")
1636 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1636 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1637 patchf.close()
1637 patchf.close()
1638
1638
1639 se = statusentry(hex(n), patchname)
1639 se = statusentry(hex(n), patchname)
1640 self.applied.insert(0, se)
1640 self.applied.insert(0, se)
1641
1641
1642 added.append(patchname)
1642 added.append(patchname)
1643 patchname = None
1643 patchname = None
1644 self.parse_series()
1644 self.parse_series()
1645 self.applied_dirty = 1
1645 self.applied_dirty = 1
1646
1646
1647 for filename in files:
1647 for filename in files:
1648 if existing:
1648 if existing:
1649 if filename == '-':
1649 if filename == '-':
1650 raise util.Abort(_('-e is incompatible with import from -'))
1650 raise util.Abort(_('-e is incompatible with import from -'))
1651 if not patchname:
1651 if not patchname:
1652 patchname = normname(filename)
1652 patchname = normname(filename)
1653 self.check_reserved_name(patchname)
1653 self.check_reserved_name(patchname)
1654 if not os.path.isfile(self.join(patchname)):
1654 if not os.path.isfile(self.join(patchname)):
1655 raise util.Abort(_("patch %s does not exist") % patchname)
1655 raise util.Abort(_("patch %s does not exist") % patchname)
1656 else:
1656 else:
1657 try:
1657 try:
1658 if filename == '-':
1658 if filename == '-':
1659 if not patchname:
1659 if not patchname:
1660 raise util.Abort(_('need --name to import a patch from -'))
1660 raise util.Abort(_('need --name to import a patch from -'))
1661 text = sys.stdin.read()
1661 text = sys.stdin.read()
1662 else:
1662 else:
1663 text = url.open(self.ui, filename).read()
1663 text = url.open(self.ui, filename).read()
1664 except (OSError, IOError):
1664 except (OSError, IOError):
1665 raise util.Abort(_("unable to read %s") % filename)
1665 raise util.Abort(_("unable to read %s") % filename)
1666 if not patchname:
1666 if not patchname:
1667 patchname = normname(os.path.basename(filename))
1667 patchname = normname(os.path.basename(filename))
1668 self.check_reserved_name(patchname)
1668 self.check_reserved_name(patchname)
1669 checkfile(patchname)
1669 checkfile(patchname)
1670 patchf = self.opener(patchname, "w")
1670 patchf = self.opener(patchname, "w")
1671 patchf.write(text)
1671 patchf.write(text)
1672 if not force:
1672 if not force:
1673 checkseries(patchname)
1673 checkseries(patchname)
1674 if patchname not in self.series:
1674 if patchname not in self.series:
1675 index = self.full_series_end() + i
1675 index = self.full_series_end() + i
1676 self.full_series[index:index] = [patchname]
1676 self.full_series[index:index] = [patchname]
1677 self.parse_series()
1677 self.parse_series()
1678 self.ui.warn(_("adding %s to series file\n") % patchname)
1678 self.ui.warn(_("adding %s to series file\n") % patchname)
1679 i += 1
1679 i += 1
1680 added.append(patchname)
1680 added.append(patchname)
1681 patchname = None
1681 patchname = None
1682 self.series_dirty = 1
1682 self.series_dirty = 1
1683 qrepo = self.qrepo()
1683 qrepo = self.qrepo()
1684 if qrepo:
1684 if qrepo:
1685 qrepo.add(added)
1685 qrepo.add(added)
1686
1686
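# --- editor's illustrative sketch (not part of mq.py) ------------------------
# When importing existing revisions with --rev, qimport() above requires a
# strictly linear chain: no merge revisions, and each revision (processed
# newest first) must be the first parent of the one handled before it.  The
# chain walk, reduced to a plain parent map (revision numbers hypothetical,
# -1 standing in for the null revision):

def check_linear(revs, parents):
    """revs: newest-first list of ints; parents: rev -> (p1, p2)."""
    lastparent = None
    for r in revs:
        p1, p2 = parents[r]
        if p2 != -1:
            raise ValueError("cannot import merge revision %d" % r)
        if lastparent is not None and lastparent != r:
            raise ValueError("revision %d is not the parent of %d"
                             % (r, lastparent))
        lastparent = p1

check_linear([3, 2, 1], {3: (2, -1), 2: (1, -1), 1: (0, -1)})  # linear: fine
# check_linear([3, 1], {3: (2, -1), 1: (0, -1)}) would raise ValueError
# ------------------------------------------------------------------------------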
1687 def delete(ui, repo, *patches, **opts):
1687 def delete(ui, repo, *patches, **opts):
1688 """remove patches from queue
1688 """remove patches from queue
1689
1689
1690 The patches must not be applied, and at least one patch is required. With
1690 The patches must not be applied, and at least one patch is required. With
1691 -k/--keep, the patch files are preserved in the patch directory.
1691 -k/--keep, the patch files are preserved in the patch directory.
1692
1692
1693 To stop managing a patch and move it into permanent history,
1693 To stop managing a patch and move it into permanent history,
1694 use the qfinish command."""
1694 use the qfinish command."""
1695 q = repo.mq
1695 q = repo.mq
1696 q.delete(repo, patches, opts)
1696 q.delete(repo, patches, opts)
1697 q.save_dirty()
1697 q.save_dirty()
1698 return 0
1698 return 0
1699
1699
1700 def applied(ui, repo, patch=None, **opts):
1700 def applied(ui, repo, patch=None, **opts):
1701 """print the patches already applied"""
1701 """print the patches already applied"""
1702
1702
1703 q = repo.mq
1703 q = repo.mq
1704 l = len(q.applied)
1704 l = len(q.applied)
1705
1705
1706 if patch:
1706 if patch:
1707 if patch not in q.series:
1707 if patch not in q.series:
1708 raise util.Abort(_("patch %s is not in series file") % patch)
1708 raise util.Abort(_("patch %s is not in series file") % patch)
1709 end = q.series.index(patch) + 1
1709 end = q.series.index(patch) + 1
1710 else:
1710 else:
1711 end = q.series_end(True)
1711 end = q.series_end(True)
1712
1712
1713 if opts.get('last') and not end:
1713 if opts.get('last') and not end:
1714 ui.write(_("no patches applied\n"))
1714 ui.write(_("no patches applied\n"))
1715 return 1
1715 return 1
1716 elif opts.get('last') and end == 1:
1716 elif opts.get('last') and end == 1:
1717 ui.write(_("only one patch applied\n"))
1717 ui.write(_("only one patch applied\n"))
1718 return 1
1718 return 1
1719 elif opts.get('last'):
1719 elif opts.get('last'):
1720 start = end - 2
1720 start = end - 2
1721 end = 1
1721 end = 1
1722 else:
1722 else:
1723 start = 0
1723 start = 0
1724
1724
1725 return q.qseries(repo, length=end, start=start, status='A',
1725 return q.qseries(repo, length=end, start=start, status='A',
1726 summary=opts.get('summary'))
1726 summary=opts.get('summary'))
1727
1727
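# --- editor's illustrative sketch (not part of mq.py) ------------------------
# The -l/--last branch above shows the patch applied just before the current
# top: with `end` applied patches it prints the single series entry at index
# end - 2.  Stated directly (names hypothetical):

def last_but_one(applied):
    if not applied:
        return None          # "no patches applied"
    if len(applied) == 1:
        return None          # "only one patch applied"
    return applied[-2]

assert last_but_one(['one.patch', 'two.patch', 'top.patch']) == 'two.patch'
# ------------------------------------------------------------------------------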
1728 def unapplied(ui, repo, patch=None, **opts):
1728 def unapplied(ui, repo, patch=None, **opts):
1729 """print the patches not yet applied"""
1729 """print the patches not yet applied"""
1730
1730
1731 q = repo.mq
1731 q = repo.mq
1732 if patch:
1732 if patch:
1733 if patch not in q.series:
1733 if patch not in q.series:
1734 raise util.Abort(_("patch %s is not in series file") % patch)
1734 raise util.Abort(_("patch %s is not in series file") % patch)
1735 start = q.series.index(patch) + 1
1735 start = q.series.index(patch) + 1
1736 else:
1736 else:
1737 start = q.series_end(True)
1737 start = q.series_end(True)
1738
1738
1739 if start == len(q.series) and opts.get('first'):
1739 if start == len(q.series) and opts.get('first'):
1740 ui.write(_("all patches applied\n"))
1740 ui.write(_("all patches applied\n"))
1741 return 1
1741 return 1
1742
1742
1743 length = opts.get('first') and 1 or None
1743 length = opts.get('first') and 1 or None
1744 return q.qseries(repo, start=start, length=length, status='U',
1744 return q.qseries(repo, start=start, length=length, status='U',
1745 summary=opts.get('summary'))
1745 summary=opts.get('summary'))
1746
1746
1747 def qimport(ui, repo, *filename, **opts):
1747 def qimport(ui, repo, *filename, **opts):
1748 """import a patch
1748 """import a patch
1749
1749
1750 The patch is inserted into the series after the last applied
1750 The patch is inserted into the series after the last applied
1751 patch. If no patches have been applied, qimport prepends the patch
1751 patch. If no patches have been applied, qimport prepends the patch
1752 to the series.
1752 to the series.
1753
1753
1754 The patch will have the same name as its source file unless you
1754 The patch will have the same name as its source file unless you
1755 give it a new one with -n/--name.
1755 give it a new one with -n/--name.
1756
1756
1757 You can register an existing patch inside the patch directory with
1757 You can register an existing patch inside the patch directory with
1758 the -e/--existing flag.
1758 the -e/--existing flag.
1759
1759
1760 With -f/--force, an existing patch of the same name will be
1760 With -f/--force, an existing patch of the same name will be
1761 overwritten.
1761 overwritten.
1762
1762
1763 An existing changeset may be placed under mq control with -r/--rev
1763 An existing changeset may be placed under mq control with -r/--rev
1764 (e.g. qimport --rev tip -n patch will place tip under mq control).
1764 (e.g. qimport --rev tip -n patch will place tip under mq control).
1765 With -g/--git, patches imported with --rev will use the git diff
1765 With -g/--git, patches imported with --rev will use the git diff
1766 format. See the diffs help topic for information on why this is
1766 format. See the diffs help topic for information on why this is
1767 important for preserving rename/copy information and permission
1767 important for preserving rename/copy information and permission
1768 changes.
1768 changes.
1769
1769
1770 To import a patch from standard input, pass - as the patch file.
1770 To import a patch from standard input, pass - as the patch file.
1771 When importing from standard input, a patch name must be specified
1771 When importing from standard input, a patch name must be specified
1772 using the --name flag.
1772 using the --name flag.
1773 """
1773 """
1774 q = repo.mq
1774 q = repo.mq
1775 q.qimport(repo, filename, patchname=opts['name'],
1775 q.qimport(repo, filename, patchname=opts['name'],
1776 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1776 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1777 git=opts['git'])
1777 git=opts['git'])
1778 q.save_dirty()
1778 q.save_dirty()
1779
1779
1780 if opts.get('push') and not opts.get('rev'):
1780 if opts.get('push') and not opts.get('rev'):
1781 return q.push(repo, None)
1781 return q.push(repo, None)
1782 return 0
1782 return 0
1783
1783
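# --- editor's illustrative sketch (not part of mq.py) ------------------------
# Unless -n/--name is given, qimport derives the patch name from its source:
# the base name of the imported file (normalized by normname in the real
# code), or "<rev>.diff" when importing an existing revision; reading from
# standard input ('-') always requires an explicit name.  The defaulting rule
# by itself (helper name hypothetical):

import os

def default_patch_name(source, name=None, rev=None):
    if name:
        return name
    if rev is not None:
        return '%d.diff' % rev
    if source == '-':
        raise ValueError('need --name to import a patch from -')
    return os.path.basename(source)

assert default_patch_name('fixes/issue42.patch') == 'issue42.patch'
assert default_patch_name('-', name='from-stdin.patch') == 'from-stdin.patch'
assert default_patch_name(None, rev=1234) == '1234.diff'
# ------------------------------------------------------------------------------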
1784 def init(ui, repo, **opts):
1784 def init(ui, repo, **opts):
1785 """init a new queue repository
1785 """init a new queue repository
1786
1786
1787 The queue repository is unversioned by default. If
1787 The queue repository is unversioned by default. If
1788 -c/--create-repo is specified, qinit will create a separate nested
1788 -c/--create-repo is specified, qinit will create a separate nested
1789 repository for patches (qinit -c may also be run later to convert
1789 repository for patches (qinit -c may also be run later to convert
1790 an unversioned patch repository into a versioned one). You can use
1790 an unversioned patch repository into a versioned one). You can use
1791 qcommit to commit changes to this queue repository."""
1791 qcommit to commit changes to this queue repository."""
1792 q = repo.mq
1792 q = repo.mq
1793 r = q.init(repo, create=opts['create_repo'])
1793 r = q.init(repo, create=opts['create_repo'])
1794 q.save_dirty()
1794 q.save_dirty()
1795 if r:
1795 if r:
1796 if not os.path.exists(r.wjoin('.hgignore')):
1796 if not os.path.exists(r.wjoin('.hgignore')):
1797 fp = r.wopener('.hgignore', 'w')
1797 fp = r.wopener('.hgignore', 'w')
1798 fp.write('^\\.hg\n')
1798 fp.write('^\\.hg\n')
1799 fp.write('^\\.mq\n')
1799 fp.write('^\\.mq\n')
1800 fp.write('syntax: glob\n')
1800 fp.write('syntax: glob\n')
1801 fp.write('status\n')
1801 fp.write('status\n')
1802 fp.write('guards\n')
1802 fp.write('guards\n')
1803 fp.close()
1803 fp.close()
1804 if not os.path.exists(r.wjoin('series')):
1804 if not os.path.exists(r.wjoin('series')):
1805 r.wopener('series', 'w').close()
1805 r.wopener('series', 'w').close()
1806 r.add(['.hgignore', 'series'])
1806 r.add(['.hgignore', 'series'])
1807 commands.add(ui, r)
1807 commands.add(ui, r)
1808 return 0
1808 return 0
1809
1809
1810 def clone(ui, source, dest=None, **opts):
1810 def clone(ui, source, dest=None, **opts):
1811 '''clone main and patch repository at the same time
1811 '''clone main and patch repository at the same time
1812
1812
1813 If source is local, destination will have no patches applied. If
1813 If source is local, destination will have no patches applied. If
1814 source is remote, this command cannot check whether patches are
1814 source is remote, this command cannot check whether patches are
1815 applied in source, so it cannot guarantee that patches are not
1815 applied in source, so it cannot guarantee that patches are not
1816 applied in destination. If you clone a remote repository, make sure
1816 applied in destination. If you clone a remote repository, make sure
1817 beforehand that it has no patches applied.
1817 beforehand that it has no patches applied.
1818
1818
1819 The source patch repository is looked for in <src>/.hg/patches by
1819 The source patch repository is looked for in <src>/.hg/patches by
1820 default. Use -p <url> to change this.
1820 default. Use -p <url> to change this.
1821
1821
1822 The patch directory must be a nested Mercurial repository, as
1822 The patch directory must be a nested Mercurial repository, as
1823 would be created by qinit -c.
1823 would be created by qinit -c.
1824 '''
1824 '''
1825 def patchdir(repo):
1825 def patchdir(repo):
1826 url = repo.url()
1826 url = repo.url()
1827 if url.endswith('/'):
1827 if url.endswith('/'):
1828 url = url[:-1]
1828 url = url[:-1]
1829 return url + '/.hg/patches'
1829 return url + '/.hg/patches'
1830 if dest is None:
1830 if dest is None:
1831 dest = hg.defaultdest(source)
1831 dest = hg.defaultdest(source)
1832 sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
1832 sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
1833 if opts['patches']:
1833 if opts['patches']:
1834 patchespath = ui.expandpath(opts['patches'])
1834 patchespath = ui.expandpath(opts['patches'])
1835 else:
1835 else:
1836 patchespath = patchdir(sr)
1836 patchespath = patchdir(sr)
1837 try:
1837 try:
1838 hg.repository(ui, patchespath)
1838 hg.repository(ui, patchespath)
1839 except error.RepoError:
1839 except error.RepoError:
1840 raise util.Abort(_('versioned patch repository not found'
1840 raise util.Abort(_('versioned patch repository not found'
1841 ' (see qinit -c)'))
1841 ' (see qinit -c)'))
1842 qbase, destrev = None, None
1842 qbase, destrev = None, None
1843 if sr.local():
1843 if sr.local():
1844 if sr.mq.applied:
1844 if sr.mq.applied:
1845 qbase = bin(sr.mq.applied[0].rev)
1845 qbase = bin(sr.mq.applied[0].rev)
1846 if not hg.islocal(dest):
1846 if not hg.islocal(dest):
1847 heads = set(sr.heads())
1847 heads = set(sr.heads())
1848 destrev = list(heads.difference(sr.heads(qbase)))
1848 destrev = list(heads.difference(sr.heads(qbase)))
1849 destrev.append(sr.changelog.parents(qbase)[0])
1849 destrev.append(sr.changelog.parents(qbase)[0])
1850 elif sr.capable('lookup'):
1850 elif sr.capable('lookup'):
1851 try:
1851 try:
1852 qbase = sr.lookup('qbase')
1852 qbase = sr.lookup('qbase')
1853 except error.RepoError:
1853 except error.RepoError:
1854 pass
1854 pass
1855 ui.note(_('cloning main repository\n'))
1855 ui.note(_('cloning main repository\n'))
1856 sr, dr = hg.clone(ui, sr.url(), dest,
1856 sr, dr = hg.clone(ui, sr.url(), dest,
1857 pull=opts['pull'],
1857 pull=opts['pull'],
1858 rev=destrev,
1858 rev=destrev,
1859 update=False,
1859 update=False,
1860 stream=opts['uncompressed'])
1860 stream=opts['uncompressed'])
1861 ui.note(_('cloning patch repository\n'))
1861 ui.note(_('cloning patch repository\n'))
1862 hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
1862 hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
1863 pull=opts['pull'], update=not opts['noupdate'],
1863 pull=opts['pull'], update=not opts['noupdate'],
1864 stream=opts['uncompressed'])
1864 stream=opts['uncompressed'])
1865 if dr.local():
1865 if dr.local():
1866 if qbase:
1866 if qbase:
1867 ui.note(_('stripping applied patches from destination '
1867 ui.note(_('stripping applied patches from destination '
1868 'repository\n'))
1868 'repository\n'))
1869 dr.mq.strip(dr, qbase, update=False, backup=None)
1869 dr.mq.strip(dr, qbase, update=False, backup=None)
1870 if not opts['noupdate']:
1870 if not opts['noupdate']:
1871 ui.note(_('updating destination repository\n'))
1871 ui.note(_('updating destination repository\n'))
1872 hg.update(dr, dr.changelog.tip())
1872 hg.update(dr, dr.changelog.tip())
1873
1873
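# --- editor's illustrative sketch (not part of mq.py) ------------------------
# When the source repository has patches applied, qclone keeps mq changesets
# out of the destination: a local clone is stripped back to qbase afterwards,
# while a non-local destination is pulled only up to the parent of qbase.  In
# the latter case the target revisions are every head that does not descend
# from qbase, plus qbase's first parent.  With plain sets (revision ids
# hypothetical):

def clone_targets(all_heads, heads_above_qbase, qbase_parent):
    targets = set(all_heads) - set(heads_above_qbase)
    targets.add(qbase_parent)
    return sorted(targets)

assert clone_targets(['h1', 'h2'], ['h2'], 'p0') == ['h1', 'p0']
# ------------------------------------------------------------------------------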
1874 def commit(ui, repo, *pats, **opts):
1874 def commit(ui, repo, *pats, **opts):
1875 """commit changes in the queue repository"""
1875 """commit changes in the queue repository"""
1876 q = repo.mq
1876 q = repo.mq
1877 r = q.qrepo()
1877 r = q.qrepo()
1878 if not r: raise util.Abort('no queue repository')
1878 if not r: raise util.Abort('no queue repository')
1879 commands.commit(r.ui, r, *pats, **opts)
1879 commands.commit(r.ui, r, *pats, **opts)
1880
1880
1881 def series(ui, repo, **opts):
1881 def series(ui, repo, **opts):
1882 """print the entire series file"""
1882 """print the entire series file"""
1883 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1883 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1884 return 0
1884 return 0
1885
1885
1886 def top(ui, repo, **opts):
1886 def top(ui, repo, **opts):
1887 """print the name of the current patch"""
1887 """print the name of the current patch"""
1888 q = repo.mq
1888 q = repo.mq
1889 t = q.applied and q.series_end(True) or 0
1889 t = q.applied and q.series_end(True) or 0
1890 if t:
1890 if t:
1891 return q.qseries(repo, start=t-1, length=1, status='A',
1891 return q.qseries(repo, start=t-1, length=1, status='A',
1892 summary=opts.get('summary'))
1892 summary=opts.get('summary'))
1893 else:
1893 else:
1894 ui.write(_("no patches applied\n"))
1894 ui.write(_("no patches applied\n"))
1895 return 1
1895 return 1
1896
1896
1897 def next(ui, repo, **opts):
1897 def next(ui, repo, **opts):
1898 """print the name of the next patch"""
1898 """print the name of the next patch"""
1899 q = repo.mq
1899 q = repo.mq
1900 end = q.series_end()
1900 end = q.series_end()
1901 if end == len(q.series):
1901 if end == len(q.series):
1902 ui.write(_("all patches applied\n"))
1902 ui.write(_("all patches applied\n"))
1903 return 1
1903 return 1
1904 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1904 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1905
1905
1906 def prev(ui, repo, **opts):
1906 def prev(ui, repo, **opts):
1907 """print the name of the previous patch"""
1907 """print the name of the previous patch"""
1908 q = repo.mq
1908 q = repo.mq
1909 l = len(q.applied)
1909 l = len(q.applied)
1910 if l == 1:
1910 if l == 1:
1911 ui.write(_("only one patch applied\n"))
1911 ui.write(_("only one patch applied\n"))
1912 return 1
1912 return 1
1913 if not l:
1913 if not l:
1914 ui.write(_("no patches applied\n"))
1914 ui.write(_("no patches applied\n"))
1915 return 1
1915 return 1
1916 return q.qseries(repo, start=l-2, length=1, status='A',
1916 return q.qseries(repo, start=l-2, length=1, status='A',
1917 summary=opts.get('summary'))
1917 summary=opts.get('summary'))
1918
1918
1919 def setupheaderopts(ui, opts):
1919 def setupheaderopts(ui, opts):
1920 if not opts.get('user') and opts.get('currentuser'):
1920 if not opts.get('user') and opts.get('currentuser'):
1921 opts['user'] = ui.username()
1921 opts['user'] = ui.username()
1922 if not opts.get('date') and opts.get('currentdate'):
1922 if not opts.get('date') and opts.get('currentdate'):
1923 opts['date'] = "%d %d" % util.makedate()
1923 opts['date'] = "%d %d" % util.makedate()
1924
1924
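# Illustrative sketch: the "%d %d" % util.makedate() expression above produces
# a date in Mercurial's internal form, "<seconds since epoch> <offset>". A
# rough standalone equivalent using only the standard library (time.timezone
# is only an approximation of the offset util.makedate() reports, since it
# ignores daylight saving time):
import time
_datestr_sketch = "%d %d" % (int(time.time()), time.timezone)
# e.g. "1255550000 -3600" in a UTC+1 zone; this is the form that qnew -D and
# qrefresh -D record in the "# Date" header of a patch.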
1925 def new(ui, repo, patch, *args, **opts):
1925 def new(ui, repo, patch, *args, **opts):
1926 """create a new patch
1926 """create a new patch
1927
1927
1928 qnew creates a new patch on top of the currently-applied patch (if
1928 qnew creates a new patch on top of the currently-applied patch (if
1929 any). It will refuse to run if there are any outstanding changes
1929 any). It will refuse to run if there are any outstanding changes
1930 unless -f/--force is specified, in which case the patch will be
1930 unless -f/--force is specified, in which case the patch will be
1931 initialized with them. You may also use -I/--include,
1931 initialized with them. You may also use -I/--include,
1932 -X/--exclude, and/or a list of files after the patch name to add
1932 -X/--exclude, and/or a list of files after the patch name to add
1933 only changes to matching files to the new patch, leaving the rest
1933 only changes to matching files to the new patch, leaving the rest
1934 as uncommitted modifications.
1934 as uncommitted modifications.
1935
1935
1936 -u/--user and -d/--date can be used to set the (given) user and
1936 -u/--user and -d/--date can be used to set the (given) user and
1937 date, respectively. -U/--currentuser and -D/--currentdate set user
1937 date, respectively. -U/--currentuser and -D/--currentdate set user
1938 to current user and date to current date.
1938 to current user and date to current date.
1939
1939
1940 -e/--edit, -m/--message or -l/--logfile set the patch header as
1940 -e/--edit, -m/--message or -l/--logfile set the patch header as
1941 well as the commit message. If none is specified, the header is
1941 well as the commit message. If none is specified, the header is
1942 empty and the commit message is '[mq]: PATCH'.
1942 empty and the commit message is '[mq]: PATCH'.
1943
1943
1944 Use the -g/--git option to keep the patch in the git extended diff
1944 Use the -g/--git option to keep the patch in the git extended diff
1945 format. Read the diffs help topic for more information on why this
1945 format. Read the diffs help topic for more information on why this
1946 is important for preserving permission changes and copy/rename
1946 is important for preserving permission changes and copy/rename
1947 information.
1947 information.
1948 """
1948 """
1949 msg = cmdutil.logmessage(opts)
1949 msg = cmdutil.logmessage(opts)
1950 def getmsg(): return ui.edit(msg, ui.username())
1950 def getmsg(): return ui.edit(msg, ui.username())
1951 q = repo.mq
1951 q = repo.mq
1952 opts['msg'] = msg
1952 opts['msg'] = msg
1953 if opts.get('edit'):
1953 if opts.get('edit'):
1954 opts['msg'] = getmsg
1954 opts['msg'] = getmsg
1955 else:
1955 else:
1956 opts['msg'] = msg
1956 opts['msg'] = msg
1957 setupheaderopts(ui, opts)
1957 setupheaderopts(ui, opts)
1958 q.new(repo, patch, *args, **opts)
1958 q.new(repo, patch, *args, **opts)
1959 q.save_dirty()
1959 q.save_dirty()
1960 return 0
1960 return 0
1961
1961
1962 def refresh(ui, repo, *pats, **opts):
1962 def refresh(ui, repo, *pats, **opts):
1963 """update the current patch
1963 """update the current patch
1964
1964
1965 If any file patterns are provided, the refreshed patch will
1965 If any file patterns are provided, the refreshed patch will
1966 contain only the modifications that match those patterns; the
1966 contain only the modifications that match those patterns; the
1967 remaining modifications will remain in the working directory.
1967 remaining modifications will remain in the working directory.
1968
1968
1969 If -s/--short is specified, files currently included in the patch
1969 If -s/--short is specified, files currently included in the patch
1970 will be refreshed just like matched files and remain in the patch.
1970 will be refreshed just like matched files and remain in the patch.
1971
1971
1972 hg add/remove/copy/rename work as usual, though you might want to
1972 hg add/remove/copy/rename work as usual, though you might want to
1973 use git-style patches (-g/--git or [diff] git=1) to track copies
1973 use git-style patches (-g/--git or [diff] git=1) to track copies
1974 and renames. See the diffs help topic for more information on the
1974 and renames. See the diffs help topic for more information on the
1975 git diff format.
1975 git diff format.
1976 """
1976 """
1977 q = repo.mq
1977 q = repo.mq
1978 message = cmdutil.logmessage(opts)
1978 message = cmdutil.logmessage(opts)
1979 if opts['edit']:
1979 if opts['edit']:
1980 if not q.applied:
1980 if not q.applied:
1981 ui.write(_("no patches applied\n"))
1981 ui.write(_("no patches applied\n"))
1982 return 1
1982 return 1
1983 if message:
1983 if message:
1984 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1984 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1985 patch = q.applied[-1].name
1985 patch = q.applied[-1].name
1986 ph = patchheader(q.join(patch))
1986 ph = patchheader(q.join(patch))
1987 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
1987 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
1988 setupheaderopts(ui, opts)
1988 setupheaderopts(ui, opts)
1989 ret = q.refresh(repo, pats, msg=message, **opts)
1989 ret = q.refresh(repo, pats, msg=message, **opts)
1990 q.save_dirty()
1990 q.save_dirty()
1991 return ret
1991 return ret
1992
1992
1993 def diff(ui, repo, *pats, **opts):
1993 def diff(ui, repo, *pats, **opts):
1994 """diff of the current patch and subsequent modifications
1994 """diff of the current patch and subsequent modifications
1995
1995
1996 Shows a diff which includes the current patch as well as any
1996 Shows a diff which includes the current patch as well as any
1997 changes which have been made in the working directory since the
1997 changes which have been made in the working directory since the
1998 last refresh (thus showing what the current patch would become
1998 last refresh (thus showing what the current patch would become
1999 after a qrefresh).
1999 after a qrefresh).
2000
2000
2001 Use 'hg diff' if you only want to see the changes made since the
2001 Use 'hg diff' if you only want to see the changes made since the
2002 last qrefresh, or 'hg export qtip' if you want to see changes made
2002 last qrefresh, or 'hg export qtip' if you want to see changes made
2003 by the current patch without including changes made since the
2003 by the current patch without including changes made since the
2004 qrefresh.
2004 qrefresh.
2005 """
2005 """
2006 repo.mq.diff(repo, pats, opts)
2006 repo.mq.diff(repo, pats, opts)
2007 return 0
2007 return 0
2008
2008
2009 def fold(ui, repo, *files, **opts):
2009 def fold(ui, repo, *files, **opts):
2010 """fold the named patches into the current patch
2010 """fold the named patches into the current patch
2011
2011
2012 Patches must not yet be applied. Each patch will be successively
2012 Patches must not yet be applied. Each patch will be successively
2013 applied to the current patch in the order given. If all the
2013 applied to the current patch in the order given. If all the
2014 patches apply successfully, the current patch will be refreshed
2014 patches apply successfully, the current patch will be refreshed
2015 with the new cumulative patch, and the folded patches will be
2015 with the new cumulative patch, and the folded patches will be
2016 deleted. With -k/--keep, the folded patch files will not be
2016 deleted. With -k/--keep, the folded patch files will not be
2017 removed afterwards.
2017 removed afterwards.
2018
2018
2019 The header for each folded patch will be concatenated with the
2019 The header for each folded patch will be concatenated with the
2020 current patch header, separated by a line of '* * *'."""
2020 current patch header, separated by a line of '* * *'."""
2021
2021
2022 q = repo.mq
2022 q = repo.mq
2023
2023
2024 if not files:
2024 if not files:
2025 raise util.Abort(_('qfold requires at least one patch name'))
2025 raise util.Abort(_('qfold requires at least one patch name'))
2026 if not q.check_toppatch(repo):
2026 if not q.check_toppatch(repo):
2027 raise util.Abort(_('No patches applied'))
2027 raise util.Abort(_('No patches applied'))
2028 q.check_localchanges(repo)
2028 q.check_localchanges(repo)
2029
2029
2030 message = cmdutil.logmessage(opts)
2030 message = cmdutil.logmessage(opts)
2031 if opts['edit']:
2031 if opts['edit']:
2032 if message:
2032 if message:
2033 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2033 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2034
2034
2035 parent = q.lookup('qtip')
2035 parent = q.lookup('qtip')
2036 patches = []
2036 patches = []
2037 messages = []
2037 messages = []
2038 for f in files:
2038 for f in files:
2039 p = q.lookup(f)
2039 p = q.lookup(f)
2040 if p in patches or p == parent:
2040 if p in patches or p == parent:
2041 ui.warn(_('Skipping already folded patch %s\n') % p)
2041 ui.warn(_('Skipping already folded patch %s\n') % p)
2042 if q.isapplied(p):
2042 if q.isapplied(p):
2043 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
2043 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
2044 patches.append(p)
2044 patches.append(p)
2045
2045
2046 for p in patches:
2046 for p in patches:
2047 if not message:
2047 if not message:
2048 ph = patchheader(q.join(p))
2048 ph = patchheader(q.join(p))
2049 if ph.message:
2049 if ph.message:
2050 messages.append(ph.message)
2050 messages.append(ph.message)
2051 pf = q.join(p)
2051 pf = q.join(p)
2052 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2052 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2053 if not patchsuccess:
2053 if not patchsuccess:
2054 raise util.Abort(_('Error folding patch %s') % p)
2054 raise util.Abort(_('Error folding patch %s') % p)
2055 patch.updatedir(ui, repo, files)
2055 patch.updatedir(ui, repo, files)
2056
2056
2057 if not message:
2057 if not message:
2058 ph = patchheader(q.join(parent))
2058 ph = patchheader(q.join(parent))
2059 message, user = ph.message, ph.user
2059 message, user = ph.message, ph.user
2060 for msg in messages:
2060 for msg in messages:
2061 message.append('* * *')
2061 message.append('* * *')
2062 message.extend(msg)
2062 message.extend(msg)
2063 message = '\n'.join(message)
2063 message = '\n'.join(message)
2064
2064
2065 if opts['edit']:
2065 if opts['edit']:
2066 message = ui.edit(message, user or ui.username())
2066 message = ui.edit(message, user or ui.username())
2067
2067
2068 q.refresh(repo, msg=message)
2068 q.refresh(repo, msg=message)
2069 q.delete(repo, patches, opts)
2069 q.delete(repo, patches, opts)
2070 q.save_dirty()
2070 q.save_dirty()
2071
2071
2072 def goto(ui, repo, patch, **opts):
2072 def goto(ui, repo, patch, **opts):
2073 '''push or pop patches until named patch is at top of stack'''
2073 '''push or pop patches until named patch is at top of stack'''
2074 q = repo.mq
2074 q = repo.mq
2075 patch = q.lookup(patch)
2075 patch = q.lookup(patch)
2076 if q.isapplied(patch):
2076 if q.isapplied(patch):
2077 ret = q.pop(repo, patch, force=opts['force'])
2077 ret = q.pop(repo, patch, force=opts['force'])
2078 else:
2078 else:
2079 ret = q.push(repo, patch, force=opts['force'])
2079 ret = q.push(repo, patch, force=opts['force'])
2080 q.save_dirty()
2080 q.save_dirty()
2081 return ret
2081 return ret
2082
2082
2083 def guard(ui, repo, *args, **opts):
2083 def guard(ui, repo, *args, **opts):
2084 '''set or print guards for a patch
2084 '''set or print guards for a patch
2085
2085
2086 Guards control whether a patch can be pushed. A patch with no
2086 Guards control whether a patch can be pushed. A patch with no
2087 guards is always pushed. A patch with a positive guard ("+foo") is
2087 guards is always pushed. A patch with a positive guard ("+foo") is
2088 pushed only if the qselect command has activated it. A patch with
2088 pushed only if the qselect command has activated it. A patch with
2089 a negative guard ("-foo") is never pushed if the qselect command
2089 a negative guard ("-foo") is never pushed if the qselect command
2090 has activated it.
2090 has activated it.
2091
2091
2092 With no arguments, print the currently active guards.
2092 With no arguments, print the currently active guards.
2093 With arguments, set guards for the named patch.
2093 With arguments, set guards for the named patch.
2094 NOTE: Specifying negative guards now requires '--'.
2094 NOTE: Specifying negative guards now requires '--'.
2095
2095
2096 To set guards on another patch::
2096 To set guards on another patch::
2097
2097
2098 hg qguard -- other.patch +2.6.17 -stable
2098 hg qguard -- other.patch +2.6.17 -stable
2099 '''
2099 '''
2100 def status(idx):
2100 def status(idx):
2101 guards = q.series_guards[idx] or ['unguarded']
2101 guards = q.series_guards[idx] or ['unguarded']
2102 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2102 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2103 q = repo.mq
2103 q = repo.mq
2104 patch = None
2104 patch = None
2105 args = list(args)
2105 args = list(args)
2106 if opts['list']:
2106 if opts['list']:
2107 if args or opts['none']:
2107 if args or opts['none']:
2108 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2108 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2109 for i in xrange(len(q.series)):
2109 for i in xrange(len(q.series)):
2110 status(i)
2110 status(i)
2111 return
2111 return
2112 if not args or args[0][0:1] in '-+':
2112 if not args or args[0][0:1] in '-+':
2113 if not q.applied:
2113 if not q.applied:
2114 raise util.Abort(_('no patches applied'))
2114 raise util.Abort(_('no patches applied'))
2115 patch = q.applied[-1].name
2115 patch = q.applied[-1].name
2116 if patch is None and args[0][0:1] not in '-+':
2116 if patch is None and args[0][0:1] not in '-+':
2117 patch = args.pop(0)
2117 patch = args.pop(0)
2118 if patch is None:
2118 if patch is None:
2119 raise util.Abort(_('no patch to work with'))
2119 raise util.Abort(_('no patch to work with'))
2120 if args or opts['none']:
2120 if args or opts['none']:
2121 idx = q.find_series(patch)
2121 idx = q.find_series(patch)
2122 if idx is None:
2122 if idx is None:
2123 raise util.Abort(_('no patch named %s') % patch)
2123 raise util.Abort(_('no patch named %s') % patch)
2124 q.set_guards(idx, args)
2124 q.set_guards(idx, args)
2125 q.save_dirty()
2125 q.save_dirty()
2126 else:
2126 else:
2127 status(q.series.index(q.lookup(patch)))
2127 status(q.series.index(q.lookup(patch)))
2128
2128
2129 def header(ui, repo, patch=None):
2129 def header(ui, repo, patch=None):
2130 """print the header of the topmost or specified patch"""
2130 """print the header of the topmost or specified patch"""
2131 q = repo.mq
2131 q = repo.mq
2132
2132
2133 if patch:
2133 if patch:
2134 patch = q.lookup(patch)
2134 patch = q.lookup(patch)
2135 else:
2135 else:
2136 if not q.applied:
2136 if not q.applied:
2137 ui.write('no patches applied\n')
2137 ui.write('no patches applied\n')
2138 return 1
2138 return 1
2139 patch = q.lookup('qtip')
2139 patch = q.lookup('qtip')
2140 ph = patchheader(repo.mq.join(patch))
2140 ph = patchheader(repo.mq.join(patch))
2141
2141
2142 ui.write('\n'.join(ph.message) + '\n')
2142 ui.write('\n'.join(ph.message) + '\n')
2143
2143
2144 def lastsavename(path):
2144 def lastsavename(path):
2145 (directory, base) = os.path.split(path)
2145 (directory, base) = os.path.split(path)
2146 names = os.listdir(directory)
2146 names = os.listdir(directory)
2147 namere = re.compile("%s.([0-9]+)" % base)
2147 namere = re.compile("%s.([0-9]+)" % base)
2148 maxindex = None
2148 maxindex = None
2149 maxname = None
2149 maxname = None
2150 for f in names:
2150 for f in names:
2151 m = namere.match(f)
2151 m = namere.match(f)
2152 if m:
2152 if m:
2153 index = int(m.group(1))
2153 index = int(m.group(1))
2154 if maxindex is None or index > maxindex:
2154 if maxindex is None or index > maxindex:
2155 maxindex = index
2155 maxindex = index
2156 maxname = f
2156 maxname = f
2157 if maxname:
2157 if maxname:
2158 return (os.path.join(directory, maxname), maxindex)
2158 return (os.path.join(directory, maxname), maxindex)
2159 return (None, None)
2159 return (None, None)
2160
2160
2161 def savename(path):
2161 def savename(path):
2162 (last, index) = lastsavename(path)
2162 (last, index) = lastsavename(path)
2163 if last is None:
2163 if last is None:
2164 index = 0
2164 index = 0
2165 newpath = path + ".%d" % (index + 1)
2165 newpath = path + ".%d" % (index + 1)
2166 return newpath
2166 return newpath
2167
2167
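# Illustrative sketch (standalone, simplified): the saved-queue naming scheme
# implemented by lastsavename()/savename() above. Saved queues sit next to the
# original directory as "<base>.1", "<base>.2", ... and the next save picks the
# highest existing index plus one. Unlike the code above, this sketch anchors
# the regular expression and escapes the base name.
import os, re

def _next_savename_sketch(path):
    directory, base = os.path.split(path)
    pat = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    indexes = [int(m.group(1))
               for m in (pat.match(f) for f in os.listdir(directory)) if m]
    return "%s.%d" % (path, (max(indexes) if indexes else 0) + 1)

# e.g. with .hg/patches and .hg/patches.1 present,
# _next_savename_sketch('.hg/patches') returns '.hg/patches.2'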
2168 def push(ui, repo, patch=None, **opts):
2168 def push(ui, repo, patch=None, **opts):
2169 """push the next patch onto the stack
2169 """push the next patch onto the stack
2170
2170
2171 When -f/--force is applied, all local changes in patched files
2171 When -f/--force is applied, all local changes in patched files
2172 will be lost.
2172 will be lost.
2173 """
2173 """
2174 q = repo.mq
2174 q = repo.mq
2175 mergeq = None
2175 mergeq = None
2176
2176
2177 if opts['merge']:
2177 if opts['merge']:
2178 if opts['name']:
2178 if opts['name']:
2179 newpath = repo.join(opts['name'])
2179 newpath = repo.join(opts['name'])
2180 else:
2180 else:
2181 newpath, i = lastsavename(q.path)
2181 newpath, i = lastsavename(q.path)
2182 if not newpath:
2182 if not newpath:
2183 ui.warn(_("no saved queues found, please use -n\n"))
2183 ui.warn(_("no saved queues found, please use -n\n"))
2184 return 1
2184 return 1
2185 mergeq = queue(ui, repo.join(""), newpath)
2185 mergeq = queue(ui, repo.join(""), newpath)
2186 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2186 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2187 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2187 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2188 mergeq=mergeq, all=opts.get('all'))
2188 mergeq=mergeq, all=opts.get('all'))
2189 return ret
2189 return ret
2190
2190
2191 def pop(ui, repo, patch=None, **opts):
2191 def pop(ui, repo, patch=None, **opts):
2192 """pop the current patch off the stack
2192 """pop the current patch off the stack
2193
2193
2194 By default, pops off the top of the patch stack. If given a patch
2194 By default, pops off the top of the patch stack. If given a patch
2195 name, keeps popping off patches until the named patch is at the
2195 name, keeps popping off patches until the named patch is at the
2196 top of the stack.
2196 top of the stack.
2197 """
2197 """
2198 localupdate = True
2198 localupdate = True
2199 if opts['name']:
2199 if opts['name']:
2200 q = queue(ui, repo.join(""), repo.join(opts['name']))
2200 q = queue(ui, repo.join(""), repo.join(opts['name']))
2201 ui.warn(_('using patch queue: %s\n') % q.path)
2201 ui.warn(_('using patch queue: %s\n') % q.path)
2202 localupdate = False
2202 localupdate = False
2203 else:
2203 else:
2204 q = repo.mq
2204 q = repo.mq
2205 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2205 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2206 all=opts['all'])
2206 all=opts['all'])
2207 q.save_dirty()
2207 q.save_dirty()
2208 return ret
2208 return ret
2209
2209
2210 def rename(ui, repo, patch, name=None, **opts):
2210 def rename(ui, repo, patch, name=None, **opts):
2211 """rename a patch
2211 """rename a patch
2212
2212
2213 With one argument, renames the current patch to PATCH1.
2213 With one argument, renames the current patch to PATCH1.
2214 With two arguments, renames PATCH1 to PATCH2."""
2214 With two arguments, renames PATCH1 to PATCH2."""
2215
2215
2216 q = repo.mq
2216 q = repo.mq
2217
2217
2218 if not name:
2218 if not name:
2219 name = patch
2219 name = patch
2220 patch = None
2220 patch = None
2221
2221
2222 if patch:
2222 if patch:
2223 patch = q.lookup(patch)
2223 patch = q.lookup(patch)
2224 else:
2224 else:
2225 if not q.applied:
2225 if not q.applied:
2226 ui.write(_('no patches applied\n'))
2226 ui.write(_('no patches applied\n'))
2227 return
2227 return
2228 patch = q.lookup('qtip')
2228 patch = q.lookup('qtip')
2229 absdest = q.join(name)
2229 absdest = q.join(name)
2230 if os.path.isdir(absdest):
2230 if os.path.isdir(absdest):
2231 name = normname(os.path.join(name, os.path.basename(patch)))
2231 name = normname(os.path.join(name, os.path.basename(patch)))
2232 absdest = q.join(name)
2232 absdest = q.join(name)
2233 if os.path.exists(absdest):
2233 if os.path.exists(absdest):
2234 raise util.Abort(_('%s already exists') % absdest)
2234 raise util.Abort(_('%s already exists') % absdest)
2235
2235
2236 if name in q.series:
2236 if name in q.series:
2237 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2237 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2238
2238
2239 if ui.verbose:
2239 if ui.verbose:
2240 ui.write('renaming %s to %s\n' % (patch, name))
2240 ui.write('renaming %s to %s\n' % (patch, name))
2241 i = q.find_series(patch)
2241 i = q.find_series(patch)
2242 guards = q.guard_re.findall(q.full_series[i])
2242 guards = q.guard_re.findall(q.full_series[i])
2243 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2243 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2244 q.parse_series()
2244 q.parse_series()
2245 q.series_dirty = 1
2245 q.series_dirty = 1
2246
2246
2247 info = q.isapplied(patch)
2247 info = q.isapplied(patch)
2248 if info:
2248 if info:
2249 q.applied[info[0]] = statusentry(info[1], name)
2249 q.applied[info[0]] = statusentry(info[1], name)
2250 q.applied_dirty = 1
2250 q.applied_dirty = 1
2251
2251
2252 util.rename(q.join(patch), absdest)
2252 util.rename(q.join(patch), absdest)
2253 r = q.qrepo()
2253 r = q.qrepo()
2254 if r:
2254 if r:
2255 wlock = r.wlock()
2255 wlock = r.wlock()
2256 try:
2256 try:
2257 if r.dirstate[patch] == 'a':
2257 if r.dirstate[patch] == 'a':
2258 r.dirstate.forget(patch)
2258 r.dirstate.forget(patch)
2259 r.dirstate.add(name)
2259 r.dirstate.add(name)
2260 else:
2260 else:
2261 if r.dirstate[name] == 'r':
2261 if r.dirstate[name] == 'r':
2262 r.undelete([name])
2262 r.undelete([name])
2263 r.copy(patch, name)
2263 r.copy(patch, name)
2264 r.remove([patch], False)
2264 r.remove([patch], False)
2265 finally:
2265 finally:
2266 wlock.release()
2266 wlock.release()
2267
2267
2268 q.save_dirty()
2268 q.save_dirty()
2269
2269
2270 def restore(ui, repo, rev, **opts):
2270 def restore(ui, repo, rev, **opts):
2271 """restore the queue state saved by a revision"""
2271 """restore the queue state saved by a revision"""
2272 rev = repo.lookup(rev)
2272 rev = repo.lookup(rev)
2273 q = repo.mq
2273 q = repo.mq
2274 q.restore(repo, rev, delete=opts['delete'],
2274 q.restore(repo, rev, delete=opts['delete'],
2275 qupdate=opts['update'])
2275 qupdate=opts['update'])
2276 q.save_dirty()
2276 q.save_dirty()
2277 return 0
2277 return 0
2278
2278
2279 def save(ui, repo, **opts):
2279 def save(ui, repo, **opts):
2280 """save current queue state"""
2280 """save current queue state"""
2281 q = repo.mq
2281 q = repo.mq
2282 message = cmdutil.logmessage(opts)
2282 message = cmdutil.logmessage(opts)
2283 ret = q.save(repo, msg=message)
2283 ret = q.save(repo, msg=message)
2284 if ret:
2284 if ret:
2285 return ret
2285 return ret
2286 q.save_dirty()
2286 q.save_dirty()
2287 if opts['copy']:
2287 if opts['copy']:
2288 path = q.path
2288 path = q.path
2289 if opts['name']:
2289 if opts['name']:
2290 newpath = os.path.join(q.basepath, opts['name'])
2290 newpath = os.path.join(q.basepath, opts['name'])
2291 if os.path.exists(newpath):
2291 if os.path.exists(newpath):
2292 if not os.path.isdir(newpath):
2292 if not os.path.isdir(newpath):
2293 raise util.Abort(_('destination %s exists and is not '
2293 raise util.Abort(_('destination %s exists and is not '
2294 'a directory') % newpath)
2294 'a directory') % newpath)
2295 if not opts['force']:
2295 if not opts['force']:
2296 raise util.Abort(_('destination %s exists, '
2296 raise util.Abort(_('destination %s exists, '
2297 'use -f to force') % newpath)
2297 'use -f to force') % newpath)
2298 else:
2298 else:
2299 newpath = savename(path)
2299 newpath = savename(path)
2300 ui.warn(_("copy %s to %s\n") % (path, newpath))
2300 ui.warn(_("copy %s to %s\n") % (path, newpath))
2301 util.copyfiles(path, newpath)
2301 util.copyfiles(path, newpath)
2302 if opts['empty']:
2302 if opts['empty']:
2303 try:
2303 try:
2304 os.unlink(q.join(q.status_path))
2304 os.unlink(q.join(q.status_path))
2305 except:
2305 except:
2306 pass
2306 pass
2307 return 0
2307 return 0
2308
2308
2309 def strip(ui, repo, rev, **opts):
2309 def strip(ui, repo, rev, **opts):
2310 """strip a revision and all its descendants from the repository
2310 """strip a revision and all its descendants from the repository
2311
2311
2312 If one of the working directory's parent revisions is stripped, the
2312 If one of the working directory's parent revisions is stripped, the
2313 working directory will be updated to the parent of the stripped
2313 working directory will be updated to the parent of the stripped
2314 revision.
2314 revision.
2315 """
2315 """
2316 backup = 'all'
2316 backup = 'all'
2317 if opts['backup']:
2317 if opts['backup']:
2318 backup = 'strip'
2318 backup = 'strip'
2319 elif opts['nobackup']:
2319 elif opts['nobackup']:
2320 backup = 'none'
2320 backup = 'none'
2321
2321
2322 rev = repo.lookup(rev)
2322 rev = repo.lookup(rev)
2323 p = repo.dirstate.parents()
2323 p = repo.dirstate.parents()
2324 cl = repo.changelog
2324 cl = repo.changelog
2325 update = True
2325 update = True
2326 if p[0] == nullid:
2326 if p[0] == nullid:
2327 update = False
2327 update = False
2328 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2328 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2329 update = False
2329 update = False
2330 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2330 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2331 update = False
2331 update = False
2332
2332
2333 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2333 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2334 return 0
2334 return 0
2335
2335
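# Note on the update logic above (descriptive only): the working directory is
# moved off the stripped revisions only when the revision being stripped is an
# ancestor of (or equal to) one of the dirstate parents, i.e. when a working
# directory parent is itself about to be stripped; otherwise the working
# directory is left where it is. This is what implements the behaviour
# promised in the docstring.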
2336 def select(ui, repo, *args, **opts):
2336 def select(ui, repo, *args, **opts):
2337 '''set or print guarded patches to push
2337 '''set or print guarded patches to push
2338
2338
2339 Use the qguard command to set or print guards on a patch, then use
2339 Use the qguard command to set or print guards on a patch, then use
2340 qselect to tell mq which guards to use. A patch will be pushed if
2340 qselect to tell mq which guards to use. A patch will be pushed if
2341 it has no guards or any positive guards match the currently
2341 it has no guards or any positive guards match the currently
2342 selected guard, but will not be pushed if any negative guards
2342 selected guard, but will not be pushed if any negative guards
2343 match the current guard. For example::
2343 match the current guard. For example::
2344
2344
2345 qguard foo.patch -stable (negative guard)
2345 qguard foo.patch -stable (negative guard)
2346 qguard bar.patch +stable (positive guard)
2346 qguard bar.patch +stable (positive guard)
2347 qselect stable
2347 qselect stable
2348
2348
2349 This activates the "stable" guard. mq will skip foo.patch (because
2349 This activates the "stable" guard. mq will skip foo.patch (because
2350 it has a negative match) but push bar.patch (because it has a
2350 it has a negative match) but push bar.patch (because it has a
2351 positive match).
2351 positive match).
2352
2352
2353 With no arguments, prints the currently active guards.
2353 With no arguments, prints the currently active guards.
2354 With one argument, sets the active guard.
2354 With one argument, sets the active guard.
2355
2355
2356 Use -n/--none to deactivate guards (no other arguments needed).
2356 Use -n/--none to deactivate guards (no other arguments needed).
2357 When no guards are active, patches with positive guards are
2357 When no guards are active, patches with positive guards are
2358 skipped and patches with negative guards are pushed.
2358 skipped and patches with negative guards are pushed.
2359
2359
2360 qselect can change the guards on applied patches. It does not pop
2360 qselect can change the guards on applied patches. It does not pop
2361 guarded patches by default. Use --pop to pop back to the last
2361 guarded patches by default. Use --pop to pop back to the last
2362 applied patch that is not guarded. Use --reapply (which implies
2362 applied patch that is not guarded. Use --reapply (which implies
2363 --pop) to push back to the current patch afterwards, but skip
2363 --pop) to push back to the current patch afterwards, but skip
2364 guarded patches.
2364 guarded patches.
2365
2365
2366 Use -s/--series to print a list of all guards in the series file
2366 Use -s/--series to print a list of all guards in the series file
2367 (no other arguments needed). Use -v for more information.'''
2367 (no other arguments needed). Use -v for more information.'''
2368
2368
2369 q = repo.mq
2369 q = repo.mq
2370 guards = q.active()
2370 guards = q.active()
2371 if args or opts['none']:
2371 if args or opts['none']:
2372 old_unapplied = q.unapplied(repo)
2372 old_unapplied = q.unapplied(repo)
2373 old_guarded = [i for i in xrange(len(q.applied)) if
2373 old_guarded = [i for i in xrange(len(q.applied)) if
2374 not q.pushable(i)[0]]
2374 not q.pushable(i)[0]]
2375 q.set_active(args)
2375 q.set_active(args)
2376 q.save_dirty()
2376 q.save_dirty()
2377 if not args:
2377 if not args:
2378 ui.status(_('guards deactivated\n'))
2378 ui.status(_('guards deactivated\n'))
2379 if not opts['pop'] and not opts['reapply']:
2379 if not opts['pop'] and not opts['reapply']:
2380 unapplied = q.unapplied(repo)
2380 unapplied = q.unapplied(repo)
2381 guarded = [i for i in xrange(len(q.applied))
2381 guarded = [i for i in xrange(len(q.applied))
2382 if not q.pushable(i)[0]]
2382 if not q.pushable(i)[0]]
2383 if len(unapplied) != len(old_unapplied):
2383 if len(unapplied) != len(old_unapplied):
2384 ui.status(_('number of unguarded, unapplied patches has '
2384 ui.status(_('number of unguarded, unapplied patches has '
2385 'changed from %d to %d\n') %
2385 'changed from %d to %d\n') %
2386 (len(old_unapplied), len(unapplied)))
2386 (len(old_unapplied), len(unapplied)))
2387 if len(guarded) != len(old_guarded):
2387 if len(guarded) != len(old_guarded):
2388 ui.status(_('number of guarded, applied patches has changed '
2388 ui.status(_('number of guarded, applied patches has changed '
2389 'from %d to %d\n') %
2389 'from %d to %d\n') %
2390 (len(old_guarded), len(guarded)))
2390 (len(old_guarded), len(guarded)))
2391 elif opts['series']:
2391 elif opts['series']:
2392 guards = {}
2392 guards = {}
2393 noguards = 0
2393 noguards = 0
2394 for gs in q.series_guards:
2394 for gs in q.series_guards:
2395 if not gs:
2395 if not gs:
2396 noguards += 1
2396 noguards += 1
2397 for g in gs:
2397 for g in gs:
2398 guards.setdefault(g, 0)
2398 guards.setdefault(g, 0)
2399 guards[g] += 1
2399 guards[g] += 1
2400 if ui.verbose:
2400 if ui.verbose:
2401 guards['NONE'] = noguards
2401 guards['NONE'] = noguards
2402 guards = guards.items()
2402 guards = guards.items()
2403 guards.sort(key=lambda x: x[0][1:])
2403 guards.sort(key=lambda x: x[0][1:])
2404 if guards:
2404 if guards:
2405 ui.note(_('guards in series file:\n'))
2405 ui.note(_('guards in series file:\n'))
2406 for guard, count in guards:
2406 for guard, count in guards:
2407 ui.note('%2d ' % count)
2407 ui.note('%2d ' % count)
2408 ui.write(guard, '\n')
2408 ui.write(guard, '\n')
2409 else:
2409 else:
2410 ui.note(_('no guards in series file\n'))
2410 ui.note(_('no guards in series file\n'))
2411 else:
2411 else:
2412 if guards:
2412 if guards:
2413 ui.note(_('active guards:\n'))
2413 ui.note(_('active guards:\n'))
2414 for g in guards:
2414 for g in guards:
2415 ui.write(g, '\n')
2415 ui.write(g, '\n')
2416 else:
2416 else:
2417 ui.write(_('no active guards\n'))
2417 ui.write(_('no active guards\n'))
2418 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2418 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2419 popped = False
2419 popped = False
2420 if opts['pop'] or opts['reapply']:
2420 if opts['pop'] or opts['reapply']:
2421 for i in xrange(len(q.applied)):
2421 for i in xrange(len(q.applied)):
2422 pushable, reason = q.pushable(i)
2422 pushable, reason = q.pushable(i)
2423 if not pushable:
2423 if not pushable:
2424 ui.status(_('popping guarded patches\n'))
2424 ui.status(_('popping guarded patches\n'))
2425 popped = True
2425 popped = True
2426 if i == 0:
2426 if i == 0:
2427 q.pop(repo, all=True)
2427 q.pop(repo, all=True)
2428 else:
2428 else:
2429 q.pop(repo, i-1)
2429 q.pop(repo, i-1)
2430 break
2430 break
2431 if popped:
2431 if popped:
2432 try:
2432 try:
2433 if reapply:
2433 if reapply:
2434 ui.status(_('reapplying unguarded patches\n'))
2434 ui.status(_('reapplying unguarded patches\n'))
2435 q.push(repo, reapply)
2435 q.push(repo, reapply)
2436 finally:
2436 finally:
2437 q.save_dirty()
2437 q.save_dirty()
2438
2438
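# Illustrative sketch (standalone, simplified): the push rule that the qguard
# and qselect help text above describes. 'guards' are one patch's guards
# ("+foo" / "-foo"), 'active' is the set chosen with qselect; the real logic
# lives in queue.pushable().
def _pushable_sketch(guards, active):
    if not guards:
        return True                  # unguarded patches are always pushed
    active = set(active)
    if any(g[1:] in active for g in guards if g.startswith('-')):
        return False                 # a matching negative guard blocks the push
    positives = [g for g in guards if g.startswith('+')]
    return not positives or any(g[1:] in active for g in positives)

# Mirrors the qselect example in the docstring above:
#   _pushable_sketch(['-stable'], ['stable'])  -> False  (foo.patch is skipped)
#   _pushable_sketch(['+stable'], ['stable'])  -> True   (bar.patch is pushed)
#   _pushable_sketch(['+stable'], [])          -> False  (skipped while no guard is active)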
2439 def finish(ui, repo, *revrange, **opts):
2439 def finish(ui, repo, *revrange, **opts):
2440 """move applied patches into repository history
2440 """move applied patches into repository history
2441
2441
2442 Finishes the specified revisions (corresponding to applied
2442 Finishes the specified revisions (corresponding to applied
2443 patches) by moving them out of mq control into regular repository
2443 patches) by moving them out of mq control into regular repository
2444 history.
2444 history.
2445
2445
2446 Accepts a revision range or the -a/--applied option. If --applied
2446 Accepts a revision range or the -a/--applied option. If --applied
2447 is specified, all applied mq revisions are removed from mq
2447 is specified, all applied mq revisions are removed from mq
2448 control. Otherwise, the given revisions must be at the base of the
2448 control. Otherwise, the given revisions must be at the base of the
2449 stack of applied patches.
2449 stack of applied patches.
2450
2450
2451 This can be especially useful if your changes have been applied to
2451 This can be especially useful if your changes have been applied to
2452 an upstream repository, or if you are about to push your changes
2452 an upstream repository, or if you are about to push your changes
2453 to upstream.
2453 to upstream.
2454 """
2454 """
2455 if not opts['applied'] and not revrange:
2455 if not opts['applied'] and not revrange:
2456 raise util.Abort(_('no revisions specified'))
2456 raise util.Abort(_('no revisions specified'))
2457 elif opts['applied']:
2457 elif opts['applied']:
2458 revrange = ('qbase:qtip',) + revrange
2458 revrange = ('qbase:qtip',) + revrange
2459
2459
2460 q = repo.mq
2460 q = repo.mq
2461 if not q.applied:
2461 if not q.applied:
2462 ui.status(_('no patches applied\n'))
2462 ui.status(_('no patches applied\n'))
2463 return 0
2463 return 0
2464
2464
2465 revs = cmdutil.revrange(repo, revrange)
2465 revs = cmdutil.revrange(repo, revrange)
2466 q.finish(repo, revs)
2466 q.finish(repo, revs)
2467 q.save_dirty()
2467 q.save_dirty()
2468 return 0
2468 return 0
2469
2469
2470 def reposetup(ui, repo):
2470 def reposetup(ui, repo):
2471 class mqrepo(repo.__class__):
2471 class mqrepo(repo.__class__):
2472 @util.propertycache
2472 @util.propertycache
2473 def mq(self):
2473 def mq(self):
2474 return queue(self.ui, self.join(""))
2474 return queue(self.ui, self.join(""))
2475
2475
2476 def abort_if_wdir_patched(self, errmsg, force=False):
2476 def abort_if_wdir_patched(self, errmsg, force=False):
2477 if self.mq.applied and not force:
2477 if self.mq.applied and not force:
2478 parent = hex(self.dirstate.parents()[0])
2478 parent = hex(self.dirstate.parents()[0])
2479 if parent in [s.rev for s in self.mq.applied]:
2479 if parent in [s.rev for s in self.mq.applied]:
2480 raise util.Abort(errmsg)
2480 raise util.Abort(errmsg)
2481
2481
2482 def commit(self, text="", user=None, date=None, match=None,
2482 def commit(self, text="", user=None, date=None, match=None,
2483 force=False, editor=False, extra={}):
2483 force=False, editor=False, extra={}):
2484 self.abort_if_wdir_patched(
2484 self.abort_if_wdir_patched(
2485 _('cannot commit over an applied mq patch'),
2485 _('cannot commit over an applied mq patch'),
2486 force)
2486 force)
2487
2487
2488 return super(mqrepo, self).commit(text, user, date, match, force,
2488 return super(mqrepo, self).commit(text, user, date, match, force,
2489 editor, extra)
2489 editor, extra)
2490
2490
2491 def push(self, remote, force=False, revs=None):
2491 def push(self, remote, force=False, revs=None):
2492 if self.mq.applied and not force and not revs:
2492 if self.mq.applied and not force and not revs:
2493 raise util.Abort(_('source has mq patches applied'))
2493 raise util.Abort(_('source has mq patches applied'))
2494 return super(mqrepo, self).push(remote, force, revs)
2494 return super(mqrepo, self).push(remote, force, revs)
2495
2495
2496 def _findtags(self):
2496 def _findtags(self):
2497 '''augment tags from base class with patch tags'''
2497 '''augment tags from base class with patch tags'''
2498 result = super(mqrepo, self)._findtags()
2498 result = super(mqrepo, self)._findtags()
2499
2499
2500 q = self.mq
2500 q = self.mq
2501 if not q.applied:
2501 if not q.applied:
2502 return result
2502 return result
2503
2503
2504 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2504 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2505
2505
2506 if mqtags[-1][0] not in self.changelog.nodemap:
2506 if mqtags[-1][0] not in self.changelog.nodemap:
2507 self.ui.warn(_('mq status file refers to unknown node %s\n')
2507 self.ui.warn(_('mq status file refers to unknown node %s\n')
2508 % short(mqtags[-1][0]))
2508 % short(mqtags[-1][0]))
2509 return result
2509 return result
2510
2510
2511 mqtags.append((mqtags[-1][0], 'qtip'))
2511 mqtags.append((mqtags[-1][0], 'qtip'))
2512 mqtags.append((mqtags[0][0], 'qbase'))
2512 mqtags.append((mqtags[0][0], 'qbase'))
2513 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2513 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2514 tags = result[0]
2514 tags = result[0]
2515 for patch in mqtags:
2515 for patch in mqtags:
2516 if patch[1] in tags:
2516 if patch[1] in tags:
2517 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2517 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2518 % patch[1])
2518 % patch[1])
2519 else:
2519 else:
2520 tags[patch[1]] = patch[0]
2520 tags[patch[1]] = patch[0]
2521
2521
2522 return result
2522 return result
2523
2523
2524 def _branchtags(self, partial, lrev):
2524 def _branchtags(self, partial, lrev):
2525 q = self.mq
2525 q = self.mq
2526 if not q.applied:
2526 if not q.applied:
2527 return super(mqrepo, self)._branchtags(partial, lrev)
2527 return super(mqrepo, self)._branchtags(partial, lrev)
2528
2528
2529 cl = self.changelog
2529 cl = self.changelog
2530 qbasenode = bin(q.applied[0].rev)
2530 qbasenode = bin(q.applied[0].rev)
2531 if qbasenode not in cl.nodemap:
2531 if qbasenode not in cl.nodemap:
2532 self.ui.warn(_('mq status file refers to unknown node %s\n')
2532 self.ui.warn(_('mq status file refers to unknown node %s\n')
2533 % short(qbasenode))
2533 % short(qbasenode))
2534 return super(mqrepo, self)._branchtags(partial, lrev)
2534 return super(mqrepo, self)._branchtags(partial, lrev)
2535
2535
2536 qbase = cl.rev(qbasenode)
2536 qbase = cl.rev(qbasenode)
2537 start = lrev + 1
2537 start = lrev + 1
2538 if start < qbase:
2538 if start < qbase:
2539 # update the cache (excluding the patches) and save it
2539 # update the cache (excluding the patches) and save it
2540 self._updatebranchcache(partial, lrev+1, qbase)
2540 self._updatebranchcache(partial, lrev+1, qbase)
2541 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2541 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2542 start = qbase
2542 start = qbase
2543 # if start = qbase, the cache is as updated as it should be.
2543 # if start = qbase, the cache is as updated as it should be.
2544 # if start > qbase, the cache includes (part of) the patches.
2544 # if start > qbase, the cache includes (part of) the patches.
2545 # we might as well use it, but we won't save it.
2545 # we might as well use it, but we won't save it.
2546
2546
2547 # update the cache up to the tip
2547 # update the cache up to the tip
2548 self._updatebranchcache(partial, start, len(cl))
2548 self._updatebranchcache(partial, start, len(cl))
2549
2549
2550 return partial
2550 return partial
2551
2551
2552 if repo.local():
2552 if repo.local():
2553 repo.__class__ = mqrepo
2553 repo.__class__ = mqrepo
2554
2554
2555 def mqimport(orig, ui, repo, *args, **kwargs):
2555 def mqimport(orig, ui, repo, *args, **kwargs):
2556 if hasattr(repo, 'abort_if_wdir_patched') and not kwargs.get('no_commit', False):
2556 if hasattr(repo, 'abort_if_wdir_patched') and not kwargs.get('no_commit', False):
2557 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2557 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2558 kwargs.get('force'))
2558 kwargs.get('force'))
2559 return orig(ui, repo, *args, **kwargs)
2559 return orig(ui, repo, *args, **kwargs)
2560
2560
2561 def uisetup(ui):
2561 def uisetup(ui):
2562 extensions.wrapcommand(commands.table, 'import', mqimport)
2562 extensions.wrapcommand(commands.table, 'import', mqimport)
2563
2563
2564 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2564 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2565
2565
2566 cmdtable = {
2566 cmdtable = {
2567 "qapplied":
2567 "qapplied":
2568 (applied,
2568 (applied,
2569 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2569 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2570 _('hg qapplied [-1] [-s] [PATCH]')),
2570 _('hg qapplied [-1] [-s] [PATCH]')),
2571 "qclone":
2571 "qclone":
2572 (clone,
2572 (clone,
2573 [('', 'pull', None, _('use pull protocol to copy metadata')),
2573 [('', 'pull', None, _('use pull protocol to copy metadata')),
2574 ('U', 'noupdate', None, _('do not update the new working directories')),
2574 ('U', 'noupdate', None, _('do not update the new working directories')),
2575 ('', 'uncompressed', None,
2575 ('', 'uncompressed', None,
2576 _('use uncompressed transfer (fast over LAN)')),
2576 _('use uncompressed transfer (fast over LAN)')),
2577 ('p', 'patches', '', _('location of source patch repository')),
2577 ('p', 'patches', '', _('location of source patch repository')),
2578 ] + commands.remoteopts,
2578 ] + commands.remoteopts,
2579 _('hg qclone [OPTION]... SOURCE [DEST]')),
2579 _('hg qclone [OPTION]... SOURCE [DEST]')),
2580 "qcommit|qci":
2580 "qcommit|qci":
2581 (commit,
2581 (commit,
2582 commands.table["^commit|ci"][1],
2582 commands.table["^commit|ci"][1],
2583 _('hg qcommit [OPTION]... [FILE]...')),
2583 _('hg qcommit [OPTION]... [FILE]...')),
2584 "^qdiff":
2584 "^qdiff":
2585 (diff,
2585 (diff,
2586 commands.diffopts + commands.diffopts2 + commands.walkopts,
2586 commands.diffopts + commands.diffopts2 + commands.walkopts,
2587 _('hg qdiff [OPTION]... [FILE]...')),
2587 _('hg qdiff [OPTION]... [FILE]...')),
2588 "qdelete|qremove|qrm":
2588 "qdelete|qremove|qrm":
2589 (delete,
2589 (delete,
2590 [('k', 'keep', None, _('keep patch file')),
2590 [('k', 'keep', None, _('keep patch file')),
2591 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2591 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2592 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2592 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2593 'qfold':
2593 'qfold':
2594 (fold,
2594 (fold,
2595 [('e', 'edit', None, _('edit patch header')),
2595 [('e', 'edit', None, _('edit patch header')),
2596 ('k', 'keep', None, _('keep folded patch files')),
2596 ('k', 'keep', None, _('keep folded patch files')),
2597 ] + commands.commitopts,
2597 ] + commands.commitopts,
2598 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2598 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2599 'qgoto':
2599 'qgoto':
2600 (goto,
2600 (goto,
2601 [('f', 'force', None, _('overwrite any local changes'))],
2601 [('f', 'force', None, _('overwrite any local changes'))],
2602 _('hg qgoto [OPTION]... PATCH')),
2602 _('hg qgoto [OPTION]... PATCH')),
2603 'qguard':
2603 'qguard':
2604 (guard,
2604 (guard,
2605 [('l', 'list', None, _('list all patches and guards')),
2605 [('l', 'list', None, _('list all patches and guards')),
2606 ('n', 'none', None, _('drop all guards'))],
2606 ('n', 'none', None, _('drop all guards'))],
2607 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2607 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2608 'qheader': (header, [], _('hg qheader [PATCH]')),
2608 'qheader': (header, [], _('hg qheader [PATCH]')),
2609 "^qimport":
2609 "^qimport":
2610 (qimport,
2610 (qimport,
2611 [('e', 'existing', None, _('import file in patch directory')),
2611 [('e', 'existing', None, _('import file in patch directory')),
2612 ('n', 'name', '', _('name of patch file')),
2612 ('n', 'name', '', _('name of patch file')),
2613 ('f', 'force', None, _('overwrite existing files')),
2613 ('f', 'force', None, _('overwrite existing files')),
2614 ('r', 'rev', [], _('place existing revisions under mq control')),
2614 ('r', 'rev', [], _('place existing revisions under mq control')),
2615 ('g', 'git', None, _('use git extended diff format')),
2615 ('g', 'git', None, _('use git extended diff format')),
2616 ('P', 'push', None, _('qpush after importing'))],
2616 ('P', 'push', None, _('qpush after importing'))],
2617 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2617 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2618 "^qinit":
2618 "^qinit":
2619 (init,
2619 (init,
2620 [('c', 'create-repo', None, _('create queue repository'))],
2620 [('c', 'create-repo', None, _('create queue repository'))],
2621 _('hg qinit [-c]')),
2621 _('hg qinit [-c]')),
2622 "qnew":
2622 "qnew":
2623 (new,
2623 (new,
2624 [('e', 'edit', None, _('edit commit message')),
2624 [('e', 'edit', None, _('edit commit message')),
2625 ('f', 'force', None, _('import uncommitted changes into patch')),
2625 ('f', 'force', None, _('import uncommitted changes into patch')),
2626 ('g', 'git', None, _('use git extended diff format')),
2626 ('g', 'git', None, _('use git extended diff format')),
2627 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2627 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2628 ('u', 'user', '', _('add "From: <given user>" to patch')),
2628 ('u', 'user', '', _('add "From: <given user>" to patch')),
2629 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2629 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2630 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2630 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2631 ] + commands.walkopts + commands.commitopts,
2631 ] + commands.walkopts + commands.commitopts,
2632 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2632 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2633 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2633 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2634 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2634 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2635 "^qpop":
2635 "^qpop":
2636 (pop,
2636 (pop,
2637 [('a', 'all', None, _('pop all patches')),
2637 [('a', 'all', None, _('pop all patches')),
2638 ('n', 'name', '', _('queue name to pop')),
2638 ('n', 'name', '', _('queue name to pop')),
2639 ('f', 'force', None, _('forget any local changes to patched files'))],
2639 ('f', 'force', None, _('forget any local changes to patched files'))],
2640 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2640 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2641 "^qpush":
2641 "^qpush":
2642 (push,
2642 (push,
2643 [('f', 'force', None, _('apply if the patch has rejects')),
2643 [('f', 'force', None, _('apply if the patch has rejects')),
2644 ('l', 'list', None, _('list patch name in commit text')),
2644 ('l', 'list', None, _('list patch name in commit text')),
2645 ('a', 'all', None, _('apply all patches')),
2645 ('a', 'all', None, _('apply all patches')),
2646 ('m', 'merge', None, _('merge from another queue')),
2646 ('m', 'merge', None, _('merge from another queue')),
2647 ('n', 'name', '', _('merge queue name'))],
2647 ('n', 'name', '', _('merge queue name'))],
2648 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2648 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2649 "^qrefresh":
2649 "^qrefresh":
2650 (refresh,
2650 (refresh,
2651 [('e', 'edit', None, _('edit commit message')),
2651 [('e', 'edit', None, _('edit commit message')),
2652 ('g', 'git', None, _('use git extended diff format')),
2652 ('g', 'git', None, _('use git extended diff format')),
2653 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2653 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2654 ('U', 'currentuser', None, _('add/update author field in patch with current user')),
2654 ('U', 'currentuser', None, _('add/update author field in patch with current user')),
2655 ('u', 'user', '', _('add/update author field in patch with given user')),
2655 ('u', 'user', '', _('add/update author field in patch with given user')),
2656 ('D', 'currentdate', None, _('add/update date field in patch with current date')),
2656 ('D', 'currentdate', None, _('add/update date field in patch with current date')),
2657 ('d', 'date', '', _('add/update date field in patch with given date'))
2657 ('d', 'date', '', _('add/update date field in patch with given date'))
2658 ] + commands.walkopts + commands.commitopts,
2658 ] + commands.walkopts + commands.commitopts,
2659 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2659 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2660 'qrename|qmv':
2660 'qrename|qmv':
2661 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2661 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2662 "qrestore":
2662 "qrestore":
2663 (restore,
2663 (restore,
2664 [('d', 'delete', None, _('delete save entry')),
2664 [('d', 'delete', None, _('delete save entry')),
2665 ('u', 'update', None, _('update queue working directory'))],
2665 ('u', 'update', None, _('update queue working directory'))],
2666 _('hg qrestore [-d] [-u] REV')),
2666 _('hg qrestore [-d] [-u] REV')),
2667 "qsave":
2667 "qsave":
2668 (save,
2668 (save,
2669 [('c', 'copy', None, _('copy patch directory')),
2669 [('c', 'copy', None, _('copy patch directory')),
2670 ('n', 'name', '', _('copy directory name')),
2670 ('n', 'name', '', _('copy directory name')),
2671 ('e', 'empty', None, _('clear queue status file')),
2671 ('e', 'empty', None, _('clear queue status file')),
2672 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2672 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2673 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2673 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2674 "qselect":
2674 "qselect":
2675 (select,
2675 (select,
2676 [('n', 'none', None, _('disable all guards')),
2676 [('n', 'none', None, _('disable all guards')),
2677 ('s', 'series', None, _('list all guards in series file')),
2677 ('s', 'series', None, _('list all guards in series file')),
2678 ('', 'pop', None, _('pop to before first guarded applied patch')),
2678 ('', 'pop', None, _('pop to before first guarded applied patch')),
2679 ('', 'reapply', None, _('pop, then reapply patches'))],
2679 ('', 'reapply', None, _('pop, then reapply patches'))],
2680 _('hg qselect [OPTION]... [GUARD]...')),
2680 _('hg qselect [OPTION]... [GUARD]...')),
2681 "qseries":
2681 "qseries":
2682 (series,
2682 (series,
2683 [('m', 'missing', None, _('print patches not in series')),
2683 [('m', 'missing', None, _('print patches not in series')),
2684 ] + seriesopts,
2684 ] + seriesopts,
2685 _('hg qseries [-ms]')),
2685 _('hg qseries [-ms]')),
2686 "^strip":
2686 "^strip":
2687 (strip,
2687 (strip,
2688 [('f', 'force', None, _('force removal with local changes')),
2688 [('f', 'force', None, _('force removal with local changes')),
2689 ('b', 'backup', None, _('bundle unrelated changesets')),
2689 ('b', 'backup', None, _('bundle unrelated changesets')),
2690 ('n', 'nobackup', None, _('no backups'))],
2690 ('n', 'nobackup', None, _('no backups'))],
2691 _('hg strip [-f] [-b] [-n] REV')),
2691 _('hg strip [-f] [-b] [-n] REV')),
2692 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2692 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2693 "qunapplied":
2693 "qunapplied":
2694 (unapplied,
2694 (unapplied,
2695 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2695 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2696 _('hg qunapplied [-1] [-s] [PATCH]')),
2696 _('hg qunapplied [-1] [-s] [PATCH]')),
2697 "qfinish":
2697 "qfinish":
2698 (finish,
2698 (finish,
2699 [('a', 'applied', None, _('finish all applied changesets'))],
2699 [('a', 'applied', None, _('finish all applied changesets'))],
2700 _('hg qfinish [-a] [REV]...')),
2700 _('hg qfinish [-a] [REV]...')),
2701 }
2701 }
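# Illustrative sketch (hypothetical command, not part of mq): each cmdtable
# entry above maps a command name to a (function, options, synopsis) triple.
# A leading "^" puts the command in the short help listing and "|" separates
# aliases (as in "qdelete|qremove|qrm"). A minimal additional entry would look
# like:
#
#   def qhello(ui, repo, **opts):
#       """print a greeting (hypothetical example)"""
#       ui.write("hello from mq\n")
#       return 0
#
#   cmdtable["qhello"] = (qhello, [], _('hg qhello'))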
@@ -1,1387 +1,1409 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 from i18n import _
16 from i18n import _
17 import changegroup, ancestor, mdiff, parsers, error, util
17 import changegroup, ancestor, mdiff, parsers, error, util
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog flags
26 # revlog flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FORMAT = REVLOGNG
31 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33
33
34 _prereadsize = 1048576
34 _prereadsize = 1048576
35
35
36 RevlogError = error.RevlogError
36 RevlogError = error.RevlogError
37 LookupError = error.LookupError
37 LookupError = error.LookupError
38
38
39 def getoffset(q):
39 def getoffset(q):
40 return int(q >> 16)
40 return int(q >> 16)
41
41
42 def gettype(q):
42 def gettype(q):
43 return int(q & 0xFFFF)
43 return int(q & 0xFFFF)
44
44
45 def offset_type(offset, type):
45 def offset_type(offset, type):
46 return long(long(offset) << 16 | type)
46 return long(long(offset) << 16 | type)
47
47
48 nullhash = _sha(nullid)
48 nullhash = _sha(nullid)
49
49
50 def hash(text, p1, p2):
50 def hash(text, p1, p2):
51 """generate a hash from the given text and its parent hashes
51 """generate a hash from the given text and its parent hashes
52
52
53 This hash combines both the current file contents and its history
53 This hash combines both the current file contents and its history
54 in a manner that makes it easy to distinguish nodes with the same
54 in a manner that makes it easy to distinguish nodes with the same
55 content in the revision graph.
55 content in the revision graph.
56 """
56 """
57 # As of now, if one of the parent nodes is null, p2 is null
57 # As of now, if one of the parent nodes is null, p2 is null
58 if p2 == nullid:
58 if p2 == nullid:
59 # deep copy of a hash is faster than creating one
59 # deep copy of a hash is faster than creating one
60 s = nullhash.copy()
60 s = nullhash.copy()
61 s.update(p1)
61 s.update(p1)
62 else:
62 else:
63 # none of the parent nodes are nullid
63 # none of the parent nodes are nullid
64 l = [p1, p2]
64 l = [p1, p2]
65 l.sort()
65 l.sort()
66 s = _sha(l[0])
66 s = _sha(l[0])
67 s.update(l[1])
67 s.update(l[1])
68 s.update(text)
68 s.update(text)
69 return s.digest()
69 return s.digest()
70
70
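For illustration, the node hash computed by hash() above can be reproduced outside a revlog with nothing but hashlib. A minimal sketch, not part of this changeset; NULLID and node_hash are hypothetical names:

import hashlib

# illustrative sketch only, not part of the changeset
NULLID = b"\0" * 20

def node_hash(text, p1, p2):
    # sort the parents so the result is independent of parent order,
    # exactly as hash() does with l.sort() above
    s = hashlib.sha1(min(p1, p2))
    s.update(max(p1, p2))
    s.update(text)
    return s.digest()

# nodeid of a root revision (both parents null)
root = node_hash(b"hello\n", NULLID, NULLID)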
71 def compress(text):
71 def compress(text):
72 """ generate a possibly-compressed representation of text """
72 """ generate a possibly-compressed representation of text """
73 if not text:
73 if not text:
74 return ("", text)
74 return ("", text)
75 l = len(text)
75 l = len(text)
76 bin = None
76 bin = None
77 if l < 44:
77 if l < 44:
78 pass
78 pass
79 elif l > 1000000:
79 elif l > 1000000:
80 # zlib makes an internal copy, thus doubling memory usage for
80 # zlib makes an internal copy, thus doubling memory usage for
81 # large files, so let's do this in pieces
81 # large files, so let's do this in pieces
82 z = zlib.compressobj()
82 z = zlib.compressobj()
83 p = []
83 p = []
84 pos = 0
84 pos = 0
85 while pos < l:
85 while pos < l:
86 pos2 = pos + 2**20
86 pos2 = pos + 2**20
87 p.append(z.compress(text[pos:pos2]))
87 p.append(z.compress(text[pos:pos2]))
88 pos = pos2
88 pos = pos2
89 p.append(z.flush())
89 p.append(z.flush())
90 if sum(map(len, p)) < l:
90 if sum(map(len, p)) < l:
91 bin = "".join(p)
91 bin = "".join(p)
92 else:
92 else:
93 bin = _compress(text)
93 bin = _compress(text)
94 if bin is None or len(bin) > l:
94 if bin is None or len(bin) > l:
95 if text[0] == '\0':
95 if text[0] == '\0':
96 return ("", text)
96 return ("", text)
97 return ('u', text)
97 return ('u', text)
98 return ("", bin)
98 return ("", bin)
99
99
100 def decompress(bin):
100 def decompress(bin):
101 """ decompress the given input """
101 """ decompress the given input """
102 if not bin:
102 if not bin:
103 return bin
103 return bin
104 t = bin[0]
104 t = bin[0]
105 if t == '\0':
105 if t == '\0':
106 return bin
106 return bin
107 if t == 'x':
107 if t == 'x':
108 return _decompress(bin)
108 return _decompress(bin)
109 if t == 'u':
109 if t == 'u':
110 return bin[1:]
110 return bin[1:]
111 raise RevlogError(_("unknown compression type %r") % t)
111 raise RevlogError(_("unknown compression type %r") % t)
112
112
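The pair returned by compress() is concatenated when a chunk is written, so the first byte of the stored data records how it was encoded: zlib output begins with 'x', literal text is prefixed with 'u', and empty or NUL-leading text is stored as-is. A simplified round-trip sketch of that convention, omitting the short-string and chunked >1MB paths; store/load are hypothetical names, not part of this changeset:

import zlib

# illustrative sketch only, not part of the changeset
def store(text):
    bin = zlib.compress(text)
    if len(bin) < len(text):
        return bin                     # zlib stream, first byte is 'x'
    if text[:1] == b"\0":
        return text                    # NUL-leading text needs no marker
    return b"u" + text                 # 'u' marks uncompressed literal text

def load(data):
    if not data or data[:1] == b"\0":
        return data
    if data[:1] == b"x":
        return zlib.decompress(data)
    if data[:1] == b"u":
        return data[1:]
    raise ValueError("unknown compression type %r" % data[:1])

assert load(store(b"some revision text" * 100)) == b"some revision text" * 100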
113 class lazyparser(object):
113 class lazyparser(object):
114 """
114 """
115 this class avoids the need to parse the entirety of large indices
115 this class avoids the need to parse the entirety of large indices
116 """
116 """
117
117
118 # lazyparser is not safe to use on windows if the win32 extensions are
118 # lazyparser is not safe to use on windows if the win32 extensions are
119 # not available. It keeps the file handle open, which makes it impossible
119 # not available. It keeps the file handle open, which makes it impossible
120 # to break hardlinks on locally cloned repos.
120 # to break hardlinks on locally cloned repos.
121
121
122 def __init__(self, dataf):
122 def __init__(self, dataf):
123 try:
123 try:
124 size = util.fstat(dataf).st_size
124 size = util.fstat(dataf).st_size
125 except AttributeError:
125 except AttributeError:
126 size = 0
126 size = 0
127 self.dataf = dataf
127 self.dataf = dataf
128 self.s = struct.calcsize(indexformatng)
128 self.s = struct.calcsize(indexformatng)
129 self.datasize = size
129 self.datasize = size
130 self.l = size/self.s
130 self.l = size/self.s
131 self.index = [None] * self.l
131 self.index = [None] * self.l
132 self.map = {nullid: nullrev}
132 self.map = {nullid: nullrev}
133 self.allmap = 0
133 self.allmap = 0
134 self.all = 0
134 self.all = 0
135 self.mapfind_count = 0
135 self.mapfind_count = 0
136
136
137 def loadmap(self):
137 def loadmap(self):
138 """
138 """
139 during a commit, we need to make sure the rev being added is
139 during a commit, we need to make sure the rev being added is
140 not a duplicate. This requires loading the entire index,
140 not a duplicate. This requires loading the entire index,
141 which is fairly slow. loadmap can load up just the node map,
141 which is fairly slow. loadmap can load up just the node map,
142 which takes much less time.
142 which takes much less time.
143 """
143 """
144 if self.allmap:
144 if self.allmap:
145 return
145 return
146 end = self.datasize
146 end = self.datasize
147 self.allmap = 1
147 self.allmap = 1
148 cur = 0
148 cur = 0
149 count = 0
149 count = 0
150 blocksize = self.s * 256
150 blocksize = self.s * 256
151 self.dataf.seek(0)
151 self.dataf.seek(0)
152 while cur < end:
152 while cur < end:
153 data = self.dataf.read(blocksize)
153 data = self.dataf.read(blocksize)
154 off = 0
154 off = 0
155 for x in xrange(256):
155 for x in xrange(256):
156 n = data[off + ngshaoffset:off + ngshaoffset + 20]
156 n = data[off + ngshaoffset:off + ngshaoffset + 20]
157 self.map[n] = count
157 self.map[n] = count
158 count += 1
158 count += 1
159 if count >= self.l:
159 if count >= self.l:
160 break
160 break
161 off += self.s
161 off += self.s
162 cur += blocksize
162 cur += blocksize
163
163
164 def loadblock(self, blockstart, blocksize, data=None):
164 def loadblock(self, blockstart, blocksize, data=None):
165 if self.all:
165 if self.all:
166 return
166 return
167 if data is None:
167 if data is None:
168 self.dataf.seek(blockstart)
168 self.dataf.seek(blockstart)
169 if blockstart + blocksize > self.datasize:
169 if blockstart + blocksize > self.datasize:
170 # the revlog may have grown since we've started running,
170 # the revlog may have grown since we've started running,
171 # but we don't have space in self.index for more entries.
171 # but we don't have space in self.index for more entries.
172 # limit blocksize so that we don't get too much data.
172 # limit blocksize so that we don't get too much data.
173 blocksize = max(self.datasize - blockstart, 0)
173 blocksize = max(self.datasize - blockstart, 0)
174 data = self.dataf.read(blocksize)
174 data = self.dataf.read(blocksize)
175 lend = len(data) / self.s
175 lend = len(data) / self.s
176 i = blockstart / self.s
176 i = blockstart / self.s
177 off = 0
177 off = 0
178 # lazyindex supports __delitem__
178 # lazyindex supports __delitem__
179 if lend > len(self.index) - i:
179 if lend > len(self.index) - i:
180 lend = len(self.index) - i
180 lend = len(self.index) - i
181 for x in xrange(lend):
181 for x in xrange(lend):
182 if self.index[i + x] is None:
182 if self.index[i + x] is None:
183 b = data[off : off + self.s]
183 b = data[off : off + self.s]
184 self.index[i + x] = b
184 self.index[i + x] = b
185 n = b[ngshaoffset:ngshaoffset + 20]
185 n = b[ngshaoffset:ngshaoffset + 20]
186 self.map[n] = i + x
186 self.map[n] = i + x
187 off += self.s
187 off += self.s
188
188
189 def findnode(self, node):
189 def findnode(self, node):
190 """search backwards through the index file for a specific node"""
190 """search backwards through the index file for a specific node"""
191 if self.allmap:
191 if self.allmap:
192 return None
192 return None
193
193
194 # hg log will cause many many searches for the manifest
194 # hg log will cause many many searches for the manifest
195 # nodes. After we get called a few times, just load the whole
195 # nodes. After we get called a few times, just load the whole
196 # thing.
196 # thing.
197 if self.mapfind_count > 8:
197 if self.mapfind_count > 8:
198 self.loadmap()
198 self.loadmap()
199 if node in self.map:
199 if node in self.map:
200 return node
200 return node
201 return None
201 return None
202 self.mapfind_count += 1
202 self.mapfind_count += 1
203 last = self.l - 1
203 last = self.l - 1
204 while self.index[last] is not None:
204 while self.index[last] is not None:
205 if last == 0:
205 if last == 0:
206 self.all = 1
206 self.all = 1
207 self.allmap = 1
207 self.allmap = 1
208 return None
208 return None
209 last -= 1
209 last -= 1
210 end = (last + 1) * self.s
210 end = (last + 1) * self.s
211 blocksize = self.s * 256
211 blocksize = self.s * 256
212 while end >= 0:
212 while end >= 0:
213 start = max(end - blocksize, 0)
213 start = max(end - blocksize, 0)
214 self.dataf.seek(start)
214 self.dataf.seek(start)
215 data = self.dataf.read(end - start)
215 data = self.dataf.read(end - start)
216 findend = end - start
216 findend = end - start
217 while True:
217 while True:
218 # we're searching backwards, so we have to make sure
218 # we're searching backwards, so we have to make sure
219 # we don't find a changeset where this node is a parent
219 # we don't find a changeset where this node is a parent
220 off = data.find(node, 0, findend)
220 off = data.find(node, 0, findend)
221 findend = off
221 findend = off
222 if off >= 0:
222 if off >= 0:
223 i = off / self.s
223 i = off / self.s
224 off = i * self.s
224 off = i * self.s
225 n = data[off + ngshaoffset:off + ngshaoffset + 20]
225 n = data[off + ngshaoffset:off + ngshaoffset + 20]
226 if n == node:
226 if n == node:
227 self.map[n] = i + start / self.s
227 self.map[n] = i + start / self.s
228 return node
228 return node
229 else:
229 else:
230 break
230 break
231 end -= blocksize
231 end -= blocksize
232 return None
232 return None
233
233
234 def loadindex(self, i=None, end=None):
234 def loadindex(self, i=None, end=None):
235 if self.all:
235 if self.all:
236 return
236 return
237 all = False
237 all = False
238 if i is None:
238 if i is None:
239 blockstart = 0
239 blockstart = 0
240 blocksize = (65536 / self.s) * self.s
240 blocksize = (65536 / self.s) * self.s
241 end = self.datasize
241 end = self.datasize
242 all = True
242 all = True
243 else:
243 else:
244 if end:
244 if end:
245 blockstart = i * self.s
245 blockstart = i * self.s
246 end = end * self.s
246 end = end * self.s
247 blocksize = end - blockstart
247 blocksize = end - blockstart
248 else:
248 else:
249 blockstart = (i & ~1023) * self.s
249 blockstart = (i & ~1023) * self.s
250 blocksize = self.s * 1024
250 blocksize = self.s * 1024
251 end = blockstart + blocksize
251 end = blockstart + blocksize
252 while blockstart < end:
252 while blockstart < end:
253 self.loadblock(blockstart, blocksize)
253 self.loadblock(blockstart, blocksize)
254 blockstart += blocksize
254 blockstart += blocksize
255 if all:
255 if all:
256 self.all = True
256 self.all = True
257
257
258 class lazyindex(object):
258 class lazyindex(object):
259 """a lazy version of the index array"""
259 """a lazy version of the index array"""
260 def __init__(self, parser):
260 def __init__(self, parser):
261 self.p = parser
261 self.p = parser
262 def __len__(self):
262 def __len__(self):
263 return len(self.p.index)
263 return len(self.p.index)
264 def load(self, pos):
264 def load(self, pos):
265 if pos < 0:
265 if pos < 0:
266 pos += len(self.p.index)
266 pos += len(self.p.index)
267 self.p.loadindex(pos)
267 self.p.loadindex(pos)
268 return self.p.index[pos]
268 return self.p.index[pos]
269 def __getitem__(self, pos):
269 def __getitem__(self, pos):
270 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
270 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
271 def __setitem__(self, pos, item):
271 def __setitem__(self, pos, item):
272 self.p.index[pos] = _pack(indexformatng, *item)
272 self.p.index[pos] = _pack(indexformatng, *item)
273 def __delitem__(self, pos):
273 def __delitem__(self, pos):
274 del self.p.index[pos]
274 del self.p.index[pos]
275 def insert(self, pos, e):
275 def insert(self, pos, e):
276 self.p.index.insert(pos, _pack(indexformatng, *e))
276 self.p.index.insert(pos, _pack(indexformatng, *e))
277 def append(self, e):
277 def append(self, e):
278 self.p.index.append(_pack(indexformatng, *e))
278 self.p.index.append(_pack(indexformatng, *e))
279
279
280 class lazymap(object):
280 class lazymap(object):
281 """a lazy version of the node map"""
281 """a lazy version of the node map"""
282 def __init__(self, parser):
282 def __init__(self, parser):
283 self.p = parser
283 self.p = parser
284 def load(self, key):
284 def load(self, key):
285 n = self.p.findnode(key)
285 n = self.p.findnode(key)
286 if n is None:
286 if n is None:
287 raise KeyError(key)
287 raise KeyError(key)
288 def __contains__(self, key):
288 def __contains__(self, key):
289 if key in self.p.map:
289 if key in self.p.map:
290 return True
290 return True
291 self.p.loadmap()
291 self.p.loadmap()
292 return key in self.p.map
292 return key in self.p.map
293 def __iter__(self):
293 def __iter__(self):
294 yield nullid
294 yield nullid
295 for i in xrange(self.p.l):
295 for i in xrange(self.p.l):
296 ret = self.p.index[i]
296 ret = self.p.index[i]
297 if not ret:
297 if not ret:
298 self.p.loadindex(i)
298 self.p.loadindex(i)
299 ret = self.p.index[i]
299 ret = self.p.index[i]
300 if isinstance(ret, str):
300 if isinstance(ret, str):
301 ret = _unpack(indexformatng, ret)
301 ret = _unpack(indexformatng, ret)
302 yield ret[7]
302 yield ret[7]
303 def __getitem__(self, key):
303 def __getitem__(self, key):
304 try:
304 try:
305 return self.p.map[key]
305 return self.p.map[key]
306 except KeyError:
306 except KeyError:
307 try:
307 try:
308 self.load(key)
308 self.load(key)
309 return self.p.map[key]
309 return self.p.map[key]
310 except KeyError:
310 except KeyError:
311 raise KeyError("node " + hex(key))
311 raise KeyError("node " + hex(key))
312 def __setitem__(self, key, val):
312 def __setitem__(self, key, val):
313 self.p.map[key] = val
313 self.p.map[key] = val
314 def __delitem__(self, key):
314 def __delitem__(self, key):
315 del self.p.map[key]
315 del self.p.map[key]
316
316
317 indexformatv0 = ">4l20s20s20s"
317 indexformatv0 = ">4l20s20s20s"
318 v0shaoffset = 56
318 v0shaoffset = 56
319
319
320 class revlogoldio(object):
320 class revlogoldio(object):
321 def __init__(self):
321 def __init__(self):
322 self.size = struct.calcsize(indexformatv0)
322 self.size = struct.calcsize(indexformatv0)
323
323
324 def parseindex(self, fp, data, inline):
324 def parseindex(self, fp, data, inline):
325 s = self.size
325 s = self.size
326 index = []
326 index = []
327 nodemap = {nullid: nullrev}
327 nodemap = {nullid: nullrev}
328 n = off = 0
328 n = off = 0
329 if len(data) == _prereadsize:
329 if len(data) == _prereadsize:
330 data += fp.read() # read the rest
330 data += fp.read() # read the rest
331 l = len(data)
331 l = len(data)
332 while off + s <= l:
332 while off + s <= l:
333 cur = data[off:off + s]
333 cur = data[off:off + s]
334 off += s
334 off += s
335 e = _unpack(indexformatv0, cur)
335 e = _unpack(indexformatv0, cur)
336 # transform to revlogv1 format
336 # transform to revlogv1 format
337 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
337 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
338 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
338 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
339 index.append(e2)
339 index.append(e2)
340 nodemap[e[6]] = n
340 nodemap[e[6]] = n
341 n += 1
341 n += 1
342
342
343 return index, nodemap, None
343 return index, nodemap, None
344
344
345 def packentry(self, entry, node, version, rev):
345 def packentry(self, entry, node, version, rev):
346 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
346 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
347 node(entry[5]), node(entry[6]), entry[7])
347 node(entry[5]), node(entry[6]), entry[7])
348 return _pack(indexformatv0, *e2)
348 return _pack(indexformatv0, *e2)
349
349
350 # index ng:
350 # index ng:
351 # 6 bytes offset
351 # 6 bytes offset
352 # 2 bytes flags
352 # 2 bytes flags
353 # 4 bytes compressed length
353 # 4 bytes compressed length
354 # 4 bytes uncompressed length
354 # 4 bytes uncompressed length
355 # 4 bytes: base rev
355 # 4 bytes: base rev
356 # 4 bytes link rev
356 # 4 bytes link rev
357 # 4 bytes parent 1 rev
357 # 4 bytes parent 1 rev
358 # 4 bytes parent 2 rev
358 # 4 bytes parent 2 rev
359 # 32 bytes: nodeid (20-byte hash zero-padded to 32 bytes)
359 # 32 bytes: nodeid (20-byte hash zero-padded to 32 bytes)
360 indexformatng = ">Qiiiiii20s12x"
360 indexformatng = ">Qiiiiii20s12x"
361 ngshaoffset = 32
361 ngshaoffset = 32
362 versionformat = ">I"
362 versionformat = ">I"
363
363
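Each revlogng index record is therefore a fixed 64-byte struct: the leading quad packs the 48-bit data-file offset with the 16-bit flags (see offset_type above), followed by six 32-bit integers and the 20-byte nodeid padded to 32 bytes. A small sketch, not part of this changeset, that packs and unpacks one such entry with made-up field values:

import struct

# illustrative sketch only, not part of the changeset
indexformatng = ">Qiiiiii20s12x"       # 64 bytes per index entry
entry = (
    (0 << 16) | 0,    # data-file offset (high 48 bits) + flags (low 16 bits)
    11,               # compressed chunk length
    11,               # uncompressed text length
    0,                # base revision of the delta chain
    0,                # link revision
    -1,               # first parent revision (-1 is nullrev)
    -1,               # second parent revision
    b"\x12" * 20,     # nodeid
)
packed = struct.pack(indexformatng, *entry)
assert len(packed) == struct.calcsize(indexformatng) == 64
assert struct.unpack(indexformatng, packed) == entry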
364 class revlogio(object):
364 class revlogio(object):
365 def __init__(self):
365 def __init__(self):
366 self.size = struct.calcsize(indexformatng)
366 self.size = struct.calcsize(indexformatng)
367
367
368 def parseindex(self, fp, data, inline):
368 def parseindex(self, fp, data, inline):
369 if len(data) == _prereadsize:
369 if len(data) == _prereadsize:
370 if util.openhardlinks() and not inline:
370 if util.openhardlinks() and not inline:
371 # big index, let's parse it on demand
371 # big index, let's parse it on demand
372 parser = lazyparser(fp)
372 parser = lazyparser(fp)
373 index = lazyindex(parser)
373 index = lazyindex(parser)
374 nodemap = lazymap(parser)
374 nodemap = lazymap(parser)
375 e = list(index[0])
375 e = list(index[0])
376 type = gettype(e[0])
376 type = gettype(e[0])
377 e[0] = offset_type(0, type)
377 e[0] = offset_type(0, type)
378 index[0] = e
378 index[0] = e
379 return index, nodemap, None
379 return index, nodemap, None
380 else:
380 else:
381 data += fp.read()
381 data += fp.read()
382
382
383 # call the C implementation to parse the index data
383 # call the C implementation to parse the index data
384 index, nodemap, cache = parsers.parse_index(data, inline)
384 index, nodemap, cache = parsers.parse_index(data, inline)
385 return index, nodemap, cache
385 return index, nodemap, cache
386
386
387 def packentry(self, entry, node, version, rev):
387 def packentry(self, entry, node, version, rev):
388 p = _pack(indexformatng, *entry)
388 p = _pack(indexformatng, *entry)
389 if rev == 0:
389 if rev == 0:
390 p = _pack(versionformat, version) + p[4:]
390 p = _pack(versionformat, version) + p[4:]
391 return p
391 return p
392
392
393 class revlog(object):
393 class revlog(object):
394 """
394 """
395 the underlying revision storage object
395 the underlying revision storage object
396
396
397 A revlog consists of two parts, an index and the revision data.
397 A revlog consists of two parts, an index and the revision data.
398
398
399 The index is a file with a fixed record size containing
399 The index is a file with a fixed record size containing
400 information on each revision, including its nodeid (hash), the
400 information on each revision, including its nodeid (hash), the
401 nodeids of its parents, the position and offset of its data within
401 nodeids of its parents, the position and offset of its data within
402 the data file, and the revision it's based on. Finally, each entry
402 the data file, and the revision it's based on. Finally, each entry
403 contains a linkrev entry that can serve as a pointer to external
403 contains a linkrev entry that can serve as a pointer to external
404 data.
404 data.
405
405
406 The revision data itself is a linear collection of data chunks.
406 The revision data itself is a linear collection of data chunks.
407 Each chunk represents a revision and is usually represented as a
407 Each chunk represents a revision and is usually represented as a
408 delta against the previous chunk. To bound lookup time, runs of
408 delta against the previous chunk. To bound lookup time, runs of
409 deltas are limited to about 2 times the length of the original
409 deltas are limited to about 2 times the length of the original
410 version data. This makes retrieval of a version proportional to
410 version data. This makes retrieval of a version proportional to
411 its size, or O(1) relative to the number of revisions.
411 its size, or O(1) relative to the number of revisions.
412
412
413 Both pieces of the revlog are written to in an append-only
413 Both pieces of the revlog are written to in an append-only
414 fashion, which means we never need to rewrite a file to insert or
414 fashion, which means we never need to rewrite a file to insert or
415 remove data, and can use some simple techniques to avoid the need
415 remove data, and can use some simple techniques to avoid the need
416 for locking while reading.
416 for locking while reading.
417 """
417 """
418 def __init__(self, opener, indexfile):
418 def __init__(self, opener, indexfile):
419 """
419 """
420 create a revlog object
420 create a revlog object
421
421
422 opener is a function that abstracts the file opening operation
422 opener is a function that abstracts the file opening operation
423 and can be used to implement COW semantics or the like.
423 and can be used to implement COW semantics or the like.
424 """
424 """
425 self.indexfile = indexfile
425 self.indexfile = indexfile
426 self.datafile = indexfile[:-2] + ".d"
426 self.datafile = indexfile[:-2] + ".d"
427 self.opener = opener
427 self.opener = opener
428 self._cache = None
428 self._cache = None
429 self._chunkcache = (0, '')
429 self._chunkcache = (0, '')
430 self.nodemap = {nullid: nullrev}
430 self.nodemap = {nullid: nullrev}
431 self.index = []
431 self.index = []
432
432
433 v = REVLOG_DEFAULT_VERSION
433 v = REVLOG_DEFAULT_VERSION
434 if hasattr(opener, "defversion"):
434 if hasattr(opener, "defversion"):
435 v = opener.defversion
435 v = opener.defversion
436 if v & REVLOGNG:
436 if v & REVLOGNG:
437 v |= REVLOGNGINLINEDATA
437 v |= REVLOGNGINLINEDATA
438
438
439 i = ''
439 i = ''
440 try:
440 try:
441 f = self.opener(self.indexfile)
441 f = self.opener(self.indexfile)
442 i = f.read(_prereadsize)
442 i = f.read(_prereadsize)
443 if len(i) > 0:
443 if len(i) > 0:
444 v = struct.unpack(versionformat, i[:4])[0]
444 v = struct.unpack(versionformat, i[:4])[0]
445 except IOError, inst:
445 except IOError, inst:
446 if inst.errno != errno.ENOENT:
446 if inst.errno != errno.ENOENT:
447 raise
447 raise
448
448
449 self.version = v
449 self.version = v
450 self._inline = v & REVLOGNGINLINEDATA
450 self._inline = v & REVLOGNGINLINEDATA
451 flags = v & ~0xFFFF
451 flags = v & ~0xFFFF
452 fmt = v & 0xFFFF
452 fmt = v & 0xFFFF
453 if fmt == REVLOGV0 and flags:
453 if fmt == REVLOGV0 and flags:
454 raise RevlogError(_("index %s unknown flags %#04x for format v0")
454 raise RevlogError(_("index %s unknown flags %#04x for format v0")
455 % (self.indexfile, flags >> 16))
455 % (self.indexfile, flags >> 16))
456 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
456 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
457 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
457 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
458 % (self.indexfile, flags >> 16))
458 % (self.indexfile, flags >> 16))
459 elif fmt > REVLOGNG:
459 elif fmt > REVLOGNG:
460 raise RevlogError(_("index %s unknown format %d")
460 raise RevlogError(_("index %s unknown format %d")
461 % (self.indexfile, fmt))
461 % (self.indexfile, fmt))
462
462
463 self._io = revlogio()
463 self._io = revlogio()
464 if self.version == REVLOGV0:
464 if self.version == REVLOGV0:
465 self._io = revlogoldio()
465 self._io = revlogoldio()
466 if i:
466 if i:
467 try:
467 try:
468 d = self._io.parseindex(f, i, self._inline)
468 d = self._io.parseindex(f, i, self._inline)
469 except (ValueError, IndexError):
469 except (ValueError, IndexError):
470 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
470 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
471 self.index, self.nodemap, self._chunkcache = d
471 self.index, self.nodemap, self._chunkcache = d
472 if not self._chunkcache:
472 if not self._chunkcache:
473 self._chunkclear()
473 self._chunkclear()
474
474
475 # add the magic null revision at -1 (if it hasn't been done already)
475 # add the magic null revision at -1 (if it hasn't been done already)
476 if (self.index == [] or isinstance(self.index, lazyindex) or
476 if (self.index == [] or isinstance(self.index, lazyindex) or
477 self.index[-1][7] != nullid) :
477 self.index[-1][7] != nullid) :
478 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
478 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
479
479
480 def _loadindex(self, start, end):
480 def _loadindex(self, start, end):
481 """load a block of indexes all at once from the lazy parser"""
481 """load a block of indexes all at once from the lazy parser"""
482 if isinstance(self.index, lazyindex):
482 if isinstance(self.index, lazyindex):
483 self.index.p.loadindex(start, end)
483 self.index.p.loadindex(start, end)
484
484
485 def _loadindexmap(self):
485 def _loadindexmap(self):
486 """loads both the map and the index from the lazy parser"""
486 """loads both the map and the index from the lazy parser"""
487 if isinstance(self.index, lazyindex):
487 if isinstance(self.index, lazyindex):
488 p = self.index.p
488 p = self.index.p
489 p.loadindex()
489 p.loadindex()
490 self.nodemap = p.map
490 self.nodemap = p.map
491
491
492 def _loadmap(self):
492 def _loadmap(self):
493 """loads the map from the lazy parser"""
493 """loads the map from the lazy parser"""
494 if isinstance(self.nodemap, lazymap):
494 if isinstance(self.nodemap, lazymap):
495 self.nodemap.p.loadmap()
495 self.nodemap.p.loadmap()
496 self.nodemap = self.nodemap.p.map
496 self.nodemap = self.nodemap.p.map
497
497
498 def tip(self):
498 def tip(self):
499 return self.node(len(self.index) - 2)
499 return self.node(len(self.index) - 2)
500 def __len__(self):
500 def __len__(self):
501 return len(self.index) - 1
501 return len(self.index) - 1
502 def __iter__(self):
502 def __iter__(self):
503 for i in xrange(len(self)):
503 for i in xrange(len(self)):
504 yield i
504 yield i
505 def rev(self, node):
505 def rev(self, node):
506 try:
506 try:
507 return self.nodemap[node]
507 return self.nodemap[node]
508 except KeyError:
508 except KeyError:
509 raise LookupError(node, self.indexfile, _('no node'))
509 raise LookupError(node, self.indexfile, _('no node'))
510 def node(self, rev):
510 def node(self, rev):
511 return self.index[rev][7]
511 return self.index[rev][7]
512 def linkrev(self, rev):
512 def linkrev(self, rev):
513 return self.index[rev][4]
513 return self.index[rev][4]
514 def parents(self, node):
514 def parents(self, node):
515 i = self.index
515 i = self.index
516 d = i[self.rev(node)]
516 d = i[self.rev(node)]
517 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
517 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
518 def parentrevs(self, rev):
518 def parentrevs(self, rev):
519 return self.index[rev][5:7]
519 return self.index[rev][5:7]
520 def start(self, rev):
520 def start(self, rev):
521 return int(self.index[rev][0] >> 16)
521 return int(self.index[rev][0] >> 16)
522 def end(self, rev):
522 def end(self, rev):
523 return self.start(rev) + self.length(rev)
523 return self.start(rev) + self.length(rev)
524 def length(self, rev):
524 def length(self, rev):
525 return self.index[rev][1]
525 return self.index[rev][1]
526 def base(self, rev):
526 def base(self, rev):
527 return self.index[rev][3]
527 return self.index[rev][3]
528
528
529 def size(self, rev):
529 def size(self, rev):
530 """return the length of the uncompressed text for a given revision"""
530 """return the length of the uncompressed text for a given revision"""
531 l = self.index[rev][2]
531 l = self.index[rev][2]
532 if l >= 0:
532 if l >= 0:
533 return l
533 return l
534
534
535 t = self.revision(self.node(rev))
535 t = self.revision(self.node(rev))
536 return len(t)
536 return len(t)
537
537
538 # Alternate implementation. The advantage to this code is it
538 # Alternate implementation. The advantage to this code is it
539 # will be faster for a single revision. However, the results
539 # will be faster for a single revision. However, the results
540 # are not cached, so finding the size of every revision will
540 # are not cached, so finding the size of every revision will
541 # be slower.
541 # be slower.
542 #
542 #
543 # if self.cache and self.cache[1] == rev:
543 # if self.cache and self.cache[1] == rev:
544 # return len(self.cache[2])
544 # return len(self.cache[2])
545 #
545 #
546 # base = self.base(rev)
546 # base = self.base(rev)
547 # if self.cache and self.cache[1] >= base and self.cache[1] < rev:
547 # if self.cache and self.cache[1] >= base and self.cache[1] < rev:
548 # base = self.cache[1]
548 # base = self.cache[1]
549 # text = self.cache[2]
549 # text = self.cache[2]
550 # else:
550 # else:
551 # text = self.revision(self.node(base))
551 # text = self.revision(self.node(base))
552 #
552 #
553 # l = len(text)
553 # l = len(text)
554 # for x in xrange(base + 1, rev + 1):
554 # for x in xrange(base + 1, rev + 1):
555 # l = mdiff.patchedsize(l, self._chunk(x))
555 # l = mdiff.patchedsize(l, self._chunk(x))
556 # return l
556 # return l
557
557
558 def reachable(self, node, stop=None):
558 def reachable(self, node, stop=None):
559 """return the set of all nodes ancestral to a given node, including
559 """return the set of all nodes ancestral to a given node, including
560 the node itself, stopping when stop is matched"""
560 the node itself, stopping when stop is matched"""
561 reachable = set((node,))
561 reachable = set((node,))
562 visit = [node]
562 visit = [node]
563 if stop:
563 if stop:
564 stopn = self.rev(stop)
564 stopn = self.rev(stop)
565 else:
565 else:
566 stopn = 0
566 stopn = 0
567 while visit:
567 while visit:
568 n = visit.pop(0)
568 n = visit.pop(0)
569 if n == stop:
569 if n == stop:
570 continue
570 continue
571 if n == nullid:
571 if n == nullid:
572 continue
572 continue
573 for p in self.parents(n):
573 for p in self.parents(n):
574 if self.rev(p) < stopn:
574 if self.rev(p) < stopn:
575 continue
575 continue
576 if p not in reachable:
576 if p not in reachable:
577 reachable.add(p)
577 reachable.add(p)
578 visit.append(p)
578 visit.append(p)
579 return reachable
579 return reachable
580
580
581 def ancestors(self, *revs):
581 def ancestors(self, *revs):
582 'Generate the ancestors of revs using a breadth-first visit'
582 """Generate the ancestors of 'revs' in reverse topological order.
583
584 Yield a sequence of revision numbers starting with the parents
585 of each revision in revs, i.e., each revision is *not* considered
586 an ancestor of itself. Results are in breadth-first order:
587 parents of each rev in revs, then parents of those, etc. Result
588 does not include the null revision."""
583 visit = list(revs)
589 visit = list(revs)
584 seen = set([nullrev])
590 seen = set([nullrev])
585 while visit:
591 while visit:
586 for parent in self.parentrevs(visit.pop(0)):
592 for parent in self.parentrevs(visit.pop(0)):
587 if parent not in seen:
593 if parent not in seen:
588 visit.append(parent)
594 visit.append(parent)
589 seen.add(parent)
595 seen.add(parent)
590 yield parent
596 yield parent
591
597
592 def descendants(self, *revs):
598 def descendants(self, *revs):
593 'Generate the descendants of revs in topological order'
599 """Generate the descendants of 'revs' in revision order.
600
601 Yield a sequence of revision numbers starting with a child of
602 some rev in revs, i.e., each revision is *not* considered a
603 descendant of itself. Results are ordered by revision number (a
604 topological sort)."""
594 seen = set(revs)
605 seen = set(revs)
595 for i in xrange(min(revs) + 1, len(self)):
606 for i in xrange(min(revs) + 1, len(self)):
596 for x in self.parentrevs(i):
607 for x in self.parentrevs(i):
597 if x != nullrev and x in seen:
608 if x != nullrev and x in seen:
598 seen.add(i)
609 seen.add(i)
599 yield i
610 yield i
600 break
611 break
601
612
602 def findmissing(self, common=None, heads=None):
613 def findmissing(self, common=None, heads=None):
603 '''
614 """Return the ancestors of heads that are not ancestors of common.
604 returns the topologically sorted list of nodes from the set:
615
605 missing = (ancestors(heads) \ ancestors(common))
616 More specifically, return a list of nodes N such that every N
617 satisfies the following constraints:
606
618
607 where ancestors() is the set of ancestors from heads, heads included
619 1. N is an ancestor of some node in 'heads'
620 2. N is not an ancestor of any node in 'common'
608
621
609 if heads is None, the heads of the revlog are used
622 The list is sorted by revision number, meaning it is
610 if common is None, nullid is assumed to be a common node
623 topologically sorted.
611 '''
624
625 'heads' and 'common' are both lists of node IDs. If heads is
626 not supplied, uses all of the revlog's heads. If common is not
627 supplied, uses nullid."""
612 if common is None:
628 if common is None:
613 common = [nullid]
629 common = [nullid]
614 if heads is None:
630 if heads is None:
615 heads = self.heads()
631 heads = self.heads()
616
632
617 common = [self.rev(n) for n in common]
633 common = [self.rev(n) for n in common]
618 heads = [self.rev(n) for n in heads]
634 heads = [self.rev(n) for n in heads]
619
635
620 # we want the ancestors, but inclusive
636 # we want the ancestors, but inclusive
621 has = set(self.ancestors(*common))
637 has = set(self.ancestors(*common))
622 has.add(nullrev)
638 has.add(nullrev)
623 has.update(common)
639 has.update(common)
624
640
625 # take all ancestors from heads that aren't in has
641 # take all ancestors from heads that aren't in has
626 missing = set()
642 missing = set()
627 visit = [r for r in heads if r not in has]
643 visit = [r for r in heads if r not in has]
628 while visit:
644 while visit:
629 r = visit.pop(0)
645 r = visit.pop(0)
630 if r in missing:
646 if r in missing:
631 continue
647 continue
632 else:
648 else:
633 missing.add(r)
649 missing.add(r)
634 for p in self.parentrevs(r):
650 for p in self.parentrevs(r):
635 if p not in has:
651 if p not in has:
636 visit.append(p)
652 visit.append(p)
637 missing = list(missing)
653 missing = list(missing)
638 missing.sort()
654 missing.sort()
639 return [self.node(r) for r in missing]
655 return [self.node(r) for r in missing]
640
656
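The set algebra behind findmissing() is simply ancestors(heads) minus ancestors(common), with both ancestor sets taken inclusively. The same computation on a toy DAG kept as a plain {rev: (p1, p2)} parent map instead of a revlog; toy_missing and the sample graph are hypothetical, not part of this changeset:

# illustrative sketch only, not part of the changeset
def toy_missing(parents, heads, common):
    def ancestors(revs):                  # inclusive ancestor set
        seen, stack = set(), list(revs)
        while stack:
            r = stack.pop()
            if r is None or r in seen:
                continue
            seen.add(r)
            stack.extend(parents[r])
        return seen
    return sorted(ancestors(heads) - ancestors(common))

# 0 - 1 - 2 - 3
#          \
#           4
parents = {0: (None, None), 1: (0, None), 2: (1, None),
           3: (2, None), 4: (2, None)}
assert toy_missing(parents, heads=[3, 4], common=[1]) == [2, 3, 4]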
641 def nodesbetween(self, roots=None, heads=None):
657 def nodesbetween(self, roots=None, heads=None):
642 """Return a tuple containing three elements. Elements 1 and 2 contain
658 """Return a topological path from 'roots' to 'heads'.
643 a final list bases and heads after all the unreachable ones have been
659
644 pruned. Element 0 contains a topologically sorted list of all
660 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
661 topologically sorted list of all nodes N that satisfy both of
662 these constraints:
663
664 1. N is a descendant of some node in 'roots'
665 2. N is an ancestor of some node in 'heads'
645
666
646 nodes that satisfy these constraints:
667 Every node is considered to be both a descendant and an ancestor
647 1. All nodes must be descended from a node in roots (the nodes on
668 of itself, so every reachable node in 'roots' and 'heads' will be
648 roots are considered descended from themselves).
669 included in 'nodes'.
649 2. All nodes must also be ancestors of a node in heads (the nodes in
650 heads are considered to be their own ancestors).
651
670
652 If roots is unspecified, nullid is assumed as the only root.
671 'outroots' is the list of reachable nodes in 'roots', i.e., the
653 If heads is unspecified, it is taken to be the output of the
672 subset of 'roots' that is returned in 'nodes'. Likewise,
654 heads method (i.e. a list of all nodes in the repository that
673 'outheads' is the subset of 'heads' that is also in 'nodes'.
655 have no children)."""
674
675 'roots' and 'heads' are both lists of node IDs. If 'roots' is
676 unspecified, uses nullid as the only root. If 'heads' is
677 unspecified, uses list of all of the revlog's heads."""
656 nonodes = ([], [], [])
678 nonodes = ([], [], [])
657 if roots is not None:
679 if roots is not None:
658 roots = list(roots)
680 roots = list(roots)
659 if not roots:
681 if not roots:
660 return nonodes
682 return nonodes
661 lowestrev = min([self.rev(n) for n in roots])
683 lowestrev = min([self.rev(n) for n in roots])
662 else:
684 else:
663 roots = [nullid] # Everybody's a descendent of nullid
685 roots = [nullid] # Everybody's a descendent of nullid
664 lowestrev = nullrev
686 lowestrev = nullrev
665 if (lowestrev == nullrev) and (heads is None):
687 if (lowestrev == nullrev) and (heads is None):
666 # We want _all_ the nodes!
688 # We want _all_ the nodes!
667 return ([self.node(r) for r in self], [nullid], list(self.heads()))
689 return ([self.node(r) for r in self], [nullid], list(self.heads()))
668 if heads is None:
690 if heads is None:
669 # All nodes are ancestors, so the latest ancestor is the last
691 # All nodes are ancestors, so the latest ancestor is the last
670 # node.
692 # node.
671 highestrev = len(self) - 1
693 highestrev = len(self) - 1
672 # Set ancestors to None to signal that every node is an ancestor.
694 # Set ancestors to None to signal that every node is an ancestor.
673 ancestors = None
695 ancestors = None
674 # Set heads to an empty dictionary for later discovery of heads
696 # Set heads to an empty dictionary for later discovery of heads
675 heads = {}
697 heads = {}
676 else:
698 else:
677 heads = list(heads)
699 heads = list(heads)
678 if not heads:
700 if not heads:
679 return nonodes
701 return nonodes
680 ancestors = set()
702 ancestors = set()
681 # Turn heads into a dictionary so we can remove 'fake' heads.
703 # Turn heads into a dictionary so we can remove 'fake' heads.
682 # Also, later we will be using it to filter out the heads we can't
704 # Also, later we will be using it to filter out the heads we can't
683 # find from roots.
705 # find from roots.
684 heads = dict.fromkeys(heads, 0)
706 heads = dict.fromkeys(heads, 0)
685 # Start at the top and keep marking parents until we're done.
707 # Start at the top and keep marking parents until we're done.
686 nodestotag = set(heads)
708 nodestotag = set(heads)
687 # Remember where the top was so we can use it as a limit later.
709 # Remember where the top was so we can use it as a limit later.
688 highestrev = max([self.rev(n) for n in nodestotag])
710 highestrev = max([self.rev(n) for n in nodestotag])
689 while nodestotag:
711 while nodestotag:
690 # grab a node to tag
712 # grab a node to tag
691 n = nodestotag.pop()
713 n = nodestotag.pop()
692 # Never tag nullid
714 # Never tag nullid
693 if n == nullid:
715 if n == nullid:
694 continue
716 continue
695 # A node's revision number represents its place in a
717 # A node's revision number represents its place in a
696 # topologically sorted list of nodes.
718 # topologically sorted list of nodes.
697 r = self.rev(n)
719 r = self.rev(n)
698 if r >= lowestrev:
720 if r >= lowestrev:
699 if n not in ancestors:
721 if n not in ancestors:
700 # If we are possibly a descendent of one of the roots
722 # If we are possibly a descendent of one of the roots
701 # and we haven't already been marked as an ancestor
723 # and we haven't already been marked as an ancestor
702 ancestors.add(n) # Mark as ancestor
724 ancestors.add(n) # Mark as ancestor
703 # Add non-nullid parents to list of nodes to tag.
725 # Add non-nullid parents to list of nodes to tag.
704 nodestotag.update([p for p in self.parents(n) if
726 nodestotag.update([p for p in self.parents(n) if
705 p != nullid])
727 p != nullid])
706 elif n in heads: # We've seen it before, is it a fake head?
728 elif n in heads: # We've seen it before, is it a fake head?
707 # So it is, real heads should not be the ancestors of
729 # So it is, real heads should not be the ancestors of
708 # any other heads.
730 # any other heads.
709 heads.pop(n)
731 heads.pop(n)
710 if not ancestors:
732 if not ancestors:
711 return nonodes
733 return nonodes
712 # Now that we have our set of ancestors, we want to remove any
734 # Now that we have our set of ancestors, we want to remove any
713 # roots that are not ancestors.
735 # roots that are not ancestors.
714
736
715 # If one of the roots was nullid, everything is included anyway.
737 # If one of the roots was nullid, everything is included anyway.
716 if lowestrev > nullrev:
738 if lowestrev > nullrev:
717 # But, since we weren't, let's recompute the lowest rev to not
739 # But, since we weren't, let's recompute the lowest rev to not
718 # include roots that aren't ancestors.
740 # include roots that aren't ancestors.
719
741
720 # Filter out roots that aren't ancestors of heads
742 # Filter out roots that aren't ancestors of heads
721 roots = [n for n in roots if n in ancestors]
743 roots = [n for n in roots if n in ancestors]
722 # Recompute the lowest revision
744 # Recompute the lowest revision
723 if roots:
745 if roots:
724 lowestrev = min([self.rev(n) for n in roots])
746 lowestrev = min([self.rev(n) for n in roots])
725 else:
747 else:
726 # No more roots? Return empty list
748 # No more roots? Return empty list
727 return nonodes
749 return nonodes
728 else:
750 else:
729 # We are descending from nullid, and don't need to care about
751 # We are descending from nullid, and don't need to care about
730 # any other roots.
752 # any other roots.
731 lowestrev = nullrev
753 lowestrev = nullrev
732 roots = [nullid]
754 roots = [nullid]
733 # Transform our roots list into a set.
755 # Transform our roots list into a set.
734 descendents = set(roots)
756 descendents = set(roots)
735 # Also, keep the original roots so we can filter out roots that aren't
757 # Also, keep the original roots so we can filter out roots that aren't
736 # 'real' roots (i.e. are descended from other roots).
758 # 'real' roots (i.e. are descended from other roots).
737 roots = descendents.copy()
759 roots = descendents.copy()
738 # Our topologically sorted list of output nodes.
760 # Our topologically sorted list of output nodes.
739 orderedout = []
761 orderedout = []
740 # Don't start at nullid since we don't want nullid in our output list,
762 # Don't start at nullid since we don't want nullid in our output list,
741 # and if nullid shows up in descendents, empty parents will look like
763 # and if nullid shows up in descendents, empty parents will look like
742 # they're descendents.
764 # they're descendents.
743 for r in xrange(max(lowestrev, 0), highestrev + 1):
765 for r in xrange(max(lowestrev, 0), highestrev + 1):
744 n = self.node(r)
766 n = self.node(r)
745 isdescendent = False
767 isdescendent = False
746 if lowestrev == nullrev: # Everybody is a descendent of nullid
768 if lowestrev == nullrev: # Everybody is a descendent of nullid
747 isdescendent = True
769 isdescendent = True
748 elif n in descendents:
770 elif n in descendents:
749 # n is already a descendent
771 # n is already a descendent
750 isdescendent = True
772 isdescendent = True
751 # This check only needs to be done here because all the roots
773 # This check only needs to be done here because all the roots
752 # will start being marked as descendents before the loop.
774 # will start being marked as descendents before the loop.
753 if n in roots:
775 if n in roots:
754 # If n was a root, check if it's a 'real' root.
776 # If n was a root, check if it's a 'real' root.
755 p = tuple(self.parents(n))
777 p = tuple(self.parents(n))
756 # If any of its parents are descendents, it's not a root.
778 # If any of its parents are descendents, it's not a root.
757 if (p[0] in descendents) or (p[1] in descendents):
779 if (p[0] in descendents) or (p[1] in descendents):
758 roots.remove(n)
780 roots.remove(n)
759 else:
781 else:
760 p = tuple(self.parents(n))
782 p = tuple(self.parents(n))
761 # A node is a descendent if either of its parents is a
783 # A node is a descendent if either of its parents is a
762 # descendent. (We seeded the descendents set with the roots
784 # descendent. (We seeded the descendents set with the roots
763 # up there, remember?)
785 # up there, remember?)
764 if (p[0] in descendents) or (p[1] in descendents):
786 if (p[0] in descendents) or (p[1] in descendents):
765 descendents.add(n)
787 descendents.add(n)
766 isdescendent = True
788 isdescendent = True
767 if isdescendent and ((ancestors is None) or (n in ancestors)):
789 if isdescendent and ((ancestors is None) or (n in ancestors)):
768 # Only include nodes that are both descendents and ancestors.
790 # Only include nodes that are both descendents and ancestors.
769 orderedout.append(n)
791 orderedout.append(n)
770 if (ancestors is not None) and (n in heads):
792 if (ancestors is not None) and (n in heads):
771 # We're trying to figure out which heads are reachable
793 # We're trying to figure out which heads are reachable
772 # from roots.
794 # from roots.
773 # Mark this head as having been reached
795 # Mark this head as having been reached
774 heads[n] = 1
796 heads[n] = 1
775 elif ancestors is None:
797 elif ancestors is None:
776 # Otherwise, we're trying to discover the heads.
798 # Otherwise, we're trying to discover the heads.
777 # Assume this is a head because if it isn't, the next step
799 # Assume this is a head because if it isn't, the next step
778 # will eventually remove it.
800 # will eventually remove it.
779 heads[n] = 1
801 heads[n] = 1
780 # But, obviously its parents aren't.
802 # But, obviously its parents aren't.
781 for p in self.parents(n):
803 for p in self.parents(n):
782 heads.pop(p, None)
804 heads.pop(p, None)
783 heads = [n for n in heads.iterkeys() if heads[n] != 0]
805 heads = [n for n in heads.iterkeys() if heads[n] != 0]
784 roots = list(roots)
806 roots = list(roots)
785 assert orderedout
807 assert orderedout
786 assert roots
808 assert roots
787 assert heads
809 assert heads
788 return (orderedout, roots, heads)
810 return (orderedout, roots, heads)
789
811
790 def heads(self, start=None, stop=None):
812 def heads(self, start=None, stop=None):
791 """return the list of all nodes that have no children
813 """return the list of all nodes that have no children
792
814
793 if start is specified, only heads that are descendants of
815 if start is specified, only heads that are descendants of
794 start will be returned
816 start will be returned
795 if stop is specified, it will consider all the revs from stop
817 if stop is specified, it will consider all the revs from stop
796 as if they had no children
818 as if they had no children
797 """
819 """
798 if start is None and stop is None:
820 if start is None and stop is None:
799 count = len(self)
821 count = len(self)
800 if not count:
822 if not count:
801 return [nullid]
823 return [nullid]
802 ishead = [1] * (count + 1)
824 ishead = [1] * (count + 1)
803 index = self.index
825 index = self.index
804 for r in xrange(count):
826 for r in xrange(count):
805 e = index[r]
827 e = index[r]
806 ishead[e[5]] = ishead[e[6]] = 0
828 ishead[e[5]] = ishead[e[6]] = 0
807 return [self.node(r) for r in xrange(count) if ishead[r]]
829 return [self.node(r) for r in xrange(count) if ishead[r]]
808
830
809 if start is None:
831 if start is None:
810 start = nullid
832 start = nullid
811 if stop is None:
833 if stop is None:
812 stop = []
834 stop = []
813 stoprevs = set([self.rev(n) for n in stop])
835 stoprevs = set([self.rev(n) for n in stop])
814 startrev = self.rev(start)
836 startrev = self.rev(start)
815 reachable = set((startrev,))
837 reachable = set((startrev,))
816 heads = set((startrev,))
838 heads = set((startrev,))
817
839
818 parentrevs = self.parentrevs
840 parentrevs = self.parentrevs
819 for r in xrange(startrev + 1, len(self)):
841 for r in xrange(startrev + 1, len(self)):
820 for p in parentrevs(r):
842 for p in parentrevs(r):
821 if p in reachable:
843 if p in reachable:
822 if r not in stoprevs:
844 if r not in stoprevs:
823 reachable.add(r)
845 reachable.add(r)
824 heads.add(r)
846 heads.add(r)
825 if p in heads and p not in stoprevs:
847 if p in heads and p not in stoprevs:
826 heads.remove(p)
848 heads.remove(p)
827
849
828 return [self.node(r) for r in heads]
850 return [self.node(r) for r in heads]
829
851
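The fast path of heads() above marks every revision that appears as somebody's parent and keeps the rest. The same idea on a toy parent map, where -1 plays the role of nullrev; toy_heads and the sample data are hypothetical, not part of this changeset:

# illustrative sketch only, not part of the changeset
def toy_heads(parents):
    ishead = dict.fromkeys(parents, True)
    for p1, p2 in parents.values():
        ishead.pop(p1, None)      # a revision that is a parent is not a head
        ishead.pop(p2, None)
    return sorted(ishead)

parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
assert toy_heads(parents) == [2, 3]     # two branches off revision 1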
830 def children(self, node):
852 def children(self, node):
831 """find the children of a given node"""
853 """find the children of a given node"""
832 c = []
854 c = []
833 p = self.rev(node)
855 p = self.rev(node)
834 for r in range(p + 1, len(self)):
856 for r in range(p + 1, len(self)):
835 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
857 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
836 if prevs:
858 if prevs:
837 for pr in prevs:
859 for pr in prevs:
838 if pr == p:
860 if pr == p:
839 c.append(self.node(r))
861 c.append(self.node(r))
840 elif p == nullrev:
862 elif p == nullrev:
841 c.append(self.node(r))
863 c.append(self.node(r))
842 return c
864 return c
843
865
844 def _match(self, id):
866 def _match(self, id):
845 if isinstance(id, (long, int)):
867 if isinstance(id, (long, int)):
846 # rev
868 # rev
847 return self.node(id)
869 return self.node(id)
848 if len(id) == 20:
870 if len(id) == 20:
849 # possibly a binary node
871 # possibly a binary node
850 # odds of a binary node being all hex in ASCII are 1 in 10**25
872 # odds of a binary node being all hex in ASCII are 1 in 10**25
851 try:
873 try:
852 node = id
874 node = id
853 self.rev(node) # quick search the index
875 self.rev(node) # quick search the index
854 return node
876 return node
855 except LookupError:
877 except LookupError:
856 pass # may be partial hex id
878 pass # may be partial hex id
857 try:
879 try:
858 # str(rev)
880 # str(rev)
859 rev = int(id)
881 rev = int(id)
860 if str(rev) != id:
882 if str(rev) != id:
861 raise ValueError
883 raise ValueError
862 if rev < 0:
884 if rev < 0:
863 rev = len(self) + rev
885 rev = len(self) + rev
864 if rev < 0 or rev >= len(self):
886 if rev < 0 or rev >= len(self):
865 raise ValueError
887 raise ValueError
866 return self.node(rev)
888 return self.node(rev)
867 except (ValueError, OverflowError):
889 except (ValueError, OverflowError):
868 pass
890 pass
869 if len(id) == 40:
891 if len(id) == 40:
870 try:
892 try:
871 # a full hex nodeid?
893 # a full hex nodeid?
872 node = bin(id)
894 node = bin(id)
873 self.rev(node)
895 self.rev(node)
874 return node
896 return node
875 except (TypeError, LookupError):
897 except (TypeError, LookupError):
876 pass
898 pass
877
899
878 def _partialmatch(self, id):
900 def _partialmatch(self, id):
879 if len(id) < 40:
901 if len(id) < 40:
880 try:
902 try:
881 # hex(node)[:...]
903 # hex(node)[:...]
882 l = len(id) // 2 # grab an even number of digits
904 l = len(id) // 2 # grab an even number of digits
883 bin_id = bin(id[:l*2])
905 bin_id = bin(id[:l*2])
884 nl = [n for n in self.nodemap if n[:l] == bin_id]
906 nl = [n for n in self.nodemap if n[:l] == bin_id]
885 nl = [n for n in nl if hex(n).startswith(id)]
907 nl = [n for n in nl if hex(n).startswith(id)]
886 if len(nl) > 0:
908 if len(nl) > 0:
887 if len(nl) == 1:
909 if len(nl) == 1:
888 return nl[0]
910 return nl[0]
889 raise LookupError(id, self.indexfile,
911 raise LookupError(id, self.indexfile,
890 _('ambiguous identifier'))
912 _('ambiguous identifier'))
891 return None
913 return None
892 except TypeError:
914 except TypeError:
893 pass
915 pass
894
916
895 def lookup(self, id):
917 def lookup(self, id):
896 """locate a node based on:
918 """locate a node based on:
897 - revision number or str(revision number)
919 - revision number or str(revision number)
898 - nodeid or subset of hex nodeid
920 - nodeid or subset of hex nodeid
899 """
921 """
900 n = self._match(id)
922 n = self._match(id)
901 if n is not None:
923 if n is not None:
902 return n
924 return n
903 n = self._partialmatch(id)
925 n = self._partialmatch(id)
904 if n:
926 if n:
905 return n
927 return n
906
928
907 raise LookupError(id, self.indexfile, _('no match found'))
929 raise LookupError(id, self.indexfile, _('no match found'))
908
930
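lookup() is the user-facing resolver: it accepts a revision number (or its string form), a full binary or hex nodeid, or an unambiguous hex prefix, and raises LookupError otherwise. A hedged usage sketch, not part of this changeset, assuming an already-open localrepository object named repo with at least one changeset:

from mercurial.node import hex

# illustrative usage sketch only; 'repo' is assumed, not defined here
cl = repo.changelog                   # the changelog is a revlog subclass
n1 = cl.lookup("0")                   # by revision number (as a string)
n2 = cl.lookup(cl.node(0))            # by binary nodeid
n3 = cl.lookup(hex(cl.node(0))[:8])   # by an unambiguous hex prefix
assert n1 == n2 == n3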
909 def cmp(self, node, text):
931 def cmp(self, node, text):
910 """compare text with a given file revision"""
932 """compare text with a given file revision"""
911 p1, p2 = self.parents(node)
933 p1, p2 = self.parents(node)
912 return hash(text, p1, p2) != node
934 return hash(text, p1, p2) != node
913
935
914 def _addchunk(self, offset, data):
936 def _addchunk(self, offset, data):
915 o, d = self._chunkcache
937 o, d = self._chunkcache
916 # try to add to existing cache
938 # try to add to existing cache
917 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
939 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
918 self._chunkcache = o, d + data
940 self._chunkcache = o, d + data
919 else:
941 else:
920 self._chunkcache = offset, data
942 self._chunkcache = offset, data
921
943
922 def _loadchunk(self, offset, length):
944 def _loadchunk(self, offset, length):
923 if self._inline:
945 if self._inline:
924 df = self.opener(self.indexfile)
946 df = self.opener(self.indexfile)
925 else:
947 else:
926 df = self.opener(self.datafile)
948 df = self.opener(self.datafile)
927
949
928 readahead = max(65536, length)
950 readahead = max(65536, length)
929 df.seek(offset)
951 df.seek(offset)
930 d = df.read(readahead)
952 d = df.read(readahead)
931 self._addchunk(offset, d)
953 self._addchunk(offset, d)
932 if readahead > length:
954 if readahead > length:
933 return d[:length]
955 return d[:length]
934 return d
956 return d
935
957
936 def _getchunk(self, offset, length):
958 def _getchunk(self, offset, length):
937 o, d = self._chunkcache
959 o, d = self._chunkcache
938 l = len(d)
960 l = len(d)
939
961
940 # is it in the cache?
962 # is it in the cache?
941 cachestart = offset - o
963 cachestart = offset - o
942 cacheend = cachestart + length
964 cacheend = cachestart + length
943 if cachestart >= 0 and cacheend <= l:
965 if cachestart >= 0 and cacheend <= l:
944 if cachestart == 0 and cacheend == l:
966 if cachestart == 0 and cacheend == l:
945 return d # avoid a copy
967 return d # avoid a copy
946 return d[cachestart:cacheend]
968 return d[cachestart:cacheend]
947
969
948 return self._loadchunk(offset, length)
970 return self._loadchunk(offset, length)
949
971
950 def _chunkraw(self, startrev, endrev):
972 def _chunkraw(self, startrev, endrev):
951 start = self.start(startrev)
973 start = self.start(startrev)
952 length = self.end(endrev) - start
974 length = self.end(endrev) - start
953 if self._inline:
975 if self._inline:
954 start += (startrev + 1) * self._io.size
976 start += (startrev + 1) * self._io.size
955 return self._getchunk(start, length)
977 return self._getchunk(start, length)
956
978
957 def _chunk(self, rev):
979 def _chunk(self, rev):
958 return decompress(self._chunkraw(rev, rev))
980 return decompress(self._chunkraw(rev, rev))
959
981
960 def _chunkclear(self):
982 def _chunkclear(self):
961 self._chunkcache = (0, '')
983 self._chunkcache = (0, '')
962
984
963 def revdiff(self, rev1, rev2):
985 def revdiff(self, rev1, rev2):
964 """return or calculate a delta between two revisions"""
986 """return or calculate a delta between two revisions"""
965 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
987 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
966 return self._chunk(rev2)
988 return self._chunk(rev2)
967
989
968 return mdiff.textdiff(self.revision(self.node(rev1)),
990 return mdiff.textdiff(self.revision(self.node(rev1)),
969 self.revision(self.node(rev2)))
991 self.revision(self.node(rev2)))
970
992
971 def revision(self, node):
993 def revision(self, node):
972 """return an uncompressed revision of a given node"""
994 """return an uncompressed revision of a given node"""
973 if node == nullid:
995 if node == nullid:
974 return ""
996 return ""
975 if self._cache and self._cache[0] == node:
997 if self._cache and self._cache[0] == node:
976 return self._cache[2]
998 return self._cache[2]
977
999
978 # look up what we need to read
1000 # look up what we need to read
979 text = None
1001 text = None
980 rev = self.rev(node)
1002 rev = self.rev(node)
981 base = self.base(rev)
1003 base = self.base(rev)
982
1004
983 # check rev flags
1005 # check rev flags
984 if self.index[rev][0] & 0xFFFF:
1006 if self.index[rev][0] & 0xFFFF:
985 raise RevlogError(_('incompatible revision flag %x') %
1007 raise RevlogError(_('incompatible revision flag %x') %
986 (self.index[rev][0] & 0xFFFF))
1008 (self.index[rev][0] & 0xFFFF))
987
1009
988 # do we have useful data cached?
1010 # do we have useful data cached?
989 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
1011 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
990 base = self._cache[1]
1012 base = self._cache[1]
991 text = self._cache[2]
1013 text = self._cache[2]
992
1014
993 self._loadindex(base, rev + 1)
1015 self._loadindex(base, rev + 1)
994 self._chunkraw(base, rev)
1016 self._chunkraw(base, rev)
995 if text is None:
1017 if text is None:
996 text = self._chunk(base)
1018 text = self._chunk(base)
997
1019
998 bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
1020 bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
999 text = mdiff.patches(text, bins)
1021 text = mdiff.patches(text, bins)
1000 p1, p2 = self.parents(node)
1022 p1, p2 = self.parents(node)
1001 if node != hash(text, p1, p2):
1023 if node != hash(text, p1, p2):
1002 raise RevlogError(_("integrity check failed on %s:%d")
1024 raise RevlogError(_("integrity check failed on %s:%d")
1003 % (self.indexfile, rev))
1025 % (self.indexfile, rev))
1004
1026
1005 self._cache = (node, rev, text)
1027 self._cache = (node, rev, text)
1006 return text
1028 return text
1007
1029
1008 def checkinlinesize(self, tr, fp=None):
1030 def checkinlinesize(self, tr, fp=None):
1009 if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
1031 if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
1010 return
1032 return
1011
1033
1012 trinfo = tr.find(self.indexfile)
1034 trinfo = tr.find(self.indexfile)
1013 if trinfo is None:
1035 if trinfo is None:
1014 raise RevlogError(_("%s not found in the transaction")
1036 raise RevlogError(_("%s not found in the transaction")
1015 % self.indexfile)
1037 % self.indexfile)
1016
1038
1017 trindex = trinfo[2]
1039 trindex = trinfo[2]
1018 dataoff = self.start(trindex)
1040 dataoff = self.start(trindex)
1019
1041
1020 tr.add(self.datafile, dataoff)
1042 tr.add(self.datafile, dataoff)
1021
1043
1022 if fp:
1044 if fp:
1023 fp.flush()
1045 fp.flush()
1024 fp.close()
1046 fp.close()
1025
1047
1026 df = self.opener(self.datafile, 'w')
1048 df = self.opener(self.datafile, 'w')
1027 try:
1049 try:
1028 for r in self:
1050 for r in self:
1029 df.write(self._chunkraw(r, r))
1051 df.write(self._chunkraw(r, r))
1030 finally:
1052 finally:
1031 df.close()
1053 df.close()
1032
1054
1033 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1055 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1034 self.version &= ~(REVLOGNGINLINEDATA)
1056 self.version &= ~(REVLOGNGINLINEDATA)
1035 self._inline = False
1057 self._inline = False
1036 for i in self:
1058 for i in self:
1037 e = self._io.packentry(self.index[i], self.node, self.version, i)
1059 e = self._io.packentry(self.index[i], self.node, self.version, i)
1038 fp.write(e)
1060 fp.write(e)
1039
1061
1040 # if we don't call rename, the temp file will never replace the
1062 # if we don't call rename, the temp file will never replace the
1041 # real index
1063 # real index
1042 fp.rename()
1064 fp.rename()
1043
1065
1044 tr.replace(self.indexfile, trindex * self._io.size)
1066 tr.replace(self.indexfile, trindex * self._io.size)
1045 self._chunkclear()
1067 self._chunkclear()
1046
1068
1047 def addrevision(self, text, transaction, link, p1, p2, d=None):
1069 def addrevision(self, text, transaction, link, p1, p2, d=None):
1048 """add a revision to the log
1070 """add a revision to the log
1049
1071
1050 text - the revision data to add
1072 text - the revision data to add
1051 transaction - the transaction object used for rollback
1073 transaction - the transaction object used for rollback
1052 link - the linkrev data to add
1074 link - the linkrev data to add
1053 p1, p2 - the parent nodeids of the revision
1075 p1, p2 - the parent nodeids of the revision
1054 d - an optional precomputed delta
1076 d - an optional precomputed delta
1055 """
1077 """
1056 dfh = None
1078 dfh = None
1057 if not self._inline:
1079 if not self._inline:
1058 dfh = self.opener(self.datafile, "a")
1080 dfh = self.opener(self.datafile, "a")
1059 ifh = self.opener(self.indexfile, "a+")
1081 ifh = self.opener(self.indexfile, "a+")
1060 try:
1082 try:
1061 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1083 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1062 finally:
1084 finally:
1063 if dfh:
1085 if dfh:
1064 dfh.close()
1086 dfh.close()
1065 ifh.close()
1087 ifh.close()
1066
1088
1067 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1089 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1068 node = hash(text, p1, p2)
1090 node = hash(text, p1, p2)
1069 if node in self.nodemap:
1091 if node in self.nodemap:
1070 return node
1092 return node
1071
1093
1072 curr = len(self)
1094 curr = len(self)
1073 prev = curr - 1
1095 prev = curr - 1
1074 base = self.base(prev)
1096 base = self.base(prev)
1075 offset = self.end(prev)
1097 offset = self.end(prev)
1076
1098
1077 if curr:
1099 if curr:
1078 if not d:
1100 if not d:
1079 ptext = self.revision(self.node(prev))
1101 ptext = self.revision(self.node(prev))
1080 d = mdiff.textdiff(ptext, text)
1102 d = mdiff.textdiff(ptext, text)
1081 data = compress(d)
1103 data = compress(d)
1082 l = len(data[1]) + len(data[0])
1104 l = len(data[1]) + len(data[0])
1083 dist = l + offset - self.start(base)
1105 dist = l + offset - self.start(base)
1084
1106
1085 # full versions are inserted when the needed deltas
1107 # full versions are inserted when the needed deltas
1086 # become comparable to the uncompressed text
1108 # become comparable to the uncompressed text
1087 if not curr or dist > len(text) * 2:
1109 if not curr or dist > len(text) * 2:
1088 data = compress(text)
1110 data = compress(text)
1089 l = len(data[1]) + len(data[0])
1111 l = len(data[1]) + len(data[0])
1090 base = curr
1112 base = curr
1091
1113
1092 e = (offset_type(offset, 0), l, len(text),
1114 e = (offset_type(offset, 0), l, len(text),
1093 base, link, self.rev(p1), self.rev(p2), node)
1115 base, link, self.rev(p1), self.rev(p2), node)
1094 self.index.insert(-1, e)
1116 self.index.insert(-1, e)
1095 self.nodemap[node] = curr
1117 self.nodemap[node] = curr
1096
1118
1097 entry = self._io.packentry(e, self.node, self.version, curr)
1119 entry = self._io.packentry(e, self.node, self.version, curr)
1098 if not self._inline:
1120 if not self._inline:
1099 transaction.add(self.datafile, offset)
1121 transaction.add(self.datafile, offset)
1100 transaction.add(self.indexfile, curr * len(entry))
1122 transaction.add(self.indexfile, curr * len(entry))
1101 if data[0]:
1123 if data[0]:
1102 dfh.write(data[0])
1124 dfh.write(data[0])
1103 dfh.write(data[1])
1125 dfh.write(data[1])
1104 dfh.flush()
1126 dfh.flush()
1105 ifh.write(entry)
1127 ifh.write(entry)
1106 else:
1128 else:
1107 offset += curr * self._io.size
1129 offset += curr * self._io.size
1108 transaction.add(self.indexfile, offset, curr)
1130 transaction.add(self.indexfile, offset, curr)
1109 ifh.write(entry)
1131 ifh.write(entry)
1110 ifh.write(data[0])
1132 ifh.write(data[0])
1111 ifh.write(data[1])
1133 ifh.write(data[1])
1112 self.checkinlinesize(transaction, ifh)
1134 self.checkinlinesize(transaction, ifh)
1113
1135
1114 if type(text) == str: # only accept immutable objects
1136 if type(text) == str: # only accept immutable objects
1115 self._cache = (node, curr, text)
1137 self._cache = (node, curr, text)
1116 return node
1138 return node
1117
1139
1118 def ancestor(self, a, b):
1140 def ancestor(self, a, b):
1119 """calculate the least common ancestor of nodes a and b"""
1141 """calculate the least common ancestor of nodes a and b"""
1120
1142
1121 # fast path, check if it is a descendant
1143 # fast path, check if it is a descendant
1122 a, b = self.rev(a), self.rev(b)
1144 a, b = self.rev(a), self.rev(b)
1123 start, end = sorted((a, b))
1145 start, end = sorted((a, b))
1124 for i in self.descendants(start):
1146 for i in self.descendants(start):
1125 if i == end:
1147 if i == end:
1126 return self.node(start)
1148 return self.node(start)
1127 elif i > end:
1149 elif i > end:
1128 break
1150 break
1129
1151
1130 def parents(rev):
1152 def parents(rev):
1131 return [p for p in self.parentrevs(rev) if p != nullrev]
1153 return [p for p in self.parentrevs(rev) if p != nullrev]
1132
1154
1133 c = ancestor.ancestor(a, b, parents)
1155 c = ancestor.ancestor(a, b, parents)
1134 if c is None:
1156 if c is None:
1135 return nullid
1157 return nullid
1136
1158
1137 return self.node(c)
1159 return self.node(c)
1138
1160
1139 def group(self, nodelist, lookup, infocollect=None):
1161 def group(self, nodelist, lookup, infocollect=None):
1140 """Calculate a delta group, yielding a sequence of changegroup chunks
1162 """Calculate a delta group, yielding a sequence of changegroup chunks
1141 (strings).
1163 (strings).
1142
1164
1143 Given a list of changeset revs, return a set of deltas and
1165 Given a list of changeset revs, return a set of deltas and
1144 metadata corresponding to nodes. The first delta is
1166 metadata corresponding to nodes. The first delta is
1145 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1167 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1146 have this parent as it has all history before these
1168 have this parent as it has all history before these
1147 changesets. The parent used is parents(nodes[0])[0].
1169 changesets. The parent used is parents(nodes[0])[0].
1148 """
1170 """
1149
1171
1150 revs = [self.rev(n) for n in nodelist]
1172 revs = [self.rev(n) for n in nodelist]
1151
1173
1152 # if we don't have any revisions touched by these changesets, bail
1174 # if we don't have any revisions touched by these changesets, bail
1153 if not revs:
1175 if not revs:
1154 yield changegroup.closechunk()
1176 yield changegroup.closechunk()
1155 return
1177 return
1156
1178
1157 # add the parent of the first rev
1179 # add the parent of the first rev
1158 p = self.parentrevs(revs[0])[0]
1180 p = self.parentrevs(revs[0])[0]
1159 revs.insert(0, p)
1181 revs.insert(0, p)
1160
1182
1161 # build deltas
1183 # build deltas
1162 for d in xrange(len(revs) - 1):
1184 for d in xrange(len(revs) - 1):
1163 a, b = revs[d], revs[d + 1]
1185 a, b = revs[d], revs[d + 1]
1164 nb = self.node(b)
1186 nb = self.node(b)
1165
1187
1166 if infocollect is not None:
1188 if infocollect is not None:
1167 infocollect(nb)
1189 infocollect(nb)
1168
1190
1169 p = self.parents(nb)
1191 p = self.parents(nb)
1170 meta = nb + p[0] + p[1] + lookup(nb)
1192 meta = nb + p[0] + p[1] + lookup(nb)
1171 if a == -1:
1193 if a == -1:
1172 d = self.revision(nb)
1194 d = self.revision(nb)
1173 meta += mdiff.trivialdiffheader(len(d))
1195 meta += mdiff.trivialdiffheader(len(d))
1174 else:
1196 else:
1175 d = self.revdiff(a, b)
1197 d = self.revdiff(a, b)
1176 yield changegroup.chunkheader(len(meta) + len(d))
1198 yield changegroup.chunkheader(len(meta) + len(d))
1177 yield meta
1199 yield meta
1178 if len(d) > 2**20:
1200 if len(d) > 2**20:
1179 pos = 0
1201 pos = 0
1180 while pos < len(d):
1202 while pos < len(d):
1181 pos2 = pos + 2 ** 18
1203 pos2 = pos + 2 ** 18
1182 yield d[pos:pos2]
1204 yield d[pos:pos2]
1183 pos = pos2
1205 pos = pos2
1184 else:
1206 else:
1185 yield d
1207 yield d
1186
1208
1187 yield changegroup.closechunk()
1209 yield changegroup.closechunk()
1188
1210
1189 def addgroup(self, revs, linkmapper, transaction):
1211 def addgroup(self, revs, linkmapper, transaction):
1190 """
1212 """
1191 add a delta group
1213 add a delta group
1192
1214
1193 Given a set of deltas, add them to the revision log. The
1215 Given a set of deltas, add them to the revision log. The
1194 first delta is against its parent, which should be in our
1216 first delta is against its parent, which should be in our
1195 log; the rest are against the previous delta.
1217 log; the rest are against the previous delta.
1196 """
1218 """
1197
1219
1198 # track the base of the current delta log
1220 # track the base of the current delta log
1199 r = len(self)
1221 r = len(self)
1200 t = r - 1
1222 t = r - 1
1201 node = None
1223 node = None
1202
1224
1203 base = prev = nullrev
1225 base = prev = nullrev
1204 start = end = textlen = 0
1226 start = end = textlen = 0
1205 if r:
1227 if r:
1206 end = self.end(t)
1228 end = self.end(t)
1207
1229
1208 ifh = self.opener(self.indexfile, "a+")
1230 ifh = self.opener(self.indexfile, "a+")
1209 isize = r * self._io.size
1231 isize = r * self._io.size
1210 if self._inline:
1232 if self._inline:
1211 transaction.add(self.indexfile, end + isize, r)
1233 transaction.add(self.indexfile, end + isize, r)
1212 dfh = None
1234 dfh = None
1213 else:
1235 else:
1214 transaction.add(self.indexfile, isize, r)
1236 transaction.add(self.indexfile, isize, r)
1215 transaction.add(self.datafile, end)
1237 transaction.add(self.datafile, end)
1216 dfh = self.opener(self.datafile, "a")
1238 dfh = self.opener(self.datafile, "a")
1217
1239
1218 try:
1240 try:
1219 # loop through our set of deltas
1241 # loop through our set of deltas
1220 chain = None
1242 chain = None
1221 for chunk in revs:
1243 for chunk in revs:
1222 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1244 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1223 link = linkmapper(cs)
1245 link = linkmapper(cs)
1224 if node in self.nodemap:
1246 if node in self.nodemap:
1225 # this can happen if two branches make the same change
1247 # this can happen if two branches make the same change
1226 chain = node
1248 chain = node
1227 continue
1249 continue
1228 delta = buffer(chunk, 80)
1250 delta = buffer(chunk, 80)
1229 del chunk
1251 del chunk
1230
1252
1231 for p in (p1, p2):
1253 for p in (p1, p2):
1232 if not p in self.nodemap:
1254 if not p in self.nodemap:
1233 raise LookupError(p, self.indexfile, _('unknown parent'))
1255 raise LookupError(p, self.indexfile, _('unknown parent'))
1234
1256
1235 if not chain:
1257 if not chain:
1236 # retrieve the parent revision of the delta chain
1258 # retrieve the parent revision of the delta chain
1237 chain = p1
1259 chain = p1
1238 if not chain in self.nodemap:
1260 if not chain in self.nodemap:
1239 raise LookupError(chain, self.indexfile, _('unknown base'))
1261 raise LookupError(chain, self.indexfile, _('unknown base'))
1240
1262
1241 # full versions are inserted when the needed deltas become
1263 # full versions are inserted when the needed deltas become
1242 # comparable to the uncompressed text or when the previous
1264 # comparable to the uncompressed text or when the previous
1243 # version is not the one we have a delta against. We use
1265 # version is not the one we have a delta against. We use
1244 # the size of the previous full rev as a proxy for the
1266 # the size of the previous full rev as a proxy for the
1245 # current size.
1267 # current size.
1246
1268
1247 if chain == prev:
1269 if chain == prev:
1248 cdelta = compress(delta)
1270 cdelta = compress(delta)
1249 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1271 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1250 textlen = mdiff.patchedsize(textlen, delta)
1272 textlen = mdiff.patchedsize(textlen, delta)
1251
1273
1252 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1274 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1253 # flush our writes here so we can read it in revision
1275 # flush our writes here so we can read it in revision
1254 if dfh:
1276 if dfh:
1255 dfh.flush()
1277 dfh.flush()
1256 ifh.flush()
1278 ifh.flush()
1257 text = self.revision(chain)
1279 text = self.revision(chain)
1258 if len(text) == 0:
1280 if len(text) == 0:
1259 # skip over trivial delta header
1281 # skip over trivial delta header
1260 text = buffer(delta, 12)
1282 text = buffer(delta, 12)
1261 else:
1283 else:
1262 text = mdiff.patches(text, [delta])
1284 text = mdiff.patches(text, [delta])
1263 del delta
1285 del delta
1264 chk = self._addrevision(text, transaction, link, p1, p2, None,
1286 chk = self._addrevision(text, transaction, link, p1, p2, None,
1265 ifh, dfh)
1287 ifh, dfh)
1266 if not dfh and not self._inline:
1288 if not dfh and not self._inline:
1267 # addrevision switched from inline to conventional
1289 # addrevision switched from inline to conventional
1268 # reopen the index
1290 # reopen the index
1269 dfh = self.opener(self.datafile, "a")
1291 dfh = self.opener(self.datafile, "a")
1270 ifh = self.opener(self.indexfile, "a")
1292 ifh = self.opener(self.indexfile, "a")
1271 if chk != node:
1293 if chk != node:
1272 raise RevlogError(_("consistency error adding group"))
1294 raise RevlogError(_("consistency error adding group"))
1273 textlen = len(text)
1295 textlen = len(text)
1274 else:
1296 else:
1275 e = (offset_type(end, 0), cdeltalen, textlen, base,
1297 e = (offset_type(end, 0), cdeltalen, textlen, base,
1276 link, self.rev(p1), self.rev(p2), node)
1298 link, self.rev(p1), self.rev(p2), node)
1277 self.index.insert(-1, e)
1299 self.index.insert(-1, e)
1278 self.nodemap[node] = r
1300 self.nodemap[node] = r
1279 entry = self._io.packentry(e, self.node, self.version, r)
1301 entry = self._io.packentry(e, self.node, self.version, r)
1280 if self._inline:
1302 if self._inline:
1281 ifh.write(entry)
1303 ifh.write(entry)
1282 ifh.write(cdelta[0])
1304 ifh.write(cdelta[0])
1283 ifh.write(cdelta[1])
1305 ifh.write(cdelta[1])
1284 self.checkinlinesize(transaction, ifh)
1306 self.checkinlinesize(transaction, ifh)
1285 if not self._inline:
1307 if not self._inline:
1286 dfh = self.opener(self.datafile, "a")
1308 dfh = self.opener(self.datafile, "a")
1287 ifh = self.opener(self.indexfile, "a")
1309 ifh = self.opener(self.indexfile, "a")
1288 else:
1310 else:
1289 dfh.write(cdelta[0])
1311 dfh.write(cdelta[0])
1290 dfh.write(cdelta[1])
1312 dfh.write(cdelta[1])
1291 ifh.write(entry)
1313 ifh.write(entry)
1292
1314
1293 t, r, chain, prev = r, r + 1, node, node
1315 t, r, chain, prev = r, r + 1, node, node
1294 base = self.base(t)
1316 base = self.base(t)
1295 start = self.start(base)
1317 start = self.start(base)
1296 end = self.end(t)
1318 end = self.end(t)
1297 finally:
1319 finally:
1298 if dfh:
1320 if dfh:
1299 dfh.close()
1321 dfh.close()
1300 ifh.close()
1322 ifh.close()
1301
1323
1302 return node
1324 return node
1303
1325
1304 def strip(self, minlink, transaction):
1326 def strip(self, minlink, transaction):
1305 """truncate the revlog on the first revision with a linkrev >= minlink
1327 """truncate the revlog on the first revision with a linkrev >= minlink
1306
1328
1307 This function is called when we're stripping revision minlink and
1329 This function is called when we're stripping revision minlink and
1308 its descendants from the repository.
1330 its descendants from the repository.
1309
1331
1310 We have to remove all revisions with linkrev >= minlink, because
1332 We have to remove all revisions with linkrev >= minlink, because
1311 the equivalent changelog revisions will be renumbered after the
1333 the equivalent changelog revisions will be renumbered after the
1312 strip.
1334 strip.
1313
1335
1314 So we truncate the revlog on the first of these revisions, and
1336 So we truncate the revlog on the first of these revisions, and
1315 trust that the caller has saved the revisions that shouldn't be
1337 trust that the caller has saved the revisions that shouldn't be
1316 removed and that it'll re-add them after this truncation.
1338 removed and that it'll re-add them after this truncation.
1317 """
1339 """
1318 if len(self) == 0:
1340 if len(self) == 0:
1319 return
1341 return
1320
1342
1321 if isinstance(self.index, lazyindex):
1343 if isinstance(self.index, lazyindex):
1322 self._loadindexmap()
1344 self._loadindexmap()
1323
1345
1324 for rev in self:
1346 for rev in self:
1325 if self.index[rev][4] >= minlink:
1347 if self.index[rev][4] >= minlink:
1326 break
1348 break
1327 else:
1349 else:
1328 return
1350 return
1329
1351
1330 # first truncate the files on disk
1352 # first truncate the files on disk
1331 end = self.start(rev)
1353 end = self.start(rev)
1332 if not self._inline:
1354 if not self._inline:
1333 transaction.add(self.datafile, end)
1355 transaction.add(self.datafile, end)
1334 end = rev * self._io.size
1356 end = rev * self._io.size
1335 else:
1357 else:
1336 end += rev * self._io.size
1358 end += rev * self._io.size
1337
1359
1338 transaction.add(self.indexfile, end)
1360 transaction.add(self.indexfile, end)
1339
1361
1340 # then reset internal state in memory to forget those revisions
1362 # then reset internal state in memory to forget those revisions
1341 self._cache = None
1363 self._cache = None
1342 self._chunkclear()
1364 self._chunkclear()
1343 for x in xrange(rev, len(self)):
1365 for x in xrange(rev, len(self)):
1344 del self.nodemap[self.node(x)]
1366 del self.nodemap[self.node(x)]
1345
1367
1346 del self.index[rev:-1]
1368 del self.index[rev:-1]
1347
1369
1348 def checksize(self):
1370 def checksize(self):
1349 expected = 0
1371 expected = 0
1350 if len(self):
1372 if len(self):
1351 expected = max(0, self.end(len(self) - 1))
1373 expected = max(0, self.end(len(self) - 1))
1352
1374
1353 try:
1375 try:
1354 f = self.opener(self.datafile)
1376 f = self.opener(self.datafile)
1355 f.seek(0, 2)
1377 f.seek(0, 2)
1356 actual = f.tell()
1378 actual = f.tell()
1357 dd = actual - expected
1379 dd = actual - expected
1358 except IOError, inst:
1380 except IOError, inst:
1359 if inst.errno != errno.ENOENT:
1381 if inst.errno != errno.ENOENT:
1360 raise
1382 raise
1361 dd = 0
1383 dd = 0
1362
1384
1363 try:
1385 try:
1364 f = self.opener(self.indexfile)
1386 f = self.opener(self.indexfile)
1365 f.seek(0, 2)
1387 f.seek(0, 2)
1366 actual = f.tell()
1388 actual = f.tell()
1367 s = self._io.size
1389 s = self._io.size
1368 i = max(0, actual // s)
1390 i = max(0, actual // s)
1369 di = actual - (i * s)
1391 di = actual - (i * s)
1370 if self._inline:
1392 if self._inline:
1371 databytes = 0
1393 databytes = 0
1372 for r in self:
1394 for r in self:
1373 databytes += max(0, self.length(r))
1395 databytes += max(0, self.length(r))
1374 dd = 0
1396 dd = 0
1375 di = actual - len(self) * s - databytes
1397 di = actual - len(self) * s - databytes
1376 except IOError, inst:
1398 except IOError, inst:
1377 if inst.errno != errno.ENOENT:
1399 if inst.errno != errno.ENOENT:
1378 raise
1400 raise
1379 di = 0
1401 di = 0
1380
1402
1381 return (dd, di)
1403 return (dd, di)
1382
1404
1383 def files(self):
1405 def files(self):
1384 res = [ self.indexfile ]
1406 res = [ self.indexfile ]
1385 if not self._inline:
1407 if not self._inline:
1386 res.append(self.datafile)
1408 res.append(self.datafile)
1387 return res
1409 return res
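
The group() and addgroup() hunks above agree on a simple framing for each
changegroup entry: an 80-byte metadata header holding four 20-byte node ids
(the entry's node, its two parents, and the linked changeset node), followed
by the raw delta bytes, with the whole entry preceded by a length header from
changegroup.chunkheader(). A minimal sketch of that framing, outside any
Mercurial API (parse_entry and the dummy node ids below are made up purely
for illustration):

import struct

def parse_entry(entry):
    # Split one changegroup entry the same way addgroup() does:
    # four 20-byte node ids, then the raw delta that follows them.
    node, p1, p2, cs = struct.unpack("20s20s20s20s", entry[:80])
    delta = entry[80:]
    return node, p1, p2, cs, delta

# Round-trip a dummy entry to show the layout.
dummy = b"N" * 20 + b"P" * 20 + b"Q" * 20 + b"C" * 20 + b"delta bytes"
print(parse_entry(dummy))
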
@@ -1,563 +1,583 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 checkundo()
3 checkundo()
4 {
4 {
5 if [ -f .hg/store/undo ]; then
5 if [ -f .hg/store/undo ]; then
6 echo ".hg/store/undo still exists after $1"
6 echo ".hg/store/undo still exists after $1"
7 fi
7 fi
8 }
8 }
9
9
10 echo "[extensions]" >> $HGRCPATH
10 echo "[extensions]" >> $HGRCPATH
11 echo "mq=" >> $HGRCPATH
11 echo "mq=" >> $HGRCPATH
12
12
13 echo % help
13 echo % help
14 hg help mq
14 hg help mq
15
15
16 hg init a
16 hg init a
17 cd a
17 cd a
18 echo a > a
18 echo a > a
19 hg ci -Ama
19 hg ci -Ama
20
20
21 hg clone . ../k
21 hg clone . ../k
22
22
23 mkdir b
23 mkdir b
24 echo z > b/z
24 echo z > b/z
25 hg ci -Ama
25 hg ci -Ama
26
26
27 echo % qinit
27 echo % qinit
28
28
29 hg qinit
29 hg qinit
30
30
31 cd ..
31 cd ..
32 hg init b
32 hg init b
33
33
34 echo % -R qinit
34 echo % -R qinit
35
35
36 hg -R b qinit
36 hg -R b qinit
37
37
38 hg init c
38 hg init c
39
39
40 echo % qinit -c
40 echo % qinit -c
41
41
42 hg --cwd c qinit -c
42 hg --cwd c qinit -c
43 hg -R c/.hg/patches st
43 hg -R c/.hg/patches st
44
44
45 echo '% qinit; qinit -c'
45 echo '% qinit; qinit -c'
46 hg init d
46 hg init d
47 cd d
47 cd d
48 hg qinit
48 hg qinit
49 hg qinit -c
49 hg qinit -c
50 # qinit -c should create both files if they don't exist
50 # qinit -c should create both files if they don't exist
51 echo ' .hgignore:'
51 echo ' .hgignore:'
52 cat .hg/patches/.hgignore
52 cat .hg/patches/.hgignore
53 echo ' series:'
53 echo ' series:'
54 cat .hg/patches/series
54 cat .hg/patches/series
55 hg qinit -c 2>&1 | sed -e 's/repository.*already/repository already/'
55 hg qinit -c 2>&1 | sed -e 's/repository.*already/repository already/'
56 cd ..
56 cd ..
57
57
58 echo '% qinit; <stuff>; qinit -c'
58 echo '% qinit; <stuff>; qinit -c'
59 hg init e
59 hg init e
60 cd e
60 cd e
61 hg qnew A
61 hg qnew A
62 checkundo qnew
62 checkundo qnew
63 echo foo > foo
63 echo foo > foo
64 hg add foo
64 hg add foo
65 hg qrefresh
65 hg qrefresh
66 hg qnew B
66 hg qnew B
67 echo >> foo
67 echo >> foo
68 hg qrefresh
68 hg qrefresh
69 echo status >> .hg/patches/.hgignore
69 echo status >> .hg/patches/.hgignore
70 echo bleh >> .hg/patches/.hgignore
70 echo bleh >> .hg/patches/.hgignore
71 hg qinit -c
71 hg qinit -c
72 hg -R .hg/patches status
72 hg -R .hg/patches status
73 # qinit -c shouldn't touch these files if they already exist
73 # qinit -c shouldn't touch these files if they already exist
74 echo ' .hgignore:'
74 echo ' .hgignore:'
75 cat .hg/patches/.hgignore
75 cat .hg/patches/.hgignore
76 echo ' series:'
76 echo ' series:'
77 cat .hg/patches/series
77 cat .hg/patches/series
78 cd ..
78 cd ..
79
79
80 cd a
80 cd a
81
81
82 hg qnew -m 'foo bar' test.patch
82 hg qnew -m 'foo bar' test.patch
83
83
84 echo % qrefresh
84 echo % qrefresh
85
85
86 echo a >> a
86 echo a >> a
87 hg qrefresh
87 hg qrefresh
88 sed -e "s/^\(diff -r \)\([a-f0-9]* \)/\1 x/" \
88 sed -e "s/^\(diff -r \)\([a-f0-9]* \)/\1 x/" \
89 -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
89 -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
90 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/test.patch
90 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/test.patch
91
91
92 echo % empty qrefresh
92 echo % empty qrefresh
93
93
94 hg qrefresh -X a
94 hg qrefresh -X a
95 echo 'revision:'
95 echo 'revision:'
96 hg diff -r -2 -r -1
96 hg diff -r -2 -r -1
97 echo 'patch:'
97 echo 'patch:'
98 cat .hg/patches/test.patch
98 cat .hg/patches/test.patch
99 echo 'working dir diff:'
99 echo 'working dir diff:'
100 hg diff --nodates -q
100 hg diff --nodates -q
101 # restore things
101 # restore things
102 hg qrefresh
102 hg qrefresh
103 checkundo qrefresh
103 checkundo qrefresh
104
104
105 echo % qpop
105 echo % qpop
106
106
107 hg qpop
107 hg qpop
108 checkundo qpop
108 checkundo qpop
109
109
110 echo % qpush with dump of tag cache
110 echo % qpush with dump of tag cache
111
111
112 # Dump the tag cache to ensure that it has exactly one head after qpush.
112 # Dump the tag cache to ensure that it has exactly one head after qpush.
113 rm -f .hg/tags.cache
113 rm -f .hg/tags.cache
114 hg tags > /dev/null
114 hg tags > /dev/null
115 echo ".hg/tags.cache (pre qpush):"
115 echo ".hg/tags.cache (pre qpush):"
116 sed 's/ [0-9a-f]*//' .hg/tags.cache
116 sed 's/ [0-9a-f]*//' .hg/tags.cache
117 hg qpush
117 hg qpush
118 hg tags > /dev/null
118 hg tags > /dev/null
119 echo ".hg/tags.cache (post qpush):"
119 echo ".hg/tags.cache (post qpush):"
120 sed 's/ [0-9a-f]*//' .hg/tags.cache
120 sed 's/ [0-9a-f]*//' .hg/tags.cache
121
121
122 checkundo qpush
122 checkundo qpush
123
123
124 cd ..
124 cd ..
125
125
126 echo % pop/push outside repo
126 echo % pop/push outside repo
127
127
128 hg -R a qpop
128 hg -R a qpop
129 hg -R a qpush
129 hg -R a qpush
130
130
131 cd a
131 cd a
132 hg qnew test2.patch
132 hg qnew test2.patch
133
133
134 echo % qrefresh in subdir
134 echo % qrefresh in subdir
135
135
136 cd b
136 cd b
137 echo a > a
137 echo a > a
138 hg add a
138 hg add a
139 hg qrefresh
139 hg qrefresh
140
140
141 echo % pop/push -a in subdir
141 echo % pop/push -a in subdir
142
142
143 hg qpop -a
143 hg qpop -a
144 hg --traceback qpush -a
144 hg --traceback qpush -a
145
145
146 # setting COLUMNS with ui.interactive enabled tests output truncation (issue1912)
146 # setting COLUMNS with ui.interactive enabled tests output truncation (issue1912)
147 echo % qseries
147 echo % qseries
148 COLUMNS=4 hg qseries --config ui.interactive=true
148 COLUMNS=4 hg qseries --config ui.interactive=true
149 COLUMNS=20 hg qseries --config ui.interactive=true -vs
149 COLUMNS=20 hg qseries --config ui.interactive=true -vs
150 hg qpop
150 hg qpop
151 hg qseries -vs
151 hg qseries -vs
152 hg qpush
152 hg qpush
153
153
154 echo % qapplied
154 echo % qapplied
155 hg qapplied
155 hg qapplied
156
156
157 echo % qtop
157 echo % qtop
158 hg qtop
158 hg qtop
159
159
160 echo % prev
160 echo % prev
161 hg qapp -1
161 hg qapp -1
162
162
163 echo % next
163 echo % next
164 hg qunapp -1
164 hg qunapp -1
165
165
166 hg qpop
166 hg qpop
167 echo % commit should fail
167 echo % commit should fail
168 hg commit
168 hg commit
169
169
170 echo % push should fail
170 echo % push should fail
171 hg push ../../k
171 hg push ../../k
172
172
173 echo % import should fail
173 echo % import should fail
174 hg st .
174 hg st .
175 echo foo >> ../a
175 echo foo >> ../a
176 hg diff > ../../import.diff
176 hg diff > ../../import.diff
177 hg revert --no-backup ../a
177 hg revert --no-backup ../a
178 hg import ../../import.diff
178 hg import ../../import.diff
179 hg st
179 hg st
180 echo % import --no-commit should succeed
180 echo % import --no-commit should succeed
181 hg import --no-commit ../../import.diff
181 hg import --no-commit ../../import.diff
182 hg st
182 hg st
183 hg revert --no-backup ../a
183 hg revert --no-backup ../a
184
184
185 echo % qunapplied
185 echo % qunapplied
186 hg qunapplied
186 hg qunapplied
187
187
188 echo % qpush/qpop with index
188 echo % qpush/qpop with index
189 hg qnew test1b.patch
189 hg qnew test1b.patch
190 echo 1b > 1b
190 echo 1b > 1b
191 hg add 1b
191 hg add 1b
192 hg qrefresh
192 hg qrefresh
193 hg qpush 2
193 hg qpush 2
194 hg qpop 0
194 hg qpop 0
195 hg qpush test.patch+1
195 hg qpush test.patch+1
196 hg qpush test.patch+2
196 hg qpush test.patch+2
197 hg qpop test2.patch-1
197 hg qpop test2.patch-1
198 hg qpop test2.patch-2
198 hg qpop test2.patch-2
199 hg qpush test1b.patch+1
199 hg qpush test1b.patch+1
200
200
201 echo % pop, qapplied, qunapplied
201 echo % pop, qapplied, qunapplied
202 hg qseries -v
202 hg qseries -v
203 echo % qapplied -1 test.patch
203 echo % qapplied -1 test.patch
204 hg qapplied -1 test.patch
204 hg qapplied -1 test.patch
205 echo % qapplied -1 test1b.patch
205 echo % qapplied -1 test1b.patch
206 hg qapplied -1 test1b.patch
206 hg qapplied -1 test1b.patch
207 echo % qapplied -1 test2.patch
207 echo % qapplied -1 test2.patch
208 hg qapplied -1 test2.patch
208 hg qapplied -1 test2.patch
209 echo % qapplied -1
209 echo % qapplied -1
210 hg qapplied -1
210 hg qapplied -1
211 echo % qapplied
211 echo % qapplied
212 hg qapplied
212 hg qapplied
213 echo % qapplied test1b.patch
213 echo % qapplied test1b.patch
214 hg qapplied test1b.patch
214 hg qapplied test1b.patch
215 echo % qunapplied -1
215 echo % qunapplied -1
216 hg qunapplied -1
216 hg qunapplied -1
217 echo % qunapplied
217 echo % qunapplied
218 hg qunapplied
218 hg qunapplied
219 echo % popping
219 echo % popping
220 hg qpop
220 hg qpop
221 echo % qunapplied -1
221 echo % qunapplied -1
222 hg qunapplied -1
222 hg qunapplied -1
223 echo % qunapplied
223 echo % qunapplied
224 hg qunapplied
224 hg qunapplied
225 echo % qunapplied test2.patch
225 echo % qunapplied test2.patch
226 hg qunapplied test2.patch
226 hg qunapplied test2.patch
227 echo % qunapplied -1 test2.patch
227 echo % qunapplied -1 test2.patch
228 hg qunapplied -1 test2.patch
228 hg qunapplied -1 test2.patch
229 echo % popping -a
229 echo % popping -a
230 hg qpop -a
230 hg qpop -a
231 echo % qapplied
231 echo % qapplied
232 hg qapplied
232 hg qapplied
233 echo % qapplied -1
233 echo % qapplied -1
234 hg qapplied -1
234 hg qapplied -1
235 hg qpush
235 hg qpush
236
236
237 echo % push should succeed
237 echo % push should succeed
238 hg qpop -a
238 hg qpop -a
239 hg push ../../k
239 hg push ../../k
240
240
241 echo % qpush/qpop error codes
241 echo % qpush/qpop error codes
242 errorcode()
242 errorcode()
243 {
243 {
244 hg "$@" && echo " $@ succeeds" || echo " $@ fails"
244 hg "$@" && echo " $@ succeeds" || echo " $@ fails"
245 }
245 }
246
246
247 # we want to start with some patches applied
247 # we want to start with some patches applied
248 hg qpush -a
248 hg qpush -a
249 echo " % pops all patches and succeeds"
249 echo " % pops all patches and succeeds"
250 errorcode qpop -a
250 errorcode qpop -a
251 echo " % does nothing and succeeds"
251 echo " % does nothing and succeeds"
252 errorcode qpop -a
252 errorcode qpop -a
253 echo " % fails - nothing else to pop"
253 echo " % fails - nothing else to pop"
254 errorcode qpop
254 errorcode qpop
255 echo " % pushes a patch and succeeds"
255 echo " % pushes a patch and succeeds"
256 errorcode qpush
256 errorcode qpush
257 echo " % pops a patch and succeeds"
257 echo " % pops a patch and succeeds"
258 errorcode qpop
258 errorcode qpop
259 echo " % pushes up to test1b.patch and succeeds"
259 echo " % pushes up to test1b.patch and succeeds"
260 errorcode qpush test1b.patch
260 errorcode qpush test1b.patch
261 echo " % does nothing and succeeds"
261 echo " % does nothing and succeeds"
262 errorcode qpush test1b.patch
262 errorcode qpush test1b.patch
263 echo " % does nothing and succeeds"
263 echo " % does nothing and succeeds"
264 errorcode qpop test1b.patch
264 errorcode qpop test1b.patch
265 echo " % fails - can't push to this patch"
265 echo " % fails - can't push to this patch"
266 errorcode qpush test.patch
266 errorcode qpush test.patch
267 echo " % fails - can't pop to this patch"
267 echo " % fails - can't pop to this patch"
268 errorcode qpop test2.patch
268 errorcode qpop test2.patch
269 echo " % pops up to test.patch and succeeds"
269 echo " % pops up to test.patch and succeeds"
270 errorcode qpop test.patch
270 errorcode qpop test.patch
271 echo " % pushes all patches and succeeds"
271 echo " % pushes all patches and succeeds"
272 errorcode qpush -a
272 errorcode qpush -a
273 echo " % does nothing and succeeds"
273 echo " % does nothing and succeeds"
274 errorcode qpush -a
274 errorcode qpush -a
275 echo " % fails - nothing else to push"
275 echo " % fails - nothing else to push"
276 errorcode qpush
276 errorcode qpush
277 echo " % does nothing and succeeds"
277 echo " % does nothing and succeeds"
278 errorcode qpush test2.patch
278 errorcode qpush test2.patch
279
279
280
280
281 echo % strip
281 echo % strip
282 cd ../../b
282 cd ../../b
283 echo x>x
283 echo x>x
284 hg ci -Ama
284 hg ci -Ama
285 hg strip tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
285 hg strip tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
286 hg unbundle .hg/strip-backup/*
286 hg unbundle .hg/strip-backup/*
287
287
288 echo % strip with local changes, should complain
288 echo % strip with local changes, should complain
289 hg up
289 hg up
290 echo y>y
290 echo y>y
291 hg add y
291 hg add y
292 hg strip tip | sed 's/\(saving bundle to \).*/\1/'
292 hg strip tip | sed 's/\(saving bundle to \).*/\1/'
293 echo % --force strip with local changes
293 echo % --force strip with local changes
294 hg strip -f tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
294 hg strip -f tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
295
295
296 echo '% cd b; hg qrefresh'
296 echo '% cd b; hg qrefresh'
297 hg init refresh
297 hg init refresh
298 cd refresh
298 cd refresh
299 echo a > a
299 echo a > a
300 hg ci -Ama
300 hg ci -Ama
301 hg qnew -mfoo foo
301 hg qnew -mfoo foo
302 echo a >> a
302 echo a >> a
303 hg qrefresh
303 hg qrefresh
304 mkdir b
304 mkdir b
305 cd b
305 cd b
306 echo f > f
306 echo f > f
307 hg add f
307 hg add f
308 hg qrefresh
308 hg qrefresh
309 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
309 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
310 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
310 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
311 echo % hg qrefresh .
311 echo % hg qrefresh .
312 hg qrefresh .
312 hg qrefresh .
313 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
313 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
314 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
314 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
315 hg status
315 hg status
316
316
317 echo % qpush failure
317 echo % qpush failure
318 cd ..
318 cd ..
319 hg qrefresh
319 hg qrefresh
320 hg qnew -mbar bar
320 hg qnew -mbar bar
321 echo foo > foo
321 echo foo > foo
322 echo bar > bar
322 echo bar > bar
323 hg add foo bar
323 hg add foo bar
324 hg qrefresh
324 hg qrefresh
325 hg qpop -a
325 hg qpop -a
326 echo bar > foo
326 echo bar > foo
327 hg qpush -a
327 hg qpush -a
328 hg st
328 hg st
329
329
330 echo % mq tags
330 echo % mq tags
331 hg log --template '{rev} {tags}\n' -r qparent:qtip
331 hg log --template '{rev} {tags}\n' -r qparent:qtip
332
332
333 echo % bad node in status
333 echo % bad node in status
334 hg qpop
334 hg qpop
335 hg strip -qn tip
335 hg strip -qn tip
336 hg tip 2>&1 | sed -e 's/unknown node .*/unknown node/'
336 hg tip 2>&1 | sed -e 's/unknown node .*/unknown node/'
337 hg branches 2>&1 | sed -e 's/unknown node .*/unknown node/'
337 hg branches 2>&1 | sed -e 's/unknown node .*/unknown node/'
338 hg qpop 2>&1 | sed -e 's/unknown node .*/unknown node/'
338 hg qpop 2>&1 | sed -e 's/unknown node .*/unknown node/'
339
339
340 cat >>$HGRCPATH <<EOF
340 cat >>$HGRCPATH <<EOF
341 [diff]
341 [diff]
342 git = True
342 git = True
343 EOF
343 EOF
344 cd ..
344 cd ..
345 hg init git
345 hg init git
346 cd git
346 cd git
347 hg qinit
347 hg qinit
348
348
349 hg qnew -m'new file' new
349 hg qnew -m'new file' new
350 echo foo > new
350 echo foo > new
351 chmod +x new
351 chmod +x new
352 hg add new
352 hg add new
353 hg qrefresh
353 hg qrefresh
354 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
354 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
355 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/new
355 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/new
356
356
357 hg qnew -m'copy file' copy
357 hg qnew -m'copy file' copy
358 hg cp new copy
358 hg cp new copy
359 hg qrefresh
359 hg qrefresh
360 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
360 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
361 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/copy
361 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/copy
362
362
363 hg qpop
363 hg qpop
364 hg qpush
364 hg qpush
365 hg qdiff
365 hg qdiff
366 cat >>$HGRCPATH <<EOF
366 cat >>$HGRCPATH <<EOF
367 [diff]
367 [diff]
368 git = False
368 git = False
369 EOF
369 EOF
370 hg qdiff --git
370 hg qdiff --git
371
371
372 cd ..
372 cd ..
373 hg init slow
373 hg init slow
374 cd slow
374 cd slow
375 hg qinit
375 hg qinit
376 echo foo > foo
376 echo foo > foo
377 hg add foo
377 hg add foo
378 hg ci -m 'add foo'
378 hg ci -m 'add foo'
379 hg qnew bar
379 hg qnew bar
380 echo bar > bar
380 echo bar > bar
381 hg add bar
381 hg add bar
382 hg mv foo baz
382 hg mv foo baz
383 hg qrefresh --git
383 hg qrefresh --git
384 hg up -C 0
384 hg up -C 0
385 echo >> foo
385 echo >> foo
386 hg ci -m 'change foo'
386 hg ci -m 'change foo'
387 hg up -C 1
387 hg up -C 1
388 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
388 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
389 cat .hg/patches/bar
389 cat .hg/patches/bar
390 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
390 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
391 hg qrefresh --git
391 hg qrefresh --git
392 cat .hg/patches/bar
392 cat .hg/patches/bar
393 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
393 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
394 hg qrefresh
394 hg qrefresh
395 grep 'diff --git' .hg/patches/bar
395 grep 'diff --git' .hg/patches/bar
396
396
397 echo
397 echo
398 hg up -C 1
398 hg up -C 1
399 echo >> foo
399 echo >> foo
400 hg ci -m 'change foo again'
400 hg ci -m 'change foo again'
401 hg up -C 2
401 hg up -C 2
402 hg mv bar quux
402 hg mv bar quux
403 hg mv baz bleh
403 hg mv baz bleh
404 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
404 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
405 cat .hg/patches/bar
405 cat .hg/patches/bar
406 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
406 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
407 hg mv quux fred
407 hg mv quux fred
408 hg mv bleh barney
408 hg mv bleh barney
409 hg qrefresh --git
409 hg qrefresh --git
410 cat .hg/patches/bar
410 cat .hg/patches/bar
411 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
411 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
412
412
413 echo % refresh omitting an added file
413 echo % refresh omitting an added file
414 hg qnew baz
414 hg qnew baz
415 echo newfile > newfile
415 echo newfile > newfile
416 hg add newfile
416 hg add newfile
417 hg qrefresh
417 hg qrefresh
418 hg st -A newfile
418 hg st -A newfile
419 hg qrefresh -X newfile
419 hg qrefresh -X newfile
420 hg st -A newfile
420 hg st -A newfile
421 hg revert newfile
421 hg revert newfile
422 rm newfile
422 rm newfile
423 hg qpop
423 hg qpop
424 hg qdel baz
424 hg qdel baz
425
425
426 echo % create a git patch
426 echo % create a git patch
427 echo a > alexander
427 echo a > alexander
428 hg add alexander
428 hg add alexander
429 hg qnew -f --git addalexander
429 hg qnew -f --git addalexander
430 grep diff .hg/patches/addalexander
430 grep diff .hg/patches/addalexander
431
431
432 echo % create a git binary patch
432 echo % create a git binary patch
433 cat > writebin.py <<EOF
433 cat > writebin.py <<EOF
434 import sys
434 import sys
435 path = sys.argv[1]
435 path = sys.argv[1]
436 open(path, 'wb').write('BIN\x00ARY')
436 open(path, 'wb').write('BIN\x00ARY')
437 EOF
437 EOF
438 python writebin.py bucephalus
438 python writebin.py bucephalus
439
439
440 python "$TESTDIR/md5sum.py" bucephalus
440 python "$TESTDIR/md5sum.py" bucephalus
441 hg add bucephalus
441 hg add bucephalus
442 hg qnew -f --git addbucephalus
442 hg qnew -f --git addbucephalus
443 grep diff .hg/patches/addbucephalus
443 grep diff .hg/patches/addbucephalus
444
444
445 echo % check binary patches can be popped and pushed
445 echo % check binary patches can be popped and pushed
446 hg qpop
446 hg qpop
447 test -f bucephalus && echo % bucephalus should not be there
447 test -f bucephalus && echo % bucephalus should not be there
448 hg qpush
448 hg qpush
449 test -f bucephalus || echo % bucephalus should be there
449 test -f bucephalus || echo % bucephalus should be there
450 python "$TESTDIR/md5sum.py" bucephalus
450 python "$TESTDIR/md5sum.py" bucephalus
451
451
452
452
453 echo '% strip again'
453 echo '% strip again'
454 cd ..
454 cd ..
455 hg init strip
455 hg init strip
456 cd strip
456 cd strip
457 touch foo
457 touch foo
458 hg add foo
458 hg add foo
459 hg ci -m 'add foo'
459 hg ci -m 'add foo'
460 echo >> foo
460 echo >> foo
461 hg ci -m 'change foo 1'
461 hg ci -m 'change foo 1'
462 hg up -C 0
462 hg up -C 0
463 echo 1 >> foo
463 echo 1 >> foo
464 hg ci -m 'change foo 2'
464 hg ci -m 'change foo 2'
465 HGMERGE=true hg merge
465 HGMERGE=true hg merge
466 hg ci -m merge
466 hg ci -m merge
467 hg log
467 hg log
468 hg strip 1 2>&1 | sed 's/\(saving bundle to \).*/\1/'
468 hg strip 1 2>&1 | sed 's/\(saving bundle to \).*/\1/'
469 checkundo strip
469 checkundo strip
470 hg log
470 hg log
471 cd ..
471 cd ..
472
472
473 echo '% qclone'
473 echo '% qclone'
474 qlog()
474 qlog()
475 {
475 {
476 echo 'main repo:'
476 echo 'main repo:'
477 hg log --template ' rev {rev}: {desc}\n'
477 hg log --template ' rev {rev}: {desc}\n'
478 echo 'patch repo:'
478 echo 'patch repo:'
479 hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
479 hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
480 }
480 }
481 hg init qclonesource
481 hg init qclonesource
482 cd qclonesource
482 cd qclonesource
483 echo foo > foo
483 echo foo > foo
484 hg add foo
484 hg add foo
485 hg ci -m 'add foo'
485 hg ci -m 'add foo'
486 hg qinit
486 hg qinit
487 hg qnew patch1
487 hg qnew patch1
488 echo bar >> foo
488 echo bar >> foo
489 hg qrefresh -m 'change foo'
489 hg qrefresh -m 'change foo'
490 cd ..
490 cd ..
491
491
492 # repo with unversioned patch dir
492 # repo with unversioned patch dir
493 hg qclone qclonesource failure
493 hg qclone qclonesource failure
494
494
495 cd qclonesource
495 cd qclonesource
496 hg qinit -c
496 hg qinit -c
497 hg qci -m checkpoint
497 hg qci -m checkpoint
498 qlog
498 qlog
499 cd ..
499 cd ..
500
500
501 # repo with patches applied
501 # repo with patches applied
502 hg qclone qclonesource qclonedest
502 hg qclone qclonesource qclonedest
503 cd qclonedest
503 cd qclonedest
504 qlog
504 qlog
505 cd ..
505 cd ..
506
506
507 # repo with patches unapplied
507 # repo with patches unapplied
508 cd qclonesource
508 cd qclonesource
509 hg qpop -a
509 hg qpop -a
510 qlog
510 qlog
511 cd ..
511 cd ..
512 hg qclone qclonesource qclonedest2
512 hg qclone qclonesource qclonedest2
513 cd qclonedest2
513 cd qclonedest2
514 qlog
514 qlog
515 cd ..
515 cd ..
516
516
517 echo % 'test applying on an empty file (issue 1033)'
517 echo % 'test applying on an empty file (issue 1033)'
518 hg init empty
518 hg init empty
519 cd empty
519 cd empty
520 touch a
520 touch a
521 hg ci -Am addempty
521 hg ci -Am addempty
522 echo a > a
522 echo a > a
523 hg qnew -f -e changea
523 hg qnew -f -e changea
524 hg qpop
524 hg qpop
525 hg qpush
525 hg qpush
526 cd ..
526 cd ..
527
527
528 echo % test qpush with --force, issue1087
528 echo % test qpush with --force, issue1087
529 hg init forcepush
529 hg init forcepush
530 cd forcepush
530 cd forcepush
531 echo hello > hello.txt
531 echo hello > hello.txt
532 echo bye > bye.txt
532 echo bye > bye.txt
533 hg ci -Ama
533 hg ci -Ama
534 hg qnew -d '0 0' empty
534 hg qnew -d '0 0' empty
535 hg qpop
535 hg qpop
536 echo world >> hello.txt
536 echo world >> hello.txt
537
537
538 echo % qpush should fail, local changes
538 echo % qpush should fail, local changes
539 hg qpush
539 hg qpush
540
540
541 echo % apply force, should not discard changes with empty patch
541 echo % apply force, should not discard changes with empty patch
542 hg qpush -f 2>&1 | sed 's,^.*/patch,patch,g'
542 hg qpush -f 2>&1 | sed 's,^.*/patch,patch,g'
543 hg diff --config diff.nodates=True
543 hg diff --config diff.nodates=True
544 hg qdiff --config diff.nodates=True
544 hg qdiff --config diff.nodates=True
545 hg log -l1 -p
545 hg log -l1 -p
546 hg qref -d '0 0'
546 hg qref -d '0 0'
547 hg qpop
547 hg qpop
548 echo universe >> hello.txt
548 echo universe >> hello.txt
549 echo universe >> bye.txt
549 echo universe >> bye.txt
550
550
551 echo % qpush should fail, local changes
551 echo % qpush should fail, local changes
552 hg qpush
552 hg qpush
553
553
554 echo % apply force, should discard changes in hello, but not bye
554 echo % apply force, should discard changes in hello, but not bye
555 hg qpush -f
555 hg qpush -f
556 hg st
556 hg st
557 hg diff --config diff.nodates=True
557 hg diff --config diff.nodates=True
558 hg qdiff --config diff.nodates=True
558 hg qdiff --config diff.nodates=True
559
559
560 echo % test popping revisions not in working dir ancestry
560 echo % test popping revisions not in working dir ancestry
561 hg qseries -v
561 hg qseries -v
562 hg up qparent
562 hg up qparent
563 hg qpop
563 hg qpop
564
565 cd ..
566 hg init deletion-order
567 cd deletion-order
568
569 touch a
570 hg ci -Aqm0
571
572 hg qnew rename-dir
573 hg rm a
574 hg qrefresh
575
576 mkdir a b
577 touch a/a b/b
578 hg add -q a b
579 hg qrefresh
580
581 echo % test popping must remove files added in subdirectories first
582 hg qpop
583 cd ..
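
The errorcode() helper in the script above leans on hg's exit status: a push
or pop that does real work, or that legitimately has nothing to do, exits 0,
while an impossible request (popping an empty queue, pushing past the end of
the series) exits non-zero, and the test records one line either way. A rough
Python equivalent of that pattern, assuming only that an hg with mq enabled
is on PATH (the helper name and the example commands mirror the script,
nothing more):

import subprocess

def errorcode(*args):
    # Run an hg command and report success or failure, like the shell helper.
    ret = subprocess.call(["hg"] + list(args))
    print("  %s %s" % (" ".join(args), "succeeds" if ret == 0 else "fails"))

# e.g. errorcode("qpop", "-a") still reports success when there is nothing
# left to pop, while a bare errorcode("qpop") on an empty queue reports failure.
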
@@ -1,614 +1,617 b''
1 % help
1 % help
2 mq extension - manage a stack of patches
2 mq extension - manage a stack of patches
3
3
4 This extension lets you work with a stack of patches in a Mercurial
4 This extension lets you work with a stack of patches in a Mercurial
5 repository. It manages two stacks of patches - all known patches, and applied
5 repository. It manages two stacks of patches - all known patches, and applied
6 patches (subset of known patches).
6 patches (subset of known patches).
7
7
8 Known patches are represented as patch files in the .hg/patches directory.
8 Known patches are represented as patch files in the .hg/patches directory.
9 Applied patches are both patch files and changesets.
9 Applied patches are both patch files and changesets.
10
10
11 Common tasks (use "hg help command" for more details):
11 Common tasks (use "hg help command" for more details):
12
12
13 prepare repository to work with patches qinit
13 prepare repository to work with patches qinit
14 create new patch qnew
14 create new patch qnew
15 import existing patch qimport
15 import existing patch qimport
16
16
17 print patch series qseries
17 print patch series qseries
18 print applied patches qapplied
18 print applied patches qapplied
19
19
20 add known patch to applied stack qpush
20 add known patch to applied stack qpush
21 remove patch from applied stack qpop
21 remove patch from applied stack qpop
22 refresh contents of top applied patch qrefresh
22 refresh contents of top applied patch qrefresh
23
23
24 list of commands:
24 list of commands:
25
25
26 qapplied print the patches already applied
26 qapplied print the patches already applied
27 qclone clone main and patch repository at same time
27 qclone clone main and patch repository at same time
28 qcommit commit changes in the queue repository
28 qcommit commit changes in the queue repository
29 qdelete remove patches from queue
29 qdelete remove patches from queue
30 qdiff diff of the current patch and subsequent modifications
30 qdiff diff of the current patch and subsequent modifications
31 qfinish move applied patches into repository history
31 qfinish move applied patches into repository history
32 qfold fold the named patches into the current patch
32 qfold fold the named patches into the current patch
33 qgoto push or pop patches until named patch is at top of stack
33 qgoto push or pop patches until named patch is at top of stack
34 qguard set or print guards for a patch
34 qguard set or print guards for a patch
35 qheader print the header of the topmost or specified patch
35 qheader print the header of the topmost or specified patch
36 qimport import a patch
36 qimport import a patch
37 qinit init a new queue repository
37 qinit init a new queue repository
38 qnew create a new patch
38 qnew create a new patch
39 qnext print the name of the next patch
39 qnext print the name of the next patch
40 qpop pop the current patch off the stack
40 qpop pop the current patch off the stack
41 qprev print the name of the previous patch
41 qprev print the name of the previous patch
42 qpush push the next patch onto the stack
42 qpush push the next patch onto the stack
43 qrefresh update the current patch
43 qrefresh update the current patch
44 qrename rename a patch
44 qrename rename a patch
45 qrestore restore the queue state saved by a revision
45 qrestore restore the queue state saved by a revision
46 qsave save current queue state
46 qsave save current queue state
47 qselect set or print guarded patches to push
47 qselect set or print guarded patches to push
48 qseries print the entire series file
48 qseries print the entire series file
49 qtop print the name of the current patch
49 qtop print the name of the current patch
50 qunapplied print the patches not yet applied
50 qunapplied print the patches not yet applied
51 strip strip a revision and all its descendants from the repository
51 strip strip a revision and all its descendants from the repository
52
52
53 use "hg -v help mq" to show aliases and global options
53 use "hg -v help mq" to show aliases and global options
54 adding a
54 adding a
55 updating to branch default
55 updating to branch default
56 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
56 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 adding b/z
57 adding b/z
58 % qinit
58 % qinit
59 % -R qinit
59 % -R qinit
60 % qinit -c
60 % qinit -c
61 A .hgignore
61 A .hgignore
62 A series
62 A series
63 % qinit; qinit -c
63 % qinit; qinit -c
64 .hgignore:
64 .hgignore:
65 ^\.hg
65 ^\.hg
66 ^\.mq
66 ^\.mq
67 syntax: glob
67 syntax: glob
68 status
68 status
69 guards
69 guards
70 series:
70 series:
71 abort: repository already exists!
71 abort: repository already exists!
72 % qinit; <stuff>; qinit -c
73 adding .hg/patches/A
74 adding .hg/patches/B
75 A .hgignore
76 A A
77 A B
78 A series
79 .hgignore:
80 status
81 bleh
82 series:
83 A
84 B
85 % qrefresh
86 foo bar
87
88 diff -r xa
89 --- a/a
90 +++ b/a
91 @@ -1,1 +1,2 @@
92 a
93 +a
94 % empty qrefresh
95 revision:
96 patch:
97 foo bar
98
99 working dir diff:
100 --- a/a
101 +++ b/a
102 @@ -1,1 +1,2 @@
103 a
104 +a
105 % qpop
106 popping test.patch
107 patch queue now empty
108 % qpush with dump of tag cache
109 .hg/tags.cache (pre qpush):
110 1
111
112 applying test.patch
113 now at: test.patch
114 .hg/tags.cache (post qpush):
115 2
116
117 % pop/push outside repo
118 popping test.patch
119 patch queue now empty
120 applying test.patch
121 now at: test.patch
122 % qrefresh in subdir
123 % pop/push -a in subdir
124 popping test2.patch
125 popping test.patch
126 patch queue now empty
127 applying test.patch
128 applying test2.patch
129 now at: test2.patch
130 % qseries
131 test.patch
132 test2.patch
133 0 A test.patch: f...
134 1 A test2.patch:
135 popping test2.patch
136 now at: test.patch
137 0 A test.patch: foo bar
138 1 U test2.patch:
139 applying test2.patch
140 now at: test2.patch
141 % qapplied
142 test.patch
143 test2.patch
144 % qtop
145 test2.patch
146 % prev
147 test.patch
148 % next
149 all patches applied
150 popping test2.patch
151 now at: test.patch
152 % commit should fail
153 abort: cannot commit over an applied mq patch
154 % push should fail
155 pushing to ../../k
156 abort: source has mq patches applied
157 % import should fail
158 abort: cannot import over an applied patch
159 % import --no-commit should succeed
160 applying ../../import.diff
161 M a
162 % qunapplied
163 test2.patch
164 % qpush/qpop with index
165 applying test2.patch
166 now at: test2.patch
167 popping test2.patch
168 popping test1b.patch
169 now at: test.patch
170 applying test1b.patch
171 now at: test1b.patch
172 applying test2.patch
173 now at: test2.patch
174 popping test2.patch
175 now at: test1b.patch
176 popping test1b.patch
177 now at: test.patch
178 applying test1b.patch
179 applying test2.patch
180 now at: test2.patch
181 % pop, qapplied, qunapplied
182 0 A test.patch
183 1 A test1b.patch
184 2 A test2.patch
185 % qapplied -1 test.patch
186 only one patch applied
187 % qapplied -1 test1b.patch
188 test.patch
189 % qapplied -1 test2.patch
190 test1b.patch
191 % qapplied -1
192 test1b.patch
193 % qapplied
194 test.patch
195 test1b.patch
196 test2.patch
197 % qapplied test1b.patch
198 test.patch
199 test1b.patch
200 % qunapplied -1
201 all patches applied
202 % qunapplied
203 % popping
204 popping test2.patch
205 now at: test1b.patch
206 % qunapplied -1
207 test2.patch
208 % qunapplied
209 test2.patch
210 % qunapplied test2.patch
211 % qunapplied -1 test2.patch
212 all patches applied
213 % popping -a
214 popping test1b.patch
215 popping test.patch
216 patch queue now empty
217 % qapplied
218 % qapplied -1
219 no patches applied
220 applying test.patch
221 now at: test.patch
222 % push should succeed
223 popping test.patch
224 patch queue now empty
225 pushing to ../../k
226 searching for changes
227 adding changesets
228 adding manifests
229 adding file changes
230 added 1 changesets with 1 changes to 1 files
231 % qpush/qpop error codes
232 applying test.patch
233 applying test1b.patch
234 applying test2.patch
235 now at: test2.patch
236 % pops all patches and succeeds
237 popping test2.patch
238 popping test1b.patch
239 popping test.patch
240 patch queue now empty
241 qpop -a succeeds
242 % does nothing and succeeds
243 no patches applied
244 qpop -a succeeds
245 % fails - nothing else to pop
246 no patches applied
247 qpop fails
248 % pushes a patch and succeeds
249 applying test.patch
250 now at: test.patch
251 qpush succeeds
252 % pops a patch and succeeds
253 popping test.patch
254 patch queue now empty
255 qpop succeeds
256 % pushes up to test1b.patch and succeeds
257 applying test.patch
258 applying test1b.patch
259 now at: test1b.patch
260 qpush test1b.patch succeeds
261 % does nothing and succeeds
262 qpush: test1b.patch is already at the top
263 qpush test1b.patch succeeds
264 % does nothing and succeeds
265 qpop: test1b.patch is already at the top
266 qpop test1b.patch succeeds
267 % fails - can't push to this patch
268 abort: cannot push to a previous patch: test.patch
269 qpush test.patch fails
270 % fails - can't pop to this patch
271 abort: patch test2.patch is not applied
272 qpop test2.patch fails
273 % pops up to test.patch and succeeds
274 popping test1b.patch
275 now at: test.patch
276 qpop test.patch succeeds
277 % pushes all patches and succeeds
278 applying test1b.patch
279 applying test2.patch
280 now at: test2.patch
281 qpush -a succeeds
282 % does nothing and succeeds
283 all patches are currently applied
284 qpush -a succeeds
285 % fails - nothing else to push
286 patch series already fully applied
287 qpush fails
288 % does nothing and succeeds
289 qpush: test2.patch is already at the top
290 qpush test2.patch succeeds
291 % strip
292 adding x
293 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
294 saving bundle to
295 adding changesets
296 adding manifests
297 adding file changes
298 added 1 changesets with 1 changes to 1 files
299 (run 'hg update' to get a working copy)
300 % strip with local changes, should complain
301 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
302 abort: local changes found
303 % --force strip with local changes
304 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
305 saving bundle to
306 % cd b; hg qrefresh
307 adding a
308 foo
309
310 diff -r cb9a9f314b8b a
311 --- a/a
312 +++ b/a
313 @@ -1,1 +1,2 @@
314 a
315 +a
316 diff -r cb9a9f314b8b b/f
317 --- /dev/null
318 +++ b/b/f
319 @@ -0,0 +1,1 @@
320 +f
321 % hg qrefresh .
322 foo
323
324 diff -r cb9a9f314b8b b/f
325 --- /dev/null
326 +++ b/b/f
327 @@ -0,0 +1,1 @@
328 +f
329 M a
330 % qpush failure
331 popping bar
332 popping foo
333 patch queue now empty
334 applying foo
335 applying bar
336 file foo already exists
337 1 out of 1 hunks FAILED -- saving rejects to file foo.rej
338 patch failed, unable to continue (try -v)
339 patch failed, rejects left in working dir
340 errors during apply, please fix and refresh bar
341 ? foo
342 ? foo.rej
343 % mq tags
344 0 qparent
345 1 qbase foo
346 2 qtip bar tip
347 % bad node in status
348 popping bar
349 now at: foo
350 changeset: 0:cb9a9f314b8b
351 mq status file refers to unknown node
352 tag: tip
353 user: test
354 date: Thu Jan 01 00:00:00 1970 +0000
355 summary: a
356
357 mq status file refers to unknown node
358 default 0:cb9a9f314b8b
359 abort: trying to pop unknown node
360 new file
361
362 diff --git a/new b/new
363 new file mode 100755
364 --- /dev/null
365 +++ b/new
366 @@ -0,0 +1,1 @@
367 +foo
368 copy file
369
370 diff --git a/new b/copy
371 copy from new
372 copy to copy
373 popping copy
374 now at: new
375 applying copy
376 now at: copy
377 diff --git a/new b/copy
378 copy from new
379 copy to copy
380 diff --git a/new b/copy
381 copy from new
382 copy to copy
383 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
384 created new head
385 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
386 popping bar
387 adding branch
388 adding changesets
389 adding manifests
390 adding file changes
391 added 1 changesets with 1 changes to 1 files
392 patch queue now empty
393 (working directory not at a head)
394 applying bar
395 now at: bar
396 diff --git a/bar b/bar
397 new file mode 100644
398 --- /dev/null
399 +++ b/bar
400 @@ -0,0 +1,1 @@
401 +bar
402 diff --git a/foo b/baz
403 rename from foo
404 rename to baz
405 2 baz (foo)
406 diff --git a/bar b/bar
407 new file mode 100644
408 --- /dev/null
409 +++ b/bar
410 @@ -0,0 +1,1 @@
411 +bar
412 diff --git a/foo b/baz
413 rename from foo
414 rename to baz
415 2 baz (foo)
416 diff --git a/bar b/bar
417 diff --git a/foo b/baz
418
419 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
420 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
421 popping bar
422 adding branch
423 adding changesets
424 adding manifests
425 adding file changes
426 added 1 changesets with 1 changes to 1 files
427 patch queue now empty
428 (working directory not at a head)
429 applying bar
430 now at: bar
431 diff --git a/foo b/bleh
432 rename from foo
433 rename to bleh
434 diff --git a/quux b/quux
435 new file mode 100644
436 --- /dev/null
437 +++ b/quux
438 @@ -0,0 +1,1 @@
439 +bar
440 3 bleh (foo)
441 diff --git a/foo b/barney
442 rename from foo
443 rename to barney
444 diff --git a/fred b/fred
445 new file mode 100644
446 --- /dev/null
447 +++ b/fred
448 @@ -0,0 +1,1 @@
449 +bar
450 3 barney (foo)
451 % refresh omitting an added file
452 C newfile
453 A newfile
454 popping baz
455 now at: bar
456 % create a git patch
457 diff --git a/alexander b/alexander
458 % create a git binary patch
459 8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
460 diff --git a/bucephalus b/bucephalus
461 % check binary patches can be popped and pushed
462 popping addbucephalus
463 now at: addalexander
464 applying addbucephalus
465 now at: addbucephalus
466 8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
467 % strip again
468 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
469 created new head
470 merging foo
471 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
472 (branch merge, don't forget to commit)
473 changeset: 3:99615015637b
474 tag: tip
475 parent: 2:20cbbe65cff7
476 parent: 1:d2871fc282d4
477 user: test
478 date: Thu Jan 01 00:00:00 1970 +0000
479 summary: merge
480
481 changeset: 2:20cbbe65cff7
482 parent: 0:53245c60e682
483 user: test
484 date: Thu Jan 01 00:00:00 1970 +0000
485 summary: change foo 2
486
487 changeset: 1:d2871fc282d4
488 user: test
489 date: Thu Jan 01 00:00:00 1970 +0000
490 summary: change foo 1
491
492 changeset: 0:53245c60e682
493 user: test
494 date: Thu Jan 01 00:00:00 1970 +0000
495 summary: add foo
496
497 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
498 saving bundle to
499 saving bundle to
500 adding branch
501 adding changesets
502 adding manifests
503 adding file changes
504 added 1 changesets with 1 changes to 1 files
505 changeset: 1:20cbbe65cff7
506 tag: tip
507 user: test
508 date: Thu Jan 01 00:00:00 1970 +0000
509 summary: change foo 2
510
511 changeset: 0:53245c60e682
512 user: test
513 date: Thu Jan 01 00:00:00 1970 +0000
514 summary: add foo
515
516 % qclone
517 abort: versioned patch repository not found (see qinit -c)
518 adding .hg/patches/patch1
519 main repo:
520 rev 1: change foo
521 rev 0: add foo
522 patch repo:
523 rev 0: checkpoint
524 updating to branch default
525 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
526 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
527 main repo:
528 rev 0: add foo
529 patch repo:
530 rev 0: checkpoint
531 popping patch1
532 patch queue now empty
533 main repo:
534 rev 0: add foo
535 patch repo:
536 rev 0: checkpoint
537 updating to branch default
538 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
539 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
540 main repo:
541 rev 0: add foo
542 patch repo:
543 rev 0: checkpoint
544 % test applying on an empty file (issue 1033)
545 adding a
546 popping changea
547 patch queue now empty
548 applying changea
549 now at: changea
550 % test qpush with --force, issue1087
551 adding bye.txt
552 adding hello.txt
553 popping empty
554 patch queue now empty
555 % qpush should fail, local changes
556 abort: local changes found, refresh first
557 % apply force, should not discard changes with empty patch
558 applying empty
559 patch empty is empty
560 now at: empty
561 diff -r bf5fc3f07a0a hello.txt
562 --- a/hello.txt
563 +++ b/hello.txt
564 @@ -1,1 +1,2 @@
565 hello
566 +world
567 diff -r 9ecee4f634e3 hello.txt
568 --- a/hello.txt
569 +++ b/hello.txt
570 @@ -1,1 +1,2 @@
571 hello
572 +world
573 changeset: 1:bf5fc3f07a0a
574 tag: qtip
575 tag: tip
576 tag: empty
577 tag: qbase
578 user: test
579 date: Thu Jan 01 00:00:00 1970 +0000
580 summary: imported patch empty
581
582
583 popping empty
584 patch queue now empty
585 % qpush should fail, local changes
586 abort: local changes found, refresh first
587 % apply force, should discard changes in hello, but not bye
588 applying empty
589 now at: empty
590 M bye.txt
591 diff -r ba252371dbc1 bye.txt
592 --- a/bye.txt
593 +++ b/bye.txt
594 @@ -1,1 +1,2 @@
595 bye
596 +universe
597 diff -r 9ecee4f634e3 bye.txt
598 --- a/bye.txt
599 +++ b/bye.txt
600 @@ -1,1 +1,2 @@
601 bye
602 +universe
603 diff -r 9ecee4f634e3 hello.txt
604 --- a/hello.txt
605 +++ b/hello.txt
606 @@ -1,1 +1,3 @@
607 hello
608 +world
609 +universe
610 % test popping revisions not in working dir ancestry
611 0 A empty
612 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
613 popping empty
614 patch queue now empty
615 % test popping must remove files added in subdirectories first
616 popping rename-dir
617 patch queue now empty
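Note on the new expected output above ("popping rename-dir" / "patch queue now empty"): it covers the case where qpop must delete files that the patch added inside a subdirectory. The test script that produces this output is not part of this diff, so the following is only a minimal sketch in the shell style of Mercurial's test scripts; the repository name, directory, and file names are hypothetical, and only the patch name rename-dir is taken from the output above.

    hg init subdir-pop && cd subdir-pop   # hypothetical repo name
    hg qnew rename-dir                    # start the mq patch named in the output
    mkdir d
    echo a > d/a
    hg add d/a                            # file added inside a subdirectory
    hg qrefresh                           # record the added file in rename-dir
    hg qpop                               # popping must remove d/a before the patch is fully undone

Under these assumptions, the qpop at the end would print "popping rename-dir" followed by "patch queue now empty", matching the lines added by this changeset.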