##// END OF EJS Templates
Merge with stable
Martin Geisler -
r10049:5b9709f8 merge default
parent child Browse files
Show More
@@ -1,2701 +1,2701 b''
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details)::
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25
26 26 add known patch to applied stack qpush
27 27 remove patch from applied stack qpop
28 28 refresh contents of top applied patch qrefresh
29 29 '''
30 30
31 31 from mercurial.i18n import _
32 32 from mercurial.node import bin, hex, short, nullid, nullrev
33 33 from mercurial.lock import release
34 34 from mercurial import commands, cmdutil, hg, patch, util
35 35 from mercurial import repair, extensions, url, error
36 36 import os, sys, re, errno
37 37
# 'hg qclone' may run without a local repository; register it as such.
commands.norepo += " qclone"

# Patch names look like unix file names.
# They must be joinable with the queue directory and result in the patch path.
normname = util.normpath
43 43
class statusentry(object):
    """One entry of the status file: an applied patch's revision and name.

    May be constructed either from a single "rev:name" string or from the
    two fields passed separately; a string without a ':' yields a
    (None, None) entry.
    """
    def __init__(self, rev, name=None):
        if name:
            self.rev, self.name = rev, name
        else:
            fields = rev.split(':', 1)
            if len(fields) == 2:
                self.rev, self.name = fields
            else:
                self.rev, self.name = None, None

    def __str__(self):
        return self.rev + ':' + self.name
57 57
class patchheader(object):
    """Parsed header of a patch file: commit message, free-form comment
    lines, user, date, and whether an actual diff follows the header."""
    def __init__(self, pf):
        # drop trailing diff-introducing lines from the header
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        # drop trailing blank lines
        def eatempty(lines):
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        format = None       # parser state: None, "hgpatch", "tag", "tagdone"
        subject = None
        diffstart = 0       # 0 no diff seen, 1 saw "--- ", 2 confirmed diff

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                # a "--- " line only counts as a diff once "+++ " follows
                if line.startswith('+++ '):
                    diffstart = 2
                    break
                if line.startswith("--- "):
                    diffstart = 1
                    continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message      # commit message lines
        self.comments = comments    # full header text, including message
        self.user = user
        self.date = date
        self.haspatch = diffstart > 1

    def setuser(self, user):
        """Set the patch's user, updating or inserting the header line."""
        if not self.updateheader(['From: ', '# User '], user):
            try:
                # insert right after the '# HG changeset patch' marker
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                # no hg header: use mail style if other mail headers exist,
                # otherwise create a fresh hg header block
                if self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        """Set the patch's date, updating or inserting the header line."""
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                # insert right after the '# HG changeset patch' marker
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                # see setuser for the fallback logic
                if self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setmessage(self, message):
        """Replace the commit message (also reflected in comments)."""
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        # full header text as it appears at the top of the patch file
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
214 214
215 215 class queue(object):
216 216 def __init__(self, ui, path, patchdir=None):
217 217 self.basepath = path
218 218 self.path = patchdir or os.path.join(path, "patches")
219 219 self.opener = util.opener(self.path)
220 220 self.ui = ui
221 221 self.applied_dirty = 0
222 222 self.series_dirty = 0
223 223 self.series_path = "series"
224 224 self.status_path = "status"
225 225 self.guards_path = "guards"
226 226 self.active_guards = None
227 227 self.guards_dirty = False
228 228 self._diffopts = None
229 229
230 230 @util.propertycache
231 231 def applied(self):
232 232 if os.path.exists(self.join(self.status_path)):
233 233 lines = self.opener(self.status_path).read().splitlines()
234 234 return [statusentry(l) for l in lines]
235 235 return []
236 236
237 237 @util.propertycache
238 238 def full_series(self):
239 239 if os.path.exists(self.join(self.series_path)):
240 240 return self.opener(self.series_path).read().splitlines()
241 241 return []
242 242
    @util.propertycache
    def series(self):
        # parse_series assigns self.series as a plain instance attribute,
        # which shadows this cached property, so the seemingly recursive
        # return below reads the freshly parsed value.
        self.parse_series()
        return self.series
247 247
    @util.propertycache
    def series_guards(self):
        # see series above: parse_series assigns self.series_guards,
        # shadowing this property, so the return reads the parsed value.
        self.parse_series()
        return self.series_guards
252 252
253 253 def invalidate(self):
254 254 for a in 'applied full_series series series_guards'.split():
255 255 if a in self.__dict__:
256 256 delattr(self, a)
257 257 self.applied_dirty = 0
258 258 self.series_dirty = 0
259 259 self.guards_dirty = False
260 260 self.active_guards = None
261 261
262 262 def diffopts(self):
263 263 if self._diffopts is None:
264 264 self._diffopts = patch.diffopts(self.ui)
265 265 return self._diffopts
266 266
267 267 def join(self, *p):
268 268 return os.path.join(self.path, *p)
269 269
270 270 def find_series(self, patch):
271 271 pre = re.compile("(\s*)([^#]+)")
272 272 index = 0
273 273 for l in self.full_series:
274 274 m = pre.match(l)
275 275 if m:
276 276 s = m.group(2)
277 277 s = s.rstrip()
278 278 if s == patch:
279 279 return index
280 280 index += 1
281 281 return None
282 282
    # guards appear after a patch name as '#' comments: #+guard or #-guard
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parse_series(self):
        """Split full_series into self.series (bare patch names) and
        self.series_guards (one list of guards per patch).

        Raises util.Abort if a patch name appears twice."""
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # the whole line is a comment
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))
305 305
306 306 def check_guard(self, guard):
307 307 if not guard:
308 308 return _('guard cannot be an empty string')
309 309 bad_chars = '# \t\r\n\f'
310 310 first = guard[0]
311 311 if first in '-+':
312 312 return (_('guard %r starts with invalid character: %r') %
313 313 (guard, first))
314 314 for c in bad_chars:
315 315 if c in guard:
316 316 return _('invalid character in guard %r: %r') % (guard, c)
317 317
318 318 def set_active(self, guards):
319 319 for guard in guards:
320 320 bad = self.check_guard(guard)
321 321 if bad:
322 322 raise util.Abort(bad)
323 323 guards = sorted(set(guards))
324 324 self.ui.debug('active guards: %s\n' % ' '.join(guards))
325 325 self.active_guards = guards
326 326 self.guards_dirty = True
327 327
    def active(self):
        """Return the list of active guards, reading the guards file on
        first use; invalid entries are warned about and skipped."""
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                # a missing guards file simply means no guards are active
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    # report with 1-based position but keep going
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
344 344
345 345 def set_guards(self, idx, guards):
346 346 for g in guards:
347 347 if len(g) < 2:
348 348 raise util.Abort(_('guard %r too short') % g)
349 349 if g[0] not in '-+':
350 350 raise util.Abort(_('guard %r starts with invalid char') % g)
351 351 bad = self.check_guard(g[1:])
352 352 if bad:
353 353 raise util.Abort(bad)
354 354 drop = self.guard_re.sub('', self.full_series[idx])
355 355 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
356 356 self.parse_series()
357 357 self.series_dirty = True
358 358
359 359 def pushable(self, idx):
360 360 if isinstance(idx, str):
361 361 idx = self.series.index(idx)
362 362 patchguards = self.series_guards[idx]
363 363 if not patchguards:
364 364 return True, None
365 365 guards = self.active()
366 366 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
367 367 if exactneg:
368 368 return False, exactneg[0]
369 369 pos = [g for g in patchguards if g[0] == '+']
370 370 exactpos = [g for g in pos if g[1:] in guards]
371 371 if pos:
372 372 if exactpos:
373 373 return True, exactpos[0]
374 374 return False, pos
375 375 return True, ''
376 376
    def explain_pushable(self, idx, all_patches=False):
        """Print why the patch at idx is pushable or not.

        With all_patches, report every patch via ui.write; otherwise only
        warn about skipped patches (and only when verbose)."""
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
401 401
402 402 def save_dirty(self):
403 403 def write_list(items, path):
404 404 fp = self.opener(path, 'w')
405 405 for i in items:
406 406 fp.write("%s\n" % i)
407 407 fp.close()
408 408 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
409 409 if self.series_dirty: write_list(self.full_series, self.series_path)
410 410 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
411 411
412 412 def removeundo(self, repo):
413 413 undo = repo.sjoin('undo')
414 414 if not os.path.exists(undo):
415 415 return
416 416 try:
417 417 os.unlink(undo)
418 418 except OSError, inst:
419 419 self.ui.warn(_('error removing undo: %s\n') % str(inst))
420 420
421 421 def printdiff(self, repo, node1, node2=None, files=None,
422 422 fp=None, changes=None, opts={}):
423 423 stat = opts.get('stat')
424 424 if stat:
425 425 opts['unified'] = '0'
426 426
427 427 m = cmdutil.match(repo, files, opts)
428 428 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
429 429 write = fp is None and repo.ui.write or fp.write
430 430 if stat:
431 431 width = self.ui.interactive() and util.termwidth() or 80
432 432 write(patch.diffstat(util.iterlines(chunks), width=width,
433 433 git=self.diffopts().git))
434 434 else:
435 435 for chunk in chunks:
436 436 write(chunk)
437 437
    def mergeone(self, repo, mergeq, head, patch, rev):
        """Bring one patch from mergeq into this queue on top of head,
        merging with rev when a plain apply fails.

        Returns (0, node) on success; raises util.Abort on failure."""
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, n, update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        # commit the merge reusing the original description and user
        n = repo.commit(ctx.description(), ctx.user(), force=True)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch))
        except:
            raise util.Abort(_("unable to read %s") % patch)

        # rewrite the patch file: original header plus the merged diff
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
475 475
    def qparents(self, repo, rev=None):
        """Return the queue parent of rev, or of the working directory
        when rev is None.

        For a merge, prefer whichever parent is an applied patch;
        otherwise fall back to the first parent."""
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            if len(self.applied) == 0:
                return None
            # working dir is a merge: the top applied patch is the parent
            return bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != nullid:
            # rev is a merge: pick the parent that is an applied patch
            arevs = [ x.rev for x in self.applied ]
            p0 = hex(pp[0])
            p1 = hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
494 494
    def mergepatch(self, repo, mergeq, series):
        """Pull the given series of patches from mergeq into this queue,
        merging each one; returns (err, head)."""
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit('[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
533 533
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file
        Returns (success, files, fuzz); files maps the paths touched.'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files, eolmode=None)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            return (False, files, False)

        return (True, files, fuzz)
548 548
549 549 def apply(self, repo, series, list=False, update_status=True,
550 550 strict=False, patchdir=None, merge=None, all_files={}):
551 551 wlock = lock = tr = None
552 552 try:
553 553 wlock = repo.wlock()
554 554 lock = repo.lock()
555 555 tr = repo.transaction()
556 556 try:
557 557 ret = self._apply(repo, series, list, update_status,
558 558 strict, patchdir, merge, all_files=all_files)
559 559 tr.close()
560 560 self.save_dirty()
561 561 return ret
562 562 except:
563 563 try:
564 564 tr.abort()
565 565 finally:
566 566 repo.invalidate()
567 567 repo.dirstate.invalidate()
568 568 raise
569 569 finally:
570 570 del tr
571 571 release(lock, wlock)
572 572 self.removeundo(repo)
573 573
574 574 def _apply(self, repo, series, list=False, update_status=True,
575 575 strict=False, patchdir=None, merge=None, all_files={}):
576 576 '''returns (error, hash)
577 577 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
578 578 # TODO unify with commands.py
579 579 if not patchdir:
580 580 patchdir = self.path
581 581 err = 0
582 582 n = None
583 583 for patchname in series:
584 584 pushable, reason = self.pushable(patchname)
585 585 if not pushable:
586 586 self.explain_pushable(patchname, all_patches=True)
587 587 continue
588 588 self.ui.status(_("applying %s\n") % patchname)
589 589 pf = os.path.join(patchdir, patchname)
590 590
591 591 try:
592 592 ph = patchheader(self.join(patchname))
593 593 except:
594 594 self.ui.warn(_("unable to read %s\n") % patchname)
595 595 err = 1
596 596 break
597 597
598 598 message = ph.message
599 599 if not message:
600 600 message = _("imported patch %s\n") % patchname
601 601 else:
602 602 if list:
603 603 message.append(_("\nimported patch %s") % patchname)
604 604 message = '\n'.join(message)
605 605
606 606 if ph.haspatch:
607 607 (patcherr, files, fuzz) = self.patch(repo, pf)
608 608 all_files.update(files)
609 609 patcherr = not patcherr
610 610 else:
611 611 self.ui.warn(_("patch %s is empty\n") % patchname)
612 612 patcherr, files, fuzz = 0, [], 0
613 613
614 614 if merge and files:
615 615 # Mark as removed/merged and update dirstate parent info
616 616 removed = []
617 617 merged = []
618 618 for f in files:
619 619 if os.path.exists(repo.wjoin(f)):
620 620 merged.append(f)
621 621 else:
622 622 removed.append(f)
623 623 for f in removed:
624 624 repo.dirstate.remove(f)
625 625 for f in merged:
626 626 repo.dirstate.merge(f)
627 627 p1, p2 = repo.dirstate.parents()
628 628 repo.dirstate.setparents(p1, merge)
629 629
630 630 files = patch.updatedir(self.ui, repo, files)
631 631 match = cmdutil.matchfiles(repo, files or [])
632 632 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
633 633
634 634 if n is None:
635 635 raise util.Abort(_("repo commit failed"))
636 636
637 637 if update_status:
638 638 self.applied.append(statusentry(hex(n), patchname))
639 639
640 640 if patcherr:
641 641 self.ui.warn(_("patch failed, rejects left in working dir\n"))
642 642 err = 2
643 643 break
644 644
645 645 if fuzz and strict:
646 646 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
647 647 err = 3
648 648 break
649 649 return (err, n)
650 650
651 651 def _cleanup(self, patches, numrevs, keep=False):
652 652 if not keep:
653 653 r = self.qrepo()
654 654 if r:
655 655 r.remove(patches, True)
656 656 else:
657 657 for p in patches:
658 658 os.unlink(self.join(p))
659 659
660 660 if numrevs:
661 661 del self.applied[:numrevs]
662 662 self.applied_dirty = 1
663 663
664 664 for i in sorted([self.find_series(p) for p in patches], reverse=True):
665 665 del self.full_series[i]
666 666 self.parse_series()
667 667 self.series_dirty = 1
668 668
    def _revpatches(self, repo, revs):
        """Map each rev (which must be applied, in order from the bottom
        of the queue) to its patch name; abort otherwise."""
        firstrev = repo[self.applied[0].rev].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise util.Abort(_('revision %d is not managed') % rev)

            ctx = repo[rev]
            # revs must match the applied stack from the bottom up
            base = bin(self.applied[i].rev)
            if ctx.node() != base:
                msg = _('cannot delete revision %d above applied patches')
                raise util.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches
692 692
693 693 def finish(self, repo, revs):
694 694 patches = self._revpatches(repo, sorted(revs))
695 695 self._cleanup(patches, len(patches))
696 696
    def delete(self, repo, patches, opts):
        """qdelete: remove unapplied patches by name and/or applied ones
        by revision (opts['rev']); opts['keep'] preserves patch files."""
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        numrevs = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            # _revpatches needs the revs in ascending order
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get('keep'))
724 724
725 725 def check_toppatch(self, repo):
726 726 if len(self.applied) > 0:
727 727 top = bin(self.applied[-1].rev)
728 728 pp = repo.dirstate.parents()
729 729 if top not in pp:
730 730 raise util.Abort(_("working directory revision is not qtip"))
731 731 return top
732 732 return None
733 733 def check_localchanges(self, repo, force=False, refresh=True):
734 734 m, a, r, d = repo.status()[:4]
735 735 if m or a or r or d:
736 736 if not force:
737 737 if refresh:
738 738 raise util.Abort(_("local changes found, refresh first"))
739 739 else:
740 740 raise util.Abort(_("local changes found"))
741 741 return m, a, r, d
742 742
743 743 _reserved = ('series', 'status', 'guards')
744 744 def check_reserved_name(self, name):
745 745 if (name in self._reserved or name.startswith('.hg')
746 746 or name.startswith('.mq')):
747 747 raise util.Abort(_('"%s" cannot be used as the name of a patch')
748 748 % name)
749 749
    def new(self, repo, patchfn, *pats, **opts):
        """Create a new patch patchfn from the current local changes
        (qnew).

        options:
        msg: a string or a no-argument function returning a string

        On any failure the commit is rolled back and the half-written
        patch file is unlinked.
        """
        msg = opts.get('msg')
        force = opts.get('force')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        self.check_reserved_name(patchfn)
        if os.path.exists(self.join(patchfn)):
            raise util.Abort(_('patch "%s" already exists') % patchfn)
        if opts.get('include') or opts.get('exclude') or pats:
            match = cmdutil.match(repo, pats, opts)
            # detect missing files in pats
            def badfn(f, msg):
                raise util.Abort('%s: %s' % (f, msg))
            match.bad = badfn
            m, a, r, d = repo.status(match=match)[:4]
        else:
            m, a, r, d = self.check_localchanges(repo, force)
            match = cmdutil.matchfiles(repo, m + a + r)
        commitfiles = m + a + r
        self.check_toppatch(repo)
        insert = self.full_series_end()
        wlock = repo.wlock()
        try:
            # if patch file write fails, abort early
            p = self.opener(patchfn, "w")
            try:
                # write the header: hg style when a date is given,
                # mail style when only a user is given
                if date:
                    p.write("# HG changeset patch\n")
                    if user:
                        p.write("# User " + user + "\n")
                    p.write("# Date %d %d\n\n" % date)
                elif user:
                    p.write("From: " + user + "\n\n")

                if hasattr(msg, '__call__'):
                    msg = msg()
                commitmsg = msg and msg or ("[mq]: %s" % patchfn)
                n = repo.commit(commitmsg, user, date, match=match, force=True)
                if n is None:
                    raise util.Abort(_("repo commit failed"))
                try:
                    # record the new patch in series/status, then write
                    # message and diff into the patch file
                    self.full_series[insert:insert] = [patchfn]
                    self.applied.append(statusentry(hex(n), patchfn))
                    self.parse_series()
                    self.series_dirty = 1
                    self.applied_dirty = 1
                    if msg:
                        msg = msg + "\n\n"
                        p.write(msg)
                    if commitfiles:
                        diffopts = self.diffopts()
                        if opts.get('git'): diffopts.git = True
                        parent = self.qparents(repo, n)
                        chunks = patch.diff(repo, node1=parent, node2=n,
                                            match=match, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    wlock.release()
                    wlock = None
                    r = self.qrepo()
                    if r: r.add([patchfn])
                except:
                    # undo the commit made above before re-raising
                    repo.rollback()
                    raise
            except Exception:
                # remove the partially written patch file
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)
830 830
    def strip(self, repo, rev, update=True, backup="all", force=None):
        """Strip rev from the repository; with update, first move the
        working directory to rev's queue parent."""
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, rev)
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            release(lock, wlock)
850 850
851 851 def isapplied(self, patch):
852 852 """returns (index, rev, patch)"""
853 853 for i, a in enumerate(self.applied):
854 854 if a.name == patch:
855 855 return (i, a.rev, a.name)
856 856 return None
857 857
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve patch (name, series index, unique substring,
        qtip/qbase alias, or name[-+]offset) to an exact series entry;
        abort when nothing matches."""
        patch = patch and str(patch)

        def partial_name(s):
            # exact name, else unique substring, else qtip/qbase aliases
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if len(self.series) > 0 and len(self.applied) > 0:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch is None:
            return None
        if patch in self.series:
            return patch

        # a bare integer is a series index, unless a patch file of that
        # exact name exists
        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except(ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

        if not strict:
            res = partial_name(patch)
            if res:
                return res
            # name-N: N patches before the named one in the series
            minus = patch.rfind('-')
            if minus >= 0:
                res = partial_name(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus+1:] or 1)
                    except(ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            # name+N: N patches after the named one in the series
            plus = patch.rfind('+')
            if plus >= 0:
                res = partial_name(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus+1:] or 1)
                    except(ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
928 928
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, all=False):
        """qpush: apply patches from the series up to and including
        patch (or the next one when patch is None, or the last with all).

        Returns the error code from apply/mergepatch (0 on success); on
        an unexpected exception the working directory is cleaned up
        before re-raising."""
        wlock = repo.wlock()
        try:
            if repo.dirstate.parents()[0] not in repo.heads():
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return
                pushable, reason = self.pushable(patch)
                if not pushable:
                    if reason:
                        reason = _('guarded by %r') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.series_end()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            s = self.series[start:end]
            all_files = {}
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                unknown = repo.status(unknown=True)[4]
                # only remove unknown files that we know we touched or
                # created while patching
                for f in unknown:
                    if f in all_files:
                        util.unlink(repo.wjoin(f))
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and refresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]

        finally:
            wlock.release()
1021 1021
1022 1022 def pop(self, repo, patch=None, force=False, update=True, all=False):
1023 1023 def getfile(f, rev, flags):
1024 1024 t = repo.file(f).read(rev)
1025 1025 repo.wwrite(f, t, flags)
1026 1026
1027 1027 wlock = repo.wlock()
1028 1028 try:
1029 1029 if patch:
1030 1030 # index, rev, patch
1031 1031 info = self.isapplied(patch)
1032 1032 if not info:
1033 1033 patch = self.lookup(patch)
1034 1034 info = self.isapplied(patch)
1035 1035 if not info:
1036 1036 raise util.Abort(_("patch %s is not applied") % patch)
1037 1037
1038 1038 if len(self.applied) == 0:
1039 1039 # Allow qpop -a to work repeatedly,
1040 1040 # but not qpop without an argument
1041 1041 self.ui.warn(_("no patches applied\n"))
1042 1042 return not all
1043 1043
1044 1044 if all:
1045 1045 start = 0
1046 1046 elif patch:
1047 1047 start = info[0] + 1
1048 1048 else:
1049 1049 start = len(self.applied) - 1
1050 1050
1051 1051 if start >= len(self.applied):
1052 1052 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1053 1053 return
1054 1054
1055 1055 if not update:
1056 1056 parents = repo.dirstate.parents()
1057 1057 rr = [ bin(x.rev) for x in self.applied ]
1058 1058 for p in parents:
1059 1059 if p in rr:
1060 1060 self.ui.warn(_("qpop: forcing dirstate update\n"))
1061 1061 update = True
1062 1062 else:
1063 1063 parents = [p.hex() for p in repo[None].parents()]
1064 1064 needupdate = False
1065 1065 for entry in self.applied[start:]:
1066 1066 if entry.rev in parents:
1067 1067 needupdate = True
1068 1068 break
1069 1069 update = needupdate
1070 1070
1071 1071 if not force and update:
1072 1072 self.check_localchanges(repo)
1073 1073
1074 1074 self.applied_dirty = 1
1075 1075 end = len(self.applied)
1076 1076 rev = bin(self.applied[start].rev)
1077 1077 if update:
1078 1078 top = self.check_toppatch(repo)
1079 1079
1080 1080 try:
1081 1081 heads = repo.changelog.heads(rev)
1082 1082 except error.LookupError:
1083 1083 node = short(rev)
1084 1084 raise util.Abort(_('trying to pop unknown node %s') % node)
1085 1085
1086 1086 if heads != [bin(self.applied[-1].rev)]:
1087 1087 raise util.Abort(_("popping would remove a revision not "
1088 1088 "managed by this patch queue"))
1089 1089
1090 1090 # we know there are no local changes, so we can make a simplified
1091 1091 # form of hg.update.
1092 1092 if update:
1093 1093 qp = self.qparents(repo, rev)
1094 1094 changes = repo.changelog.read(qp)
1095 1095 mmap = repo.manifest.read(changes[0])
1096 1096 m, a, r, d = repo.status(qp, top)[:4]
1097 1097 if d:
1098 1098 raise util.Abort(_("deletions found between repo revs"))
1099 for f in m:
1100 getfile(f, mmap[f], mmap.flags(f))
1101 for f in r:
1102 getfile(f, mmap[f], mmap.flags(f))
1103 for f in m + r:
1104 repo.dirstate.normal(f)
1105 1099 for f in a:
1106 1100 try:
1107 1101 os.unlink(repo.wjoin(f))
1108 1102 except OSError, e:
1109 1103 if e.errno != errno.ENOENT:
1110 1104 raise
1111 1105 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1112 1106 except: pass
1113 1107 repo.dirstate.forget(f)
1108 for f in m:
1109 getfile(f, mmap[f], mmap.flags(f))
1110 for f in r:
1111 getfile(f, mmap[f], mmap.flags(f))
1112 for f in m + r:
1113 repo.dirstate.normal(f)
1114 1114 repo.dirstate.setparents(qp, nullid)
1115 1115 for patch in reversed(self.applied[start:end]):
1116 1116 self.ui.status(_("popping %s\n") % patch.name)
1117 1117 del self.applied[start:end]
1118 1118 self.strip(repo, rev, update=False, backup='strip')
1119 1119 if len(self.applied):
1120 1120 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1121 1121 else:
1122 1122 self.ui.write(_("patch queue now empty\n"))
1123 1123 finally:
1124 1124 wlock.release()
1125 1125
1126 1126 def diff(self, repo, pats, opts):
1127 1127 top = self.check_toppatch(repo)
1128 1128 if not top:
1129 1129 self.ui.write(_("no patches applied\n"))
1130 1130 return
1131 1131 qp = self.qparents(repo, top)
1132 1132 if opts.get('reverse'):
1133 1133 node1, node2 = None, qp
1134 1134 else:
1135 1135 node1, node2 = qp, None
1136 1136 self._diffopts = patch.diffopts(self.ui, opts)
1137 1137 self.printdiff(repo, node1, node2, files=pats, opts=opts)
1138 1138
    def refresh(self, repo, pats=None, **opts):
        """Rewrite the top applied patch from current working dir changes.

        Supported opts (mirroring qrefresh): msg, user, date, git, short,
        plus matcher options filtered through cmdutil.match.  Returns 1
        when no patches are applied, else None.
        """
        if len(self.applied) == 0:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()
        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
            top = bin(top)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))
            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            ph = patchheader(self.join(patchfn))

            patchf = self.opener(patchfn, 'r')

            # if the patch was a git patch, refresh it as a git patch
            for line in patchf:
                if line.startswith('diff --git'):
                    self.diffopts().git = True
                    break

            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            patchf.seek(0)
            patchf.truncate()

            comments = str(ph)
            if comments:
                patchf.write(comments)

            if opts.get('git'):
                self.diffopts().git = True
            tip = repo.changelog.tip()
            if top == tip:
                # if the top of our patch queue is also the tip, there is an
                # optimization here. We update the dirstate in place and strip
                # off the tip commit. Then just commit the current directory
                # tree. We can also send repo.commit the list of files
                # changed to speed up the diff
                #
                # in short mode, we only diff the files included in the
                # patch already plus specified files
                #
                # this should really read:
                #   mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
                # but we do it backwards to take advantage of manifest/chlog
                # caching against the next repo.status call
                #
                mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
                changes = repo.changelog.read(tip)
                man = repo.manifest.read(changes[0])
                aaa = aa[:]
                matchfn = cmdutil.match(repo, pats, opts)
                if opts.get('short'):
                    # if amending a patch, we start with existing
                    # files plus specified files - unfiltered
                    match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                    # filter with inc/exl options
                    matchfn = cmdutil.match(repo, opts=opts)
                else:
                    match = cmdutil.matchall(repo)
                m, a, r, d = repo.status(match=match)[:4]

                # we might end up with files that were added between
                # tip and the dirstate parent, but then changed in the
                # local dirstate. in this case, we want them to only
                # show up in the added section
                for x in m:
                    if x not in aa:
                        mm.append(x)
                # we might end up with files added by the local dirstate that
                # were deleted by the patch. In this case, they should only
                # show up in the changed section.
                for x in a:
                    if x in dd:
                        del dd[dd.index(x)]
                        mm.append(x)
                    else:
                        aa.append(x)
                # make sure any files deleted in the local dirstate
                # are not in the add or change column of the patch
                forget = []
                for x in d + r:
                    if x in aa:
                        del aa[aa.index(x)]
                        forget.append(x)
                        continue
                    elif x in mm:
                        del mm[mm.index(x)]
                        dd.append(x)

                m = list(set(mm))
                r = list(set(dd))
                a = list(set(aa))
                c = [filter(matchfn, l) for l in (m, a, r)]
                match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
                chunks = patch.diff(repo, patchparent, match=match,
                                    changes=c, opts=self.diffopts())
                for chunk in chunks:
                    patchf.write(chunk)

                try:
                    # update the dirstate to reflect the patch contents
                    # before the old tip is stripped
                    if self.diffopts().git:
                        copies = {}
                        for dst in a:
                            src = repo.dirstate.copied(dst)
                            # during qfold, the source file for copies may
                            # be removed. Treat this as a simple add.
                            if src is not None and src in repo.dirstate:
                                copies.setdefault(src, []).append(dst)
                            repo.dirstate.add(dst)
                        # remember the copies between patchparent and tip
                        for dst in aaa:
                            f = repo.file(dst)
                            src = f.renamed(man[dst])
                            if src:
                                copies.setdefault(src[0], []).extend(copies.get(dst, []))
                                if dst in a:
                                    copies[src[0]].append(dst)
                            # we can't copy a file created by the patch itself
                            if dst in copies:
                                del copies[dst]
                        for src, dsts in copies.iteritems():
                            for dst in dsts:
                                repo.dirstate.copy(src, dst)
                    else:
                        for dst in a:
                            repo.dirstate.add(dst)
                        # Drop useless copy information
                        for f in list(repo.dirstate.copies()):
                            repo.dirstate.copy(None, f)
                    for f in r:
                        repo.dirstate.remove(f)
                    # if the patch excludes a modified file, mark that
                    # file with mtime=0 so status can see it.
                    mm = []
                    for i in xrange(len(m)-1, -1, -1):
                        if not matchfn(m[i]):
                            mm.append(m[i])
                            del m[i]
                    for f in m:
                        repo.dirstate.normal(f)
                    for f in mm:
                        repo.dirstate.normallookup(f)
                    for f in forget:
                        repo.dirstate.forget(f)

                    if not msg:
                        if not ph.message:
                            message = "[mq]: %s\n" % patchfn
                        else:
                            message = "\n".join(ph.message)
                    else:
                        message = msg

                    user = ph.user or changes[1]

                    # assumes strip can roll itself back if interrupted
                    repo.dirstate.setparents(*cparents)
                    self.applied.pop()
                    self.applied_dirty = 1
                    self.strip(repo, top, update=False,
                               backup='strip')
                except:
                    repo.dirstate.invalidate()
                    raise

                try:
                    # might be nice to attempt to roll back strip after this
                    patchf.rename()
                    n = repo.commit(message, user, ph.date, match=match,
                                    force=True)
                    self.applied.append(statusentry(hex(n), patchfn))
                except:
                    ctx = repo[cparents[0]]
                    repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                    self.save_dirty()
                    self.ui.warn(_('refresh interrupted while patch was popped! '
                                   '(revert --all, qpush to recover)\n'))
                    raise
            else:
                # slow path: top is not tip, so pop and re-push after
                # rewriting the patch file from printdiff output
                self.printdiff(repo, patchparent, fp=patchf)
                patchf.rename()
                added = repo.status()[1]
                for a in added:
                    f = repo.wjoin(a)
                    try:
                        os.unlink(f)
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(f))
                    except: pass
                    # forget the file copies in the dirstate
                    # push should readd the files later on
                    repo.dirstate.forget(a)
                self.pop(repo, force=True)
                self.push(repo, force=True)
        finally:
            wlock.release()
            self.removeundo(repo)
1355 1355
1356 1356 def init(self, repo, create=False):
1357 1357 if not create and os.path.isdir(self.path):
1358 1358 raise util.Abort(_("patch queue directory already exists"))
1359 1359 try:
1360 1360 os.mkdir(self.path)
1361 1361 except OSError, inst:
1362 1362 if inst.errno != errno.EEXIST or not create:
1363 1363 raise
1364 1364 if create:
1365 1365 return self.qrepo(create=True)
1366 1366
1367 1367 def unapplied(self, repo, patch=None):
1368 1368 if patch and patch not in self.series:
1369 1369 raise util.Abort(_("patch %s is not in series file") % patch)
1370 1370 if not patch:
1371 1371 start = self.series_end()
1372 1372 else:
1373 1373 start = self.series.index(patch) + 1
1374 1374 unapplied = []
1375 1375 for i in xrange(start, len(self.series)):
1376 1376 pushable, reason = self.pushable(i)
1377 1377 if pushable:
1378 1378 unapplied.append((i, self.series[i]))
1379 1379 self.explain_pushable(i)
1380 1380 return unapplied
1381 1381
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print (a slice of) the patch series to the ui.

        missing: when set, list files in the patch dir that are NOT in the
                 series instead of the series itself.
        start/length: slice of the series to show.
        status: only show entries with this status letter ('A'pplied,
                'U'napplied, 'G'uarded) unless verbose.
        summary: append the first line of each patch message.
        """
        def displayname(pfx, patchname):
            # render one series line, optionally with a truncated summary
            if summary:
                ph = patchheader(self.join(patchname))
                msg = ph.message and ph.message[0] or ''
                if self.ui.interactive():
                    width = util.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        msg = ''
                msg = "%s%s: %s" % (pfx, patchname, msg)
            else:
                msg = pfx + patchname
            self.ui.write(msg + '\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(str(start+length - 1))
            for i in xrange(start, start+length):
                patch = self.series[i]
                if patch in applied:
                    stat = 'A'
                elif self.pushable(i)[0]:
                    stat = 'U'
                else:
                    stat = 'G'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, stat)
                elif status and status != stat:
                    continue
                displayname(pfx, patch)
        else:
            # walk the patch directory for files unknown to the series,
            # skipping mq's own control files and dotfiles
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x)
1433 1433
1434 1434 def issaveline(self, l):
1435 1435 if l.name == '.hg.patches.save.line':
1436 1436 return True
1437 1437
1438 1438 def qrepo(self, create=False):
1439 1439 if create or os.path.isdir(self.join(".hg")):
1440 1440 return hg.repository(self.ui, path=self.path, create=create)
1441 1441
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Restore queue state from a save changeset created by save().

        Parses the description of *rev* for the 'Patch Data:' and
        'Dirstate:' sections, rebuilds self.full_series/self.applied,
        optionally strips the save entry (delete) and updates the queue
        repository (qupdate).  Returns 1 on failure, else None.
        """
        c = repo.changelog.read(rev)
        desc = c[4].strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                # saved queue-repo parent nodes follow the marker
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [ bin(x) for x in l ]
            elif datastart != None:
                # entries with a rev were applied; bare names are series-only
                l = line.rstrip()
                se = statusentry(l)
                file_ = se.name
                if se.rev:
                    applied.append(se)
                else:
                    series.append(file_)
        if datastart is None:
            self.ui.warn(_("No saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.full_series = series
        self.applied = applied
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, rev, update=update, backup='strip')
        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("queue directory updating\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("Unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])
1497 1497
    def save(self, repo, msg=None):
        """Commit the current queue state as a save changeset.

        The commit message embeds the applied entries and the full series
        (prefixed with ':') under 'Patch Data:', plus the queue repo's
        dirstate parents, so restore() can later rebuild the queue.
        Returns 1 on failure, else None.
        """
        if len(self.applied) == 0:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        # ':'-prefixed lines mark series-only (unapplied) entries
        ar = [ ':' + x for x in self.full_series ]
        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
                   "\n".join(ar) + '\n' or "")
        n = repo.commit(text, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        # sentinel entry recognized by issaveline()
        self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
        self.applied_dirty = 1
        self.removeundo(repo)
1525 1525
1526 1526 def full_series_end(self):
1527 1527 if len(self.applied) > 0:
1528 1528 p = self.applied[-1].name
1529 1529 end = self.find_series(p)
1530 1530 if end is None:
1531 1531 return len(self.full_series)
1532 1532 return end + 1
1533 1533 return 0
1534 1534
1535 1535 def series_end(self, all_patches=False):
1536 1536 """If all_patches is False, return the index of the next pushable patch
1537 1537 in the series, or the series length. If all_patches is True, return the
1538 1538 index of the first patch past the last applied one.
1539 1539 """
1540 1540 end = 0
1541 1541 def next(start):
1542 1542 if all_patches:
1543 1543 return start
1544 1544 i = start
1545 1545 while i < len(self.series):
1546 1546 p, reason = self.pushable(i)
1547 1547 if p:
1548 1548 break
1549 1549 self.explain_pushable(i)
1550 1550 i += 1
1551 1551 return i
1552 1552 if len(self.applied) > 0:
1553 1553 p = self.applied[-1].name
1554 1554 try:
1555 1555 end = self.series.index(p)
1556 1556 except ValueError:
1557 1557 return 0
1558 1558 return next(end + 1)
1559 1559 return next(end)
1560 1560
1561 1561 def appliedname(self, index):
1562 1562 pname = self.applied[index].name
1563 1563 if not self.ui.verbose:
1564 1564 p = pname
1565 1565 else:
1566 1566 p = str(self.series.index(pname)) + " " + pname
1567 1567 return p
1568 1568
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into the queue.

        files: patch files (or '-' for stdin/URLs) to import; mutually
               exclusive with rev.
        rev:   revisions to convert into patches (must form a linear path,
               see the checks below).
        existing: register files already present in the patch directory.
        force: overwrite existing patches / skip series duplication check.
        git:   emit git-style diffs when exporting revisions.
        """
        def checkseries(patchname):
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)
        def checkfile(patchname):
            if not force and os.path.exists(self.join(patchname)):
                raise util.Abort(_('patch "%s" already exists')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = cmdutil.revrange(repo, rev)
            rev.sort(reverse=True)
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        i = 0
        added = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = hex(repo.changelog.node(rev[0]))
                if base in [n.rev for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [bin(self.applied[-1].rev)]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(bin(self.applied[0].rev))
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            if git:
                self.diffopts().git = True

            # revisions are processed tip-first; each must be the (first)
            # parent of the previously imported one
            for r in rev:
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                self.check_reserved_name(patchname)
                checkseries(patchname)
                checkfile(patchname)
                self.full_series.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                patch.export(repo, [n], fp=patchf, opts=self.diffopts())
                patchf.close()

                se = statusentry(hex(n), patchname)
                self.applied.insert(0, se)

                added.append(patchname)
                patchname = None
            self.parse_series()
            self.applied_dirty = 1

        for filename in files:
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                if not patchname:
                    patchname = normname(filename)
                self.check_reserved_name(patchname)
                if not os.path.isfile(self.join(patchname)):
                    raise util.Abort(_("patch %s does not exist") % patchname)
            else:
                try:
                    if filename == '-':
                        if not patchname:
                            raise util.Abort(_('need --name to import a patch from -'))
                        text = sys.stdin.read()
                    else:
                        text = url.open(self.ui, filename).read()
                except (OSError, IOError):
                    raise util.Abort(_("unable to read %s") % filename)
                if not patchname:
                    patchname = normname(os.path.basename(filename))
                self.check_reserved_name(patchname)
                checkfile(patchname)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                # insert after the last applied patch
                index = self.full_series_end() + i
                self.full_series[index:index] = [patchname]
                self.parse_series()
                self.ui.warn(_("adding %s to series file\n") % patchname)
            i += 1
            added.append(patchname)
            patchname = None
        self.series_dirty = 1
        qrepo = self.qrepo()
        if qrepo:
            qrepo.add(added)
1686 1686
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. With
    -k/--keep, the patch files are preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the qfinish command."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1699 1699
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    # fix: dropped the unused local `l = len(q.applied)`
    q = repo.mq

    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.series_end(True)

    if opts.get('last') and not end:
        ui.write(_("no patches applied\n"))
        return 1
    elif opts.get('last') and end == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    elif opts.get('last'):
        # with --last, show only the patch before the last applied one
        start = end - 2
        end = 1
    else:
        start = 0

    return q.qseries(repo, length=end, start=start, status='A',
                     summary=opts.get('summary'))
1727 1727
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""

    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)

    if start == len(q.series) and opts.get('first'):
        ui.write(_("all patches applied\n"))
        return 1

    # with --first, list just the next unapplied patch
    if opts.get('first'):
        length = 1
    else:
        length = None
    return q.qseries(repo, start=start, length=length, status='U',
                     summary=opts.get('summary'))
1746 1746
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.
    """
    q = repo.mq
    q.qimport(repo, filename, patchname=opts['name'],
              existing=opts['existing'], force=opts['force'],
              rev=opts['rev'], git=opts['git'])
    q.save_dirty()

    # with --push (and no --rev), immediately push the imported patch
    if opts.get('push') and not opts.get('rev'):
        return q.push(repo, None)
    return 0
1783 1783
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if not r:
        return 0
    # seed the freshly created queue repository with an ignore file and
    # an empty series file, and schedule both for commit
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('^\\.hg\n')
        fp.write('^\\.mq\n')
        fp.write('syntax: glob\n')
        fp.write('status\n')
        fp.write('guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1809 1809
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # conventional location of the nested patch repository
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    # fail early if the source has no versioned patch repository
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # for a remote destination, clone only up to the parent of
                # qbase (patches cannot be stripped there afterwards)
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1873 1873
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository

    Runs a regular commit against the versioned patch repository
    (the one created by qinit -c), not the main repository.
    """
    q = repo.mq
    r = q.qrepo()
    if not r:
        # Wrapped in _() for translation, consistent with every other
        # user-visible abort message in this extension.
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)
1880 1880
def series(ui, repo, **opts):
    """print the entire series file"""
    # All formatting is delegated to the queue object.
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1885 1885
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # Index just past the topmost applied patch, or 0 when none applied.
    if q.applied:
        pos = q.series_end(True)
    else:
        pos = 0
    if not pos:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=pos-1, length=1, status='A',
                     summary=opts.get('summary'))
1896 1896
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    if pos == len(q.series):
        # Nothing left in the series beyond the applied stack.
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1905 1905
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    napplied = len(q.applied)
    # Need at least two applied patches to have a "previous" one.
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    if napplied == 0:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=napplied-2, length=1, status='A',
                     summary=opts.get('summary'))
1918 1918
def setupheaderopts(ui, opts):
    # Resolve -U/--currentuser and -D/--currentdate into concrete
    # values, unless an explicit -u/-d value was already supplied.
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
1924 1924
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). It will refuse to run if there are any outstanding changes
    unless -f/--force is specified, in which case the patch will be
    initialized with them. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    if opts.get('edit'):
        # Pass a callable so the editor is only invoked when the queue
        # actually needs the message (i.e. after early sanity checks).
        opts['msg'] = lambda: ui.edit(msg, ui.username())
    else:
        # (Previously opts['msg'] was assigned twice; once is enough.)
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q = repo.mq
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1961 1961
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        # -e: edit the header of the topmost patch; incompatible with
        # an explicit message.
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        toppatch = q.applied[-1].name
        ph = patchheader(q.join(toppatch))
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
1992 1992
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the
    last qrefresh, or 'hg export qtip' if you want to see changes made
    by the current patch without including changes made since the
    qrefresh.
    """
    # All diffing logic lives on the queue object; this is a thin shim.
    repo.mq.diff(repo, pats, opts)
    return 0
2008 2008
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))
    q.check_localchanges(repo)

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # Fix: actually skip the duplicate instead of appending it
            # anyway (it would otherwise be folded twice); also add the
            # missing newline to the warning.
            ui.warn(_('Skipping already folded patch %s\n') % p)
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # Collect each folded patch's header for concatenation.
            ph = patchheader(q.join(p))
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # Build the combined header: current patch first, then each
        # folded header separated by '* * *'.
        ph = patchheader(q.join(parent))
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
2071 2071
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    target = q.lookup(patch)
    force = opts['force']
    # Pop if the target is already applied (it is below the top),
    # otherwise push up to it.
    if q.isapplied(target):
        ret = q.pop(repo, target, force=force)
    else:
        ret = q.push(repo, target, force=force)
    q.save_dirty()
    return ret
2082 2082
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch::

        hg qguard -- other.patch +2.6.17 -stable
    '''
    def status(idx):
        # Print "<patch name>: <guards>" for series entry idx.
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list: show guards for every patch; takes no other args.
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in '-+':
        # No patch name given (first arg already looks like a guard):
        # default to the topmost applied patch.
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        # First argument is the patch name; remaining args are guards.
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # Set the given guards (or clear them all with -n/--none).
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # No guard arguments: just report the patch's current guards.
        status(q.series.index(q.lookup(patch)))
2128 2128
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            # Wrapped in _() for translation, matching the identical
            # message elsewhere in this extension.
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    ph = patchheader(repo.mq.join(patch))

    ui.write('\n'.join(ph.message) + '\n')
2143 2143
def lastsavename(path):
    """find the most recent saved queue for path

    Save files are named "<base>.<n>" in path's directory; return
    (full path, n) for the one with the highest n, or (None, None)
    when there is no save file.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # re.escape the base name so regex metacharacters in it cannot
    # break or corrupt the pattern, and use a literal '\.' separator
    # (the previous bare '.' matched any character).
    namere = re.compile(r"%s\.([0-9]+)" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2160 2160
def savename(path):
    """return the next unused save-file name for path"""
    (last, index) = lastsavename(path)
    if last is None:
        # No previous save: start numbering at 1.
        index = 0
    return "%s.%d" % (path, index + 1)
2167 2167
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files
    will be lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        # -m/--merge: merge against a saved queue, either the one
        # named with -n or the most recently saved one.
        if opts['name']:
            newpath = repo.join(opts['name'])
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq, all=opts.get('all'))
2190 2190
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch
    name, keeps popping off patches until the named patch is at the
    top of the stack.
    """
    if opts['name']:
        # -n: operate on a named (saved) queue; never touch the
        # working directory in that case.
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
2209 2209
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    if not name:
        # One-argument form: rename the current (top) patch.
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # Renaming into a directory keeps the original basename.
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('renaming %s to %s\n' % (patch, name))
    # Rewrite the series file entry, preserving any guards ("#+foo").
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # If the patch is applied, fix up its status-file entry too.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # Mirror the rename in the versioned patch repository
        # (created by qinit -c), under its working-dir lock.
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # Old name was only added, never committed: swap adds.
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                # Record as a copy so history follows the rename.
                if r.dirstate[name] == 'r':
                    r.undelete([name])
                r.copy(patch, name)
                r.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()
2269 2269
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision"""
    q = repo.mq
    # Resolve the revision first, then hand off to the queue.
    node = repo.lookup(rev)
    q.restore(repo, node, delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
2278 2278
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        # -c: copy the patch directory aside, either to the name given
        # with -n or to the next free "<path>.<n>" slot.
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # Best-effort: the status file may simply not exist.
            # (Was a bare except, which also swallowed interrupts.)
            pass
    return 0
2308 2308
def strip(ui, repo, rev, **opts):
    """strip a revision and all its descendants from the repository

    If one of the working directory's parent revisions is stripped, the
    working directory will be updated to the parent of the stripped
    revision.
    """
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'

    rev = repo.lookup(rev)
    p = repo.dirstate.parents()
    cl = repo.changelog
    # Update the working directory only when one of its parents is
    # among the revisions being stripped.
    if p[0] == nullid:
        update = False
    elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
        update = False
    elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
        update = False
    else:
        update = True

    repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
    return 0
2335 2335
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -stable (negative guard)
        qguard bar.patch +stable (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # Changing the selection: record counts beforehand so we can
        # report how many patches became (un)pushable.
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s/--series: summarize guard usage across the series file.
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # No arguments: just print what is currently active.
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # --pop/--reapply: pop down to just before the first guarded
    # applied patch; --reapply then pushes back, skipping guarded ones.
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            # Persist queue state even if the push fails midway.
            q.save_dirty()
2438 2438
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.
    """
    if not revrange and not opts['applied']:
        raise util.Abort(_('no revisions specified'))
    if opts['applied']:
        # -a: finish the whole applied stack.
        revrange = ('qbase:qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = cmdutil.revrange(repo, revrange)
    q.finish(repo, revs)
    q.save_dirty()
    return 0
2469 2469
def reposetup(ui, repo):
    # Subclass the repository at runtime so mq can veto operations that
    # would corrupt the patch stack (commit/push over applied patches)
    # and expose qtip/qbase/qparent as tags.
    class mqrepo(repo.__class__):
        @util.propertycache
        def mq(self):
            # Lazily construct the patch queue for this repository.
            return queue(self.ui, self.join(""))

        def abort_if_wdir_patched(self, errmsg, force=False):
            # Abort when the working directory parent is an applied mq
            # patch, unless force is set.
            if self.mq.applied and not force:
                parent = hex(self.dirstate.parents()[0])
                if parent in [s.rev for s in self.mq.applied]:
                    raise util.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            self.abort_if_wdir_patched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def push(self, remote, force=False, revs=None):
            # Refuse to push applied patches unless forced or specific
            # revisions were requested.
            if self.mq.applied and not force and not revs:
                raise util.Abort(_('source has mq patches applied'))
            return super(mqrepo, self).push(remote, force, revs)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]

            if mqtags[-1][0] not in self.changelog.nodemap:
                # Stale status file (e.g. after a strip done elsewhere).
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            # Synthesize the qtip/qbase/qparent pseudo-tags.
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for patch in mqtags:
                if patch[1] in tags:
                    # Real tags take precedence over patch names.
                    self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
                                 % patch[1])
                else:
                    tags[patch[1]] = patch[0]

            return result

        def _branchtags(self, partial, lrev):
            # Keep mq revisions out of the persistent branch cache.
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags(partial, lrev)

            cl = self.changelog
            qbasenode = bin(q.applied[0].rev)
            if qbasenode not in cl.nodemap:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(qbasenode))
                return super(mqrepo, self)._branchtags(partial, lrev)

            qbase = cl.rev(qbasenode)
            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                self._updatebranchcache(partial, lrev+1, qbase)
                self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            self._updatebranchcache(partial, start, len(cl))

            return partial

    if repo.local():
        repo.__class__ = mqrepo
2554 2554
def mqimport(orig, ui, repo, *args, **kwargs):
    # Wrapper around 'hg import': refuse to import over applied mq
    # patches unless --no-commit was given.
    checker = getattr(repo, 'abort_if_wdir_patched', None)
    if checker is not None and not kwargs.get('no_commit', False):
        checker(_('cannot import over an applied patch'),
                kwargs.get('force'))
    return orig(ui, repo, *args, **kwargs)
2560 2560
def uisetup(ui):
    # Hook 'hg import' so it refuses to run over applied mq patches.
    extensions.wrapcommand(commands.table, 'import', mqimport)
2563 2563
2564 2564 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2565 2565
# Command table: maps command name (with "|"-separated aliases) to a
# (function, options, synopsis) tuple; a leading "^" marks commands
# listed in the short help.
cmdtable = {
    "qapplied":
        (applied,
         [('1', 'last', None, _('show only the last patch'))] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]')),
    "qclone":
        (clone,
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None, _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '', _('location of source patch repository')),
          ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]')),
    "qcommit|qci":
        (commit,
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...')),
    "^qdiff":
        (diff,
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...')),
    "qdelete|qremove|qrm":
        (delete,
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
         _('hg qdelete [-k] [-r REV]... [PATCH]...')),
    'qfold':
        (fold,
         [('e', 'edit', None, _('edit patch header')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
    'qgoto':
        (goto,
         [('f', 'force', None, _('overwrite any local changes'))],
         _('hg qgoto [OPTION]... PATCH')),
    'qguard':
        (guard,
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
    'qheader': (header, [], _('hg qheader [PATCH]')),
    "^qimport":
        (qimport,
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '', _('name of patch file')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [], _('place existing revisions under mq control')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
    "^qinit":
        (init,
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]')),
    "qnew":
        (new,
         [('e', 'edit', None, _('edit commit message')),
          ('f', 'force', None, _('import uncommitted changes into patch')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '', _('add "From: <given user>" to patch')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '', _('add "Date: <given date>" to patch'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
    "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
    "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
    "^qpop":
        (pop,
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '', _('queue name to pop')),
          ('f', 'force', None, _('forget any local changes to patched files'))],
         _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
    "^qpush":
        (push,
         [('f', 'force', None, _('apply if the patch has rejects')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue')),
          ('n', 'name', '', _('merge queue name'))],
         _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
    "^qrefresh":
        (refresh,
         [('e', 'edit', None, _('edit commit message')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None, _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None, _('add/update author field in patch with current user')),
          ('u', 'user', '', _('add/update author field in patch with given user')),
          ('D', 'currentdate', None, _('add/update date field in patch with current date')),
          ('d', 'date', '', _('add/update date field in patch with given date'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
    'qrename|qmv':
        (rename, [], _('hg qrename PATCH1 [PATCH2]')),
    "qrestore":
        (restore,
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV')),
    "qsave":
        (save,
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '', _('copy directory name')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
    "qselect":
        (select,
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...')),
    "qseries":
        (series,
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]')),
    "^strip":
        (strip,
         [('f', 'force', None, _('force removal with local changes')),
          ('b', 'backup', None, _('bundle unrelated changesets')),
          ('n', 'nobackup', None, _('no backups'))],
         _('hg strip [-f] [-b] [-n] REV')),
    "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
    "qunapplied":
        (unapplied,
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]')),
    "qfinish":
        (finish,
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...')),
}
@@ -1,1387 +1,1409 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 16 from i18n import _
17 17 import changegroup, ancestor, mdiff, parsers, error, util
18 18 import struct, zlib, errno
19 19
20 20 _pack = struct.pack
21 21 _unpack = struct.unpack
22 22 _compress = zlib.compress
23 23 _decompress = zlib.decompress
24 24 _sha = util.sha1
25 25
26 26 # revlog flags
27 27 REVLOGV0 = 0
28 28 REVLOGNG = 1
29 29 REVLOGNGINLINEDATA = (1 << 16)
30 30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 31 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33 33
34 34 _prereadsize = 1048576
35 35
36 36 RevlogError = error.RevlogError
37 37 LookupError = error.LookupError
38 38
39 39 def getoffset(q):
40 40 return int(q >> 16)
41 41
42 42 def gettype(q):
43 43 return int(q & 0xFFFF)
44 44
45 45 def offset_type(offset, type):
46 46 return long(long(offset) << 16 | type)
47 47
48 48 nullhash = _sha(nullid)
49 49
50 50 def hash(text, p1, p2):
51 51 """generate a hash from the given text and its parent hashes
52 52
53 53 This hash combines both the current file contents and its history
54 54 in a manner that makes it easy to distinguish nodes with the same
55 55 content in the revision graph.
56 56 """
57 57 # As of now, if one of the parent node is null, p2 is null
58 58 if p2 == nullid:
59 59 # deep copy of a hash is faster than creating one
60 60 s = nullhash.copy()
61 61 s.update(p1)
62 62 else:
63 63 # none of the parent nodes are nullid
64 64 l = [p1, p2]
65 65 l.sort()
66 66 s = _sha(l[0])
67 67 s.update(l[1])
68 68 s.update(text)
69 69 return s.digest()
70 70
71 71 def compress(text):
72 72 """ generate a possibly-compressed representation of text """
73 73 if not text:
74 74 return ("", text)
75 75 l = len(text)
76 76 bin = None
77 77 if l < 44:
78 78 pass
79 79 elif l > 1000000:
80 80 # zlib makes an internal copy, thus doubling memory usage for
81 81 # large files, so lets do this in pieces
82 82 z = zlib.compressobj()
83 83 p = []
84 84 pos = 0
85 85 while pos < l:
86 86 pos2 = pos + 2**20
87 87 p.append(z.compress(text[pos:pos2]))
88 88 pos = pos2
89 89 p.append(z.flush())
90 90 if sum(map(len, p)) < l:
91 91 bin = "".join(p)
92 92 else:
93 93 bin = _compress(text)
94 94 if bin is None or len(bin) > l:
95 95 if text[0] == '\0':
96 96 return ("", text)
97 97 return ('u', text)
98 98 return ("", bin)
99 99
100 100 def decompress(bin):
101 101 """ decompress the given input """
102 102 if not bin:
103 103 return bin
104 104 t = bin[0]
105 105 if t == '\0':
106 106 return bin
107 107 if t == 'x':
108 108 return _decompress(bin)
109 109 if t == 'u':
110 110 return bin[1:]
111 111 raise RevlogError(_("unknown compression type %r") % t)
112 112
113 113 class lazyparser(object):
114 114 """
115 115 this class avoids the need to parse the entirety of large indices
116 116 """
117 117
118 118 # lazyparser is not safe to use on windows if win32 extensions not
119 119 # available. it keeps file handle open, which make it not possible
120 120 # to break hardlinks on local cloned repos.
121 121
122 122 def __init__(self, dataf):
123 123 try:
124 124 size = util.fstat(dataf).st_size
125 125 except AttributeError:
126 126 size = 0
127 127 self.dataf = dataf
128 128 self.s = struct.calcsize(indexformatng)
129 129 self.datasize = size
130 130 self.l = size/self.s
131 131 self.index = [None] * self.l
132 132 self.map = {nullid: nullrev}
133 133 self.allmap = 0
134 134 self.all = 0
135 135 self.mapfind_count = 0
136 136
137 137 def loadmap(self):
138 138 """
139 139 during a commit, we need to make sure the rev being added is
140 140 not a duplicate. This requires loading the entire index,
141 141 which is fairly slow. loadmap can load up just the node map,
142 142 which takes much less time.
143 143 """
144 144 if self.allmap:
145 145 return
146 146 end = self.datasize
147 147 self.allmap = 1
148 148 cur = 0
149 149 count = 0
150 150 blocksize = self.s * 256
151 151 self.dataf.seek(0)
152 152 while cur < end:
153 153 data = self.dataf.read(blocksize)
154 154 off = 0
155 155 for x in xrange(256):
156 156 n = data[off + ngshaoffset:off + ngshaoffset + 20]
157 157 self.map[n] = count
158 158 count += 1
159 159 if count >= self.l:
160 160 break
161 161 off += self.s
162 162 cur += blocksize
163 163
164 164 def loadblock(self, blockstart, blocksize, data=None):
165 165 if self.all:
166 166 return
167 167 if data is None:
168 168 self.dataf.seek(blockstart)
169 169 if blockstart + blocksize > self.datasize:
170 170 # the revlog may have grown since we've started running,
171 171 # but we don't have space in self.index for more entries.
172 172 # limit blocksize so that we don't get too much data.
173 173 blocksize = max(self.datasize - blockstart, 0)
174 174 data = self.dataf.read(blocksize)
175 175 lend = len(data) / self.s
176 176 i = blockstart / self.s
177 177 off = 0
178 178 # lazyindex supports __delitem__
179 179 if lend > len(self.index) - i:
180 180 lend = len(self.index) - i
181 181 for x in xrange(lend):
182 182 if self.index[i + x] is None:
183 183 b = data[off : off + self.s]
184 184 self.index[i + x] = b
185 185 n = b[ngshaoffset:ngshaoffset + 20]
186 186 self.map[n] = i + x
187 187 off += self.s
188 188
189 189 def findnode(self, node):
190 190 """search backwards through the index file for a specific node"""
191 191 if self.allmap:
192 192 return None
193 193
194 194 # hg log will cause many many searches for the manifest
195 195 # nodes. After we get called a few times, just load the whole
196 196 # thing.
197 197 if self.mapfind_count > 8:
198 198 self.loadmap()
199 199 if node in self.map:
200 200 return node
201 201 return None
202 202 self.mapfind_count += 1
203 203 last = self.l - 1
204 204 while self.index[last] != None:
205 205 if last == 0:
206 206 self.all = 1
207 207 self.allmap = 1
208 208 return None
209 209 last -= 1
210 210 end = (last + 1) * self.s
211 211 blocksize = self.s * 256
212 212 while end >= 0:
213 213 start = max(end - blocksize, 0)
214 214 self.dataf.seek(start)
215 215 data = self.dataf.read(end - start)
216 216 findend = end - start
217 217 while True:
218 218 # we're searching backwards, so we have to make sure
219 219 # we don't find a changeset where this node is a parent
220 220 off = data.find(node, 0, findend)
221 221 findend = off
222 222 if off >= 0:
223 223 i = off / self.s
224 224 off = i * self.s
225 225 n = data[off + ngshaoffset:off + ngshaoffset + 20]
226 226 if n == node:
227 227 self.map[n] = i + start / self.s
228 228 return node
229 229 else:
230 230 break
231 231 end -= blocksize
232 232 return None
233 233
234 234 def loadindex(self, i=None, end=None):
235 235 if self.all:
236 236 return
237 237 all = False
238 238 if i is None:
239 239 blockstart = 0
240 240 blocksize = (65536 / self.s) * self.s
241 241 end = self.datasize
242 242 all = True
243 243 else:
244 244 if end:
245 245 blockstart = i * self.s
246 246 end = end * self.s
247 247 blocksize = end - blockstart
248 248 else:
249 249 blockstart = (i & ~1023) * self.s
250 250 blocksize = self.s * 1024
251 251 end = blockstart + blocksize
252 252 while blockstart < end:
253 253 self.loadblock(blockstart, blocksize)
254 254 blockstart += blocksize
255 255 if all:
256 256 self.all = True
257 257
258 258 class lazyindex(object):
259 259 """a lazy version of the index array"""
260 260 def __init__(self, parser):
261 261 self.p = parser
262 262 def __len__(self):
263 263 return len(self.p.index)
264 264 def load(self, pos):
265 265 if pos < 0:
266 266 pos += len(self.p.index)
267 267 self.p.loadindex(pos)
268 268 return self.p.index[pos]
269 269 def __getitem__(self, pos):
270 270 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
271 271 def __setitem__(self, pos, item):
272 272 self.p.index[pos] = _pack(indexformatng, *item)
273 273 def __delitem__(self, pos):
274 274 del self.p.index[pos]
275 275 def insert(self, pos, e):
276 276 self.p.index.insert(pos, _pack(indexformatng, *e))
277 277 def append(self, e):
278 278 self.p.index.append(_pack(indexformatng, *e))
279 279
280 280 class lazymap(object):
281 281 """a lazy version of the node map"""
282 282 def __init__(self, parser):
283 283 self.p = parser
284 284 def load(self, key):
285 285 n = self.p.findnode(key)
286 286 if n is None:
287 287 raise KeyError(key)
288 288 def __contains__(self, key):
289 289 if key in self.p.map:
290 290 return True
291 291 self.p.loadmap()
292 292 return key in self.p.map
293 293 def __iter__(self):
294 294 yield nullid
295 295 for i in xrange(self.p.l):
296 296 ret = self.p.index[i]
297 297 if not ret:
298 298 self.p.loadindex(i)
299 299 ret = self.p.index[i]
300 300 if isinstance(ret, str):
301 301 ret = _unpack(indexformatng, ret)
302 302 yield ret[7]
303 303 def __getitem__(self, key):
304 304 try:
305 305 return self.p.map[key]
306 306 except KeyError:
307 307 try:
308 308 self.load(key)
309 309 return self.p.map[key]
310 310 except KeyError:
311 311 raise KeyError("node " + hex(key))
312 312 def __setitem__(self, key, val):
313 313 self.p.map[key] = val
314 314 def __delitem__(self, key):
315 315 del self.p.map[key]
316 316
317 317 indexformatv0 = ">4l20s20s20s"
318 318 v0shaoffset = 56
319 319
320 320 class revlogoldio(object):
321 321 def __init__(self):
322 322 self.size = struct.calcsize(indexformatv0)
323 323
324 324 def parseindex(self, fp, data, inline):
325 325 s = self.size
326 326 index = []
327 327 nodemap = {nullid: nullrev}
328 328 n = off = 0
329 329 if len(data) == _prereadsize:
330 330 data += fp.read() # read the rest
331 331 l = len(data)
332 332 while off + s <= l:
333 333 cur = data[off:off + s]
334 334 off += s
335 335 e = _unpack(indexformatv0, cur)
336 336 # transform to revlogv1 format
337 337 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
338 338 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
339 339 index.append(e2)
340 340 nodemap[e[6]] = n
341 341 n += 1
342 342
343 343 return index, nodemap, None
344 344
345 345 def packentry(self, entry, node, version, rev):
346 346 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
347 347 node(entry[5]), node(entry[6]), entry[7])
348 348 return _pack(indexformatv0, *e2)
349 349
350 350 # index ng:
351 351 # 6 bytes offset
352 352 # 2 bytes flags
353 353 # 4 bytes compressed length
354 354 # 4 bytes uncompressed length
355 355 # 4 bytes: base rev
356 356 # 4 bytes link rev
357 357 # 4 bytes parent 1 rev
358 358 # 4 bytes parent 2 rev
359 359 # 32 bytes: nodeid
360 360 indexformatng = ">Qiiiiii20s12x"
361 361 ngshaoffset = 32
362 362 versionformat = ">I"
363 363
364 364 class revlogio(object):
365 365 def __init__(self):
366 366 self.size = struct.calcsize(indexformatng)
367 367
368 368 def parseindex(self, fp, data, inline):
369 369 if len(data) == _prereadsize:
370 370 if util.openhardlinks() and not inline:
371 371 # big index, let's parse it on demand
372 372 parser = lazyparser(fp)
373 373 index = lazyindex(parser)
374 374 nodemap = lazymap(parser)
375 375 e = list(index[0])
376 376 type = gettype(e[0])
377 377 e[0] = offset_type(0, type)
378 378 index[0] = e
379 379 return index, nodemap, None
380 380 else:
381 381 data += fp.read()
382 382
383 383 # call the C implementation to parse the index data
384 384 index, nodemap, cache = parsers.parse_index(data, inline)
385 385 return index, nodemap, cache
386 386
387 387 def packentry(self, entry, node, version, rev):
388 388 p = _pack(indexformatng, *entry)
389 389 if rev == 0:
390 390 p = _pack(versionformat, version) + p[4:]
391 391 return p
392 392
393 393 class revlog(object):
394 394 """
395 395 the underlying revision storage object
396 396
397 397 A revlog consists of two parts, an index and the revision data.
398 398
399 399 The index is a file with a fixed record size containing
400 400 information on each revision, including its nodeid (hash), the
401 401 nodeids of its parents, the position and offset of its data within
402 402 the data file, and the revision it's based on. Finally, each entry
403 403 contains a linkrev entry that can serve as a pointer to external
404 404 data.
405 405
406 406 The revision data itself is a linear collection of data chunks.
407 407 Each chunk represents a revision and is usually represented as a
408 408 delta against the previous chunk. To bound lookup time, runs of
409 409 deltas are limited to about 2 times the length of the original
410 410 version data. This makes retrieval of a version proportional to
411 411 its size, or O(1) relative to the number of revisions.
412 412
413 413 Both pieces of the revlog are written to in an append-only
414 414 fashion, which means we never need to rewrite a file to insert or
415 415 remove data, and can use some simple techniques to avoid the need
416 416 for locking while reading.
417 417 """
418 418 def __init__(self, opener, indexfile):
419 419 """
420 420 create a revlog object
421 421
422 422 opener is a function that abstracts the file opening operation
423 423 and can be used to implement COW semantics or the like.
424 424 """
425 425 self.indexfile = indexfile
426 426 self.datafile = indexfile[:-2] + ".d"
427 427 self.opener = opener
428 428 self._cache = None
429 429 self._chunkcache = (0, '')
430 430 self.nodemap = {nullid: nullrev}
431 431 self.index = []
432 432
433 433 v = REVLOG_DEFAULT_VERSION
434 434 if hasattr(opener, "defversion"):
435 435 v = opener.defversion
436 436 if v & REVLOGNG:
437 437 v |= REVLOGNGINLINEDATA
438 438
439 439 i = ''
440 440 try:
441 441 f = self.opener(self.indexfile)
442 442 i = f.read(_prereadsize)
443 443 if len(i) > 0:
444 444 v = struct.unpack(versionformat, i[:4])[0]
445 445 except IOError, inst:
446 446 if inst.errno != errno.ENOENT:
447 447 raise
448 448
449 449 self.version = v
450 450 self._inline = v & REVLOGNGINLINEDATA
451 451 flags = v & ~0xFFFF
452 452 fmt = v & 0xFFFF
453 453 if fmt == REVLOGV0 and flags:
454 454 raise RevlogError(_("index %s unknown flags %#04x for format v0")
455 455 % (self.indexfile, flags >> 16))
456 456 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
457 457 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
458 458 % (self.indexfile, flags >> 16))
459 459 elif fmt > REVLOGNG:
460 460 raise RevlogError(_("index %s unknown format %d")
461 461 % (self.indexfile, fmt))
462 462
463 463 self._io = revlogio()
464 464 if self.version == REVLOGV0:
465 465 self._io = revlogoldio()
466 466 if i:
467 467 try:
468 468 d = self._io.parseindex(f, i, self._inline)
469 469 except (ValueError, IndexError):
470 470 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
471 471 self.index, self.nodemap, self._chunkcache = d
472 472 if not self._chunkcache:
473 473 self._chunkclear()
474 474
475 475 # add the magic null revision at -1 (if it hasn't been done already)
476 476 if (self.index == [] or isinstance(self.index, lazyindex) or
477 477 self.index[-1][7] != nullid) :
478 478 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
479 479
480 480 def _loadindex(self, start, end):
481 481 """load a block of indexes all at once from the lazy parser"""
482 482 if isinstance(self.index, lazyindex):
483 483 self.index.p.loadindex(start, end)
484 484
485 485 def _loadindexmap(self):
486 486 """loads both the map and the index from the lazy parser"""
487 487 if isinstance(self.index, lazyindex):
488 488 p = self.index.p
489 489 p.loadindex()
490 490 self.nodemap = p.map
491 491
492 492 def _loadmap(self):
493 493 """loads the map from the lazy parser"""
494 494 if isinstance(self.nodemap, lazymap):
495 495 self.nodemap.p.loadmap()
496 496 self.nodemap = self.nodemap.p.map
497 497
498 498 def tip(self):
499 499 return self.node(len(self.index) - 2)
500 500 def __len__(self):
501 501 return len(self.index) - 1
502 502 def __iter__(self):
503 503 for i in xrange(len(self)):
504 504 yield i
505 505 def rev(self, node):
506 506 try:
507 507 return self.nodemap[node]
508 508 except KeyError:
509 509 raise LookupError(node, self.indexfile, _('no node'))
510 510 def node(self, rev):
511 511 return self.index[rev][7]
512 512 def linkrev(self, rev):
513 513 return self.index[rev][4]
514 514 def parents(self, node):
515 515 i = self.index
516 516 d = i[self.rev(node)]
517 517 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
518 518 def parentrevs(self, rev):
519 519 return self.index[rev][5:7]
520 520 def start(self, rev):
521 521 return int(self.index[rev][0] >> 16)
522 522 def end(self, rev):
523 523 return self.start(rev) + self.length(rev)
524 524 def length(self, rev):
525 525 return self.index[rev][1]
526 526 def base(self, rev):
527 527 return self.index[rev][3]
528 528
529 529 def size(self, rev):
530 530 """return the length of the uncompressed text for a given revision"""
531 531 l = self.index[rev][2]
532 532 if l >= 0:
533 533 return l
534 534
535 535 t = self.revision(self.node(rev))
536 536 return len(t)
537 537
538 538 # Alternate implementation. The advantage to this code is it
539 539 # will be faster for a single revision. However, the results
540 540 # are not cached, so finding the size of every revision will
541 541 # be slower.
542 542 #
543 543 # if self.cache and self.cache[1] == rev:
544 544 # return len(self.cache[2])
545 545 #
546 546 # base = self.base(rev)
547 547 # if self.cache and self.cache[1] >= base and self.cache[1] < rev:
548 548 # base = self.cache[1]
549 549 # text = self.cache[2]
550 550 # else:
551 551 # text = self.revision(self.node(base))
552 552 #
553 553 # l = len(text)
554 554 # for x in xrange(base + 1, rev + 1):
555 555 # l = mdiff.patchedsize(l, self._chunk(x))
556 556 # return l
557 557
558 558 def reachable(self, node, stop=None):
559 559 """return the set of all nodes ancestral to a given node, including
560 560 the node itself, stopping when stop is matched"""
561 561 reachable = set((node,))
562 562 visit = [node]
563 563 if stop:
564 564 stopn = self.rev(stop)
565 565 else:
566 566 stopn = 0
567 567 while visit:
568 568 n = visit.pop(0)
569 569 if n == stop:
570 570 continue
571 571 if n == nullid:
572 572 continue
573 573 for p in self.parents(n):
574 574 if self.rev(p) < stopn:
575 575 continue
576 576 if p not in reachable:
577 577 reachable.add(p)
578 578 visit.append(p)
579 579 return reachable
580 580
581 581 def ancestors(self, *revs):
582 'Generate the ancestors of revs using a breadth-first visit'
582 """Generate the ancestors of 'revs' in reverse topological order.
583
584 Yield a sequence of revision numbers starting with the parents
585 of each revision in revs, i.e., each revision is *not* considered
586 an ancestor of itself. Results are in breadth-first order:
587 parents of each rev in revs, then parents of those, etc. Result
588 does not include the null revision."""
583 589 visit = list(revs)
584 590 seen = set([nullrev])
585 591 while visit:
586 592 for parent in self.parentrevs(visit.pop(0)):
587 593 if parent not in seen:
588 594 visit.append(parent)
589 595 seen.add(parent)
590 596 yield parent
591 597
592 598 def descendants(self, *revs):
593 'Generate the descendants of revs in topological order'
599 """Generate the descendants of 'revs' in revision order.
600
601 Yield a sequence of revision numbers starting with a child of
602 some rev in revs, i.e., each revision is *not* considered a
603 descendant of itself. Results are ordered by revision number (a
604 topological sort)."""
594 605 seen = set(revs)
595 606 for i in xrange(min(revs) + 1, len(self)):
596 607 for x in self.parentrevs(i):
597 608 if x != nullrev and x in seen:
598 609 seen.add(i)
599 610 yield i
600 611 break
601 612
602 613 def findmissing(self, common=None, heads=None):
603 '''
604 returns the topologically sorted list of nodes from the set:
605 missing = (ancestors(heads) \ ancestors(common))
614 """Return the ancestors of heads that are not ancestors of common.
615
616 More specifically, return a list of nodes N such that every N
617 satisfies the following constraints:
606 618
607 where ancestors() is the set of ancestors from heads, heads included
619 1. N is an ancestor of some node in 'heads'
620 2. N is not an ancestor of any node in 'common'
608 621
609 if heads is None, the heads of the revlog are used
610 if common is None, nullid is assumed to be a common node
611 '''
622 The list is sorted by revision number, meaning it is
623 topologically sorted.
624
625 'heads' and 'common' are both lists of node IDs. If heads is
626 not supplied, uses all of the revlog's heads. If common is not
627 supplied, uses nullid."""
612 628 if common is None:
613 629 common = [nullid]
614 630 if heads is None:
615 631 heads = self.heads()
616 632
617 633 common = [self.rev(n) for n in common]
618 634 heads = [self.rev(n) for n in heads]
619 635
620 636 # we want the ancestors, but inclusive
621 637 has = set(self.ancestors(*common))
622 638 has.add(nullrev)
623 639 has.update(common)
624 640
625 641 # take all ancestors from heads that aren't in has
626 642 missing = set()
627 643 visit = [r for r in heads if r not in has]
628 644 while visit:
629 645 r = visit.pop(0)
630 646 if r in missing:
631 647 continue
632 648 else:
633 649 missing.add(r)
634 650 for p in self.parentrevs(r):
635 651 if p not in has:
636 652 visit.append(p)
637 653 missing = list(missing)
638 654 missing.sort()
639 655 return [self.node(r) for r in missing]
640 656
641 657 def nodesbetween(self, roots=None, heads=None):
642 """Return a tuple containing three elements. Elements 1 and 2 contain
643 a final list bases and heads after all the unreachable ones have been
644 pruned. Element 0 contains a topologically sorted list of all
658 """Return a topological path from 'roots' to 'heads'.
659
660 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
661 topologically sorted list of all nodes N that satisfy both of
662 these constraints:
663
664 1. N is a descendant of some node in 'roots'
665 2. N is an ancestor of some node in 'heads'
645 666
646 nodes that satisfy these constraints:
647 1. All nodes must be descended from a node in roots (the nodes on
648 roots are considered descended from themselves).
649 2. All nodes must also be ancestors of a node in heads (the nodes in
650 heads are considered to be their own ancestors).
667 Every node is considered to be both a descendant and an ancestor
668 of itself, so every reachable node in 'roots' and 'heads' will be
669 included in 'nodes'.
651 670
652 If roots is unspecified, nullid is assumed as the only root.
653 If heads is unspecified, it is taken to be the output of the
654 heads method (i.e. a list of all nodes in the repository that
655 have no children)."""
671 'outroots' is the list of reachable nodes in 'roots', i.e., the
672 subset of 'roots' that is returned in 'nodes'. Likewise,
673 'outheads' is the subset of 'heads' that is also in 'nodes'.
674
675 'roots' and 'heads' are both lists of node IDs. If 'roots' is
676 unspecified, uses nullid as the only root. If 'heads' is
677 unspecified, uses list of all of the revlog's heads."""
656 678 nonodes = ([], [], [])
657 679 if roots is not None:
658 680 roots = list(roots)
659 681 if not roots:
660 682 return nonodes
661 683 lowestrev = min([self.rev(n) for n in roots])
662 684 else:
663 685 roots = [nullid] # Everybody's a descendent of nullid
664 686 lowestrev = nullrev
665 687 if (lowestrev == nullrev) and (heads is None):
666 688 # We want _all_ the nodes!
667 689 return ([self.node(r) for r in self], [nullid], list(self.heads()))
668 690 if heads is None:
669 691 # All nodes are ancestors, so the latest ancestor is the last
670 692 # node.
671 693 highestrev = len(self) - 1
672 694 # Set ancestors to None to signal that every node is an ancestor.
673 695 ancestors = None
674 696 # Set heads to an empty dictionary for later discovery of heads
675 697 heads = {}
676 698 else:
677 699 heads = list(heads)
678 700 if not heads:
679 701 return nonodes
680 702 ancestors = set()
681 703 # Turn heads into a dictionary so we can remove 'fake' heads.
682 704 # Also, later we will be using it to filter out the heads we can't
683 705 # find from roots.
684 706 heads = dict.fromkeys(heads, 0)
685 707 # Start at the top and keep marking parents until we're done.
686 708 nodestotag = set(heads)
687 709 # Remember where the top was so we can use it as a limit later.
688 710 highestrev = max([self.rev(n) for n in nodestotag])
689 711 while nodestotag:
690 712 # grab a node to tag
691 713 n = nodestotag.pop()
692 714 # Never tag nullid
693 715 if n == nullid:
694 716 continue
695 717 # A node's revision number represents its place in a
696 718 # topologically sorted list of nodes.
697 719 r = self.rev(n)
698 720 if r >= lowestrev:
699 721 if n not in ancestors:
700 722 # If we are possibly a descendent of one of the roots
701 723 # and we haven't already been marked as an ancestor
702 724 ancestors.add(n) # Mark as ancestor
703 725 # Add non-nullid parents to list of nodes to tag.
704 726 nodestotag.update([p for p in self.parents(n) if
705 727 p != nullid])
706 728 elif n in heads: # We've seen it before, is it a fake head?
707 729 # So it is, real heads should not be the ancestors of
708 730 # any other heads.
709 731 heads.pop(n)
710 732 if not ancestors:
711 733 return nonodes
712 734 # Now that we have our set of ancestors, we want to remove any
713 735 # roots that are not ancestors.
714 736
715 737 # If one of the roots was nullid, everything is included anyway.
716 738 if lowestrev > nullrev:
717 739 # But, since we weren't, let's recompute the lowest rev to not
718 740 # include roots that aren't ancestors.
719 741
720 742 # Filter out roots that aren't ancestors of heads
721 743 roots = [n for n in roots if n in ancestors]
722 744 # Recompute the lowest revision
723 745 if roots:
724 746 lowestrev = min([self.rev(n) for n in roots])
725 747 else:
726 748 # No more roots? Return empty list
727 749 return nonodes
728 750 else:
729 751 # We are descending from nullid, and don't need to care about
730 752 # any other roots.
731 753 lowestrev = nullrev
732 754 roots = [nullid]
733 755 # Transform our roots list into a set.
734 756 descendents = set(roots)
735 757 # Also, keep the original roots so we can filter out roots that aren't
736 758 # 'real' roots (i.e. are descended from other roots).
737 759 roots = descendents.copy()
738 760 # Our topologically sorted list of output nodes.
739 761 orderedout = []
740 762 # Don't start at nullid since we don't want nullid in our output list,
741 763 # and if nullid shows up in descedents, empty parents will look like
742 764 # they're descendents.
743 765 for r in xrange(max(lowestrev, 0), highestrev + 1):
744 766 n = self.node(r)
745 767 isdescendent = False
746 768 if lowestrev == nullrev: # Everybody is a descendent of nullid
747 769 isdescendent = True
748 770 elif n in descendents:
749 771 # n is already a descendent
750 772 isdescendent = True
751 773 # This check only needs to be done here because all the roots
752 774 # will start being marked is descendents before the loop.
753 775 if n in roots:
754 776 # If n was a root, check if it's a 'real' root.
755 777 p = tuple(self.parents(n))
756 778 # If any of its parents are descendents, it's not a root.
757 779 if (p[0] in descendents) or (p[1] in descendents):
758 780 roots.remove(n)
759 781 else:
760 782 p = tuple(self.parents(n))
761 783 # A node is a descendent if either of its parents are
762 784 # descendents. (We seeded the dependents list with the roots
763 785 # up there, remember?)
764 786 if (p[0] in descendents) or (p[1] in descendents):
765 787 descendents.add(n)
766 788 isdescendent = True
767 789 if isdescendent and ((ancestors is None) or (n in ancestors)):
768 790 # Only include nodes that are both descendents and ancestors.
769 791 orderedout.append(n)
770 792 if (ancestors is not None) and (n in heads):
771 793 # We're trying to figure out which heads are reachable
772 794 # from roots.
773 795 # Mark this head as having been reached
774 796 heads[n] = 1
775 797 elif ancestors is None:
776 798 # Otherwise, we're trying to discover the heads.
777 799 # Assume this is a head because if it isn't, the next step
778 800 # will eventually remove it.
779 801 heads[n] = 1
780 802 # But, obviously its parents aren't.
781 803 for p in self.parents(n):
782 804 heads.pop(p, None)
783 805 heads = [n for n in heads.iterkeys() if heads[n] != 0]
784 806 roots = list(roots)
785 807 assert orderedout
786 808 assert roots
787 809 assert heads
788 810 return (orderedout, roots, heads)
789 811
790 812 def heads(self, start=None, stop=None):
791 813 """return the list of all nodes that have no children
792 814
793 815 if start is specified, only heads that are descendants of
794 816 start will be returned
795 817 if stop is specified, it will consider all the revs from stop
796 818 as if they had no children
797 819 """
798 820 if start is None and stop is None:
799 821 count = len(self)
800 822 if not count:
801 823 return [nullid]
802 824 ishead = [1] * (count + 1)
803 825 index = self.index
804 826 for r in xrange(count):
805 827 e = index[r]
806 828 ishead[e[5]] = ishead[e[6]] = 0
807 829 return [self.node(r) for r in xrange(count) if ishead[r]]
808 830
809 831 if start is None:
810 832 start = nullid
811 833 if stop is None:
812 834 stop = []
813 835 stoprevs = set([self.rev(n) for n in stop])
814 836 startrev = self.rev(start)
815 837 reachable = set((startrev,))
816 838 heads = set((startrev,))
817 839
818 840 parentrevs = self.parentrevs
819 841 for r in xrange(startrev + 1, len(self)):
820 842 for p in parentrevs(r):
821 843 if p in reachable:
822 844 if r not in stoprevs:
823 845 reachable.add(r)
824 846 heads.add(r)
825 847 if p in heads and p not in stoprevs:
826 848 heads.remove(p)
827 849
828 850 return [self.node(r) for r in heads]
829 851
830 852 def children(self, node):
831 853 """find the children of a given node"""
832 854 c = []
833 855 p = self.rev(node)
834 856 for r in range(p + 1, len(self)):
835 857 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
836 858 if prevs:
837 859 for pr in prevs:
838 860 if pr == p:
839 861 c.append(self.node(r))
840 862 elif p == nullrev:
841 863 c.append(self.node(r))
842 864 return c
843 865
844 866 def _match(self, id):
845 867 if isinstance(id, (long, int)):
846 868 # rev
847 869 return self.node(id)
848 870 if len(id) == 20:
849 871 # possibly a binary node
850 872 # odds of a binary node being all hex in ASCII are 1 in 10**25
851 873 try:
852 874 node = id
853 875 self.rev(node) # quick search the index
854 876 return node
855 877 except LookupError:
856 878 pass # may be partial hex id
857 879 try:
858 880 # str(rev)
859 881 rev = int(id)
860 882 if str(rev) != id:
861 883 raise ValueError
862 884 if rev < 0:
863 885 rev = len(self) + rev
864 886 if rev < 0 or rev >= len(self):
865 887 raise ValueError
866 888 return self.node(rev)
867 889 except (ValueError, OverflowError):
868 890 pass
869 891 if len(id) == 40:
870 892 try:
871 893 # a full hex nodeid?
872 894 node = bin(id)
873 895 self.rev(node)
874 896 return node
875 897 except (TypeError, LookupError):
876 898 pass
877 899
878 900 def _partialmatch(self, id):
879 901 if len(id) < 40:
880 902 try:
881 903 # hex(node)[:...]
882 904 l = len(id) // 2 # grab an even number of digits
883 905 bin_id = bin(id[:l*2])
884 906 nl = [n for n in self.nodemap if n[:l] == bin_id]
885 907 nl = [n for n in nl if hex(n).startswith(id)]
886 908 if len(nl) > 0:
887 909 if len(nl) == 1:
888 910 return nl[0]
889 911 raise LookupError(id, self.indexfile,
890 912 _('ambiguous identifier'))
891 913 return None
892 914 except TypeError:
893 915 pass
894 916
895 917 def lookup(self, id):
896 918 """locate a node based on:
897 919 - revision number or str(revision number)
898 920 - nodeid or subset of hex nodeid
899 921 """
900 922 n = self._match(id)
901 923 if n is not None:
902 924 return n
903 925 n = self._partialmatch(id)
904 926 if n:
905 927 return n
906 928
907 929 raise LookupError(id, self.indexfile, _('no match found'))
908 930
909 931 def cmp(self, node, text):
910 932 """compare text with a given file revision"""
911 933 p1, p2 = self.parents(node)
912 934 return hash(text, p1, p2) != node
913 935
914 936 def _addchunk(self, offset, data):
915 937 o, d = self._chunkcache
916 938 # try to add to existing cache
917 939 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
918 940 self._chunkcache = o, d + data
919 941 else:
920 942 self._chunkcache = offset, data
921 943
922 944 def _loadchunk(self, offset, length):
923 945 if self._inline:
924 946 df = self.opener(self.indexfile)
925 947 else:
926 948 df = self.opener(self.datafile)
927 949
928 950 readahead = max(65536, length)
929 951 df.seek(offset)
930 952 d = df.read(readahead)
931 953 self._addchunk(offset, d)
932 954 if readahead > length:
933 955 return d[:length]
934 956 return d
935 957
936 958 def _getchunk(self, offset, length):
937 959 o, d = self._chunkcache
938 960 l = len(d)
939 961
940 962 # is it in the cache?
941 963 cachestart = offset - o
942 964 cacheend = cachestart + length
943 965 if cachestart >= 0 and cacheend <= l:
944 966 if cachestart == 0 and cacheend == l:
945 967 return d # avoid a copy
946 968 return d[cachestart:cacheend]
947 969
948 970 return self._loadchunk(offset, length)
949 971
950 972 def _chunkraw(self, startrev, endrev):
951 973 start = self.start(startrev)
952 974 length = self.end(endrev) - start
953 975 if self._inline:
954 976 start += (startrev + 1) * self._io.size
955 977 return self._getchunk(start, length)
956 978
957 979 def _chunk(self, rev):
958 980 return decompress(self._chunkraw(rev, rev))
959 981
960 982 def _chunkclear(self):
961 983 self._chunkcache = (0, '')
962 984
963 985 def revdiff(self, rev1, rev2):
964 986 """return or calculate a delta between two revisions"""
965 987 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
966 988 return self._chunk(rev2)
967 989
968 990 return mdiff.textdiff(self.revision(self.node(rev1)),
969 991 self.revision(self.node(rev2)))
970 992
971 993 def revision(self, node):
972 994 """return an uncompressed revision of a given node"""
973 995 if node == nullid:
974 996 return ""
975 997 if self._cache and self._cache[0] == node:
976 998 return self._cache[2]
977 999
978 1000 # look up what we need to read
979 1001 text = None
980 1002 rev = self.rev(node)
981 1003 base = self.base(rev)
982 1004
983 1005 # check rev flags
984 1006 if self.index[rev][0] & 0xFFFF:
985 1007 raise RevlogError(_('incompatible revision flag %x') %
986 1008 (self.index[rev][0] & 0xFFFF))
987 1009
988 1010 # do we have useful data cached?
989 1011 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
990 1012 base = self._cache[1]
991 1013 text = self._cache[2]
992 1014
993 1015 self._loadindex(base, rev + 1)
994 1016 self._chunkraw(base, rev)
995 1017 if text is None:
996 1018 text = self._chunk(base)
997 1019
998 1020 bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
999 1021 text = mdiff.patches(text, bins)
1000 1022 p1, p2 = self.parents(node)
1001 1023 if node != hash(text, p1, p2):
1002 1024 raise RevlogError(_("integrity check failed on %s:%d")
1003 1025 % (self.indexfile, rev))
1004 1026
1005 1027 self._cache = (node, rev, text)
1006 1028 return text
1007 1029
1008 1030 def checkinlinesize(self, tr, fp=None):
1009 1031 if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
1010 1032 return
1011 1033
1012 1034 trinfo = tr.find(self.indexfile)
1013 1035 if trinfo is None:
1014 1036 raise RevlogError(_("%s not found in the transaction")
1015 1037 % self.indexfile)
1016 1038
1017 1039 trindex = trinfo[2]
1018 1040 dataoff = self.start(trindex)
1019 1041
1020 1042 tr.add(self.datafile, dataoff)
1021 1043
1022 1044 if fp:
1023 1045 fp.flush()
1024 1046 fp.close()
1025 1047
1026 1048 df = self.opener(self.datafile, 'w')
1027 1049 try:
1028 1050 for r in self:
1029 1051 df.write(self._chunkraw(r, r))
1030 1052 finally:
1031 1053 df.close()
1032 1054
1033 1055 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1034 1056 self.version &= ~(REVLOGNGINLINEDATA)
1035 1057 self._inline = False
1036 1058 for i in self:
1037 1059 e = self._io.packentry(self.index[i], self.node, self.version, i)
1038 1060 fp.write(e)
1039 1061
1040 1062 # if we don't call rename, the temp file will never replace the
1041 1063 # real index
1042 1064 fp.rename()
1043 1065
1044 1066 tr.replace(self.indexfile, trindex * self._io.size)
1045 1067 self._chunkclear()
1046 1068
1047 1069 def addrevision(self, text, transaction, link, p1, p2, d=None):
1048 1070 """add a revision to the log
1049 1071
1050 1072 text - the revision data to add
1051 1073 transaction - the transaction object used for rollback
1052 1074 link - the linkrev data to add
1053 1075 p1, p2 - the parent nodeids of the revision
1054 1076 d - an optional precomputed delta
1055 1077 """
1056 1078 dfh = None
1057 1079 if not self._inline:
1058 1080 dfh = self.opener(self.datafile, "a")
1059 1081 ifh = self.opener(self.indexfile, "a+")
1060 1082 try:
1061 1083 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1062 1084 finally:
1063 1085 if dfh:
1064 1086 dfh.close()
1065 1087 ifh.close()
1066 1088
1067 1089 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1068 1090 node = hash(text, p1, p2)
1069 1091 if node in self.nodemap:
1070 1092 return node
1071 1093
1072 1094 curr = len(self)
1073 1095 prev = curr - 1
1074 1096 base = self.base(prev)
1075 1097 offset = self.end(prev)
1076 1098
1077 1099 if curr:
1078 1100 if not d:
1079 1101 ptext = self.revision(self.node(prev))
1080 1102 d = mdiff.textdiff(ptext, text)
1081 1103 data = compress(d)
1082 1104 l = len(data[1]) + len(data[0])
1083 1105 dist = l + offset - self.start(base)
1084 1106
1085 1107 # full versions are inserted when the needed deltas
1086 1108 # become comparable to the uncompressed text
1087 1109 if not curr or dist > len(text) * 2:
1088 1110 data = compress(text)
1089 1111 l = len(data[1]) + len(data[0])
1090 1112 base = curr
1091 1113
1092 1114 e = (offset_type(offset, 0), l, len(text),
1093 1115 base, link, self.rev(p1), self.rev(p2), node)
1094 1116 self.index.insert(-1, e)
1095 1117 self.nodemap[node] = curr
1096 1118
1097 1119 entry = self._io.packentry(e, self.node, self.version, curr)
1098 1120 if not self._inline:
1099 1121 transaction.add(self.datafile, offset)
1100 1122 transaction.add(self.indexfile, curr * len(entry))
1101 1123 if data[0]:
1102 1124 dfh.write(data[0])
1103 1125 dfh.write(data[1])
1104 1126 dfh.flush()
1105 1127 ifh.write(entry)
1106 1128 else:
1107 1129 offset += curr * self._io.size
1108 1130 transaction.add(self.indexfile, offset, curr)
1109 1131 ifh.write(entry)
1110 1132 ifh.write(data[0])
1111 1133 ifh.write(data[1])
1112 1134 self.checkinlinesize(transaction, ifh)
1113 1135
1114 1136 if type(text) == str: # only accept immutable objects
1115 1137 self._cache = (node, curr, text)
1116 1138 return node
1117 1139
1118 1140 def ancestor(self, a, b):
1119 1141 """calculate the least common ancestor of nodes a and b"""
1120 1142
1121 1143 # fast path, check if it is a descendant
1122 1144 a, b = self.rev(a), self.rev(b)
1123 1145 start, end = sorted((a, b))
1124 1146 for i in self.descendants(start):
1125 1147 if i == end:
1126 1148 return self.node(start)
1127 1149 elif i > end:
1128 1150 break
1129 1151
1130 1152 def parents(rev):
1131 1153 return [p for p in self.parentrevs(rev) if p != nullrev]
1132 1154
1133 1155 c = ancestor.ancestor(a, b, parents)
1134 1156 if c is None:
1135 1157 return nullid
1136 1158
1137 1159 return self.node(c)
1138 1160
1139 1161 def group(self, nodelist, lookup, infocollect=None):
1140 1162 """Calculate a delta group, yielding a sequence of changegroup chunks
1141 1163 (strings).
1142 1164
1143 1165 Given a list of changeset revs, return a set of deltas and
1144 1166 metadata corresponding to nodes. the first delta is
1145 1167 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1146 1168 have this parent as it has all history before these
1147 1169 changesets. parent is parent[0]
1148 1170 """
1149 1171
1150 1172 revs = [self.rev(n) for n in nodelist]
1151 1173
1152 1174 # if we don't have any revisions touched by these changesets, bail
1153 1175 if not revs:
1154 1176 yield changegroup.closechunk()
1155 1177 return
1156 1178
1157 1179 # add the parent of the first rev
1158 1180 p = self.parentrevs(revs[0])[0]
1159 1181 revs.insert(0, p)
1160 1182
1161 1183 # build deltas
1162 1184 for d in xrange(len(revs) - 1):
1163 1185 a, b = revs[d], revs[d + 1]
1164 1186 nb = self.node(b)
1165 1187
1166 1188 if infocollect is not None:
1167 1189 infocollect(nb)
1168 1190
1169 1191 p = self.parents(nb)
1170 1192 meta = nb + p[0] + p[1] + lookup(nb)
1171 1193 if a == -1:
1172 1194 d = self.revision(nb)
1173 1195 meta += mdiff.trivialdiffheader(len(d))
1174 1196 else:
1175 1197 d = self.revdiff(a, b)
1176 1198 yield changegroup.chunkheader(len(meta) + len(d))
1177 1199 yield meta
1178 1200 if len(d) > 2**20:
1179 1201 pos = 0
1180 1202 while pos < len(d):
1181 1203 pos2 = pos + 2 ** 18
1182 1204 yield d[pos:pos2]
1183 1205 pos = pos2
1184 1206 else:
1185 1207 yield d
1186 1208
1187 1209 yield changegroup.closechunk()
1188 1210
1189 1211 def addgroup(self, revs, linkmapper, transaction):
1190 1212 """
1191 1213 add a delta group
1192 1214
1193 1215 given a set of deltas, add them to the revision log. the
1194 1216 first delta is against its parent, which should be in our
1195 1217 log, the rest are against the previous delta.
1196 1218 """
1197 1219
1198 1220 #track the base of the current delta log
1199 1221 r = len(self)
1200 1222 t = r - 1
1201 1223 node = None
1202 1224
1203 1225 base = prev = nullrev
1204 1226 start = end = textlen = 0
1205 1227 if r:
1206 1228 end = self.end(t)
1207 1229
1208 1230 ifh = self.opener(self.indexfile, "a+")
1209 1231 isize = r * self._io.size
1210 1232 if self._inline:
1211 1233 transaction.add(self.indexfile, end + isize, r)
1212 1234 dfh = None
1213 1235 else:
1214 1236 transaction.add(self.indexfile, isize, r)
1215 1237 transaction.add(self.datafile, end)
1216 1238 dfh = self.opener(self.datafile, "a")
1217 1239
1218 1240 try:
1219 1241 # loop through our set of deltas
1220 1242 chain = None
1221 1243 for chunk in revs:
1222 1244 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1223 1245 link = linkmapper(cs)
1224 1246 if node in self.nodemap:
1225 1247 # this can happen if two branches make the same change
1226 1248 chain = node
1227 1249 continue
1228 1250 delta = buffer(chunk, 80)
1229 1251 del chunk
1230 1252
1231 1253 for p in (p1, p2):
1232 1254 if not p in self.nodemap:
1233 1255 raise LookupError(p, self.indexfile, _('unknown parent'))
1234 1256
1235 1257 if not chain:
1236 1258 # retrieve the parent revision of the delta chain
1237 1259 chain = p1
1238 1260 if not chain in self.nodemap:
1239 1261 raise LookupError(chain, self.indexfile, _('unknown base'))
1240 1262
1241 1263 # full versions are inserted when the needed deltas become
1242 1264 # comparable to the uncompressed text or when the previous
1243 1265 # version is not the one we have a delta against. We use
1244 1266 # the size of the previous full rev as a proxy for the
1245 1267 # current size.
1246 1268
1247 1269 if chain == prev:
1248 1270 cdelta = compress(delta)
1249 1271 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1250 1272 textlen = mdiff.patchedsize(textlen, delta)
1251 1273
1252 1274 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1253 1275 # flush our writes here so we can read it in revision
1254 1276 if dfh:
1255 1277 dfh.flush()
1256 1278 ifh.flush()
1257 1279 text = self.revision(chain)
1258 1280 if len(text) == 0:
1259 1281 # skip over trivial delta header
1260 1282 text = buffer(delta, 12)
1261 1283 else:
1262 1284 text = mdiff.patches(text, [delta])
1263 1285 del delta
1264 1286 chk = self._addrevision(text, transaction, link, p1, p2, None,
1265 1287 ifh, dfh)
1266 1288 if not dfh and not self._inline:
1267 1289 # addrevision switched from inline to conventional
1268 1290 # reopen the index
1269 1291 dfh = self.opener(self.datafile, "a")
1270 1292 ifh = self.opener(self.indexfile, "a")
1271 1293 if chk != node:
1272 1294 raise RevlogError(_("consistency error adding group"))
1273 1295 textlen = len(text)
1274 1296 else:
1275 1297 e = (offset_type(end, 0), cdeltalen, textlen, base,
1276 1298 link, self.rev(p1), self.rev(p2), node)
1277 1299 self.index.insert(-1, e)
1278 1300 self.nodemap[node] = r
1279 1301 entry = self._io.packentry(e, self.node, self.version, r)
1280 1302 if self._inline:
1281 1303 ifh.write(entry)
1282 1304 ifh.write(cdelta[0])
1283 1305 ifh.write(cdelta[1])
1284 1306 self.checkinlinesize(transaction, ifh)
1285 1307 if not self._inline:
1286 1308 dfh = self.opener(self.datafile, "a")
1287 1309 ifh = self.opener(self.indexfile, "a")
1288 1310 else:
1289 1311 dfh.write(cdelta[0])
1290 1312 dfh.write(cdelta[1])
1291 1313 ifh.write(entry)
1292 1314
1293 1315 t, r, chain, prev = r, r + 1, node, node
1294 1316 base = self.base(t)
1295 1317 start = self.start(base)
1296 1318 end = self.end(t)
1297 1319 finally:
1298 1320 if dfh:
1299 1321 dfh.close()
1300 1322 ifh.close()
1301 1323
1302 1324 return node
1303 1325
1304 1326 def strip(self, minlink, transaction):
1305 1327 """truncate the revlog on the first revision with a linkrev >= minlink
1306 1328
1307 1329 This function is called when we're stripping revision minlink and
1308 1330 its descendants from the repository.
1309 1331
1310 1332 We have to remove all revisions with linkrev >= minlink, because
1311 1333 the equivalent changelog revisions will be renumbered after the
1312 1334 strip.
1313 1335
1314 1336 So we truncate the revlog on the first of these revisions, and
1315 1337 trust that the caller has saved the revisions that shouldn't be
1316 1338 removed and that it'll readd them after this truncation.
1317 1339 """
1318 1340 if len(self) == 0:
1319 1341 return
1320 1342
1321 1343 if isinstance(self.index, lazyindex):
1322 1344 self._loadindexmap()
1323 1345
1324 1346 for rev in self:
1325 1347 if self.index[rev][4] >= minlink:
1326 1348 break
1327 1349 else:
1328 1350 return
1329 1351
1330 1352 # first truncate the files on disk
1331 1353 end = self.start(rev)
1332 1354 if not self._inline:
1333 1355 transaction.add(self.datafile, end)
1334 1356 end = rev * self._io.size
1335 1357 else:
1336 1358 end += rev * self._io.size
1337 1359
1338 1360 transaction.add(self.indexfile, end)
1339 1361
1340 1362 # then reset internal state in memory to forget those revisions
1341 1363 self._cache = None
1342 1364 self._chunkclear()
1343 1365 for x in xrange(rev, len(self)):
1344 1366 del self.nodemap[self.node(x)]
1345 1367
1346 1368 del self.index[rev:-1]
1347 1369
1348 1370 def checksize(self):
1349 1371 expected = 0
1350 1372 if len(self):
1351 1373 expected = max(0, self.end(len(self) - 1))
1352 1374
1353 1375 try:
1354 1376 f = self.opener(self.datafile)
1355 1377 f.seek(0, 2)
1356 1378 actual = f.tell()
1357 1379 dd = actual - expected
1358 1380 except IOError, inst:
1359 1381 if inst.errno != errno.ENOENT:
1360 1382 raise
1361 1383 dd = 0
1362 1384
1363 1385 try:
1364 1386 f = self.opener(self.indexfile)
1365 1387 f.seek(0, 2)
1366 1388 actual = f.tell()
1367 1389 s = self._io.size
1368 1390 i = max(0, actual // s)
1369 1391 di = actual - (i * s)
1370 1392 if self._inline:
1371 1393 databytes = 0
1372 1394 for r in self:
1373 1395 databytes += max(0, self.length(r))
1374 1396 dd = 0
1375 1397 di = actual - len(self) * s - databytes
1376 1398 except IOError, inst:
1377 1399 if inst.errno != errno.ENOENT:
1378 1400 raise
1379 1401 di = 0
1380 1402
1381 1403 return (dd, di)
1382 1404
1383 1405 def files(self):
1384 1406 res = [ self.indexfile ]
1385 1407 if not self._inline:
1386 1408 res.append(self.datafile)
1387 1409 return res
@@ -1,563 +1,583 b''
1 1 #!/bin/sh
2 2
3 3 checkundo()
4 4 {
5 5 if [ -f .hg/store/undo ]; then
6 6 echo ".hg/store/undo still exists after $1"
7 7 fi
8 8 }
9 9
10 10 echo "[extensions]" >> $HGRCPATH
11 11 echo "mq=" >> $HGRCPATH
12 12
13 13 echo % help
14 14 hg help mq
15 15
16 16 hg init a
17 17 cd a
18 18 echo a > a
19 19 hg ci -Ama
20 20
21 21 hg clone . ../k
22 22
23 23 mkdir b
24 24 echo z > b/z
25 25 hg ci -Ama
26 26
27 27 echo % qinit
28 28
29 29 hg qinit
30 30
31 31 cd ..
32 32 hg init b
33 33
34 34 echo % -R qinit
35 35
36 36 hg -R b qinit
37 37
38 38 hg init c
39 39
40 40 echo % qinit -c
41 41
42 42 hg --cwd c qinit -c
43 43 hg -R c/.hg/patches st
44 44
45 45 echo '% qinit; qinit -c'
46 46 hg init d
47 47 cd d
48 48 hg qinit
49 49 hg qinit -c
50 50 # qinit -c should create both files if they don't exist
51 51 echo ' .hgignore:'
52 52 cat .hg/patches/.hgignore
53 53 echo ' series:'
54 54 cat .hg/patches/series
55 55 hg qinit -c 2>&1 | sed -e 's/repository.*already/repository already/'
56 56 cd ..
57 57
58 58 echo '% qinit; <stuff>; qinit -c'
59 59 hg init e
60 60 cd e
61 61 hg qnew A
62 62 checkundo qnew
63 63 echo foo > foo
64 64 hg add foo
65 65 hg qrefresh
66 66 hg qnew B
67 67 echo >> foo
68 68 hg qrefresh
69 69 echo status >> .hg/patches/.hgignore
70 70 echo bleh >> .hg/patches/.hgignore
71 71 hg qinit -c
72 72 hg -R .hg/patches status
73 73 # qinit -c shouldn't touch these files if they already exist
74 74 echo ' .hgignore:'
75 75 cat .hg/patches/.hgignore
76 76 echo ' series:'
77 77 cat .hg/patches/series
78 78 cd ..
79 79
80 80 cd a
81 81
82 82 hg qnew -m 'foo bar' test.patch
83 83
84 84 echo % qrefresh
85 85
86 86 echo a >> a
87 87 hg qrefresh
88 88 sed -e "s/^\(diff -r \)\([a-f0-9]* \)/\1 x/" \
89 89 -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
90 90 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/test.patch
91 91
92 92 echo % empty qrefresh
93 93
94 94 hg qrefresh -X a
95 95 echo 'revision:'
96 96 hg diff -r -2 -r -1
97 97 echo 'patch:'
98 98 cat .hg/patches/test.patch
99 99 echo 'working dir diff:'
100 100 hg diff --nodates -q
101 101 # restore things
102 102 hg qrefresh
103 103 checkundo qrefresh
104 104
105 105 echo % qpop
106 106
107 107 hg qpop
108 108 checkundo qpop
109 109
110 110 echo % qpush with dump of tag cache
111 111
112 112 # Dump the tag cache to ensure that it has exactly one head after qpush.
113 113 rm -f .hg/tags.cache
114 114 hg tags > /dev/null
115 115 echo ".hg/tags.cache (pre qpush):"
116 116 sed 's/ [0-9a-f]*//' .hg/tags.cache
117 117 hg qpush
118 118 hg tags > /dev/null
119 119 echo ".hg/tags.cache (post qpush):"
120 120 sed 's/ [0-9a-f]*//' .hg/tags.cache
121 121
122 122 checkundo qpush
123 123
124 124 cd ..
125 125
126 126 echo % pop/push outside repo
127 127
128 128 hg -R a qpop
129 129 hg -R a qpush
130 130
131 131 cd a
132 132 hg qnew test2.patch
133 133
134 134 echo % qrefresh in subdir
135 135
136 136 cd b
137 137 echo a > a
138 138 hg add a
139 139 hg qrefresh
140 140
141 141 echo % pop/push -a in subdir
142 142
143 143 hg qpop -a
144 144 hg --traceback qpush -a
145 145
146 146 # setting columns & interactive tests truncating (issue1912)
147 147 echo % qseries
148 148 COLUMNS=4 hg qseries --config ui.interactive=true
149 149 COLUMNS=20 hg qseries --config ui.interactive=true -vs
150 150 hg qpop
151 151 hg qseries -vs
152 152 hg qpush
153 153
154 154 echo % qapplied
155 155 hg qapplied
156 156
157 157 echo % qtop
158 158 hg qtop
159 159
160 160 echo % prev
161 161 hg qapp -1
162 162
163 163 echo % next
164 164 hg qunapp -1
165 165
166 166 hg qpop
167 167 echo % commit should fail
168 168 hg commit
169 169
170 170 echo % push should fail
171 171 hg push ../../k
172 172
173 173 echo % import should fail
174 174 hg st .
175 175 echo foo >> ../a
176 176 hg diff > ../../import.diff
177 177 hg revert --no-backup ../a
178 178 hg import ../../import.diff
179 179 hg st
180 180 echo % import --no-commit should succeed
181 181 hg import --no-commit ../../import.diff
182 182 hg st
183 183 hg revert --no-backup ../a
184 184
185 185 echo % qunapplied
186 186 hg qunapplied
187 187
188 188 echo % qpush/qpop with index
189 189 hg qnew test1b.patch
190 190 echo 1b > 1b
191 191 hg add 1b
192 192 hg qrefresh
193 193 hg qpush 2
194 194 hg qpop 0
195 195 hg qpush test.patch+1
196 196 hg qpush test.patch+2
197 197 hg qpop test2.patch-1
198 198 hg qpop test2.patch-2
199 199 hg qpush test1b.patch+1
200 200
201 201 echo % pop, qapplied, qunapplied
202 202 hg qseries -v
203 203 echo % qapplied -1 test.patch
204 204 hg qapplied -1 test.patch
205 205 echo % qapplied -1 test1b.patch
206 206 hg qapplied -1 test1b.patch
207 207 echo % qapplied -1 test2.patch
208 208 hg qapplied -1 test2.patch
209 209 echo % qapplied -1
210 210 hg qapplied -1
211 211 echo % qapplied
212 212 hg qapplied
213 213 echo % qapplied test1b.patch
214 214 hg qapplied test1b.patch
215 215 echo % qunapplied -1
216 216 hg qunapplied -1
217 217 echo % qunapplied
218 218 hg qunapplied
219 219 echo % popping
220 220 hg qpop
221 221 echo % qunapplied -1
222 222 hg qunapplied -1
223 223 echo % qunapplied
224 224 hg qunapplied
225 225 echo % qunapplied test2.patch
226 226 hg qunapplied test2.patch
227 227 echo % qunapplied -1 test2.patch
228 228 hg qunapplied -1 test2.patch
229 229 echo % popping -a
230 230 hg qpop -a
231 231 echo % qapplied
232 232 hg qapplied
233 233 echo % qapplied -1
234 234 hg qapplied -1
235 235 hg qpush
236 236
237 237 echo % push should succeed
238 238 hg qpop -a
239 239 hg push ../../k
240 240
241 241 echo % qpush/qpop error codes
242 242 errorcode()
243 243 {
244 244 hg "$@" && echo " $@ succeeds" || echo " $@ fails"
245 245 }
246 246
247 247 # we want to start with some patches applied
248 248 hg qpush -a
249 249 echo " % pops all patches and succeeds"
250 250 errorcode qpop -a
251 251 echo " % does nothing and succeeds"
252 252 errorcode qpop -a
253 253 echo " % fails - nothing else to pop"
254 254 errorcode qpop
255 255 echo " % pushes a patch and succeeds"
256 256 errorcode qpush
257 257 echo " % pops a patch and succeeds"
258 258 errorcode qpop
259 259 echo " % pushes up to test1b.patch and succeeds"
260 260 errorcode qpush test1b.patch
261 261 echo " % does nothing and succeeds"
262 262 errorcode qpush test1b.patch
263 263 echo " % does nothing and succeeds"
264 264 errorcode qpop test1b.patch
265 265 echo " % fails - can't push to this patch"
266 266 errorcode qpush test.patch
267 267 echo " % fails - can't pop to this patch"
268 268 errorcode qpop test2.patch
269 269 echo " % pops up to test.patch and succeeds"
270 270 errorcode qpop test.patch
271 271 echo " % pushes all patches and succeeds"
272 272 errorcode qpush -a
273 273 echo " % does nothing and succeeds"
274 274 errorcode qpush -a
275 275 echo " % fails - nothing else to push"
276 276 errorcode qpush
277 277 echo " % does nothing and succeeds"
278 278 errorcode qpush test2.patch
279 279
280 280
281 281 echo % strip
282 282 cd ../../b
283 283 echo x>x
284 284 hg ci -Ama
285 285 hg strip tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
286 286 hg unbundle .hg/strip-backup/*
287 287
288 288 echo % strip with local changes, should complain
289 289 hg up
290 290 echo y>y
291 291 hg add y
292 292 hg strip tip | sed 's/\(saving bundle to \).*/\1/'
293 293 echo % --force strip with local changes
294 294 hg strip -f tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
295 295
296 296 echo '% cd b; hg qrefresh'
297 297 hg init refresh
298 298 cd refresh
299 299 echo a > a
300 300 hg ci -Ama
301 301 hg qnew -mfoo foo
302 302 echo a >> a
303 303 hg qrefresh
304 304 mkdir b
305 305 cd b
306 306 echo f > f
307 307 hg add f
308 308 hg qrefresh
309 309 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
310 310 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
311 311 echo % hg qrefresh .
312 312 hg qrefresh .
313 313 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
314 314 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
315 315 hg status
316 316
317 317 echo % qpush failure
318 318 cd ..
319 319 hg qrefresh
320 320 hg qnew -mbar bar
321 321 echo foo > foo
322 322 echo bar > bar
323 323 hg add foo bar
324 324 hg qrefresh
325 325 hg qpop -a
326 326 echo bar > foo
327 327 hg qpush -a
328 328 hg st
329 329
330 330 echo % mq tags
331 331 hg log --template '{rev} {tags}\n' -r qparent:qtip
332 332
333 333 echo % bad node in status
334 334 hg qpop
335 335 hg strip -qn tip
336 336 hg tip 2>&1 | sed -e 's/unknown node .*/unknown node/'
337 337 hg branches 2>&1 | sed -e 's/unknown node .*/unknown node/'
338 338 hg qpop 2>&1 | sed -e 's/unknown node .*/unknown node/'
339 339
340 340 cat >>$HGRCPATH <<EOF
341 341 [diff]
342 342 git = True
343 343 EOF
344 344 cd ..
345 345 hg init git
346 346 cd git
347 347 hg qinit
348 348
349 349 hg qnew -m'new file' new
350 350 echo foo > new
351 351 chmod +x new
352 352 hg add new
353 353 hg qrefresh
354 354 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
355 355 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/new
356 356
357 357 hg qnew -m'copy file' copy
358 358 hg cp new copy
359 359 hg qrefresh
360 360 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
361 361 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/copy
362 362
363 363 hg qpop
364 364 hg qpush
365 365 hg qdiff
366 366 cat >>$HGRCPATH <<EOF
367 367 [diff]
368 368 git = False
369 369 EOF
370 370 hg qdiff --git
371 371
372 372 cd ..
373 373 hg init slow
374 374 cd slow
375 375 hg qinit
376 376 echo foo > foo
377 377 hg add foo
378 378 hg ci -m 'add foo'
379 379 hg qnew bar
380 380 echo bar > bar
381 381 hg add bar
382 382 hg mv foo baz
383 383 hg qrefresh --git
384 384 hg up -C 0
385 385 echo >> foo
386 386 hg ci -m 'change foo'
387 387 hg up -C 1
388 388 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
389 389 cat .hg/patches/bar
390 390 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
391 391 hg qrefresh --git
392 392 cat .hg/patches/bar
393 393 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
394 394 hg qrefresh
395 395 grep 'diff --git' .hg/patches/bar
396 396
397 397 echo
398 398 hg up -C 1
399 399 echo >> foo
400 400 hg ci -m 'change foo again'
401 401 hg up -C 2
402 402 hg mv bar quux
403 403 hg mv baz bleh
404 404 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
405 405 cat .hg/patches/bar
406 406 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
407 407 hg mv quux fred
408 408 hg mv bleh barney
409 409 hg qrefresh --git
410 410 cat .hg/patches/bar
411 411 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
412 412
413 413 echo % refresh omitting an added file
414 414 hg qnew baz
415 415 echo newfile > newfile
416 416 hg add newfile
417 417 hg qrefresh
418 418 hg st -A newfile
419 419 hg qrefresh -X newfile
420 420 hg st -A newfile
421 421 hg revert newfile
422 422 rm newfile
423 423 hg qpop
424 424 hg qdel baz
425 425
426 426 echo % create a git patch
427 427 echo a > alexander
428 428 hg add alexander
429 429 hg qnew -f --git addalexander
430 430 grep diff .hg/patches/addalexander
431 431
432 432 echo % create a git binary patch
433 433 cat > writebin.py <<EOF
434 434 import sys
435 435 path = sys.argv[1]
436 436 open(path, 'wb').write('BIN\x00ARY')
437 437 EOF
438 438 python writebin.py bucephalus
439 439
440 440 python "$TESTDIR/md5sum.py" bucephalus
441 441 hg add bucephalus
442 442 hg qnew -f --git addbucephalus
443 443 grep diff .hg/patches/addbucephalus
444 444
445 445 echo % check binary patches can be popped and pushed
446 446 hg qpop
447 447 test -f bucephalus && echo % bucephalus should not be there
448 448 hg qpush
449 449 test -f bucephalus || echo % bucephalus should be there
450 450 python "$TESTDIR/md5sum.py" bucephalus
451 451
452 452
453 453 echo '% strip again'
454 454 cd ..
455 455 hg init strip
456 456 cd strip
457 457 touch foo
458 458 hg add foo
459 459 hg ci -m 'add foo'
460 460 echo >> foo
461 461 hg ci -m 'change foo 1'
462 462 hg up -C 0
463 463 echo 1 >> foo
464 464 hg ci -m 'change foo 2'
465 465 HGMERGE=true hg merge
466 466 hg ci -m merge
467 467 hg log
468 468 hg strip 1 2>&1 | sed 's/\(saving bundle to \).*/\1/'
469 469 checkundo strip
470 470 hg log
471 471 cd ..
472 472
473 473 echo '% qclone'
474 474 qlog()
475 475 {
476 476 echo 'main repo:'
477 477 hg log --template ' rev {rev}: {desc}\n'
478 478 echo 'patch repo:'
479 479 hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
480 480 }
481 481 hg init qclonesource
482 482 cd qclonesource
483 483 echo foo > foo
484 484 hg add foo
485 485 hg ci -m 'add foo'
486 486 hg qinit
487 487 hg qnew patch1
488 488 echo bar >> foo
489 489 hg qrefresh -m 'change foo'
490 490 cd ..
491 491
492 492 # repo with unversioned patch dir
493 493 hg qclone qclonesource failure
494 494
495 495 cd qclonesource
496 496 hg qinit -c
497 497 hg qci -m checkpoint
498 498 qlog
499 499 cd ..
500 500
501 501 # repo with patches applied
502 502 hg qclone qclonesource qclonedest
503 503 cd qclonedest
504 504 qlog
505 505 cd ..
506 506
507 507 # repo with patches unapplied
508 508 cd qclonesource
509 509 hg qpop -a
510 510 qlog
511 511 cd ..
512 512 hg qclone qclonesource qclonedest2
513 513 cd qclonedest2
514 514 qlog
515 515 cd ..
516 516
517 517 echo % 'test applying on an empty file (issue 1033)'
518 518 hg init empty
519 519 cd empty
520 520 touch a
521 521 hg ci -Am addempty
522 522 echo a > a
523 523 hg qnew -f -e changea
524 524 hg qpop
525 525 hg qpush
526 526 cd ..
527 527
528 528 echo % test qpush with --force, issue1087
529 529 hg init forcepush
530 530 cd forcepush
531 531 echo hello > hello.txt
532 532 echo bye > bye.txt
533 533 hg ci -Ama
534 534 hg qnew -d '0 0' empty
535 535 hg qpop
536 536 echo world >> hello.txt
537 537
538 538 echo % qpush should fail, local changes
539 539 hg qpush
540 540
541 541 echo % apply force, should not discard changes with empty patch
542 542 hg qpush -f 2>&1 | sed 's,^.*/patch,patch,g'
543 543 hg diff --config diff.nodates=True
544 544 hg qdiff --config diff.nodates=True
545 545 hg log -l1 -p
546 546 hg qref -d '0 0'
547 547 hg qpop
548 548 echo universe >> hello.txt
549 549 echo universe >> bye.txt
550 550
551 551 echo % qpush should fail, local changes
552 552 hg qpush
553 553
554 554 echo % apply force, should discard changes in hello, but not bye
555 555 hg qpush -f
556 556 hg st
557 557 hg diff --config diff.nodates=True
558 558 hg qdiff --config diff.nodates=True
559 559
560 560 echo % test popping revisions not in working dir ancestry
561 561 hg qseries -v
562 562 hg up qparent
563 563 hg qpop
564
565 cd ..
566 hg init deletion-order
567 cd deletion-order
568
569 touch a
570 hg ci -Aqm0
571
572 hg qnew rename-dir
573 hg rm a
574 hg qrefresh
575
576 mkdir a b
577 touch a/a b/b
578 hg add -q a b
579 hg qrefresh
580
581 echo % test popping must remove files added in subdirectories first
582 hg qpop
583 cd ..
@@ -1,614 +1,617 b''
1 1 % help
2 2 mq extension - manage a stack of patches
3 3
4 4 This extension lets you work with a stack of patches in a Mercurial
5 5 repository. It manages two stacks of patches - all known patches, and applied
6 6 patches (subset of known patches).
7 7
8 8 Known patches are represented as patch files in the .hg/patches directory.
9 9 Applied patches are both patch files and changesets.
10 10
11 11 Common tasks (use "hg help command" for more details):
12 12
13 13 prepare repository to work with patches qinit
14 14 create new patch qnew
15 15 import existing patch qimport
16 16
17 17 print patch series qseries
18 18 print applied patches qapplied
19 19
20 20 add known patch to applied stack qpush
21 21 remove patch from applied stack qpop
22 22 refresh contents of top applied patch qrefresh
23 23
24 24 list of commands:
25 25
26 26 qapplied print the patches already applied
27 27 qclone clone main and patch repository at same time
28 28 qcommit commit changes in the queue repository
29 29 qdelete remove patches from queue
30 30 qdiff diff of the current patch and subsequent modifications
31 31 qfinish move applied patches into repository history
32 32 qfold fold the named patches into the current patch
33 33 qgoto push or pop patches until named patch is at top of stack
34 34 qguard set or print guards for a patch
35 35 qheader print the header of the topmost or specified patch
36 36 qimport import a patch
37 37 qinit init a new queue repository
38 38 qnew create a new patch
39 39 qnext print the name of the next patch
40 40 qpop pop the current patch off the stack
41 41 qprev print the name of the previous patch
42 42 qpush push the next patch onto the stack
43 43 qrefresh update the current patch
44 44 qrename rename a patch
45 45 qrestore restore the queue state saved by a revision
46 46 qsave save current queue state
47 47 qselect set or print guarded patches to push
48 48 qseries print the entire series file
49 49 qtop print the name of the current patch
50 50 qunapplied print the patches not yet applied
51 51 strip strip a revision and all its descendants from the repository
52 52
53 53 use "hg -v help mq" to show aliases and global options
54 54 adding a
55 55 updating to branch default
56 56 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 57 adding b/z
58 58 % qinit
59 59 % -R qinit
60 60 % qinit -c
61 61 A .hgignore
62 62 A series
63 63 % qinit; qinit -c
64 64 .hgignore:
65 65 ^\.hg
66 66 ^\.mq
67 67 syntax: glob
68 68 status
69 69 guards
70 70 series:
71 71 abort: repository already exists!
72 72 % qinit; <stuff>; qinit -c
73 73 adding .hg/patches/A
74 74 adding .hg/patches/B
75 75 A .hgignore
76 76 A A
77 77 A B
78 78 A series
79 79 .hgignore:
80 80 status
81 81 bleh
82 82 series:
83 83 A
84 84 B
85 85 % qrefresh
86 86 foo bar
87 87
88 88 diff -r xa
89 89 --- a/a
90 90 +++ b/a
91 91 @@ -1,1 +1,2 @@
92 92 a
93 93 +a
94 94 % empty qrefresh
95 95 revision:
96 96 patch:
97 97 foo bar
98 98
99 99 working dir diff:
100 100 --- a/a
101 101 +++ b/a
102 102 @@ -1,1 +1,2 @@
103 103 a
104 104 +a
105 105 % qpop
106 106 popping test.patch
107 107 patch queue now empty
108 108 % qpush with dump of tag cache
109 109 .hg/tags.cache (pre qpush):
110 110 1
111 111
112 112 applying test.patch
113 113 now at: test.patch
114 114 .hg/tags.cache (post qpush):
115 115 2
116 116
117 117 % pop/push outside repo
118 118 popping test.patch
119 119 patch queue now empty
120 120 applying test.patch
121 121 now at: test.patch
122 122 % qrefresh in subdir
123 123 % pop/push -a in subdir
124 124 popping test2.patch
125 125 popping test.patch
126 126 patch queue now empty
127 127 applying test.patch
128 128 applying test2.patch
129 129 now at: test2.patch
130 130 % qseries
131 131 test.patch
132 132 test2.patch
133 133 0 A test.patch: f...
134 134 1 A test2.patch:
135 135 popping test2.patch
136 136 now at: test.patch
137 137 0 A test.patch: foo bar
138 138 1 U test2.patch:
139 139 applying test2.patch
140 140 now at: test2.patch
141 141 % qapplied
142 142 test.patch
143 143 test2.patch
144 144 % qtop
145 145 test2.patch
146 146 % prev
147 147 test.patch
148 148 % next
149 149 all patches applied
150 150 popping test2.patch
151 151 now at: test.patch
152 152 % commit should fail
153 153 abort: cannot commit over an applied mq patch
154 154 % push should fail
155 155 pushing to ../../k
156 156 abort: source has mq patches applied
157 157 % import should fail
158 158 abort: cannot import over an applied patch
159 159 % import --no-commit should succeed
160 160 applying ../../import.diff
161 161 M a
162 162 % qunapplied
163 163 test2.patch
164 164 % qpush/qpop with index
165 165 applying test2.patch
166 166 now at: test2.patch
167 167 popping test2.patch
168 168 popping test1b.patch
169 169 now at: test.patch
170 170 applying test1b.patch
171 171 now at: test1b.patch
172 172 applying test2.patch
173 173 now at: test2.patch
174 174 popping test2.patch
175 175 now at: test1b.patch
176 176 popping test1b.patch
177 177 now at: test.patch
178 178 applying test1b.patch
179 179 applying test2.patch
180 180 now at: test2.patch
181 181 % pop, qapplied, qunapplied
182 182 0 A test.patch
183 183 1 A test1b.patch
184 184 2 A test2.patch
185 185 % qapplied -1 test.patch
186 186 only one patch applied
187 187 % qapplied -1 test1b.patch
188 188 test.patch
189 189 % qapplied -1 test2.patch
190 190 test1b.patch
191 191 % qapplied -1
192 192 test1b.patch
193 193 % qapplied
194 194 test.patch
195 195 test1b.patch
196 196 test2.patch
197 197 % qapplied test1b.patch
198 198 test.patch
199 199 test1b.patch
200 200 % qunapplied -1
201 201 all patches applied
202 202 % qunapplied
203 203 % popping
204 204 popping test2.patch
205 205 now at: test1b.patch
206 206 % qunapplied -1
207 207 test2.patch
208 208 % qunapplied
209 209 test2.patch
210 210 % qunapplied test2.patch
211 211 % qunapplied -1 test2.patch
212 212 all patches applied
213 213 % popping -a
214 214 popping test1b.patch
215 215 popping test.patch
216 216 patch queue now empty
217 217 % qapplied
218 218 % qapplied -1
219 219 no patches applied
220 220 applying test.patch
221 221 now at: test.patch
222 222 % push should succeed
223 223 popping test.patch
224 224 patch queue now empty
225 225 pushing to ../../k
226 226 searching for changes
227 227 adding changesets
228 228 adding manifests
229 229 adding file changes
230 230 added 1 changesets with 1 changes to 1 files
231 231 % qpush/qpop error codes
232 232 applying test.patch
233 233 applying test1b.patch
234 234 applying test2.patch
235 235 now at: test2.patch
236 236 % pops all patches and succeeds
237 237 popping test2.patch
238 238 popping test1b.patch
239 239 popping test.patch
240 240 patch queue now empty
241 241 qpop -a succeeds
242 242 % does nothing and succeeds
243 243 no patches applied
244 244 qpop -a succeeds
245 245 % fails - nothing else to pop
246 246 no patches applied
247 247 qpop fails
248 248 % pushes a patch and succeeds
249 249 applying test.patch
250 250 now at: test.patch
251 251 qpush succeeds
252 252 % pops a patch and succeeds
253 253 popping test.patch
254 254 patch queue now empty
255 255 qpop succeeds
256 256 % pushes up to test1b.patch and succeeds
257 257 applying test.patch
258 258 applying test1b.patch
259 259 now at: test1b.patch
260 260 qpush test1b.patch succeeds
261 261 % does nothing and succeeds
262 262 qpush: test1b.patch is already at the top
263 263 qpush test1b.patch succeeds
264 264 % does nothing and succeeds
265 265 qpop: test1b.patch is already at the top
266 266 qpop test1b.patch succeeds
267 267 % fails - can't push to this patch
268 268 abort: cannot push to a previous patch: test.patch
269 269 qpush test.patch fails
270 270 % fails - can't pop to this patch
271 271 abort: patch test2.patch is not applied
272 272 qpop test2.patch fails
273 273 % pops up to test.patch and succeeds
274 274 popping test1b.patch
275 275 now at: test.patch
276 276 qpop test.patch succeeds
277 277 % pushes all patches and succeeds
278 278 applying test1b.patch
279 279 applying test2.patch
280 280 now at: test2.patch
281 281 qpush -a succeeds
282 282 % does nothing and succeeds
283 283 all patches are currently applied
284 284 qpush -a succeeds
285 285 % fails - nothing else to push
286 286 patch series already fully applied
287 287 qpush fails
288 288 % does nothing and succeeds
289 289 qpush: test2.patch is already at the top
290 290 qpush test2.patch succeeds
291 291 % strip
292 292 adding x
293 293 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
294 294 saving bundle to
295 295 adding changesets
296 296 adding manifests
297 297 adding file changes
298 298 added 1 changesets with 1 changes to 1 files
299 299 (run 'hg update' to get a working copy)
300 300 % strip with local changes, should complain
301 301 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
302 302 abort: local changes found
303 303 % --force strip with local changes
304 304 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
305 305 saving bundle to
306 306 % cd b; hg qrefresh
307 307 adding a
308 308 foo
309 309
310 310 diff -r cb9a9f314b8b a
311 311 --- a/a
312 312 +++ b/a
313 313 @@ -1,1 +1,2 @@
314 314 a
315 315 +a
316 316 diff -r cb9a9f314b8b b/f
317 317 --- /dev/null
318 318 +++ b/b/f
319 319 @@ -0,0 +1,1 @@
320 320 +f
321 321 % hg qrefresh .
322 322 foo
323 323
324 324 diff -r cb9a9f314b8b b/f
325 325 --- /dev/null
326 326 +++ b/b/f
327 327 @@ -0,0 +1,1 @@
328 328 +f
329 329 M a
330 330 % qpush failure
331 331 popping bar
332 332 popping foo
333 333 patch queue now empty
334 334 applying foo
335 335 applying bar
336 336 file foo already exists
337 337 1 out of 1 hunks FAILED -- saving rejects to file foo.rej
338 338 patch failed, unable to continue (try -v)
339 339 patch failed, rejects left in working dir
340 340 errors during apply, please fix and refresh bar
341 341 ? foo
342 342 ? foo.rej
343 343 % mq tags
344 344 0 qparent
345 345 1 qbase foo
346 346 2 qtip bar tip
347 347 % bad node in status
348 348 popping bar
349 349 now at: foo
350 350 changeset: 0:cb9a9f314b8b
351 351 mq status file refers to unknown node
352 352 tag: tip
353 353 user: test
354 354 date: Thu Jan 01 00:00:00 1970 +0000
355 355 summary: a
356 356
357 357 mq status file refers to unknown node
358 358 default 0:cb9a9f314b8b
359 359 abort: trying to pop unknown node
360 360 new file
361 361
362 362 diff --git a/new b/new
363 363 new file mode 100755
364 364 --- /dev/null
365 365 +++ b/new
366 366 @@ -0,0 +1,1 @@
367 367 +foo
368 368 copy file
369 369
370 370 diff --git a/new b/copy
371 371 copy from new
372 372 copy to copy
373 373 popping copy
374 374 now at: new
375 375 applying copy
376 376 now at: copy
377 377 diff --git a/new b/copy
378 378 copy from new
379 379 copy to copy
380 380 diff --git a/new b/copy
381 381 copy from new
382 382 copy to copy
383 383 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
384 384 created new head
385 385 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
386 386 popping bar
387 387 adding branch
388 388 adding changesets
389 389 adding manifests
390 390 adding file changes
391 391 added 1 changesets with 1 changes to 1 files
392 392 patch queue now empty
393 393 (working directory not at a head)
394 394 applying bar
395 395 now at: bar
396 396 diff --git a/bar b/bar
397 397 new file mode 100644
398 398 --- /dev/null
399 399 +++ b/bar
400 400 @@ -0,0 +1,1 @@
401 401 +bar
402 402 diff --git a/foo b/baz
403 403 rename from foo
404 404 rename to baz
405 405 2 baz (foo)
406 406 diff --git a/bar b/bar
407 407 new file mode 100644
408 408 --- /dev/null
409 409 +++ b/bar
410 410 @@ -0,0 +1,1 @@
411 411 +bar
412 412 diff --git a/foo b/baz
413 413 rename from foo
414 414 rename to baz
415 415 2 baz (foo)
416 416 diff --git a/bar b/bar
417 417 diff --git a/foo b/baz
418 418
419 419 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
420 420 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
421 421 popping bar
422 422 adding branch
423 423 adding changesets
424 424 adding manifests
425 425 adding file changes
426 426 added 1 changesets with 1 changes to 1 files
427 427 patch queue now empty
428 428 (working directory not at a head)
429 429 applying bar
430 430 now at: bar
431 431 diff --git a/foo b/bleh
432 432 rename from foo
433 433 rename to bleh
434 434 diff --git a/quux b/quux
435 435 new file mode 100644
436 436 --- /dev/null
437 437 +++ b/quux
438 438 @@ -0,0 +1,1 @@
439 439 +bar
440 440 3 bleh (foo)
441 441 diff --git a/foo b/barney
442 442 rename from foo
443 443 rename to barney
444 444 diff --git a/fred b/fred
445 445 new file mode 100644
446 446 --- /dev/null
447 447 +++ b/fred
448 448 @@ -0,0 +1,1 @@
449 449 +bar
450 450 3 barney (foo)
451 451 % refresh omitting an added file
452 452 C newfile
453 453 A newfile
454 454 popping baz
455 455 now at: bar
456 456 % create a git patch
457 457 diff --git a/alexander b/alexander
458 458 % create a git binary patch
459 459 8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
460 460 diff --git a/bucephalus b/bucephalus
461 461 % check binary patches can be popped and pushed
462 462 popping addbucephalus
463 463 now at: addalexander
464 464 applying addbucephalus
465 465 now at: addbucephalus
466 466 8ba2a2f3e77b55d03051ff9c24ad65e7 bucephalus
467 467 % strip again
468 468 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
469 469 created new head
470 470 merging foo
471 471 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
472 472 (branch merge, don't forget to commit)
473 473 changeset: 3:99615015637b
474 474 tag: tip
475 475 parent: 2:20cbbe65cff7
476 476 parent: 1:d2871fc282d4
477 477 user: test
478 478 date: Thu Jan 01 00:00:00 1970 +0000
479 479 summary: merge
480 480
481 481 changeset: 2:20cbbe65cff7
482 482 parent: 0:53245c60e682
483 483 user: test
484 484 date: Thu Jan 01 00:00:00 1970 +0000
485 485 summary: change foo 2
486 486
487 487 changeset: 1:d2871fc282d4
488 488 user: test
489 489 date: Thu Jan 01 00:00:00 1970 +0000
490 490 summary: change foo 1
491 491
492 492 changeset: 0:53245c60e682
493 493 user: test
494 494 date: Thu Jan 01 00:00:00 1970 +0000
495 495 summary: add foo
496 496
497 497 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
498 498 saving bundle to
499 499 saving bundle to
500 500 adding branch
501 501 adding changesets
502 502 adding manifests
503 503 adding file changes
504 504 added 1 changesets with 1 changes to 1 files
505 505 changeset: 1:20cbbe65cff7
506 506 tag: tip
507 507 user: test
508 508 date: Thu Jan 01 00:00:00 1970 +0000
509 509 summary: change foo 2
510 510
511 511 changeset: 0:53245c60e682
512 512 user: test
513 513 date: Thu Jan 01 00:00:00 1970 +0000
514 514 summary: add foo
515 515
516 516 % qclone
517 517 abort: versioned patch repository not found (see qinit -c)
518 518 adding .hg/patches/patch1
519 519 main repo:
520 520 rev 1: change foo
521 521 rev 0: add foo
522 522 patch repo:
523 523 rev 0: checkpoint
524 524 updating to branch default
525 525 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
526 526 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
527 527 main repo:
528 528 rev 0: add foo
529 529 patch repo:
530 530 rev 0: checkpoint
531 531 popping patch1
532 532 patch queue now empty
533 533 main repo:
534 534 rev 0: add foo
535 535 patch repo:
536 536 rev 0: checkpoint
537 537 updating to branch default
538 538 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
539 539 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
540 540 main repo:
541 541 rev 0: add foo
542 542 patch repo:
543 543 rev 0: checkpoint
544 544 % test applying on an empty file (issue 1033)
545 545 adding a
546 546 popping changea
547 547 patch queue now empty
548 548 applying changea
549 549 now at: changea
550 550 % test qpush with --force, issue1087
551 551 adding bye.txt
552 552 adding hello.txt
553 553 popping empty
554 554 patch queue now empty
555 555 % qpush should fail, local changes
556 556 abort: local changes found, refresh first
557 557 % apply force, should not discard changes with empty patch
558 558 applying empty
559 559 patch empty is empty
560 560 now at: empty
561 561 diff -r bf5fc3f07a0a hello.txt
562 562 --- a/hello.txt
563 563 +++ b/hello.txt
564 564 @@ -1,1 +1,2 @@
565 565 hello
566 566 +world
567 567 diff -r 9ecee4f634e3 hello.txt
568 568 --- a/hello.txt
569 569 +++ b/hello.txt
570 570 @@ -1,1 +1,2 @@
571 571 hello
572 572 +world
573 573 changeset: 1:bf5fc3f07a0a
574 574 tag: qtip
575 575 tag: tip
576 576 tag: empty
577 577 tag: qbase
578 578 user: test
579 579 date: Thu Jan 01 00:00:00 1970 +0000
580 580 summary: imported patch empty
581 581
582 582
583 583 popping empty
584 584 patch queue now empty
585 585 % qpush should fail, local changes
586 586 abort: local changes found, refresh first
587 587 % apply force, should discard changes in hello, but not bye
588 588 applying empty
589 589 now at: empty
590 590 M bye.txt
591 591 diff -r ba252371dbc1 bye.txt
592 592 --- a/bye.txt
593 593 +++ b/bye.txt
594 594 @@ -1,1 +1,2 @@
595 595 bye
596 596 +universe
597 597 diff -r 9ecee4f634e3 bye.txt
598 598 --- a/bye.txt
599 599 +++ b/bye.txt
600 600 @@ -1,1 +1,2 @@
601 601 bye
602 602 +universe
603 603 diff -r 9ecee4f634e3 hello.txt
604 604 --- a/hello.txt
605 605 +++ b/hello.txt
606 606 @@ -1,1 +1,3 @@
607 607 hello
608 608 +world
609 609 +universe
610 610 % test popping revisions not in working dir ancestry
611 611 0 A empty
612 612 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
613 613 popping empty
614 614 patch queue now empty
615 % test popping must remove files added in subdirectories first
616 popping rename-dir
617 patch queue now empty
General Comments 0
You need to be logged in to leave comments. Login now