phase: add a transaction argument to advanceboundary...
Pierre-Yves David
r22069:616a455b default
@@ -1,3480 +1,3480 @@
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help command` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 file creations or deletions. This behaviour can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 makes them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60
61 61 This extension used to provide a strip command. This command now lives
62 62 in the strip extension.
63 63 '''
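# Illustrative sketch (not part of mq itself): a rough, self-contained
# picture of how the 'git = auto/keep/yes/no' values documented above
# translate into diff behaviour, mirroring queue.diffopts/patchopts further
# down. The helper name and return convention are hypothetical.
def _resolve_git_mode(gitmode, patch_is_git):
    """Return True/False to force git/plain patches, or None for 'auto'."""
    gitmode = gitmode.lower()
    if gitmode == 'yes':
        return True               # always write git patches
    if gitmode == 'no':
        return False              # always write plain patches (may lose data)
    if gitmode == 'keep':
        return patch_is_git       # preserve whatever format the patch already uses
    if gitmode == 'auto':
        return None               # upgrade to git only when the diff requires it
    raise ValueError("mq.git option can be auto/keep/yes/no, got %s" % gitmode)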
64 64
65 65 from mercurial.i18n import _
66 66 from mercurial.node import bin, hex, short, nullid, nullrev
67 67 from mercurial.lock import release
68 68 from mercurial import commands, cmdutil, hg, scmutil, util, revset
69 69 from mercurial import extensions, error, phases
70 70 from mercurial import patch as patchmod
71 71 from mercurial import localrepo
72 72 from mercurial import subrepo
73 73 import os, re, errno, shutil
74 74
75 75 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
76 76
77 77 cmdtable = {}
78 78 command = cmdutil.command(cmdtable)
79 79 testedwith = 'internal'
80 80
81 81 # force load strip extension formerly included in mq and import some utility
82 82 try:
83 83 stripext = extensions.find('strip')
84 84 except KeyError:
85 85 # note: load is lazy so we could avoid the try-except,
86 86 # but I (marmoute) prefer this explicit code.
87 87 class dummyui(object):
88 88 def debug(self, msg):
89 89 pass
90 90 stripext = extensions.load(dummyui(), 'strip', '')
91 91
92 92 strip = stripext.strip
93 93 checksubstate = stripext.checksubstate
94 94 checklocalchanges = stripext.checklocalchanges
95 95
96 96
97 97 # Patch names look like unix-file names.
98 98 # They must be joinable with the queue directory and result in the patch path.
99 99 normname = util.normpath
100 100
101 101 class statusentry(object):
102 102 def __init__(self, node, name):
103 103 self.node, self.name = node, name
104 104 def __repr__(self):
105 105 return hex(self.node) + ':' + self.name
106 106
107 107 class patchheader(object):
108 108 def __init__(self, pf, plainmode=False):
109 109 def eatdiff(lines):
110 110 while lines:
111 111 l = lines[-1]
112 112 if (l.startswith("diff -") or
113 113 l.startswith("Index:") or
114 114 l.startswith("===========")):
115 115 del lines[-1]
116 116 else:
117 117 break
118 118 def eatempty(lines):
119 119 while lines:
120 120 if not lines[-1].strip():
121 121 del lines[-1]
122 122 else:
123 123 break
124 124
125 125 message = []
126 126 comments = []
127 127 user = None
128 128 date = None
129 129 parent = None
130 130 format = None
131 131 subject = None
132 132 branch = None
133 133 nodeid = None
134 134 diffstart = 0
135 135
136 136 for line in file(pf):
137 137 line = line.rstrip()
138 138 if (line.startswith('diff --git')
139 139 or (diffstart and line.startswith('+++ '))):
140 140 diffstart = 2
141 141 break
142 142 diffstart = 0 # reset
143 143 if line.startswith("--- "):
144 144 diffstart = 1
145 145 continue
146 146 elif format == "hgpatch":
147 147 # parse values when importing the result of an hg export
148 148 if line.startswith("# User "):
149 149 user = line[7:]
150 150 elif line.startswith("# Date "):
151 151 date = line[7:]
152 152 elif line.startswith("# Parent "):
153 153 parent = line[9:].lstrip()
154 154 elif line.startswith("# Branch "):
155 155 branch = line[9:]
156 156 elif line.startswith("# Node ID "):
157 157 nodeid = line[10:]
158 158 elif not line.startswith("# ") and line:
159 159 message.append(line)
160 160 format = None
161 161 elif line == '# HG changeset patch':
162 162 message = []
163 163 format = "hgpatch"
164 164 elif (format != "tagdone" and (line.startswith("Subject: ") or
165 165 line.startswith("subject: "))):
166 166 subject = line[9:]
167 167 format = "tag"
168 168 elif (format != "tagdone" and (line.startswith("From: ") or
169 169 line.startswith("from: "))):
170 170 user = line[6:]
171 171 format = "tag"
172 172 elif (format != "tagdone" and (line.startswith("Date: ") or
173 173 line.startswith("date: "))):
174 174 date = line[6:]
175 175 format = "tag"
176 176 elif format == "tag" and line == "":
177 177 # when looking for tags (subject: from: etc) they
178 178 # end once you find a blank line in the source
179 179 format = "tagdone"
180 180 elif message or line:
181 181 message.append(line)
182 182 comments.append(line)
183 183
184 184 eatdiff(message)
185 185 eatdiff(comments)
186 186 # Remember the exact starting line of the patch diffs before consuming
187 187 # empty lines, for external use by TortoiseHg and others
188 188 self.diffstartline = len(comments)
189 189 eatempty(message)
190 190 eatempty(comments)
191 191
192 192 # make sure message isn't empty
193 193 if format and format.startswith("tag") and subject:
194 194 message.insert(0, "")
195 195 message.insert(0, subject)
196 196
197 197 self.message = message
198 198 self.comments = comments
199 199 self.user = user
200 200 self.date = date
201 201 self.parent = parent
202 202 # nodeid and branch are for external use by TortoiseHg and others
203 203 self.nodeid = nodeid
204 204 self.branch = branch
205 205 self.haspatch = diffstart > 1
206 206 self.plainmode = plainmode
207 207
208 208 def setuser(self, user):
209 209 if not self.updateheader(['From: ', '# User '], user):
210 210 try:
211 211 patchheaderat = self.comments.index('# HG changeset patch')
212 212 self.comments.insert(patchheaderat + 1, '# User ' + user)
213 213 except ValueError:
214 214 if self.plainmode or self._hasheader(['Date: ']):
215 215 self.comments = ['From: ' + user] + self.comments
216 216 else:
217 217 tmp = ['# HG changeset patch', '# User ' + user, '']
218 218 self.comments = tmp + self.comments
219 219 self.user = user
220 220
221 221 def setdate(self, date):
222 222 if not self.updateheader(['Date: ', '# Date '], date):
223 223 try:
224 224 patchheaderat = self.comments.index('# HG changeset patch')
225 225 self.comments.insert(patchheaderat + 1, '# Date ' + date)
226 226 except ValueError:
227 227 if self.plainmode or self._hasheader(['From: ']):
228 228 self.comments = ['Date: ' + date] + self.comments
229 229 else:
230 230 tmp = ['# HG changeset patch', '# Date ' + date, '']
231 231 self.comments = tmp + self.comments
232 232 self.date = date
233 233
234 234 def setparent(self, parent):
235 235 if not self.updateheader(['# Parent '], parent):
236 236 try:
237 237 patchheaderat = self.comments.index('# HG changeset patch')
238 238 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
239 239 except ValueError:
240 240 pass
241 241 self.parent = parent
242 242
243 243 def setmessage(self, message):
244 244 if self.comments:
245 245 self._delmsg()
246 246 self.message = [message]
247 247 self.comments += self.message
248 248
249 249 def updateheader(self, prefixes, new):
250 250 '''Update all references to a field in the patch header.
251 251 Return whether the field is present.'''
252 252 res = False
253 253 for prefix in prefixes:
254 254 for i in xrange(len(self.comments)):
255 255 if self.comments[i].startswith(prefix):
256 256 self.comments[i] = prefix + new
257 257 res = True
258 258 break
259 259 return res
260 260
261 261 def _hasheader(self, prefixes):
262 262 '''Check if a header starts with any of the given prefixes.'''
263 263 for prefix in prefixes:
264 264 for comment in self.comments:
265 265 if comment.startswith(prefix):
266 266 return True
267 267 return False
268 268
269 269 def __str__(self):
270 270 if not self.comments:
271 271 return ''
272 272 return '\n'.join(self.comments) + '\n\n'
273 273
274 274 def _delmsg(self):
275 275 '''Remove existing message, keeping the rest of the comments fields.
276 276 If comments contains 'subject: ', message will prepend
277 277 the field and a blank line.'''
278 278 if self.message:
279 279 subj = 'subject: ' + self.message[0].lower()
280 280 for i in xrange(len(self.comments)):
281 281 if subj == self.comments[i].lower():
282 282 del self.comments[i]
283 283 self.message = self.message[2:]
284 284 break
285 285 ci = 0
286 286 for mi in self.message:
287 287 while mi != self.comments[ci]:
288 288 ci += 1
289 289 del self.comments[ci]
290 290
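# Illustrative sketch (hypothetical driver code, not part of mq): what the
# patchheader class above extracts from an "hg export"-style header. The
# file contents, user name and assertions are made up.
def _patchheader_example():
    import tempfile
    content = ('# HG changeset patch\n'
               '# User alice <alice@example.org>\n'
               '# Date 0 0\n'
               'fix a bug\n'
               '\n'
               'diff --git a/f b/f\n')
    path = tempfile.mkstemp()[1]
    fp = open(path, 'w')
    fp.write(content)
    fp.close()
    ph = patchheader(path)
    assert ph.user == 'alice <alice@example.org>'
    assert ph.message == ['fix a bug']
    assert ph.haspatch          # a diff section follows the header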
291 291 def newcommit(repo, phase, *args, **kwargs):
292 292 """helper dedicated to ensure a commit respect mq.secret setting
293 293
294 294 It should be used instead of repo.commit inside the mq source for operation
295 295 creating new changeset.
296 296 """
297 297 repo = repo.unfiltered()
298 298 if phase is None:
299 299 if repo.ui.configbool('mq', 'secret', False):
300 300 phase = phases.secret
301 301 if phase is not None:
302 302 backup = repo.ui.backupconfig('phases', 'new-commit')
303 303 try:
304 304 if phase is not None:
305 305 repo.ui.setconfig('phases', 'new-commit', phase, 'mq')
306 306 return repo.commit(*args, **kwargs)
307 307 finally:
308 308 if phase is not None:
309 309 repo.ui.restoreconfig(backup)
310 310
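# Hedged usage sketch of newcommit above; repo is assumed to be an existing
# localrepository and the messages are placeholders.
def _newcommit_example(repo):
    # With "[mq] secret = True" configured, phase=None lets newcommit pick
    # phases.secret on its own; an explicit phase is applied as-is (this is
    # how qrefresh keeps a refreshed changeset in its old phase).
    n1 = newcommit(repo, None, '[mq]: example patch', force=True)
    n2 = newcommit(repo, phases.draft, 'another example patch', force=True)
    return n1, n2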
311 311 class AbortNoCleanup(error.Abort):
312 312 pass
313 313
314 314 class queue(object):
315 315 def __init__(self, ui, baseui, path, patchdir=None):
316 316 self.basepath = path
317 317 try:
318 318 fh = open(os.path.join(path, 'patches.queue'))
319 319 cur = fh.read().rstrip()
320 320 fh.close()
321 321 if not cur:
322 322 curpath = os.path.join(path, 'patches')
323 323 else:
324 324 curpath = os.path.join(path, 'patches-' + cur)
325 325 except IOError:
326 326 curpath = os.path.join(path, 'patches')
327 327 self.path = patchdir or curpath
328 328 self.opener = scmutil.opener(self.path)
329 329 self.ui = ui
330 330 self.baseui = baseui
331 331 self.applieddirty = False
332 332 self.seriesdirty = False
333 333 self.added = []
334 334 self.seriespath = "series"
335 335 self.statuspath = "status"
336 336 self.guardspath = "guards"
337 337 self.activeguards = None
338 338 self.guardsdirty = False
339 339 # Handle mq.git as a bool with extended values
340 340 try:
341 341 gitmode = ui.configbool('mq', 'git', None)
342 342 if gitmode is None:
343 343 raise error.ConfigError
344 344 self.gitmode = gitmode and 'yes' or 'no'
345 345 except error.ConfigError:
346 346 self.gitmode = ui.config('mq', 'git', 'auto').lower()
347 347 self.plainmode = ui.configbool('mq', 'plain', False)
348 348 self.checkapplied = True
349 349
350 350 @util.propertycache
351 351 def applied(self):
352 352 def parselines(lines):
353 353 for l in lines:
354 354 entry = l.split(':', 1)
355 355 if len(entry) > 1:
356 356 n, name = entry
357 357 yield statusentry(bin(n), name)
358 358 elif l.strip():
359 359 self.ui.warn(_('malformed mq status line: %s\n') % entry)
360 360 # else we ignore empty lines
361 361 try:
362 362 lines = self.opener.read(self.statuspath).splitlines()
363 363 return list(parselines(lines))
364 364 except IOError, e:
365 365 if e.errno == errno.ENOENT:
366 366 return []
367 367 raise
368 368
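# Illustrative sketch: the .hg/patches/status file read by 'applied' above
# stores one "hexnode:patchname" pair per line, which is exactly what
# statusentry.__repr__ writes back out. The hash and patch name are made up.
def _status_line_example():
    line = '0123456789abcdef0123456789abcdef01234567:my-fix.patch'
    n, name = line.split(':', 1)
    entry = statusentry(bin(n), name)
    assert repr(entry) == line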
369 369 @util.propertycache
370 370 def fullseries(self):
371 371 try:
372 372 return self.opener.read(self.seriespath).splitlines()
373 373 except IOError, e:
374 374 if e.errno == errno.ENOENT:
375 375 return []
376 376 raise
377 377
378 378 @util.propertycache
379 379 def series(self):
380 380 self.parseseries()
381 381 return self.series
382 382
383 383 @util.propertycache
384 384 def seriesguards(self):
385 385 self.parseseries()
386 386 return self.seriesguards
387 387
388 388 def invalidate(self):
389 389 for a in 'applied fullseries series seriesguards'.split():
390 390 if a in self.__dict__:
391 391 delattr(self, a)
392 392 self.applieddirty = False
393 393 self.seriesdirty = False
394 394 self.guardsdirty = False
395 395 self.activeguards = None
396 396
397 397 def diffopts(self, opts={}, patchfn=None):
398 398 diffopts = patchmod.diffopts(self.ui, opts)
399 399 if self.gitmode == 'auto':
400 400 diffopts.upgrade = True
401 401 elif self.gitmode == 'keep':
402 402 pass
403 403 elif self.gitmode in ('yes', 'no'):
404 404 diffopts.git = self.gitmode == 'yes'
405 405 else:
406 406 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
407 407 ' got %s') % self.gitmode)
408 408 if patchfn:
409 409 diffopts = self.patchopts(diffopts, patchfn)
410 410 return diffopts
411 411
412 412 def patchopts(self, diffopts, *patches):
413 413 """Return a copy of input diff options with git set to true if
414 414 referenced patch is a git patch and should be preserved as such.
415 415 """
416 416 diffopts = diffopts.copy()
417 417 if not diffopts.git and self.gitmode == 'keep':
418 418 for patchfn in patches:
419 419 patchf = self.opener(patchfn, 'r')
420 420 # if the patch was a git patch, refresh it as a git patch
421 421 for line in patchf:
422 422 if line.startswith('diff --git'):
423 423 diffopts.git = True
424 424 break
425 425 patchf.close()
426 426 return diffopts
427 427
428 428 def join(self, *p):
429 429 return os.path.join(self.path, *p)
430 430
431 431 def findseries(self, patch):
432 432 def matchpatch(l):
433 433 l = l.split('#', 1)[0]
434 434 return l.strip() == patch
435 435 for index, l in enumerate(self.fullseries):
436 436 if matchpatch(l):
437 437 return index
438 438 return None
439 439
440 440 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
441 441
442 442 def parseseries(self):
443 443 self.series = []
444 444 self.seriesguards = []
445 445 for l in self.fullseries:
446 446 h = l.find('#')
447 447 if h == -1:
448 448 patch = l
449 449 comment = ''
450 450 elif h == 0:
451 451 continue
452 452 else:
453 453 patch = l[:h]
454 454 comment = l[h:]
455 455 patch = patch.strip()
456 456 if patch:
457 457 if patch in self.series:
458 458 raise util.Abort(_('%s appears more than once in %s') %
459 459 (patch, self.join(self.seriespath)))
460 460 self.series.append(patch)
461 461 self.seriesguards.append(self.guard_re.findall(comment))
462 462
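# Illustrative sketch of how parseseries above splits a series line into a
# patch name and guard markers; the patch name and guards are made up.
def _series_guard_example():
    line = 'feature-x.patch #+experimental #-release'
    h = line.find('#')
    patch, comment = line[:h].strip(), line[h:]
    assert patch == 'feature-x.patch'
    assert queue.guard_re.findall(comment) == ['+experimental', '-release']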
463 463 def checkguard(self, guard):
464 464 if not guard:
465 465 return _('guard cannot be an empty string')
466 466 bad_chars = '# \t\r\n\f'
467 467 first = guard[0]
468 468 if first in '-+':
469 469 return (_('guard %r starts with invalid character: %r') %
470 470 (guard, first))
471 471 for c in bad_chars:
472 472 if c in guard:
473 473 return _('invalid character in guard %r: %r') % (guard, c)
474 474
475 475 def setactive(self, guards):
476 476 for guard in guards:
477 477 bad = self.checkguard(guard)
478 478 if bad:
479 479 raise util.Abort(bad)
480 480 guards = sorted(set(guards))
481 481 self.ui.debug('active guards: %s\n' % ' '.join(guards))
482 482 self.activeguards = guards
483 483 self.guardsdirty = True
484 484
485 485 def active(self):
486 486 if self.activeguards is None:
487 487 self.activeguards = []
488 488 try:
489 489 guards = self.opener.read(self.guardspath).split()
490 490 except IOError, err:
491 491 if err.errno != errno.ENOENT:
492 492 raise
493 493 guards = []
494 494 for i, guard in enumerate(guards):
495 495 bad = self.checkguard(guard)
496 496 if bad:
497 497 self.ui.warn('%s:%d: %s\n' %
498 498 (self.join(self.guardspath), i + 1, bad))
499 499 else:
500 500 self.activeguards.append(guard)
501 501 return self.activeguards
502 502
503 503 def setguards(self, idx, guards):
504 504 for g in guards:
505 505 if len(g) < 2:
506 506 raise util.Abort(_('guard %r too short') % g)
507 507 if g[0] not in '-+':
508 508 raise util.Abort(_('guard %r starts with invalid char') % g)
509 509 bad = self.checkguard(g[1:])
510 510 if bad:
511 511 raise util.Abort(bad)
512 512 drop = self.guard_re.sub('', self.fullseries[idx])
513 513 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
514 514 self.parseseries()
515 515 self.seriesdirty = True
516 516
517 517 def pushable(self, idx):
518 518 if isinstance(idx, str):
519 519 idx = self.series.index(idx)
520 520 patchguards = self.seriesguards[idx]
521 521 if not patchguards:
522 522 return True, None
523 523 guards = self.active()
524 524 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
525 525 if exactneg:
526 526 return False, repr(exactneg[0])
527 527 pos = [g for g in patchguards if g[0] == '+']
528 528 exactpos = [g for g in pos if g[1:] in guards]
529 529 if pos:
530 530 if exactpos:
531 531 return True, repr(exactpos[0])
532 532 return False, ' '.join(map(repr, pos))
533 533 return True, ''
534 534
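# Hedged sketch of the guard semantics implemented by pushable above. q is
# assumed to be an existing queue whose series contains the (hypothetical)
# patches below, with 'feature-x.patch' guarded by +experimental and
# 'hack.patch' guarded by -experimental.
def _pushable_example(q):
    q.setactive(['experimental'])
    assert q.pushable('plain.patch') == (True, None)   # no guards at all
    assert q.pushable('feature-x.patch')[0]            # positive guard matches
    assert not q.pushable('hack.patch')[0]             # negative guard matches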
535 535 def explainpushable(self, idx, all_patches=False):
536 536 write = all_patches and self.ui.write or self.ui.warn
537 537 if all_patches or self.ui.verbose:
538 538 if isinstance(idx, str):
539 539 idx = self.series.index(idx)
540 540 pushable, why = self.pushable(idx)
541 541 if all_patches and pushable:
542 542 if why is None:
543 543 write(_('allowing %s - no guards in effect\n') %
544 544 self.series[idx])
545 545 else:
546 546 if not why:
547 547 write(_('allowing %s - no matching negative guards\n') %
548 548 self.series[idx])
549 549 else:
550 550 write(_('allowing %s - guarded by %s\n') %
551 551 (self.series[idx], why))
552 552 if not pushable:
553 553 if why:
554 554 write(_('skipping %s - guarded by %s\n') %
555 555 (self.series[idx], why))
556 556 else:
557 557 write(_('skipping %s - no matching guards\n') %
558 558 self.series[idx])
559 559
560 560 def savedirty(self):
561 561 def writelist(items, path):
562 562 fp = self.opener(path, 'w')
563 563 for i in items:
564 564 fp.write("%s\n" % i)
565 565 fp.close()
566 566 if self.applieddirty:
567 567 writelist(map(str, self.applied), self.statuspath)
568 568 self.applieddirty = False
569 569 if self.seriesdirty:
570 570 writelist(self.fullseries, self.seriespath)
571 571 self.seriesdirty = False
572 572 if self.guardsdirty:
573 573 writelist(self.activeguards, self.guardspath)
574 574 self.guardsdirty = False
575 575 if self.added:
576 576 qrepo = self.qrepo()
577 577 if qrepo:
578 578 qrepo[None].add(f for f in self.added if f not in qrepo[None])
579 579 self.added = []
580 580
581 581 def removeundo(self, repo):
582 582 undo = repo.sjoin('undo')
583 583 if not os.path.exists(undo):
584 584 return
585 585 try:
586 586 os.unlink(undo)
587 587 except OSError, inst:
588 588 self.ui.warn(_('error removing undo: %s\n') % str(inst))
589 589
590 590 def backup(self, repo, files, copy=False):
591 591 # backup local changes in --force case
592 592 for f in sorted(files):
593 593 absf = repo.wjoin(f)
594 594 if os.path.lexists(absf):
595 595 self.ui.note(_('saving current version of %s as %s\n') %
596 596 (f, f + '.orig'))
597 597 if copy:
598 598 util.copyfile(absf, absf + '.orig')
599 599 else:
600 600 util.rename(absf, absf + '.orig')
601 601
602 602 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
603 603 fp=None, changes=None, opts={}):
604 604 stat = opts.get('stat')
605 605 m = scmutil.match(repo[node1], files, opts)
606 606 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
607 607 changes, stat, fp)
608 608
609 609 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
610 610 # first try just applying the patch
611 611 (err, n) = self.apply(repo, [patch], update_status=False,
612 612 strict=True, merge=rev)
613 613
614 614 if err == 0:
615 615 return (err, n)
616 616
617 617 if n is None:
618 618 raise util.Abort(_("apply failed for patch %s") % patch)
619 619
620 620 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
621 621
622 622 # apply failed, strip away that rev and merge.
623 623 hg.clean(repo, head)
624 624 strip(self.ui, repo, [n], update=False, backup=False)
625 625
626 626 ctx = repo[rev]
627 627 ret = hg.merge(repo, rev)
628 628 if ret:
629 629 raise util.Abort(_("update returned %d") % ret)
630 630 n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
631 631 if n is None:
632 632 raise util.Abort(_("repo commit failed"))
633 633 try:
634 634 ph = patchheader(mergeq.join(patch), self.plainmode)
635 635 except Exception:
636 636 raise util.Abort(_("unable to read %s") % patch)
637 637
638 638 diffopts = self.patchopts(diffopts, patch)
639 639 patchf = self.opener(patch, "w")
640 640 comments = str(ph)
641 641 if comments:
642 642 patchf.write(comments)
643 643 self.printdiff(repo, diffopts, head, n, fp=patchf)
644 644 patchf.close()
645 645 self.removeundo(repo)
646 646 return (0, n)
647 647
648 648 def qparents(self, repo, rev=None):
649 649 """return the mq handled parent or p1
650 650
651 651 In some cases where mq ends up being the parent of a merge, the
652 652 appropriate parent may be p2
653 653 (e.g. an in-progress merge started with mq disabled).
654 654
655 655 If no parent is managed by mq, p1 is returned.
656 656 """
657 657 if rev is None:
658 658 (p1, p2) = repo.dirstate.parents()
659 659 if p2 == nullid:
660 660 return p1
661 661 if not self.applied:
662 662 return None
663 663 return self.applied[-1].node
664 664 p1, p2 = repo.changelog.parents(rev)
665 665 if p2 != nullid and p2 in [x.node for x in self.applied]:
666 666 return p2
667 667 return p1
668 668
669 669 def mergepatch(self, repo, mergeq, series, diffopts):
670 670 if not self.applied:
671 671 # each of the patches merged in will have two parents. This
672 672 # can confuse the qrefresh, qdiff, and strip code because it
673 673 # needs to know which parent is actually in the patch queue.
674 674 # so, we insert a merge marker with only one parent. This way
675 675 # the first patch in the queue is never a merge patch
676 676 #
677 677 pname = ".hg.patches.merge.marker"
678 678 n = newcommit(repo, None, '[mq]: merge marker', force=True)
679 679 self.removeundo(repo)
680 680 self.applied.append(statusentry(n, pname))
681 681 self.applieddirty = True
682 682
683 683 head = self.qparents(repo)
684 684
685 685 for patch in series:
686 686 patch = mergeq.lookup(patch, strict=True)
687 687 if not patch:
688 688 self.ui.warn(_("patch %s does not exist\n") % patch)
689 689 return (1, None)
690 690 pushable, reason = self.pushable(patch)
691 691 if not pushable:
692 692 self.explainpushable(patch, all_patches=True)
693 693 continue
694 694 info = mergeq.isapplied(patch)
695 695 if not info:
696 696 self.ui.warn(_("patch %s is not applied\n") % patch)
697 697 return (1, None)
698 698 rev = info[1]
699 699 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
700 700 if head:
701 701 self.applied.append(statusentry(head, patch))
702 702 self.applieddirty = True
703 703 if err:
704 704 return (err, head)
705 705 self.savedirty()
706 706 return (0, head)
707 707
708 708 def patch(self, repo, patchfile):
709 709 '''Apply patchfile to the working directory.
710 710 patchfile: name of patch file'''
711 711 files = set()
712 712 try:
713 713 fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
714 714 files=files, eolmode=None)
715 715 return (True, list(files), fuzz)
716 716 except Exception, inst:
717 717 self.ui.note(str(inst) + '\n')
718 718 if not self.ui.verbose:
719 719 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
720 720 self.ui.traceback()
721 721 return (False, list(files), False)
722 722
723 723 def apply(self, repo, series, list=False, update_status=True,
724 724 strict=False, patchdir=None, merge=None, all_files=None,
725 725 tobackup=None, keepchanges=False):
726 726 wlock = lock = tr = None
727 727 try:
728 728 wlock = repo.wlock()
729 729 lock = repo.lock()
730 730 tr = repo.transaction("qpush")
731 731 try:
732 732 ret = self._apply(repo, series, list, update_status,
733 733 strict, patchdir, merge, all_files=all_files,
734 734 tobackup=tobackup, keepchanges=keepchanges)
735 735 tr.close()
736 736 self.savedirty()
737 737 return ret
738 738 except AbortNoCleanup:
739 739 tr.close()
740 740 self.savedirty()
741 741 return 2, repo.dirstate.p1()
742 742 except: # re-raises
743 743 try:
744 744 tr.abort()
745 745 finally:
746 746 repo.invalidate()
747 747 repo.dirstate.invalidate()
748 748 self.invalidate()
749 749 raise
750 750 finally:
751 751 release(tr, lock, wlock)
752 752 self.removeundo(repo)
753 753
754 754 def _apply(self, repo, series, list=False, update_status=True,
755 755 strict=False, patchdir=None, merge=None, all_files=None,
756 756 tobackup=None, keepchanges=False):
757 757 """returns (error, hash)
758 758
759 759 error = 1 for unable to read, 2 for patch failed, 3 for patch
760 760 fuzz. tobackup is None or a set of files to backup before they
761 761 are modified by a patch.
762 762 """
763 763 # TODO unify with commands.py
764 764 if not patchdir:
765 765 patchdir = self.path
766 766 err = 0
767 767 n = None
768 768 for patchname in series:
769 769 pushable, reason = self.pushable(patchname)
770 770 if not pushable:
771 771 self.explainpushable(patchname, all_patches=True)
772 772 continue
773 773 self.ui.status(_("applying %s\n") % patchname)
774 774 pf = os.path.join(patchdir, patchname)
775 775
776 776 try:
777 777 ph = patchheader(self.join(patchname), self.plainmode)
778 778 except IOError:
779 779 self.ui.warn(_("unable to read %s\n") % patchname)
780 780 err = 1
781 781 break
782 782
783 783 message = ph.message
784 784 if not message:
785 785 # The commit message should not be translated
786 786 message = "imported patch %s\n" % patchname
787 787 else:
788 788 if list:
789 789 # The commit message should not be translated
790 790 message.append("\nimported patch %s" % patchname)
791 791 message = '\n'.join(message)
792 792
793 793 if ph.haspatch:
794 794 if tobackup:
795 795 touched = patchmod.changedfiles(self.ui, repo, pf)
796 796 touched = set(touched) & tobackup
797 797 if touched and keepchanges:
798 798 raise AbortNoCleanup(
799 799 _("local changes found, refresh first"))
800 800 self.backup(repo, touched, copy=True)
801 801 tobackup = tobackup - touched
802 802 (patcherr, files, fuzz) = self.patch(repo, pf)
803 803 if all_files is not None:
804 804 all_files.update(files)
805 805 patcherr = not patcherr
806 806 else:
807 807 self.ui.warn(_("patch %s is empty\n") % patchname)
808 808 patcherr, files, fuzz = 0, [], 0
809 809
810 810 if merge and files:
811 811 # Mark as removed/merged and update dirstate parent info
812 812 removed = []
813 813 merged = []
814 814 for f in files:
815 815 if os.path.lexists(repo.wjoin(f)):
816 816 merged.append(f)
817 817 else:
818 818 removed.append(f)
819 819 for f in removed:
820 820 repo.dirstate.remove(f)
821 821 for f in merged:
822 822 repo.dirstate.merge(f)
823 823 p1, p2 = repo.dirstate.parents()
824 824 repo.setparents(p1, merge)
825 825
826 826 if all_files and '.hgsubstate' in all_files:
827 827 wctx = repo[None]
828 828 pctx = repo['.']
829 829 overwrite = False
830 830 mergedsubstate = subrepo.submerge(repo, pctx, wctx, wctx,
831 831 overwrite)
832 832 files += mergedsubstate.keys()
833 833
834 834 match = scmutil.matchfiles(repo, files or [])
835 835 oldtip = repo['tip']
836 836 n = newcommit(repo, None, message, ph.user, ph.date, match=match,
837 837 force=True)
838 838 if repo['tip'] == oldtip:
839 839 raise util.Abort(_("qpush exactly duplicates child changeset"))
840 840 if n is None:
841 841 raise util.Abort(_("repository commit failed"))
842 842
843 843 if update_status:
844 844 self.applied.append(statusentry(n, patchname))
845 845
846 846 if patcherr:
847 847 self.ui.warn(_("patch failed, rejects left in working dir\n"))
848 848 err = 2
849 849 break
850 850
851 851 if fuzz and strict:
852 852 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
853 853 err = 3
854 854 break
855 855 return (err, n)
856 856
857 857 def _cleanup(self, patches, numrevs, keep=False):
858 858 if not keep:
859 859 r = self.qrepo()
860 860 if r:
861 861 r[None].forget(patches)
862 862 for p in patches:
863 863 try:
864 864 os.unlink(self.join(p))
865 865 except OSError, inst:
866 866 if inst.errno != errno.ENOENT:
867 867 raise
868 868
869 869 qfinished = []
870 870 if numrevs:
871 871 qfinished = self.applied[:numrevs]
872 872 del self.applied[:numrevs]
873 873 self.applieddirty = True
874 874
875 875 unknown = []
876 876
877 877 for (i, p) in sorted([(self.findseries(p), p) for p in patches],
878 878 reverse=True):
879 879 if i is not None:
880 880 del self.fullseries[i]
881 881 else:
882 882 unknown.append(p)
883 883
884 884 if unknown:
885 885 if numrevs:
886 886 rev = dict((entry.name, entry.node) for entry in qfinished)
887 887 for p in unknown:
888 888 msg = _('revision %s refers to unknown patches: %s\n')
889 889 self.ui.warn(msg % (short(rev[p]), p))
890 890 else:
891 891 msg = _('unknown patches: %s\n')
892 892 raise util.Abort(''.join(msg % p for p in unknown))
893 893
894 894 self.parseseries()
895 895 self.seriesdirty = True
896 896 return [entry.node for entry in qfinished]
897 897
898 898 def _revpatches(self, repo, revs):
899 899 firstrev = repo[self.applied[0].node].rev()
900 900 patches = []
901 901 for i, rev in enumerate(revs):
902 902
903 903 if rev < firstrev:
904 904 raise util.Abort(_('revision %d is not managed') % rev)
905 905
906 906 ctx = repo[rev]
907 907 base = self.applied[i].node
908 908 if ctx.node() != base:
909 909 msg = _('cannot delete revision %d above applied patches')
910 910 raise util.Abort(msg % rev)
911 911
912 912 patch = self.applied[i].name
913 913 for fmt in ('[mq]: %s', 'imported patch %s'):
914 914 if ctx.description() == fmt % patch:
915 915 msg = _('patch %s finalized without changeset message\n')
916 916 repo.ui.status(msg % patch)
917 917 break
918 918
919 919 patches.append(patch)
920 920 return patches
921 921
922 922 def finish(self, repo, revs):
923 923 # Manually trigger phase computation to ensure phasedefaults is
924 924 # executed before we remove the patches.
925 925 repo._phasecache
926 926 patches = self._revpatches(repo, sorted(revs))
927 927 qfinished = self._cleanup(patches, len(patches))
928 928 if qfinished and repo.ui.configbool('mq', 'secret', False):
929 929 # only use this logic when the secret option is added
930 930 oldqbase = repo[qfinished[0]]
931 931 tphase = repo.ui.config('phases', 'new-commit', phases.draft)
932 932 if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
933 933 tr = repo.transaction('qfinish')
934 934 try:
935 phases.advanceboundary(repo, tphase, qfinished)
935 phases.advanceboundary(repo, tr, tphase, qfinished)
936 936 tr.close()
937 937 finally:
938 938 tr.release()
939 939
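# The one functional change in this revision is visible just above: qfinish
# now opens a transaction and passes it to phases.advanceboundary. A minimal
# sketch of that calling pattern, with repo, tphase and nodes assumed to be
# supplied by the caller:
def _advanceboundary_example(repo, tphase, nodes):
    tr = repo.transaction('qfinish')
    try:
        phases.advanceboundary(repo, tr, tphase, nodes)
        tr.close()
    finally:
        tr.release()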
940 940 def delete(self, repo, patches, opts):
941 941 if not patches and not opts.get('rev'):
942 942 raise util.Abort(_('qdelete requires at least one revision or '
943 943 'patch name'))
944 944
945 945 realpatches = []
946 946 for patch in patches:
947 947 patch = self.lookup(patch, strict=True)
948 948 info = self.isapplied(patch)
949 949 if info:
950 950 raise util.Abort(_("cannot delete applied patch %s") % patch)
951 951 if patch not in self.series:
952 952 raise util.Abort(_("patch %s not in series file") % patch)
953 953 if patch not in realpatches:
954 954 realpatches.append(patch)
955 955
956 956 numrevs = 0
957 957 if opts.get('rev'):
958 958 if not self.applied:
959 959 raise util.Abort(_('no patches applied'))
960 960 revs = scmutil.revrange(repo, opts.get('rev'))
961 961 if len(revs) > 1 and revs[0] > revs[1]:
962 962 revs.reverse()
963 963 revpatches = self._revpatches(repo, revs)
964 964 realpatches += revpatches
965 965 numrevs = len(revpatches)
966 966
967 967 self._cleanup(realpatches, numrevs, opts.get('keep'))
968 968
969 969 def checktoppatch(self, repo):
970 970 '''check that working directory is at qtip'''
971 971 if self.applied:
972 972 top = self.applied[-1].node
973 973 patch = self.applied[-1].name
974 974 if repo.dirstate.p1() != top:
975 975 raise util.Abort(_("working directory revision is not qtip"))
976 976 return top, patch
977 977 return None, None
978 978
979 979 def putsubstate2changes(self, substatestate, changes):
980 980 for files in changes[:3]:
981 981 if '.hgsubstate' in files:
982 982 return # already listed
983 983 # not yet listed
984 984 if substatestate in 'a?':
985 985 changes[1].append('.hgsubstate')
986 986 elif substatestate in 'r':
987 987 changes[2].append('.hgsubstate')
988 988 else: # modified
989 989 changes[0].append('.hgsubstate')
990 990
991 991 def checklocalchanges(self, repo, force=False, refresh=True):
992 992 excsuffix = ''
993 993 if refresh:
994 994 excsuffix = ', refresh first'
995 995 # plain versions for i18n tool to detect them
996 996 _("local changes found, refresh first")
997 997 _("local changed subrepos found, refresh first")
998 998 return checklocalchanges(repo, force, excsuffix)
999 999
1000 1000 _reserved = ('series', 'status', 'guards', '.', '..')
1001 1001 def checkreservedname(self, name):
1002 1002 if name in self._reserved:
1003 1003 raise util.Abort(_('"%s" cannot be used as the name of a patch')
1004 1004 % name)
1005 1005 for prefix in ('.hg', '.mq'):
1006 1006 if name.startswith(prefix):
1007 1007 raise util.Abort(_('patch name cannot begin with "%s"')
1008 1008 % prefix)
1009 1009 for c in ('#', ':'):
1010 1010 if c in name:
1011 1011 raise util.Abort(_('"%s" cannot be used in the name of a patch')
1012 1012 % c)
1013 1013
1014 1014 def checkpatchname(self, name, force=False):
1015 1015 self.checkreservedname(name)
1016 1016 if not force and os.path.exists(self.join(name)):
1017 1017 if os.path.isdir(self.join(name)):
1018 1018 raise util.Abort(_('"%s" already exists as a directory')
1019 1019 % name)
1020 1020 else:
1021 1021 raise util.Abort(_('patch "%s" already exists') % name)
1022 1022
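# Illustrative sketch of the naming rules enforced by checkreservedname and
# checkpatchname above; q is assumed to be an existing queue and the patch
# names are made up (and assumed not to exist on disk).
def _checkpatchname_example(q):
    q.checkpatchname('fix-login.patch')      # plain names are fine
    for bad in ('series', '.hg-broken', 'foo#1', 'a:b'):
        try:
            q.checkpatchname(bad)            # reserved name, '.hg' prefix, '#', ':'
        except util.Abort:
            pass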
1023 1023 def checkkeepchanges(self, keepchanges, force):
1024 1024 if force and keepchanges:
1025 1025 raise util.Abort(_('cannot use both --force and --keep-changes'))
1026 1026
1027 1027 def new(self, repo, patchfn, *pats, **opts):
1028 1028 """options:
1029 1029 msg: a string or a no-argument function returning a string
1030 1030 """
1031 1031 msg = opts.get('msg')
1032 1032 edit = opts.get('edit')
1033 1033 editform = opts.get('editform', 'mq.qnew')
1034 1034 user = opts.get('user')
1035 1035 date = opts.get('date')
1036 1036 if date:
1037 1037 date = util.parsedate(date)
1038 1038 diffopts = self.diffopts({'git': opts.get('git')})
1039 1039 if opts.get('checkname', True):
1040 1040 self.checkpatchname(patchfn)
1041 1041 inclsubs = checksubstate(repo)
1042 1042 if inclsubs:
1043 1043 substatestate = repo.dirstate['.hgsubstate']
1044 1044 if opts.get('include') or opts.get('exclude') or pats:
1045 1045 match = scmutil.match(repo[None], pats, opts)
1046 1046 # detect missing files in pats
1047 1047 def badfn(f, msg):
1048 1048 if f != '.hgsubstate': # .hgsubstate is auto-created
1049 1049 raise util.Abort('%s: %s' % (f, msg))
1050 1050 match.bad = badfn
1051 1051 changes = repo.status(match=match)
1052 1052 else:
1053 1053 changes = self.checklocalchanges(repo, force=True)
1054 1054 commitfiles = list(inclsubs)
1055 1055 for files in changes[:3]:
1056 1056 commitfiles.extend(files)
1057 1057 match = scmutil.matchfiles(repo, commitfiles)
1058 1058 if len(repo[None].parents()) > 1:
1059 1059 raise util.Abort(_('cannot manage merge changesets'))
1060 1060 self.checktoppatch(repo)
1061 1061 insert = self.fullseriesend()
1062 1062 wlock = repo.wlock()
1063 1063 try:
1064 1064 try:
1065 1065 # if patch file write fails, abort early
1066 1066 p = self.opener(patchfn, "w")
1067 1067 except IOError, e:
1068 1068 raise util.Abort(_('cannot write patch "%s": %s')
1069 1069 % (patchfn, e.strerror))
1070 1070 try:
1071 1071 if self.plainmode:
1072 1072 if user:
1073 1073 p.write("From: " + user + "\n")
1074 1074 if not date:
1075 1075 p.write("\n")
1076 1076 if date:
1077 1077 p.write("Date: %d %d\n\n" % date)
1078 1078 else:
1079 1079 p.write("# HG changeset patch\n")
1080 1080 p.write("# Parent "
1081 1081 + hex(repo[None].p1().node()) + "\n")
1082 1082 if user:
1083 1083 p.write("# User " + user + "\n")
1084 1084 if date:
1085 1085 p.write("# Date %s %s\n\n" % date)
1086 1086
1087 1087 defaultmsg = "[mq]: %s" % patchfn
1088 1088 editor = cmdutil.getcommiteditor(editform=editform)
1089 1089 if edit:
1090 1090 def finishdesc(desc):
1091 1091 if desc.rstrip():
1092 1092 return desc
1093 1093 else:
1094 1094 return defaultmsg
1095 1095 # i18n: this message is shown in editor with "HG: " prefix
1096 1096 extramsg = _('Leave message empty to use default message.')
1097 1097 editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
1098 1098 extramsg=extramsg,
1099 1099 editform=editform)
1100 1100 commitmsg = msg
1101 1101 else:
1102 1102 commitmsg = msg or defaultmsg
1103 1103
1104 1104 n = newcommit(repo, None, commitmsg, user, date, match=match,
1105 1105 force=True, editor=editor)
1106 1106 if n is None:
1107 1107 raise util.Abort(_("repo commit failed"))
1108 1108 try:
1109 1109 self.fullseries[insert:insert] = [patchfn]
1110 1110 self.applied.append(statusentry(n, patchfn))
1111 1111 self.parseseries()
1112 1112 self.seriesdirty = True
1113 1113 self.applieddirty = True
1114 1114 nctx = repo[n]
1115 1115 if nctx.description() != defaultmsg.rstrip():
1116 1116 msg = nctx.description() + "\n\n"
1117 1117 p.write(msg)
1118 1118 if commitfiles:
1119 1119 parent = self.qparents(repo, n)
1120 1120 if inclsubs:
1121 1121 self.putsubstate2changes(substatestate, changes)
1122 1122 chunks = patchmod.diff(repo, node1=parent, node2=n,
1123 1123 changes=changes, opts=diffopts)
1124 1124 for chunk in chunks:
1125 1125 p.write(chunk)
1126 1126 p.close()
1127 1127 r = self.qrepo()
1128 1128 if r:
1129 1129 r[None].add([patchfn])
1130 1130 except: # re-raises
1131 1131 repo.rollback()
1132 1132 raise
1133 1133 except Exception:
1134 1134 patchpath = self.join(patchfn)
1135 1135 try:
1136 1136 os.unlink(patchpath)
1137 1137 except OSError:
1138 1138 self.ui.warn(_('error unlinking %s\n') % patchpath)
1139 1139 raise
1140 1140 self.removeundo(repo)
1141 1141 finally:
1142 1142 release(wlock)
1143 1143
1144 1144 def isapplied(self, patch):
1145 1145 """returns (index, rev, patch)"""
1146 1146 for i, a in enumerate(self.applied):
1147 1147 if a.name == patch:
1148 1148 return (i, a.node, a.name)
1149 1149 return None
1150 1150
1151 1151 # if the exact patch name does not exist, we try a few
1152 1152 # variations. If strict is passed, we try only #1
1153 1153 #
1154 1154 # 1) a number (as string) to indicate an offset in the series file
1155 1155 # 2) a unique substring of the patch name was given
1156 1156 # 3) patchname[-+]num to indicate an offset in the series file
1157 1157 def lookup(self, patch, strict=False):
1158 1158 def partialname(s):
1159 1159 if s in self.series:
1160 1160 return s
1161 1161 matches = [x for x in self.series if s in x]
1162 1162 if len(matches) > 1:
1163 1163 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
1164 1164 for m in matches:
1165 1165 self.ui.warn(' %s\n' % m)
1166 1166 return None
1167 1167 if matches:
1168 1168 return matches[0]
1169 1169 if self.series and self.applied:
1170 1170 if s == 'qtip':
1171 1171 return self.series[self.seriesend(True) - 1]
1172 1172 if s == 'qbase':
1173 1173 return self.series[0]
1174 1174 return None
1175 1175
1176 1176 if patch in self.series:
1177 1177 return patch
1178 1178
1179 1179 if not os.path.isfile(self.join(patch)):
1180 1180 try:
1181 1181 sno = int(patch)
1182 1182 except (ValueError, OverflowError):
1183 1183 pass
1184 1184 else:
1185 1185 if -len(self.series) <= sno < len(self.series):
1186 1186 return self.series[sno]
1187 1187
1188 1188 if not strict:
1189 1189 res = partialname(patch)
1190 1190 if res:
1191 1191 return res
1192 1192 minus = patch.rfind('-')
1193 1193 if minus >= 0:
1194 1194 res = partialname(patch[:minus])
1195 1195 if res:
1196 1196 i = self.series.index(res)
1197 1197 try:
1198 1198 off = int(patch[minus + 1:] or 1)
1199 1199 except (ValueError, OverflowError):
1200 1200 pass
1201 1201 else:
1202 1202 if i - off >= 0:
1203 1203 return self.series[i - off]
1204 1204 plus = patch.rfind('+')
1205 1205 if plus >= 0:
1206 1206 res = partialname(patch[:plus])
1207 1207 if res:
1208 1208 i = self.series.index(res)
1209 1209 try:
1210 1210 off = int(patch[plus + 1:] or 1)
1211 1211 except (ValueError, OverflowError):
1212 1212 pass
1213 1213 else:
1214 1214 if i + off < len(self.series):
1215 1215 return self.series[i + off]
1216 1216 raise util.Abort(_("patch %s not in series") % patch)
1217 1217
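# Hedged sketch of the lookup rules listed in the comment above; q is assumed
# to be an existing queue whose series is the hypothetical list
# ['first.patch', 'second.patch', 'third.patch'].
def _lookup_example(q):
    assert q.lookup('1') == 'second.patch'              # offset into the series file
    assert q.lookup('thi') == 'third.patch'             # unique substring of a name
    assert q.lookup('third.patch-1') == 'second.patch'  # name-num: num patches earlier
    assert q.lookup('first.patch+2') == 'third.patch'   # name+num: num patches later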
1218 1218 def push(self, repo, patch=None, force=False, list=False, mergeq=None,
1219 1219 all=False, move=False, exact=False, nobackup=False,
1220 1220 keepchanges=False):
1221 1221 self.checkkeepchanges(keepchanges, force)
1222 1222 diffopts = self.diffopts()
1223 1223 wlock = repo.wlock()
1224 1224 try:
1225 1225 heads = []
1226 1226 for hs in repo.branchmap().itervalues():
1227 1227 heads.extend(hs)
1228 1228 if not heads:
1229 1229 heads = [nullid]
1230 1230 if repo.dirstate.p1() not in heads and not exact:
1231 1231 self.ui.status(_("(working directory not at a head)\n"))
1232 1232
1233 1233 if not self.series:
1234 1234 self.ui.warn(_('no patches in series\n'))
1235 1235 return 0
1236 1236
1237 1237 # Suppose our series file is: A B C and the current 'top'
1238 1238 # patch is B. qpush C should be performed (moving forward),
1239 1239 # qpush B is a NOP (no change), and qpush A is an error (can't
1240 1240 # go backwards with qpush).
1241 1241 if patch:
1242 1242 patch = self.lookup(patch)
1243 1243 info = self.isapplied(patch)
1244 1244 if info and info[0] >= len(self.applied) - 1:
1245 1245 self.ui.warn(
1246 1246 _('qpush: %s is already at the top\n') % patch)
1247 1247 return 0
1248 1248
1249 1249 pushable, reason = self.pushable(patch)
1250 1250 if pushable:
1251 1251 if self.series.index(patch) < self.seriesend():
1252 1252 raise util.Abort(
1253 1253 _("cannot push to a previous patch: %s") % patch)
1254 1254 else:
1255 1255 if reason:
1256 1256 reason = _('guarded by %s') % reason
1257 1257 else:
1258 1258 reason = _('no matching guards')
1259 1259 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1260 1260 return 1
1261 1261 elif all:
1262 1262 patch = self.series[-1]
1263 1263 if self.isapplied(patch):
1264 1264 self.ui.warn(_('all patches are currently applied\n'))
1265 1265 return 0
1266 1266
1267 1267 # Following the above example, starting at 'top' of B:
1268 1268 # qpush should be performed (pushes C), but a subsequent
1269 1269 # qpush without an argument is an error (nothing to
1270 1270 # apply). This allows a loop of "...while hg qpush..." to
1271 1271 # work as it detects an error when done
1272 1272 start = self.seriesend()
1273 1273 if start == len(self.series):
1274 1274 self.ui.warn(_('patch series already fully applied\n'))
1275 1275 return 1
1276 1276 if not force and not keepchanges:
1277 1277 self.checklocalchanges(repo, refresh=self.applied)
1278 1278
1279 1279 if exact:
1280 1280 if keepchanges:
1281 1281 raise util.Abort(
1282 1282 _("cannot use --exact and --keep-changes together"))
1283 1283 if move:
1284 1284 raise util.Abort(_('cannot use --exact and --move '
1285 1285 'together'))
1286 1286 if self.applied:
1287 1287 raise util.Abort(_('cannot push --exact with applied '
1288 1288 'patches'))
1289 1289 root = self.series[start]
1290 1290 target = patchheader(self.join(root), self.plainmode).parent
1291 1291 if not target:
1292 1292 raise util.Abort(
1293 1293 _("%s does not have a parent recorded") % root)
1294 1294 if not repo[target] == repo['.']:
1295 1295 hg.update(repo, target)
1296 1296
1297 1297 if move:
1298 1298 if not patch:
1299 1299 raise util.Abort(_("please specify the patch to move"))
1300 1300 for fullstart, rpn in enumerate(self.fullseries):
1301 1301 # strip markers for patch guards
1302 1302 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1303 1303 break
1304 1304 for i, rpn in enumerate(self.fullseries[fullstart:]):
1305 1305 # strip markers for patch guards
1306 1306 if self.guard_re.split(rpn, 1)[0] == patch:
1307 1307 break
1308 1308 index = fullstart + i
1309 1309 assert index < len(self.fullseries)
1310 1310 fullpatch = self.fullseries[index]
1311 1311 del self.fullseries[index]
1312 1312 self.fullseries.insert(fullstart, fullpatch)
1313 1313 self.parseseries()
1314 1314 self.seriesdirty = True
1315 1315
1316 1316 self.applieddirty = True
1317 1317 if start > 0:
1318 1318 self.checktoppatch(repo)
1319 1319 if not patch:
1320 1320 patch = self.series[start]
1321 1321 end = start + 1
1322 1322 else:
1323 1323 end = self.series.index(patch, start) + 1
1324 1324
1325 1325 tobackup = set()
1326 1326 if (not nobackup and force) or keepchanges:
1327 1327 m, a, r, d = self.checklocalchanges(repo, force=True)
1328 1328 if keepchanges:
1329 1329 tobackup.update(m + a + r + d)
1330 1330 else:
1331 1331 tobackup.update(m + a)
1332 1332
1333 1333 s = self.series[start:end]
1334 1334 all_files = set()
1335 1335 try:
1336 1336 if mergeq:
1337 1337 ret = self.mergepatch(repo, mergeq, s, diffopts)
1338 1338 else:
1339 1339 ret = self.apply(repo, s, list, all_files=all_files,
1340 1340 tobackup=tobackup, keepchanges=keepchanges)
1341 1341 except: # re-raises
1342 1342 self.ui.warn(_('cleaning up working directory...'))
1343 1343 node = repo.dirstate.p1()
1344 1344 hg.revert(repo, node, None)
1345 1345 # only remove unknown files that we know we touched or
1346 1346 # created while patching
1347 1347 for f in all_files:
1348 1348 if f not in repo.dirstate:
1349 1349 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1350 1350 self.ui.warn(_('done\n'))
1351 1351 raise
1352 1352
1353 1353 if not self.applied:
1354 1354 return ret[0]
1355 1355 top = self.applied[-1].name
1356 1356 if ret[0] and ret[0] > 1:
1357 1357 msg = _("errors during apply, please fix and refresh %s\n")
1358 1358 self.ui.write(msg % top)
1359 1359 else:
1360 1360 self.ui.write(_("now at: %s\n") % top)
1361 1361 return ret[0]
1362 1362
1363 1363 finally:
1364 1364 wlock.release()
1365 1365
1366 1366 def pop(self, repo, patch=None, force=False, update=True, all=False,
1367 1367 nobackup=False, keepchanges=False):
1368 1368 self.checkkeepchanges(keepchanges, force)
1369 1369 wlock = repo.wlock()
1370 1370 try:
1371 1371 if patch:
1372 1372 # index, rev, patch
1373 1373 info = self.isapplied(patch)
1374 1374 if not info:
1375 1375 patch = self.lookup(patch)
1376 1376 info = self.isapplied(patch)
1377 1377 if not info:
1378 1378 raise util.Abort(_("patch %s is not applied") % patch)
1379 1379
1380 1380 if not self.applied:
1381 1381 # Allow qpop -a to work repeatedly,
1382 1382 # but not qpop without an argument
1383 1383 self.ui.warn(_("no patches applied\n"))
1384 1384 return not all
1385 1385
1386 1386 if all:
1387 1387 start = 0
1388 1388 elif patch:
1389 1389 start = info[0] + 1
1390 1390 else:
1391 1391 start = len(self.applied) - 1
1392 1392
1393 1393 if start >= len(self.applied):
1394 1394 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1395 1395 return
1396 1396
1397 1397 if not update:
1398 1398 parents = repo.dirstate.parents()
1399 1399 rr = [x.node for x in self.applied]
1400 1400 for p in parents:
1401 1401 if p in rr:
1402 1402 self.ui.warn(_("qpop: forcing dirstate update\n"))
1403 1403 update = True
1404 1404 else:
1405 1405 parents = [p.node() for p in repo[None].parents()]
1406 1406 needupdate = False
1407 1407 for entry in self.applied[start:]:
1408 1408 if entry.node in parents:
1409 1409 needupdate = True
1410 1410 break
1411 1411 update = needupdate
1412 1412
1413 1413 tobackup = set()
1414 1414 if update:
1415 1415 m, a, r, d = self.checklocalchanges(
1416 1416 repo, force=force or keepchanges)
1417 1417 if force:
1418 1418 if not nobackup:
1419 1419 tobackup.update(m + a)
1420 1420 elif keepchanges:
1421 1421 tobackup.update(m + a + r + d)
1422 1422
1423 1423 self.applieddirty = True
1424 1424 end = len(self.applied)
1425 1425 rev = self.applied[start].node
1426 1426
1427 1427 try:
1428 1428 heads = repo.changelog.heads(rev)
1429 1429 except error.LookupError:
1430 1430 node = short(rev)
1431 1431 raise util.Abort(_('trying to pop unknown node %s') % node)
1432 1432
1433 1433 if heads != [self.applied[-1].node]:
1434 1434 raise util.Abort(_("popping would remove a revision not "
1435 1435 "managed by this patch queue"))
1436 1436 if not repo[self.applied[-1].node].mutable():
1437 1437 raise util.Abort(
1438 1438 _("popping would remove an immutable revision"),
1439 1439 hint=_('see "hg help phases" for details'))
1440 1440
1441 1441 # we know there are no local changes, so we can make a simplified
1442 1442 # form of hg.update.
1443 1443 if update:
1444 1444 qp = self.qparents(repo, rev)
1445 1445 ctx = repo[qp]
1446 1446 m, a, r, d = repo.status(qp, '.')[:4]
1447 1447 if d:
1448 1448 raise util.Abort(_("deletions found between repo revs"))
1449 1449
1450 1450 tobackup = set(a + m + r) & tobackup
1451 1451 if keepchanges and tobackup:
1452 1452 raise util.Abort(_("local changes found, refresh first"))
1453 1453 self.backup(repo, tobackup)
1454 1454
1455 1455 for f in a:
1456 1456 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1457 1457 repo.dirstate.drop(f)
1458 1458 for f in m + r:
1459 1459 fctx = ctx[f]
1460 1460 repo.wwrite(f, fctx.data(), fctx.flags())
1461 1461 repo.dirstate.normal(f)
1462 1462 repo.setparents(qp, nullid)
1463 1463 for patch in reversed(self.applied[start:end]):
1464 1464 self.ui.status(_("popping %s\n") % patch.name)
1465 1465 del self.applied[start:end]
1466 1466 strip(self.ui, repo, [rev], update=False, backup=False)
1467 1467 for s, state in repo['.'].substate.items():
1468 1468 repo['.'].sub(s).get(state)
1469 1469 if self.applied:
1470 1470 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1471 1471 else:
1472 1472 self.ui.write(_("patch queue now empty\n"))
1473 1473 finally:
1474 1474 wlock.release()
1475 1475
1476 1476 def diff(self, repo, pats, opts):
1477 1477 top, patch = self.checktoppatch(repo)
1478 1478 if not top:
1479 1479 self.ui.write(_("no patches applied\n"))
1480 1480 return
1481 1481 qp = self.qparents(repo, top)
1482 1482 if opts.get('reverse'):
1483 1483 node1, node2 = None, qp
1484 1484 else:
1485 1485 node1, node2 = qp, None
1486 1486 diffopts = self.diffopts(opts, patch)
1487 1487 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1488 1488
1489 1489 def refresh(self, repo, pats=None, **opts):
1490 1490 if not self.applied:
1491 1491 self.ui.write(_("no patches applied\n"))
1492 1492 return 1
1493 1493 msg = opts.get('msg', '').rstrip()
1494 1494 edit = opts.get('edit')
1495 1495 editform = opts.get('editform', 'mq.qrefresh')
1496 1496 newuser = opts.get('user')
1497 1497 newdate = opts.get('date')
1498 1498 if newdate:
1499 1499 newdate = '%d %d' % util.parsedate(newdate)
1500 1500 wlock = repo.wlock()
1501 1501
1502 1502 try:
1503 1503 self.checktoppatch(repo)
1504 1504 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1505 1505 if repo.changelog.heads(top) != [top]:
1506 1506 raise util.Abort(_("cannot refresh a revision with children"))
1507 1507 if not repo[top].mutable():
1508 1508 raise util.Abort(_("cannot refresh immutable revision"),
1509 1509 hint=_('see "hg help phases" for details'))
1510 1510
1511 1511 cparents = repo.changelog.parents(top)
1512 1512 patchparent = self.qparents(repo, top)
1513 1513
1514 1514 inclsubs = checksubstate(repo, hex(patchparent))
1515 1515 if inclsubs:
1516 1516 substatestate = repo.dirstate['.hgsubstate']
1517 1517
1518 1518 ph = patchheader(self.join(patchfn), self.plainmode)
1519 1519 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1520 1520 if newuser:
1521 1521 ph.setuser(newuser)
1522 1522 if newdate:
1523 1523 ph.setdate(newdate)
1524 1524 ph.setparent(hex(patchparent))
1525 1525
1526 1526 # only commit new patch when write is complete
1527 1527 patchf = self.opener(patchfn, 'w', atomictemp=True)
1528 1528
1529 1529 # update the dirstate in place, strip off the qtip commit
1530 1530 # and then commit.
1531 1531 #
1532 1532 # this should really read:
1533 1533 # mm, dd, aa = repo.status(top, patchparent)[:3]
1534 1534 # but we do it backwards to take advantage of manifest/changelog
1535 1535 # caching against the next repo.status call
1536 1536 mm, aa, dd = repo.status(patchparent, top)[:3]
1537 1537 changes = repo.changelog.read(top)
1538 1538 man = repo.manifest.read(changes[0])
1539 1539 aaa = aa[:]
1540 1540 matchfn = scmutil.match(repo[None], pats, opts)
1541 1541 # in short mode, we only diff the files included in the
1542 1542 # patch already plus specified files
1543 1543 if opts.get('short'):
1544 1544 # if amending a patch, we start with existing
1545 1545 # files plus specified files - unfiltered
1546 1546 match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1547 1547 # filter with include/exclude options
1548 1548 matchfn = scmutil.match(repo[None], opts=opts)
1549 1549 else:
1550 1550 match = scmutil.matchall(repo)
1551 1551 m, a, r, d = repo.status(match=match)[:4]
1552 1552 mm = set(mm)
1553 1553 aa = set(aa)
1554 1554 dd = set(dd)
1555 1555
1556 1556 # we might end up with files that were added between
1557 1557 # qtip and the dirstate parent, but then changed in the
1558 1558 # local dirstate. in this case, we want them to only
1559 1559 # show up in the added section
1560 1560 for x in m:
1561 1561 if x not in aa:
1562 1562 mm.add(x)
1563 1563 # we might end up with files added by the local dirstate that
1564 1564 # were deleted by the patch. In this case, they should only
1565 1565 # show up in the changed section.
1566 1566 for x in a:
1567 1567 if x in dd:
1568 1568 dd.remove(x)
1569 1569 mm.add(x)
1570 1570 else:
1571 1571 aa.add(x)
1572 1572 # make sure any files deleted in the local dirstate
1573 1573 # are not in the add or change column of the patch
1574 1574 forget = []
1575 1575 for x in d + r:
1576 1576 if x in aa:
1577 1577 aa.remove(x)
1578 1578 forget.append(x)
1579 1579 continue
1580 1580 else:
1581 1581 mm.discard(x)
1582 1582 dd.add(x)
1583 1583
1584 1584 m = list(mm)
1585 1585 r = list(dd)
1586 1586 a = list(aa)
1587 1587
1588 1588 # create 'match' that includes the files to be recommitted.
1589 1589 # apply matchfn via repo.status to ensure correct case handling.
1590 1590 cm, ca, cr, cd = repo.status(patchparent, match=matchfn)[:4]
1591 1591 allmatches = set(cm + ca + cr + cd)
1592 1592 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1593 1593
1594 1594 files = set(inclsubs)
1595 1595 for x in refreshchanges:
1596 1596 files.update(x)
1597 1597 match = scmutil.matchfiles(repo, files)
1598 1598
1599 1599 bmlist = repo[top].bookmarks()
1600 1600
1601 1601 try:
1602 1602 if diffopts.git or diffopts.upgrade:
1603 1603 copies = {}
1604 1604 for dst in a:
1605 1605 src = repo.dirstate.copied(dst)
1606 1606 # during qfold, the source file for copies may
1607 1607 # be removed. Treat this as a simple add.
1608 1608 if src is not None and src in repo.dirstate:
1609 1609 copies.setdefault(src, []).append(dst)
1610 1610 repo.dirstate.add(dst)
1611 1611 # remember the copies between patchparent and qtip
1612 1612 for dst in aaa:
1613 1613 f = repo.file(dst)
1614 1614 src = f.renamed(man[dst])
1615 1615 if src:
1616 1616 copies.setdefault(src[0], []).extend(
1617 1617 copies.get(dst, []))
1618 1618 if dst in a:
1619 1619 copies[src[0]].append(dst)
1620 1620 # we can't copy a file created by the patch itself
1621 1621 if dst in copies:
1622 1622 del copies[dst]
1623 1623 for src, dsts in copies.iteritems():
1624 1624 for dst in dsts:
1625 1625 repo.dirstate.copy(src, dst)
1626 1626 else:
1627 1627 for dst in a:
1628 1628 repo.dirstate.add(dst)
1629 1629 # Drop useless copy information
1630 1630 for f in list(repo.dirstate.copies()):
1631 1631 repo.dirstate.copy(None, f)
1632 1632 for f in r:
1633 1633 repo.dirstate.remove(f)
1634 1634 # if the patch excludes a modified file, mark that
1635 1635 # file with mtime=0 so status can see it.
1636 1636 mm = []
1637 1637 for i in xrange(len(m) - 1, -1, -1):
1638 1638 if not matchfn(m[i]):
1639 1639 mm.append(m[i])
1640 1640 del m[i]
1641 1641 for f in m:
1642 1642 repo.dirstate.normal(f)
1643 1643 for f in mm:
1644 1644 repo.dirstate.normallookup(f)
1645 1645 for f in forget:
1646 1646 repo.dirstate.drop(f)
1647 1647
1648 1648 user = ph.user or changes[1]
1649 1649
1650 1650 oldphase = repo[top].phase()
1651 1651
1652 1652 # assumes strip can roll itself back if interrupted
1653 1653 repo.setparents(*cparents)
1654 1654 self.applied.pop()
1655 1655 self.applieddirty = True
1656 1656 strip(self.ui, repo, [top], update=False, backup=False)
1657 1657 except: # re-raises
1658 1658 repo.dirstate.invalidate()
1659 1659 raise
1660 1660
1661 1661 try:
1662 1662 # might be nice to attempt to roll back strip after this
1663 1663
1664 1664 defaultmsg = "[mq]: %s" % patchfn
1665 1665 editor = cmdutil.getcommiteditor(editform=editform)
1666 1666 if edit:
1667 1667 def finishdesc(desc):
1668 1668 if desc.rstrip():
1669 1669 ph.setmessage(desc)
1670 1670 return desc
1671 1671 return defaultmsg
1672 1672 # i18n: this message is shown in editor with "HG: " prefix
1673 1673 extramsg = _('Leave message empty to use default message.')
1674 1674 editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
1675 1675 extramsg=extramsg,
1676 1676 editform=editform)
1677 1677 message = msg or "\n".join(ph.message)
1678 1678 elif not msg:
1679 1679 if not ph.message:
1680 1680 message = defaultmsg
1681 1681 else:
1682 1682 message = "\n".join(ph.message)
1683 1683 else:
1684 1684 message = msg
1685 1685 ph.setmessage(msg)
1686 1686
1687 1687 # Ensure we create the new changeset in the same phase as
1688 1688 # the old one.
1689 1689 n = newcommit(repo, oldphase, message, user, ph.date,
1690 1690 match=match, force=True, editor=editor)
1691 1691 # only write patch after a successful commit
1692 1692 c = [list(x) for x in refreshchanges]
1693 1693 if inclsubs:
1694 1694 self.putsubstate2changes(substatestate, c)
1695 1695 chunks = patchmod.diff(repo, patchparent,
1696 1696 changes=c, opts=diffopts)
1697 1697 comments = str(ph)
1698 1698 if comments:
1699 1699 patchf.write(comments)
1700 1700 for chunk in chunks:
1701 1701 patchf.write(chunk)
1702 1702 patchf.close()
1703 1703
1704 1704 marks = repo._bookmarks
1705 1705 for bm in bmlist:
1706 1706 marks[bm] = n
1707 1707 marks.write()
1708 1708
1709 1709 self.applied.append(statusentry(n, patchfn))
1710 1710 except: # re-raises
1711 1711 ctx = repo[cparents[0]]
1712 1712 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1713 1713 self.savedirty()
1714 1714 self.ui.warn(_('refresh interrupted while patch was popped! '
1715 1715 '(revert --all, qpush to recover)\n'))
1716 1716 raise
1717 1717 finally:
1718 1718 wlock.release()
1719 1719 self.removeundo(repo)
1720 1720
1721 1721 def init(self, repo, create=False):
1722 1722 if not create and os.path.isdir(self.path):
1723 1723 raise util.Abort(_("patch queue directory already exists"))
1724 1724 try:
1725 1725 os.mkdir(self.path)
1726 1726 except OSError, inst:
1727 1727 if inst.errno != errno.EEXIST or not create:
1728 1728 raise
1729 1729 if create:
1730 1730 return self.qrepo(create=True)
1731 1731
1732 1732 def unapplied(self, repo, patch=None):
1733 1733 if patch and patch not in self.series:
1734 1734 raise util.Abort(_("patch %s is not in series file") % patch)
1735 1735 if not patch:
1736 1736 start = self.seriesend()
1737 1737 else:
1738 1738 start = self.series.index(patch) + 1
1739 1739 unapplied = []
1740 1740 for i in xrange(start, len(self.series)):
1741 1741 pushable, reason = self.pushable(i)
1742 1742 if pushable:
1743 1743 unapplied.append((i, self.series[i]))
1744 1744 self.explainpushable(i)
1745 1745 return unapplied
1746 1746
1747 1747 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1748 1748 summary=False):
1749 1749 def displayname(pfx, patchname, state):
1750 1750 if pfx:
1751 1751 self.ui.write(pfx)
1752 1752 if summary:
1753 1753 ph = patchheader(self.join(patchname), self.plainmode)
1754 1754 msg = ph.message and ph.message[0] or ''
1755 1755 if self.ui.formatted():
1756 1756 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1757 1757 if width > 0:
1758 1758 msg = util.ellipsis(msg, width)
1759 1759 else:
1760 1760 msg = ''
1761 1761 self.ui.write(patchname, label='qseries.' + state)
1762 1762 self.ui.write(': ')
1763 1763 self.ui.write(msg, label='qseries.message.' + state)
1764 1764 else:
1765 1765 self.ui.write(patchname, label='qseries.' + state)
1766 1766 self.ui.write('\n')
1767 1767
1768 1768 applied = set([p.name for p in self.applied])
1769 1769 if length is None:
1770 1770 length = len(self.series) - start
1771 1771 if not missing:
1772 1772 if self.ui.verbose:
1773 1773 idxwidth = len(str(start + length - 1))
1774 1774 for i in xrange(start, start + length):
1775 1775 patch = self.series[i]
1776 1776 if patch in applied:
1777 1777 char, state = 'A', 'applied'
1778 1778 elif self.pushable(i)[0]:
1779 1779 char, state = 'U', 'unapplied'
1780 1780 else:
1781 1781 char, state = 'G', 'guarded'
1782 1782 pfx = ''
1783 1783 if self.ui.verbose:
1784 1784 pfx = '%*d %s ' % (idxwidth, i, char)
1785 1785 elif status and status != char:
1786 1786 continue
1787 1787 displayname(pfx, patch, state)
1788 1788 else:
1789 1789 msng_list = []
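# with --missing, walk the patch directory and list files that are neither
# in the series file nor one of mq's own bookkeeping files (status, series,
# guards), skipping hidden files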
1790 1790 for root, dirs, files in os.walk(self.path):
1791 1791 d = root[len(self.path) + 1:]
1792 1792 for f in files:
1793 1793 fl = os.path.join(d, f)
1794 1794 if (fl not in self.series and
1795 1795 fl not in (self.statuspath, self.seriespath,
1796 1796 self.guardspath)
1797 1797 and not fl.startswith('.')):
1798 1798 msng_list.append(fl)
1799 1799 for x in sorted(msng_list):
1800 1800 pfx = self.ui.verbose and ('D ') or ''
1801 1801 displayname(pfx, x, 'missing')
1802 1802
1803 1803 def issaveline(self, l):
1804 1804 if l.name == '.hg.patches.save.line':
1805 1805 return True
1806 1806
1807 1807 def qrepo(self, create=False):
1808 1808 ui = self.baseui.copy()
1809 1809 if create or os.path.isdir(self.join(".hg")):
1810 1810 return hg.repository(ui, path=self.path, create=create)
1811 1811
1812 1812 def restore(self, repo, rev, delete=None, qupdate=None):
1813 1813 desc = repo[rev].description().strip()
1814 1814 lines = desc.splitlines()
1815 1815 i = 0
1816 1816 datastart = None
1817 1817 series = []
1818 1818 applied = []
1819 1819 qpp = None
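# parse the description of the save changeset written by save(): a
# 'Dirstate:' line carries the saved dirstate parents, and the lines after
# 'Patch Data:' are applied entries ('node:name') or series entries
# (prefixed with ':')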
1820 1820 for i, line in enumerate(lines):
1821 1821 if line == 'Patch Data:':
1822 1822 datastart = i + 1
1823 1823 elif line.startswith('Dirstate:'):
1824 1824 l = line.rstrip()
1825 1825 l = l[10:].split(' ')
1826 1826 qpp = [bin(x) for x in l]
1827 1827 elif datastart is not None:
1828 1828 l = line.rstrip()
1829 1829 n, name = l.split(':', 1)
1830 1830 if n:
1831 1831 applied.append(statusentry(bin(n), name))
1832 1832 else:
1833 1833 series.append(l)
1834 1834 if datastart is None:
1835 1835 self.ui.warn(_("no saved patch data found\n"))
1836 1836 return 1
1837 1837 self.ui.warn(_("restoring status: %s\n") % lines[0])
1838 1838 self.fullseries = series
1839 1839 self.applied = applied
1840 1840 self.parseseries()
1841 1841 self.seriesdirty = True
1842 1842 self.applieddirty = True
1843 1843 heads = repo.changelog.heads()
1844 1844 if delete:
1845 1845 if rev not in heads:
1846 1846 self.ui.warn(_("save entry has children, leaving it alone\n"))
1847 1847 else:
1848 1848 self.ui.warn(_("removing save entry %s\n") % short(rev))
1849 1849 pp = repo.dirstate.parents()
1850 1850 if rev in pp:
1851 1851 update = True
1852 1852 else:
1853 1853 update = False
1854 1854 strip(self.ui, repo, [rev], update=update, backup=False)
1855 1855 if qpp:
1856 1856 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1857 1857 (short(qpp[0]), short(qpp[1])))
1858 1858 if qupdate:
1859 1859 self.ui.status(_("updating queue directory\n"))
1860 1860 r = self.qrepo()
1861 1861 if not r:
1862 1862 self.ui.warn(_("unable to load queue repository\n"))
1863 1863 return 1
1864 1864 hg.clean(r, qpp[0])
1865 1865
1866 1866 def save(self, repo, msg=None):
1867 1867 if not self.applied:
1868 1868 self.ui.warn(_("save: no patches applied, exiting\n"))
1869 1869 return 1
1870 1870 if self.issaveline(self.applied[-1]):
1871 1871 self.ui.warn(_("status is already saved\n"))
1872 1872 return 1
1873 1873
1874 1874 if not msg:
1875 1875 msg = _("hg patches saved state")
1876 1876 else:
1877 1877 msg = "hg patches: " + msg.rstrip('\r\n')
1878 1878 r = self.qrepo()
1879 1879 if r:
1880 1880 pp = r.dirstate.parents()
1881 1881 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1882 1882 msg += "\n\nPatch Data:\n"
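# applied entries are written as 'node:name' and series entries with a
# leading ':' so that restore() can tell them apart when parsing this
# message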
1883 1883 msg += ''.join('%s\n' % x for x in self.applied)
1884 1884 msg += ''.join(':%s\n' % x for x in self.fullseries)
1885 1885 n = repo.commit(msg, force=True)
1886 1886 if not n:
1887 1887 self.ui.warn(_("repo commit failed\n"))
1888 1888 return 1
1889 1889 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1890 1890 self.applieddirty = True
1891 1891 self.removeundo(repo)
1892 1892
1893 1893 def fullseriesend(self):
1894 1894 if self.applied:
1895 1895 p = self.applied[-1].name
1896 1896 end = self.findseries(p)
1897 1897 if end is None:
1898 1898 return len(self.fullseries)
1899 1899 return end + 1
1900 1900 return 0
1901 1901
1902 1902 def seriesend(self, all_patches=False):
1903 1903 """If all_patches is False, return the index of the next pushable patch
1904 1904 in the series, or the series length. If all_patches is True, return the
1905 1905 index of the first patch past the last applied one.
1906 1906 """
1907 1907 end = 0
1908 1908 def nextpatch(start):
1909 1909 if all_patches or start >= len(self.series):
1910 1910 return start
1911 1911 for i in xrange(start, len(self.series)):
1912 1912 p, reason = self.pushable(i)
1913 1913 if p:
1914 1914 return i
1915 1915 self.explainpushable(i)
1916 1916 return len(self.series)
1917 1917 if self.applied:
1918 1918 p = self.applied[-1].name
1919 1919 try:
1920 1920 end = self.series.index(p)
1921 1921 except ValueError:
1922 1922 return 0
1923 1923 return nextpatch(end + 1)
1924 1924 return nextpatch(end)
1925 1925
1926 1926 def appliedname(self, index):
1927 1927 pname = self.applied[index].name
1928 1928 if not self.ui.verbose:
1929 1929 p = pname
1930 1930 else:
1931 1931 p = str(self.series.index(pname)) + " " + pname
1932 1932 return p
1933 1933
1934 1934 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1935 1935 force=None, git=False):
1936 1936 def checkseries(patchname):
1937 1937 if patchname in self.series:
1938 1938 raise util.Abort(_('patch %s is already in the series file')
1939 1939 % patchname)
1940 1940
1941 1941 if rev:
1942 1942 if files:
1943 1943 raise util.Abort(_('option "-r" not valid when importing '
1944 1944 'files'))
1945 1945 rev = scmutil.revrange(repo, rev)
1946 1946 rev.sort(reverse=True)
1947 1947 elif not files:
1948 1948 raise util.Abort(_('no files or revisions specified'))
1949 1949 if (len(files) > 1 or len(rev) > 1) and patchname:
1950 1950 raise util.Abort(_('option "-n" not valid when importing multiple '
1951 1951 'patches'))
1952 1952 imported = []
1953 1953 if rev:
1954 1954 # If mq patches are applied, we can only import revisions
1955 1955 # that form a linear path to qbase.
1956 1956 # Otherwise, they should form a linear path to a head.
1957 1957 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1958 1958 if len(heads) > 1:
1959 1959 raise util.Abort(_('revision %d is the root of more than one '
1960 1960 'branch') % rev[-1])
1961 1961 if self.applied:
1962 1962 base = repo.changelog.node(rev[0])
1963 1963 if base in [n.node for n in self.applied]:
1964 1964 raise util.Abort(_('revision %d is already managed')
1965 1965 % rev[0])
1966 1966 if heads != [self.applied[-1].node]:
1967 1967 raise util.Abort(_('revision %d is not the parent of '
1968 1968 'the queue') % rev[0])
1969 1969 base = repo.changelog.rev(self.applied[0].node)
1970 1970 lastparent = repo.changelog.parentrevs(base)[0]
1971 1971 else:
1972 1972 if heads != [repo.changelog.node(rev[0])]:
1973 1973 raise util.Abort(_('revision %d has unmanaged children')
1974 1974 % rev[0])
1975 1975 lastparent = None
1976 1976
1977 1977 diffopts = self.diffopts({'git': git})
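# the revision import runs inside a transaction: tr.close() makes the
# changes recorded with the transaction permanent, while tr.release() in
# the finally clause aborts them if close() was never reached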
1978 1978 tr = repo.transaction('qimport')
1979 1979 try:
1980 1980 for r in rev:
1981 1981 if not repo[r].mutable():
1982 1982 raise util.Abort(_('revision %d is not mutable') % r,
1983 1983 hint=_('see "hg help phases" '
1984 1984 'for details'))
1985 1985 p1, p2 = repo.changelog.parentrevs(r)
1986 1986 n = repo.changelog.node(r)
1987 1987 if p2 != nullrev:
1988 1988 raise util.Abort(_('cannot import merge revision %d')
1989 1989 % r)
1990 1990 if lastparent and lastparent != r:
1991 1991 raise util.Abort(_('revision %d is not the parent of '
1992 1992 '%d')
1993 1993 % (r, lastparent))
1994 1994 lastparent = p1
1995 1995
1996 1996 if not patchname:
1997 1997 patchname = normname('%d.diff' % r)
1998 1998 checkseries(patchname)
1999 1999 self.checkpatchname(patchname, force)
2000 2000 self.fullseries.insert(0, patchname)
2001 2001
2002 2002 patchf = self.opener(patchname, "w")
2003 2003 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
2004 2004 patchf.close()
2005 2005
2006 2006 se = statusentry(n, patchname)
2007 2007 self.applied.insert(0, se)
2008 2008
2009 2009 self.added.append(patchname)
2010 2010 imported.append(patchname)
2011 2011 patchname = None
2012 2012 if rev and repo.ui.configbool('mq', 'secret', False):
2013 2013 # if we added anything with --rev, move the secret root
2014 2014 phases.retractboundary(repo, phases.secret, [n])
2015 2015 self.parseseries()
2016 2016 self.applieddirty = True
2017 2017 self.seriesdirty = True
2018 2018 tr.close()
2019 2019 finally:
2020 2020 tr.release()
2021 2021
2022 2022 for i, filename in enumerate(files):
2023 2023 if existing:
2024 2024 if filename == '-':
2025 2025 raise util.Abort(_('-e is incompatible with import from -'))
2026 2026 filename = normname(filename)
2027 2027 self.checkreservedname(filename)
2028 2028 if util.url(filename).islocal():
2029 2029 originpath = self.join(filename)
2030 2030 if not os.path.isfile(originpath):
2031 2031 raise util.Abort(
2032 2032 _("patch %s does not exist") % filename)
2033 2033
2034 2034 if patchname:
2035 2035 self.checkpatchname(patchname, force)
2036 2036
2037 2037 self.ui.write(_('renaming %s to %s\n')
2038 2038 % (filename, patchname))
2039 2039 util.rename(originpath, self.join(patchname))
2040 2040 else:
2041 2041 patchname = filename
2042 2042
2043 2043 else:
2044 2044 if filename == '-' and not patchname:
2045 2045 raise util.Abort(_('need --name to import a patch from -'))
2046 2046 elif not patchname:
2047 2047 patchname = normname(os.path.basename(filename.rstrip('/')))
2048 2048 self.checkpatchname(patchname, force)
2049 2049 try:
2050 2050 if filename == '-':
2051 2051 text = self.ui.fin.read()
2052 2052 else:
2053 2053 fp = hg.openpath(self.ui, filename)
2054 2054 text = fp.read()
2055 2055 fp.close()
2056 2056 except (OSError, IOError):
2057 2057 raise util.Abort(_("unable to read file %s") % filename)
2058 2058 patchf = self.opener(patchname, "w")
2059 2059 patchf.write(text)
2060 2060 patchf.close()
2061 2061 if not force:
2062 2062 checkseries(patchname)
2063 2063 if patchname not in self.series:
2064 2064 index = self.fullseriesend() + i
2065 2065 self.fullseries[index:index] = [patchname]
2066 2066 self.parseseries()
2067 2067 self.seriesdirty = True
2068 2068 self.ui.warn(_("adding %s to series file\n") % patchname)
2069 2069 self.added.append(patchname)
2070 2070 imported.append(patchname)
2071 2071 patchname = None
2072 2072
2073 2073 self.removeundo(repo)
2074 2074 return imported
2075 2075
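# honour the [mq] keepchanges setting: unless the incompatible --force or
# --exact options were given, behave as if --keep-changes had been passed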
2076 2076 def fixkeepchangesopts(ui, opts):
2077 2077 if (not ui.configbool('mq', 'keepchanges') or opts.get('force')
2078 2078 or opts.get('exact')):
2079 2079 return opts
2080 2080 opts = dict(opts)
2081 2081 opts['keep_changes'] = True
2082 2082 return opts
2083 2083
2084 2084 @command("qdelete|qremove|qrm",
2085 2085 [('k', 'keep', None, _('keep patch file')),
2086 2086 ('r', 'rev', [],
2087 2087 _('stop managing a revision (DEPRECATED)'), _('REV'))],
2088 2088 _('hg qdelete [-k] [PATCH]...'))
2089 2089 def delete(ui, repo, *patches, **opts):
2090 2090 """remove patches from queue
2091 2091
2092 2092 The patches must not be applied, and at least one patch is required. Exact
2093 2093 patch identifiers must be given. With -k/--keep, the patch files are
2094 2094 preserved in the patch directory.
2095 2095
2096 2096 To stop managing a patch and move it into permanent history,
2097 2097 use the :hg:`qfinish` command."""
2098 2098 q = repo.mq
2099 2099 q.delete(repo, patches, opts)
2100 2100 q.savedirty()
2101 2101 return 0
2102 2102
2103 2103 @command("qapplied",
2104 2104 [('1', 'last', None, _('show only the preceding applied patch'))
2105 2105 ] + seriesopts,
2106 2106 _('hg qapplied [-1] [-s] [PATCH]'))
2107 2107 def applied(ui, repo, patch=None, **opts):
2108 2108 """print the patches already applied
2109 2109
2110 2110 Returns 0 on success."""
2111 2111
2112 2112 q = repo.mq
2113 2113
2114 2114 if patch:
2115 2115 if patch not in q.series:
2116 2116 raise util.Abort(_("patch %s is not in series file") % patch)
2117 2117 end = q.series.index(patch) + 1
2118 2118 else:
2119 2119 end = q.seriesend(True)
2120 2120
2121 2121 if opts.get('last') and not end:
2122 2122 ui.write(_("no patches applied\n"))
2123 2123 return 1
2124 2124 elif opts.get('last') and end == 1:
2125 2125 ui.write(_("only one patch applied\n"))
2126 2126 return 1
2127 2127 elif opts.get('last'):
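# --last: show only the patch just below the current top, i.e. the single
# entry at index end - 2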
2128 2128 start = end - 2
2129 2129 end = 1
2130 2130 else:
2131 2131 start = 0
2132 2132
2133 2133 q.qseries(repo, length=end, start=start, status='A',
2134 2134 summary=opts.get('summary'))
2135 2135
2136 2136
2137 2137 @command("qunapplied",
2138 2138 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2139 2139 _('hg qunapplied [-1] [-s] [PATCH]'))
2140 2140 def unapplied(ui, repo, patch=None, **opts):
2141 2141 """print the patches not yet applied
2142 2142
2143 2143 Returns 0 on success."""
2144 2144
2145 2145 q = repo.mq
2146 2146 if patch:
2147 2147 if patch not in q.series:
2148 2148 raise util.Abort(_("patch %s is not in series file") % patch)
2149 2149 start = q.series.index(patch) + 1
2150 2150 else:
2151 2151 start = q.seriesend(True)
2152 2152
2153 2153 if start == len(q.series) and opts.get('first'):
2154 2154 ui.write(_("all patches applied\n"))
2155 2155 return 1
2156 2156
2157 2157 length = opts.get('first') and 1 or None
2158 2158 q.qseries(repo, start=start, length=length, status='U',
2159 2159 summary=opts.get('summary'))
2160 2160
2161 2161 @command("qimport",
2162 2162 [('e', 'existing', None, _('import file in patch directory')),
2163 2163 ('n', 'name', '',
2164 2164 _('name of patch file'), _('NAME')),
2165 2165 ('f', 'force', None, _('overwrite existing files')),
2166 2166 ('r', 'rev', [],
2167 2167 _('place existing revisions under mq control'), _('REV')),
2168 2168 ('g', 'git', None, _('use git extended diff format')),
2169 2169 ('P', 'push', None, _('qpush after importing'))],
2170 2170 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
2171 2171 def qimport(ui, repo, *filename, **opts):
2172 2172 """import a patch or existing changeset
2173 2173
2174 2174 The patch is inserted into the series after the last applied
2175 2175 patch. If no patches have been applied, qimport prepends the patch
2176 2176 to the series.
2177 2177
2178 2178 The patch will have the same name as its source file unless you
2179 2179 give it a new one with -n/--name.
2180 2180
2181 2181 You can register an existing patch inside the patch directory with
2182 2182 the -e/--existing flag.
2183 2183
2184 2184 With -f/--force, an existing patch of the same name will be
2185 2185 overwritten.
2186 2186
2187 2187 An existing changeset may be placed under mq control with -r/--rev
2188 2188 (e.g. qimport --rev . -n patch will place the current revision
2189 2189 under mq control). With -g/--git, patches imported with --rev will
2190 2190 use the git diff format. See the diffs help topic for information
2191 2191 on why this is important for preserving rename/copy information
2192 2192 and permission changes. Use :hg:`qfinish` to remove changesets
2193 2193 from mq control.
2194 2194
2195 2195 To import a patch from standard input, pass - as the patch file.
2196 2196 When importing from standard input, a patch name must be specified
2197 2197 using the --name flag.
2198 2198
2199 2199 To import an existing patch while renaming it::
2200 2200
2201 2201 hg qimport -e existing-patch -n new-name
2202 2202
2203 2203 Returns 0 if import succeeded.
2204 2204 """
2205 2205 lock = repo.lock() # because this may move phases
2206 2206 try:
2207 2207 q = repo.mq
2208 2208 try:
2209 2209 imported = q.qimport(
2210 2210 repo, filename, patchname=opts.get('name'),
2211 2211 existing=opts.get('existing'), force=opts.get('force'),
2212 2212 rev=opts.get('rev'), git=opts.get('git'))
2213 2213 finally:
2214 2214 q.savedirty()
2215 2215 finally:
2216 2216 lock.release()
2217 2217
2218 2218 if imported and opts.get('push') and not opts.get('rev'):
2219 2219 return q.push(repo, imported[-1])
2220 2220 return 0
2221 2221
2222 2222 def qinit(ui, repo, create):
2223 2223 """initialize a new queue repository
2224 2224
2225 2225 This command also creates a series file for ordering patches, and
2226 2226 an mq-specific .hgignore file in the queue repository, to exclude
2227 2227 the status and guards files (these contain mostly transient state).
2228 2228
2229 2229 Returns 0 if initialization succeeded."""
2230 2230 q = repo.mq
2231 2231 r = q.init(repo, create)
2232 2232 q.savedirty()
2233 2233 if r:
2234 2234 if not os.path.exists(r.wjoin('.hgignore')):
2235 2235 fp = r.wopener('.hgignore', 'w')
2236 2236 fp.write('^\\.hg\n')
2237 2237 fp.write('^\\.mq\n')
2238 2238 fp.write('syntax: glob\n')
2239 2239 fp.write('status\n')
2240 2240 fp.write('guards\n')
2241 2241 fp.close()
2242 2242 if not os.path.exists(r.wjoin('series')):
2243 2243 r.wopener('series', 'w').close()
2244 2244 r[None].add(['.hgignore', 'series'])
2245 2245 commands.add(ui, r)
2246 2246 return 0
2247 2247
2248 2248 @command("^qinit",
2249 2249 [('c', 'create-repo', None, _('create queue repository'))],
2250 2250 _('hg qinit [-c]'))
2251 2251 def init(ui, repo, **opts):
2252 2252 """init a new queue repository (DEPRECATED)
2253 2253
2254 2254 The queue repository is unversioned by default. If
2255 2255 -c/--create-repo is specified, qinit will create a separate nested
2256 2256 repository for patches (qinit -c may also be run later to convert
2257 2257 an unversioned patch repository into a versioned one). You can use
2258 2258 qcommit to commit changes to this queue repository.
2259 2259
2260 2260 This command is deprecated. Without -c, it's implied by other relevant
2261 2261 commands. With -c, use :hg:`init --mq` instead."""
2262 2262 return qinit(ui, repo, create=opts.get('create_repo'))
2263 2263
2264 2264 @command("qclone",
2265 2265 [('', 'pull', None, _('use pull protocol to copy metadata')),
2266 2266 ('U', 'noupdate', None,
2267 2267 _('do not update the new working directories')),
2268 2268 ('', 'uncompressed', None,
2269 2269 _('use uncompressed transfer (fast over LAN)')),
2270 2270 ('p', 'patches', '',
2271 2271 _('location of source patch repository'), _('REPO')),
2272 2272 ] + commands.remoteopts,
2273 2273 _('hg qclone [OPTION]... SOURCE [DEST]'),
2274 2274 norepo=True)
2275 2275 def clone(ui, source, dest=None, **opts):
2276 2276 '''clone main and patch repository at same time
2277 2277
2278 2278 If the source is local, the destination will have no patches applied.
2279 2279 If the source is remote, this command cannot check whether patches are
2280 2280 applied in the source, so it cannot guarantee that patches are not
2281 2281 applied in the destination. If you clone a remote repository, make
2282 2282 sure beforehand that it has no patches applied.
2283 2283
2284 2284 Source patch repository is looked for in <src>/.hg/patches by
2285 2285 default. Use -p <url> to change.
2286 2286
2287 2287 The patch directory must be a nested Mercurial repository, as
2288 2288 would be created by :hg:`init --mq`.
2289 2289
2290 2290 Return 0 on success.
2291 2291 '''
2292 2292 def patchdir(repo):
2293 2293 """compute a patch repo url from a repo object"""
2294 2294 url = repo.url()
2295 2295 if url.endswith('/'):
2296 2296 url = url[:-1]
2297 2297 return url + '/.hg/patches'
2298 2298
2299 2299 # main repo (destination and sources)
2300 2300 if dest is None:
2301 2301 dest = hg.defaultdest(source)
2302 2302 sr = hg.peer(ui, opts, ui.expandpath(source))
2303 2303
2304 2304 # patches repo (source only)
2305 2305 if opts.get('patches'):
2306 2306 patchespath = ui.expandpath(opts.get('patches'))
2307 2307 else:
2308 2308 patchespath = patchdir(sr)
2309 2309 try:
2310 2310 hg.peer(ui, opts, patchespath)
2311 2311 except error.RepoError:
2312 2312 raise util.Abort(_('versioned patch repository not found'
2313 2313 ' (see init --mq)'))
2314 2314 qbase, destrev = None, None
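# for a local source, find qbase (the first applied patch) so applied
# patches can be stripped from the destination after cloning, and limit
# what is pushed to a non-local destination; for a remote source, fall
# back to its 'qbase' lookup if it supports it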
2315 2315 if sr.local():
2316 2316 repo = sr.local()
2317 2317 if repo.mq.applied and repo[qbase].phase() != phases.secret:
2318 2318 qbase = repo.mq.applied[0].node
2319 2319 if not hg.islocal(dest):
2320 2320 heads = set(repo.heads())
2321 2321 destrev = list(heads.difference(repo.heads(qbase)))
2322 2322 destrev.append(repo.changelog.parents(qbase)[0])
2323 2323 elif sr.capable('lookup'):
2324 2324 try:
2325 2325 qbase = sr.lookup('qbase')
2326 2326 except error.RepoError:
2327 2327 pass
2328 2328
2329 2329 ui.note(_('cloning main repository\n'))
2330 2330 sr, dr = hg.clone(ui, opts, sr.url(), dest,
2331 2331 pull=opts.get('pull'),
2332 2332 rev=destrev,
2333 2333 update=False,
2334 2334 stream=opts.get('uncompressed'))
2335 2335
2336 2336 ui.note(_('cloning patch repository\n'))
2337 2337 hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
2338 2338 pull=opts.get('pull'), update=not opts.get('noupdate'),
2339 2339 stream=opts.get('uncompressed'))
2340 2340
2341 2341 if dr.local():
2342 2342 repo = dr.local()
2343 2343 if qbase:
2344 2344 ui.note(_('stripping applied patches from destination '
2345 2345 'repository\n'))
2346 2346 strip(ui, repo, [qbase], update=False, backup=None)
2347 2347 if not opts.get('noupdate'):
2348 2348 ui.note(_('updating destination repository\n'))
2349 2349 hg.update(repo, repo.changelog.tip())
2350 2350
2351 2351 @command("qcommit|qci",
2352 2352 commands.table["^commit|ci"][1],
2353 2353 _('hg qcommit [OPTION]... [FILE]...'),
2354 2354 inferrepo=True)
2355 2355 def commit(ui, repo, *pats, **opts):
2356 2356 """commit changes in the queue repository (DEPRECATED)
2357 2357
2358 2358 This command is deprecated; use :hg:`commit --mq` instead."""
2359 2359 q = repo.mq
2360 2360 r = q.qrepo()
2361 2361 if not r:
2362 2362 raise util.Abort('no queue repository')
2363 2363 commands.commit(r.ui, r, *pats, **opts)
2364 2364
2365 2365 @command("qseries",
2366 2366 [('m', 'missing', None, _('print patches not in series')),
2367 2367 ] + seriesopts,
2368 2368 _('hg qseries [-ms]'))
2369 2369 def series(ui, repo, **opts):
2370 2370 """print the entire series file
2371 2371
2372 2372 Returns 0 on success."""
2373 2373 repo.mq.qseries(repo, missing=opts.get('missing'),
2374 2374 summary=opts.get('summary'))
2375 2375 return 0
2376 2376
2377 2377 @command("qtop", seriesopts, _('hg qtop [-s]'))
2378 2378 def top(ui, repo, **opts):
2379 2379 """print the name of the current patch
2380 2380
2381 2381 Returns 0 on success."""
2382 2382 q = repo.mq
2383 2383 t = q.applied and q.seriesend(True) or 0
2384 2384 if t:
2385 2385 q.qseries(repo, start=t - 1, length=1, status='A',
2386 2386 summary=opts.get('summary'))
2387 2387 else:
2388 2388 ui.write(_("no patches applied\n"))
2389 2389 return 1
2390 2390
2391 2391 @command("qnext", seriesopts, _('hg qnext [-s]'))
2392 2392 def next(ui, repo, **opts):
2393 2393 """print the name of the next pushable patch
2394 2394
2395 2395 Returns 0 on success."""
2396 2396 q = repo.mq
2397 2397 end = q.seriesend()
2398 2398 if end == len(q.series):
2399 2399 ui.write(_("all patches applied\n"))
2400 2400 return 1
2401 2401 q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
2402 2402
2403 2403 @command("qprev", seriesopts, _('hg qprev [-s]'))
2404 2404 def prev(ui, repo, **opts):
2405 2405 """print the name of the preceding applied patch
2406 2406
2407 2407 Returns 0 on success."""
2408 2408 q = repo.mq
2409 2409 l = len(q.applied)
2410 2410 if l == 1:
2411 2411 ui.write(_("only one patch applied\n"))
2412 2412 return 1
2413 2413 if not l:
2414 2414 ui.write(_("no patches applied\n"))
2415 2415 return 1
2416 2416 idx = q.series.index(q.applied[-2].name)
2417 2417 q.qseries(repo, start=idx, length=1, status='A',
2418 2418 summary=opts.get('summary'))
2419 2419
2420 2420 def setupheaderopts(ui, opts):
2421 2421 if not opts.get('user') and opts.get('currentuser'):
2422 2422 opts['user'] = ui.username()
2423 2423 if not opts.get('date') and opts.get('currentdate'):
2424 2424 opts['date'] = "%d %d" % util.makedate()
2425 2425
2426 2426 @command("^qnew",
2427 2427 [('e', 'edit', None, _('invoke editor on commit messages')),
2428 2428 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2429 2429 ('g', 'git', None, _('use git extended diff format')),
2430 2430 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2431 2431 ('u', 'user', '',
2432 2432 _('add "From: <USER>" to patch'), _('USER')),
2433 2433 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2434 2434 ('d', 'date', '',
2435 2435 _('add "Date: <DATE>" to patch'), _('DATE'))
2436 2436 ] + commands.walkopts + commands.commitopts,
2437 2437 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
2438 2438 inferrepo=True)
2439 2439 def new(ui, repo, patch, *args, **opts):
2440 2440 """create a new patch
2441 2441
2442 2442 qnew creates a new patch on top of the currently-applied patch (if
2443 2443 any). The patch will be initialized with any outstanding changes
2444 2444 in the working directory. You may also use -I/--include,
2445 2445 -X/--exclude, and/or a list of files after the patch name to add
2446 2446 only changes to matching files to the new patch, leaving the rest
2447 2447 as uncommitted modifications.
2448 2448
2449 2449 -u/--user and -d/--date can be used to set the (given) user and
2450 2450 date, respectively. -U/--currentuser and -D/--currentdate set user
2451 2451 to current user and date to current date.
2452 2452
2453 2453 -e/--edit, -m/--message or -l/--logfile set the patch header as
2454 2454 well as the commit message. If none is specified, the header is
2455 2455 empty and the commit message is '[mq]: PATCH'.
2456 2456
2457 2457 Use the -g/--git option to keep the patch in the git extended diff
2458 2458 format. Read the diffs help topic for more information on why this
2459 2459 is important for preserving permission changes and copy/rename
2460 2460 information.
2461 2461
2462 2462 Returns 0 on successful creation of a new patch.
2463 2463 """
2464 2464 msg = cmdutil.logmessage(ui, opts)
2465 2465 q = repo.mq
2466 2466 opts['msg'] = msg
2467 2467 setupheaderopts(ui, opts)
2468 2468 q.new(repo, patch, *args, **opts)
2469 2469 q.savedirty()
2470 2470 return 0
2471 2471
2472 2472 @command("^qrefresh",
2473 2473 [('e', 'edit', None, _('invoke editor on commit messages')),
2474 2474 ('g', 'git', None, _('use git extended diff format')),
2475 2475 ('s', 'short', None,
2476 2476 _('refresh only files already in the patch and specified files')),
2477 2477 ('U', 'currentuser', None,
2478 2478 _('add/update author field in patch with current user')),
2479 2479 ('u', 'user', '',
2480 2480 _('add/update author field in patch with given user'), _('USER')),
2481 2481 ('D', 'currentdate', None,
2482 2482 _('add/update date field in patch with current date')),
2483 2483 ('d', 'date', '',
2484 2484 _('add/update date field in patch with given date'), _('DATE'))
2485 2485 ] + commands.walkopts + commands.commitopts,
2486 2486 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
2487 2487 inferrepo=True)
2488 2488 def refresh(ui, repo, *pats, **opts):
2489 2489 """update the current patch
2490 2490
2491 2491 If any file patterns are provided, the refreshed patch will
2492 2492 contain only the modifications that match those patterns; the
2493 2493 remaining modifications will remain in the working directory.
2494 2494
2495 2495 If -s/--short is specified, files currently included in the patch
2496 2496 will be refreshed just like matched files and remain in the patch.
2497 2497
2498 2498 If -e/--edit is specified, Mercurial will start your configured editor for
2499 2499 you to enter a message. In case qrefresh fails, you will find a backup of
2500 2500 your message in ``.hg/last-message.txt``.
2501 2501
2502 2502 hg add/remove/copy/rename work as usual, though you might want to
2503 2503 use git-style patches (-g/--git or [diff] git=1) to track copies
2504 2504 and renames. See the diffs help topic for more information on the
2505 2505 git diff format.
2506 2506
2507 2507 Returns 0 on success.
2508 2508 """
2509 2509 q = repo.mq
2510 2510 message = cmdutil.logmessage(ui, opts)
2511 2511 setupheaderopts(ui, opts)
2512 2512 wlock = repo.wlock()
2513 2513 try:
2514 2514 ret = q.refresh(repo, pats, msg=message, **opts)
2515 2515 q.savedirty()
2516 2516 return ret
2517 2517 finally:
2518 2518 wlock.release()
2519 2519
2520 2520 @command("^qdiff",
2521 2521 commands.diffopts + commands.diffopts2 + commands.walkopts,
2522 2522 _('hg qdiff [OPTION]... [FILE]...'),
2523 2523 inferrepo=True)
2524 2524 def diff(ui, repo, *pats, **opts):
2525 2525 """diff of the current patch and subsequent modifications
2526 2526
2527 2527 Shows a diff which includes the current patch as well as any
2528 2528 changes which have been made in the working directory since the
2529 2529 last refresh (thus showing what the current patch would become
2530 2530 after a qrefresh).
2531 2531
2532 2532 Use :hg:`diff` if you only want to see the changes made since the
2533 2533 last qrefresh, or :hg:`export qtip` if you want to see changes
2534 2534 made by the current patch without including changes made since the
2535 2535 qrefresh.
2536 2536
2537 2537 Returns 0 on success.
2538 2538 """
2539 2539 repo.mq.diff(repo, pats, opts)
2540 2540 return 0
2541 2541
2542 2542 @command('qfold',
2543 2543 [('e', 'edit', None, _('invoke editor on commit messages')),
2544 2544 ('k', 'keep', None, _('keep folded patch files')),
2545 2545 ] + commands.commitopts,
2546 2546 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
2547 2547 def fold(ui, repo, *files, **opts):
2548 2548 """fold the named patches into the current patch
2549 2549
2550 2550 Patches must not yet be applied. Each patch will be successively
2551 2551 applied to the current patch in the order given. If all the
2552 2552 patches apply successfully, the current patch will be refreshed
2553 2553 with the new cumulative patch, and the folded patches will be
2554 2554 deleted. With -k/--keep, the folded patch files will not be
2555 2555 removed afterwards.
2556 2556
2557 2557 The header for each folded patch will be concatenated with the
2558 2558 current patch header, separated by a line of ``* * *``.
2559 2559
2560 2560 Returns 0 on success."""
2561 2561 q = repo.mq
2562 2562 if not files:
2563 2563 raise util.Abort(_('qfold requires at least one patch name'))
2564 2564 if not q.checktoppatch(repo)[0]:
2565 2565 raise util.Abort(_('no patches applied'))
2566 2566 q.checklocalchanges(repo)
2567 2567
2568 2568 message = cmdutil.logmessage(ui, opts)
2569 2569
2570 2570 parent = q.lookup('qtip')
2571 2571 patches = []
2572 2572 messages = []
2573 2573 for f in files:
2574 2574 p = q.lookup(f)
2575 2575 if p in patches or p == parent:
2576 2576 ui.warn(_('skipping already folded patch %s\n') % p)
2577 2577 if q.isapplied(p):
2578 2578 raise util.Abort(_('qfold cannot fold already applied patch %s')
2579 2579 % p)
2580 2580 patches.append(p)
2581 2581
2582 2582 for p in patches:
2583 2583 if not message:
2584 2584 ph = patchheader(q.join(p), q.plainmode)
2585 2585 if ph.message:
2586 2586 messages.append(ph.message)
2587 2587 pf = q.join(p)
2588 2588 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2589 2589 if not patchsuccess:
2590 2590 raise util.Abort(_('error folding patch %s') % p)
2591 2591
2592 2592 if not message:
2593 2593 ph = patchheader(q.join(parent), q.plainmode)
2594 2594 message = ph.message
2595 2595 for msg in messages:
2596 2596 if msg:
2597 2597 if message:
2598 2598 message.append('* * *')
2599 2599 message.extend(msg)
2600 2600 message = '\n'.join(message)
2601 2601
2602 2602 diffopts = q.patchopts(q.diffopts(), *patches)
2603 2603 wlock = repo.wlock()
2604 2604 try:
2605 2605 q.refresh(repo, msg=message, git=diffopts.git, edit=opts.get('edit'),
2606 2606 editform='mq.qfold')
2607 2607 q.delete(repo, patches, opts)
2608 2608 q.savedirty()
2609 2609 finally:
2610 2610 wlock.release()
2611 2611
2612 2612 @command("qgoto",
2613 2613 [('', 'keep-changes', None,
2614 2614 _('tolerate non-conflicting local changes')),
2615 2615 ('f', 'force', None, _('overwrite any local changes')),
2616 2616 ('', 'no-backup', None, _('do not save backup copies of files'))],
2617 2617 _('hg qgoto [OPTION]... PATCH'))
2618 2618 def goto(ui, repo, patch, **opts):
2619 2619 '''push or pop patches until named patch is at top of stack
2620 2620
2621 2621 Returns 0 on success.'''
2622 2622 opts = fixkeepchangesopts(ui, opts)
2623 2623 q = repo.mq
2624 2624 patch = q.lookup(patch)
2625 2625 nobackup = opts.get('no_backup')
2626 2626 keepchanges = opts.get('keep_changes')
2627 2627 if q.isapplied(patch):
2628 2628 ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
2629 2629 keepchanges=keepchanges)
2630 2630 else:
2631 2631 ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
2632 2632 keepchanges=keepchanges)
2633 2633 q.savedirty()
2634 2634 return ret
2635 2635
2636 2636 @command("qguard",
2637 2637 [('l', 'list', None, _('list all patches and guards')),
2638 2638 ('n', 'none', None, _('drop all guards'))],
2639 2639 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
2640 2640 def guard(ui, repo, *args, **opts):
2641 2641 '''set or print guards for a patch
2642 2642
2643 2643 Guards control whether a patch can be pushed. A patch with no
2644 2644 guards is always pushed. A patch with a positive guard ("+foo") is
2645 2645 pushed only if the :hg:`qselect` command has activated it. A patch with
2646 2646 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
2647 2647 has activated it.
2648 2648
2649 2649 With no arguments, print the currently active guards.
2650 2650 With arguments, set guards for the named patch.
2651 2651
2652 2652 .. note::
2653 2653
2654 2654 Specifying negative guards now requires '--'.
2655 2655
2656 2656 To set guards on another patch::
2657 2657
2658 2658 hg qguard other.patch -- +2.6.17 -stable
2659 2659
2660 2660 Returns 0 on success.
2661 2661 '''
2662 2662 def status(idx):
2663 2663 guards = q.seriesguards[idx] or ['unguarded']
2664 2664 if q.series[idx] in applied:
2665 2665 state = 'applied'
2666 2666 elif q.pushable(idx)[0]:
2667 2667 state = 'unapplied'
2668 2668 else:
2669 2669 state = 'guarded'
2670 2670 label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
2671 2671 ui.write('%s: ' % ui.label(q.series[idx], label))
2672 2672
2673 2673 for i, guard in enumerate(guards):
2674 2674 if guard.startswith('+'):
2675 2675 ui.write(guard, label='qguard.positive')
2676 2676 elif guard.startswith('-'):
2677 2677 ui.write(guard, label='qguard.negative')
2678 2678 else:
2679 2679 ui.write(guard, label='qguard.unguarded')
2680 2680 if i != len(guards) - 1:
2681 2681 ui.write(' ')
2682 2682 ui.write('\n')
2683 2683 q = repo.mq
2684 2684 applied = set(p.name for p in q.applied)
2685 2685 patch = None
2686 2686 args = list(args)
2687 2687 if opts.get('list'):
2688 2688 if args or opts.get('none'):
2689 2689 raise util.Abort(_('cannot mix -l/--list with options or '
2690 2690 'arguments'))
2691 2691 for i in xrange(len(q.series)):
2692 2692 status(i)
2693 2693 return
2694 2694 if not args or args[0][0:1] in '-+':
2695 2695 if not q.applied:
2696 2696 raise util.Abort(_('no patches applied'))
2697 2697 patch = q.applied[-1].name
2698 2698 if patch is None and args[0][0:1] not in '-+':
2699 2699 patch = args.pop(0)
2700 2700 if patch is None:
2701 2701 raise util.Abort(_('no patch to work with'))
2702 2702 if args or opts.get('none'):
2703 2703 idx = q.findseries(patch)
2704 2704 if idx is None:
2705 2705 raise util.Abort(_('no patch named %s') % patch)
2706 2706 q.setguards(idx, args)
2707 2707 q.savedirty()
2708 2708 else:
2709 2709 status(q.series.index(q.lookup(patch)))
2710 2710
2711 2711 @command("qheader", [], _('hg qheader [PATCH]'))
2712 2712 def header(ui, repo, patch=None):
2713 2713 """print the header of the topmost or specified patch
2714 2714
2715 2715 Returns 0 on success."""
2716 2716 q = repo.mq
2717 2717
2718 2718 if patch:
2719 2719 patch = q.lookup(patch)
2720 2720 else:
2721 2721 if not q.applied:
2722 2722 ui.write(_('no patches applied\n'))
2723 2723 return 1
2724 2724 patch = q.lookup('qtip')
2725 2725 ph = patchheader(q.join(patch), q.plainmode)
2726 2726
2727 2727 ui.write('\n'.join(ph.message) + '\n')
2728 2728
2729 2729 def lastsavename(path):
2730 2730 (directory, base) = os.path.split(path)
2731 2731 names = os.listdir(directory)
2732 2732 namere = re.compile("%s.([0-9]+)" % base)
2733 2733 maxindex = None
2734 2734 maxname = None
2735 2735 for f in names:
2736 2736 m = namere.match(f)
2737 2737 if m:
2738 2738 index = int(m.group(1))
2739 2739 if maxindex is None or index > maxindex:
2740 2740 maxindex = index
2741 2741 maxname = f
2742 2742 if maxname:
2743 2743 return (os.path.join(directory, maxname), maxindex)
2744 2744 return (None, None)
2745 2745
2746 2746 def savename(path):
2747 2747 (last, index) = lastsavename(path)
2748 2748 if last is None:
2749 2749 index = 0
2750 2750 newpath = path + ".%d" % (index + 1)
2751 2751 return newpath
2752 2752
2753 2753 @command("^qpush",
2754 2754 [('', 'keep-changes', None,
2755 2755 _('tolerate non-conflicting local changes')),
2756 2756 ('f', 'force', None, _('apply on top of local changes')),
2757 2757 ('e', 'exact', None,
2758 2758 _('apply the target patch to its recorded parent')),
2759 2759 ('l', 'list', None, _('list patch name in commit text')),
2760 2760 ('a', 'all', None, _('apply all patches')),
2761 2761 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2762 2762 ('n', 'name', '',
2763 2763 _('merge queue name (DEPRECATED)'), _('NAME')),
2764 2764 ('', 'move', None,
2765 2765 _('reorder patch series and apply only the patch')),
2766 2766 ('', 'no-backup', None, _('do not save backup copies of files'))],
2767 2767 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
2768 2768 def push(ui, repo, patch=None, **opts):
2769 2769 """push the next patch onto the stack
2770 2770
2771 2771 By default, abort if the working directory contains uncommitted
2772 2772 changes. With --keep-changes, abort only if the uncommitted files
2773 2773 overlap with patched files. With -f/--force, backup and patch over
2774 2774 uncommitted changes.
2775 2775
2776 2776 Return 0 on success.
2777 2777 """
2778 2778 q = repo.mq
2779 2779 mergeq = None
2780 2780
2781 2781 opts = fixkeepchangesopts(ui, opts)
2782 2782 if opts.get('merge'):
2783 2783 if opts.get('name'):
2784 2784 newpath = repo.join(opts.get('name'))
2785 2785 else:
2786 2786 newpath, i = lastsavename(q.path)
2787 2787 if not newpath:
2788 2788 ui.warn(_("no saved queues found, please use -n\n"))
2789 2789 return 1
2790 2790 mergeq = queue(ui, repo.baseui, repo.path, newpath)
2791 2791 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2792 2792 ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
2793 2793 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
2794 2794 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
2795 2795 keepchanges=opts.get('keep_changes'))
2796 2796 return ret
2797 2797
2798 2798 @command("^qpop",
2799 2799 [('a', 'all', None, _('pop all patches')),
2800 2800 ('n', 'name', '',
2801 2801 _('queue name to pop (DEPRECATED)'), _('NAME')),
2802 2802 ('', 'keep-changes', None,
2803 2803 _('tolerate non-conflicting local changes')),
2804 2804 ('f', 'force', None, _('forget any local changes to patched files')),
2805 2805 ('', 'no-backup', None, _('do not save backup copies of files'))],
2806 2806 _('hg qpop [-a] [-f] [PATCH | INDEX]'))
2807 2807 def pop(ui, repo, patch=None, **opts):
2808 2808 """pop the current patch off the stack
2809 2809
2810 2810 Without argument, pops off the top of the patch stack. If given a
2811 2811 patch name, keeps popping off patches until the named patch is at
2812 2812 the top of the stack.
2813 2813
2814 2814 By default, abort if the working directory contains uncommitted
2815 2815 changes. With --keep-changes, abort only if the uncommitted files
2816 2816 overlap with patched files. With -f/--force, backup and discard
2817 2817 changes made to such files.
2818 2818
2819 2819 Return 0 on success.
2820 2820 """
2821 2821 opts = fixkeepchangesopts(ui, opts)
2822 2822 localupdate = True
2823 2823 if opts.get('name'):
2824 2824 q = queue(ui, repo.baseui, repo.path, repo.join(opts.get('name')))
2825 2825 ui.warn(_('using patch queue: %s\n') % q.path)
2826 2826 localupdate = False
2827 2827 else:
2828 2828 q = repo.mq
2829 2829 ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
2830 2830 all=opts.get('all'), nobackup=opts.get('no_backup'),
2831 2831 keepchanges=opts.get('keep_changes'))
2832 2832 q.savedirty()
2833 2833 return ret
2834 2834
2835 2835 @command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
2836 2836 def rename(ui, repo, patch, name=None, **opts):
2837 2837 """rename a patch
2838 2838
2839 2839 With one argument, renames the current patch to PATCH1.
2840 2840 With two arguments, renames PATCH1 to PATCH2.
2841 2841
2842 2842 Returns 0 on success."""
2843 2843 q = repo.mq
2844 2844 if not name:
2845 2845 name = patch
2846 2846 patch = None
2847 2847
2848 2848 if patch:
2849 2849 patch = q.lookup(patch)
2850 2850 else:
2851 2851 if not q.applied:
2852 2852 ui.write(_('no patches applied\n'))
2853 2853 return
2854 2854 patch = q.lookup('qtip')
2855 2855 absdest = q.join(name)
2856 2856 if os.path.isdir(absdest):
2857 2857 name = normname(os.path.join(name, os.path.basename(patch)))
2858 2858 absdest = q.join(name)
2859 2859 q.checkpatchname(name)
2860 2860
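# rewrite the series entry (preserving any guards), update the applied
# status if the patch is currently applied, then move the patch file and
# record the rename in the versioned patch repository, if there is one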
2861 2861 ui.note(_('renaming %s to %s\n') % (patch, name))
2862 2862 i = q.findseries(patch)
2863 2863 guards = q.guard_re.findall(q.fullseries[i])
2864 2864 q.fullseries[i] = name + ''.join([' #' + g for g in guards])
2865 2865 q.parseseries()
2866 2866 q.seriesdirty = True
2867 2867
2868 2868 info = q.isapplied(patch)
2869 2869 if info:
2870 2870 q.applied[info[0]] = statusentry(info[1], name)
2871 2871 q.applieddirty = True
2872 2872
2873 2873 destdir = os.path.dirname(absdest)
2874 2874 if not os.path.isdir(destdir):
2875 2875 os.makedirs(destdir)
2876 2876 util.rename(q.join(patch), absdest)
2877 2877 r = q.qrepo()
2878 2878 if r and patch in r.dirstate:
2879 2879 wctx = r[None]
2880 2880 wlock = r.wlock()
2881 2881 try:
2882 2882 if r.dirstate[patch] == 'a':
2883 2883 r.dirstate.drop(patch)
2884 2884 r.dirstate.add(name)
2885 2885 else:
2886 2886 wctx.copy(patch, name)
2887 2887 wctx.forget([patch])
2888 2888 finally:
2889 2889 wlock.release()
2890 2890
2891 2891 q.savedirty()
2892 2892
2893 2893 @command("qrestore",
2894 2894 [('d', 'delete', None, _('delete save entry')),
2895 2895 ('u', 'update', None, _('update queue working directory'))],
2896 2896 _('hg qrestore [-d] [-u] REV'))
2897 2897 def restore(ui, repo, rev, **opts):
2898 2898 """restore the queue state saved by a revision (DEPRECATED)
2899 2899
2900 2900 This command is deprecated, use :hg:`rebase` instead."""
2901 2901 rev = repo.lookup(rev)
2902 2902 q = repo.mq
2903 2903 q.restore(repo, rev, delete=opts.get('delete'),
2904 2904 qupdate=opts.get('update'))
2905 2905 q.savedirty()
2906 2906 return 0
2907 2907
2908 2908 @command("qsave",
2909 2909 [('c', 'copy', None, _('copy patch directory')),
2910 2910 ('n', 'name', '',
2911 2911 _('copy directory name'), _('NAME')),
2912 2912 ('e', 'empty', None, _('clear queue status file')),
2913 2913 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2914 2914 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
2915 2915 def save(ui, repo, **opts):
2916 2916 """save current queue state (DEPRECATED)
2917 2917
2918 2918 This command is deprecated, use :hg:`rebase` instead."""
2919 2919 q = repo.mq
2920 2920 message = cmdutil.logmessage(ui, opts)
2921 2921 ret = q.save(repo, msg=message)
2922 2922 if ret:
2923 2923 return ret
2924 2924 q.savedirty() # save to .hg/patches before copying
2925 2925 if opts.get('copy'):
2926 2926 path = q.path
2927 2927 if opts.get('name'):
2928 2928 newpath = os.path.join(q.basepath, opts.get('name'))
2929 2929 if os.path.exists(newpath):
2930 2930 if not os.path.isdir(newpath):
2931 2931 raise util.Abort(_('destination %s exists and is not '
2932 2932 'a directory') % newpath)
2933 2933 if not opts.get('force'):
2934 2934 raise util.Abort(_('destination %s exists, '
2935 2935 'use -f to force') % newpath)
2936 2936 else:
2937 2937 newpath = savename(path)
2938 2938 ui.warn(_("copy %s to %s\n") % (path, newpath))
2939 2939 util.copyfiles(path, newpath)
2940 2940 if opts.get('empty'):
2941 2941 del q.applied[:]
2942 2942 q.applieddirty = True
2943 2943 q.savedirty()
2944 2944 return 0
2945 2945
2946 2946
2947 2947 @command("qselect",
2948 2948 [('n', 'none', None, _('disable all guards')),
2949 2949 ('s', 'series', None, _('list all guards in series file')),
2950 2950 ('', 'pop', None, _('pop to before first guarded applied patch')),
2951 2951 ('', 'reapply', None, _('pop, then reapply patches'))],
2952 2952 _('hg qselect [OPTION]... [GUARD]...'))
2953 2953 def select(ui, repo, *args, **opts):
2954 2954 '''set or print guarded patches to push
2955 2955
2956 2956 Use the :hg:`qguard` command to set or print guards on a patch, then use
2957 2957 qselect to tell mq which guards to use. A patch will be pushed if
2958 2958 it has no guards or any positive guards match the currently
2959 2959 selected guard, but will not be pushed if any negative guards
2960 2960 match the current guard. For example::
2961 2961
2962 2962 qguard foo.patch -- -stable (negative guard)
2963 2963 qguard bar.patch +stable (positive guard)
2964 2964 qselect stable
2965 2965
2966 2966 This activates the "stable" guard. mq will skip foo.patch (because
2967 2967 it has a negative match) but push bar.patch (because it has a
2968 2968 positive match).
2969 2969
2970 2970 With no arguments, prints the currently active guards.
2971 2971 With one argument, sets the active guard.
2972 2972
2973 2973 Use -n/--none to deactivate guards (no other arguments needed).
2974 2974 When no guards are active, patches with positive guards are
2975 2975 skipped and patches with negative guards are pushed.
2976 2976
2977 2977 qselect can change the guards on applied patches. It does not pop
2978 2978 guarded patches by default. Use --pop to pop back to the last
2979 2979 applied patch that is not guarded. Use --reapply (which implies
2980 2980 --pop) to push back to the current patch afterwards, but skip
2981 2981 guarded patches.
2982 2982
2983 2983 Use -s/--series to print a list of all guards in the series file
2984 2984 (no other arguments needed). Use -v for more information.
2985 2985
2986 2986 Returns 0 on success.'''
2987 2987
2988 2988 q = repo.mq
2989 2989 guards = q.active()
2990 2990 if args or opts.get('none'):
2991 2991 old_unapplied = q.unapplied(repo)
2992 2992 old_guarded = [i for i in xrange(len(q.applied)) if
2993 2993 not q.pushable(i)[0]]
2994 2994 q.setactive(args)
2995 2995 q.savedirty()
2996 2996 if not args:
2997 2997 ui.status(_('guards deactivated\n'))
2998 2998 if not opts.get('pop') and not opts.get('reapply'):
2999 2999 unapplied = q.unapplied(repo)
3000 3000 guarded = [i for i in xrange(len(q.applied))
3001 3001 if not q.pushable(i)[0]]
3002 3002 if len(unapplied) != len(old_unapplied):
3003 3003 ui.status(_('number of unguarded, unapplied patches has '
3004 3004 'changed from %d to %d\n') %
3005 3005 (len(old_unapplied), len(unapplied)))
3006 3006 if len(guarded) != len(old_guarded):
3007 3007 ui.status(_('number of guarded, applied patches has changed '
3008 3008 'from %d to %d\n') %
3009 3009 (len(old_guarded), len(guarded)))
3010 3010 elif opts.get('series'):
3011 3011 guards = {}
3012 3012 noguards = 0
3013 3013 for gs in q.seriesguards:
3014 3014 if not gs:
3015 3015 noguards += 1
3016 3016 for g in gs:
3017 3017 guards.setdefault(g, 0)
3018 3018 guards[g] += 1
3019 3019 if ui.verbose:
3020 3020 guards['NONE'] = noguards
3021 3021 guards = guards.items()
3022 3022 guards.sort(key=lambda x: x[0][1:])
3023 3023 if guards:
3024 3024 ui.note(_('guards in series file:\n'))
3025 3025 for guard, count in guards:
3026 3026 ui.note('%2d ' % count)
3027 3027 ui.write(guard, '\n')
3028 3028 else:
3029 3029 ui.note(_('no guards in series file\n'))
3030 3030 else:
3031 3031 if guards:
3032 3032 ui.note(_('active guards:\n'))
3033 3033 for g in guards:
3034 3034 ui.write(g, '\n')
3035 3035 else:
3036 3036 ui.write(_('no active guards\n'))
3037 3037 reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
3038 3038 popped = False
3039 3039 if opts.get('pop') or opts.get('reapply'):
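# pop down to just below the first applied patch that is now guarded;
# with --reapply, push back up to the previously applied top afterwards,
# skipping guarded patches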
3040 3040 for i in xrange(len(q.applied)):
3041 3041 pushable, reason = q.pushable(i)
3042 3042 if not pushable:
3043 3043 ui.status(_('popping guarded patches\n'))
3044 3044 popped = True
3045 3045 if i == 0:
3046 3046 q.pop(repo, all=True)
3047 3047 else:
3048 3048 q.pop(repo, str(i - 1))
3049 3049 break
3050 3050 if popped:
3051 3051 try:
3052 3052 if reapply:
3053 3053 ui.status(_('reapplying unguarded patches\n'))
3054 3054 q.push(repo, reapply)
3055 3055 finally:
3056 3056 q.savedirty()
3057 3057
3058 3058 @command("qfinish",
3059 3059 [('a', 'applied', None, _('finish all applied changesets'))],
3060 3060 _('hg qfinish [-a] [REV]...'))
3061 3061 def finish(ui, repo, *revrange, **opts):
3062 3062 """move applied patches into repository history
3063 3063
3064 3064 Finishes the specified revisions (corresponding to applied
3065 3065 patches) by moving them out of mq control into regular repository
3066 3066 history.
3067 3067
3068 3068 Accepts a revision range or the -a/--applied option. If --applied
3069 3069 is specified, all applied mq revisions are removed from mq
3070 3070 control. Otherwise, the given revisions must be at the base of the
3071 3071 stack of applied patches.
3072 3072
3073 3073 This can be especially useful if your changes have been applied to
3074 3074 an upstream repository, or if you are about to push your changes
3075 3075 to upstream.
3076 3076
3077 3077 Returns 0 on success.
3078 3078 """
3079 3079 if not opts.get('applied') and not revrange:
3080 3080 raise util.Abort(_('no revisions specified'))
3081 3081 elif opts.get('applied'):
3082 3082 revrange = ('qbase::qtip',) + revrange
3083 3083
3084 3084 q = repo.mq
3085 3085 if not q.applied:
3086 3086 ui.status(_('no patches applied\n'))
3087 3087 return 0
3088 3088
3089 3089 revs = scmutil.revrange(repo, revrange)
3090 3090 if repo['.'].rev() in revs and repo[None].files():
3091 3091 ui.warn(_('warning: uncommitted changes in the working directory\n'))
3092 3092 # queue.finish may change phases but leaves the responsibility of locking
3093 3093 # the repo to the caller to avoid a deadlock with wlock. This command code
3094 3094 # is responsible for taking that lock.
3095 3095 lock = repo.lock()
3096 3096 try:
3097 3097 q.finish(repo, revs)
3098 3098 q.savedirty()
3099 3099 finally:
3100 3100 lock.release()
3101 3101 return 0
3102 3102
3103 3103 @command("qqueue",
3104 3104 [('l', 'list', False, _('list all available queues')),
3105 3105 ('', 'active', False, _('print name of active queue')),
3106 3106 ('c', 'create', False, _('create new queue')),
3107 3107 ('', 'rename', False, _('rename active queue')),
3108 3108 ('', 'delete', False, _('delete reference to queue')),
3109 3109 ('', 'purge', False, _('delete queue, and remove patch dir')),
3110 3110 ],
3111 3111 _('[OPTION] [QUEUE]'))
3112 3112 def qqueue(ui, repo, name=None, **opts):
3113 3113 '''manage multiple patch queues
3114 3114
3115 3115 Supports switching between different patch queues, as well as creating
3116 3116 new patch queues and deleting existing ones.
3117 3117
3118 3118 Omitting a queue name or specifying -l/--list will show you the registered
3119 3119 queues - by default the "normal" patches queue is registered. The currently
3120 3120 active queue will be marked with "(active)". Specifying --active will print
3121 3121 only the name of the active queue.
3122 3122
3123 3123 To create a new queue, use -c/--create. The queue is automatically made
3124 3124 active, unless there are applied patches from the currently active queue
3125 3125 in the repository; in that case the new queue is only created and
3126 3126 switching to it fails.
3127 3127
3128 3128 To delete an existing queue, use --delete. You cannot delete the currently
3129 3129 active queue.
3130 3130
3131 3131 Returns 0 on success.
3132 3132 '''
3133 3133 q = repo.mq
3134 3134 _defaultqueue = 'patches'
3135 3135 _allqueues = 'patches.queues'
3136 3136 _activequeue = 'patches.queue'
3137 3137
3138 3138 def _getcurrent():
3139 3139 cur = os.path.basename(q.path)
3140 3140 if cur.startswith('patches-'):
3141 3141 cur = cur[8:]
3142 3142 return cur
3143 3143
3144 3144 def _noqueues():
3145 3145 try:
3146 3146 fh = repo.opener(_allqueues, 'r')
3147 3147 fh.close()
3148 3148 except IOError:
3149 3149 return True
3150 3150
3151 3151 return False
3152 3152
3153 3153 def _getqueues():
3154 3154 current = _getcurrent()
3155 3155
3156 3156 try:
3157 3157 fh = repo.opener(_allqueues, 'r')
3158 3158 queues = [queue.strip() for queue in fh if queue.strip()]
3159 3159 fh.close()
3160 3160 if current not in queues:
3161 3161 queues.append(current)
3162 3162 except IOError:
3163 3163 queues = [_defaultqueue]
3164 3164
3165 3165 return sorted(queues)
3166 3166
3167 3167 def _setactive(name):
3168 3168 if q.applied:
3169 3169 raise util.Abort(_('new queue created, but cannot make active '
3170 3170 'as patches are applied'))
3171 3171 _setactivenocheck(name)
3172 3172
3173 3173 def _setactivenocheck(name):
3174 3174 fh = repo.opener(_activequeue, 'w')
3175 3175 if name != 'patches':
3176 3176 fh.write(name)
3177 3177 fh.close()
3178 3178
3179 3179 def _addqueue(name):
3180 3180 fh = repo.opener(_allqueues, 'a')
3181 3181 fh.write('%s\n' % (name,))
3182 3182 fh.close()
3183 3183
3184 3184 def _queuedir(name):
3185 3185 if name == 'patches':
3186 3186 return repo.join('patches')
3187 3187 else:
3188 3188 return repo.join('patches-' + name)
3189 3189
3190 3190 def _validname(name):
3191 3191 for n in name:
3192 3192 if n in ':\\/.':
3193 3193 return False
3194 3194 return True
3195 3195
3196 3196 def _delete(name):
3197 3197 if name not in existing:
3198 3198 raise util.Abort(_('cannot delete queue that does not exist'))
3199 3199
3200 3200 current = _getcurrent()
3201 3201
3202 3202 if name == current:
3203 3203 raise util.Abort(_('cannot delete currently active queue'))
3204 3204
3205 3205 fh = repo.opener('patches.queues.new', 'w')
3206 3206 for queue in existing:
3207 3207 if queue == name:
3208 3208 continue
3209 3209 fh.write('%s\n' % (queue,))
3210 3210 fh.close()
3211 3211 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3212 3212
3213 3213 if not name or opts.get('list') or opts.get('active'):
3214 3214 current = _getcurrent()
3215 3215 if opts.get('active'):
3216 3216 ui.write('%s\n' % (current,))
3217 3217 return
3218 3218 for queue in _getqueues():
3219 3219 ui.write('%s' % (queue,))
3220 3220 if queue == current and not ui.quiet:
3221 3221 ui.write(_(' (active)\n'))
3222 3222 else:
3223 3223 ui.write('\n')
3224 3224 return
3225 3225
3226 3226 if not _validname(name):
3227 3227 raise util.Abort(
3228 3228 _('invalid queue name, may not contain the characters ":\\/."'))
3229 3229
3230 3230 existing = _getqueues()
3231 3231
3232 3232 if opts.get('create'):
3233 3233 if name in existing:
3234 3234 raise util.Abort(_('queue "%s" already exists') % name)
3235 3235 if _noqueues():
3236 3236 _addqueue(_defaultqueue)
3237 3237 _addqueue(name)
3238 3238 _setactive(name)
3239 3239 elif opts.get('rename'):
3240 3240 current = _getcurrent()
3241 3241 if name == current:
3242 3242 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
3243 3243 if name in existing:
3244 3244 raise util.Abort(_('queue "%s" already exists') % name)
3245 3245
3246 3246 olddir = _queuedir(current)
3247 3247 newdir = _queuedir(name)
3248 3248
3249 3249 if os.path.exists(newdir):
3250 3250 raise util.Abort(_('non-queue directory "%s" already exists') %
3251 3251 newdir)
3252 3252
3253 3253 fh = repo.opener('patches.queues.new', 'w')
3254 3254 for queue in existing:
3255 3255 if queue == current:
3256 3256 fh.write('%s\n' % (name,))
3257 3257 if os.path.exists(olddir):
3258 3258 util.rename(olddir, newdir)
3259 3259 else:
3260 3260 fh.write('%s\n' % (queue,))
3261 3261 fh.close()
3262 3262 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3263 3263 _setactivenocheck(name)
3264 3264 elif opts.get('delete'):
3265 3265 _delete(name)
3266 3266 elif opts.get('purge'):
3267 3267 if name in existing:
3268 3268 _delete(name)
3269 3269 qdir = _queuedir(name)
3270 3270 if os.path.exists(qdir):
3271 3271 shutil.rmtree(qdir)
3272 3272 else:
3273 3273 if name not in existing:
3274 3274 raise util.Abort(_('use --create to create a new queue'))
3275 3275 _setactive(name)
3276 3276
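As a quick orientation (not part of the diff): the helpers above reduce to three pieces of on-disk state under .hg/. A hedged sketch, assuming repo is an open local repository and "foo" is a hypothetical queue name:

    queues_file = repo.join('patches.queues')   # registry: one queue name per line
    active_file = repo.join('patches.queue')    # active queue name; empty means "patches"
    patchdir    = repo.join('patches-foo')      # patch directory for the queue "foo"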
3277 3277 def mqphasedefaults(repo, roots):
3278 3278 """callback used to set mq changeset as secret when no phase data exists"""
3279 3279 if repo.mq.applied:
3280 3280 if repo.ui.configbool('mq', 'secret', False):
3281 3281 mqphase = phases.secret
3282 3282 else:
3283 3283 mqphase = phases.draft
3284 3284 qbase = repo[repo.mq.applied[0].node]
3285 3285 roots[mqphase].add(qbase.node())
3286 3286 return roots
3287 3287
3288 3288 def reposetup(ui, repo):
3289 3289 class mqrepo(repo.__class__):
3290 3290 @localrepo.unfilteredpropertycache
3291 3291 def mq(self):
3292 3292 return queue(self.ui, self.baseui, self.path)
3293 3293
3294 3294 def invalidateall(self):
3295 3295 super(mqrepo, self).invalidateall()
3296 3296 if localrepo.hasunfilteredcache(self, 'mq'):
3297 3297 # recreate mq in case queue path was changed
3298 3298 delattr(self.unfiltered(), 'mq')
3299 3299
3300 3300 def abortifwdirpatched(self, errmsg, force=False):
3301 3301 if self.mq.applied and self.mq.checkapplied and not force:
3302 3302 parents = self.dirstate.parents()
3303 3303 patches = [s.node for s in self.mq.applied]
3304 3304 if parents[0] in patches or parents[1] in patches:
3305 3305 raise util.Abort(errmsg)
3306 3306
3307 3307 def commit(self, text="", user=None, date=None, match=None,
3308 3308 force=False, editor=False, extra={}):
3309 3309 self.abortifwdirpatched(
3310 3310 _('cannot commit over an applied mq patch'),
3311 3311 force)
3312 3312
3313 3313 return super(mqrepo, self).commit(text, user, date, match, force,
3314 3314 editor, extra)
3315 3315
3316 3316 def checkpush(self, pushop):
3317 3317 if self.mq.applied and self.mq.checkapplied and not pushop.force:
3318 3318 outapplied = [e.node for e in self.mq.applied]
3319 3319 if pushop.revs:
3320 3320 # Assume applied patches have no non-patch descendants and
3321 3321 # are not on the remote already. Filter out any changeset not
3322 3322 # being pushed.
3323 3323 heads = set(pushop.revs)
3324 3324 for node in reversed(outapplied):
3325 3325 if node in heads:
3326 3326 break
3327 3327 else:
3328 3328 outapplied.pop()
3329 3329 # looking for pushed and shared changeset
3330 3330 for node in outapplied:
3331 3331 if self[node].phase() < phases.secret:
3332 3332 raise util.Abort(_('source has mq patches applied'))
3333 3333 # no non-secret patches pushed
3334 3334 super(mqrepo, self).checkpush(pushop)
3335 3335
3336 3336 def _findtags(self):
3337 3337 '''augment tags from base class with patch tags'''
3338 3338 result = super(mqrepo, self)._findtags()
3339 3339
3340 3340 q = self.mq
3341 3341 if not q.applied:
3342 3342 return result
3343 3343
3344 3344 mqtags = [(patch.node, patch.name) for patch in q.applied]
3345 3345
3346 3346 try:
3347 3347 # for now ignore filtering business
3348 3348 self.unfiltered().changelog.rev(mqtags[-1][0])
3349 3349 except error.LookupError:
3350 3350 self.ui.warn(_('mq status file refers to unknown node %s\n')
3351 3351 % short(mqtags[-1][0]))
3352 3352 return result
3353 3353
3354 3354 # do not add fake tags for filtered revisions
3355 3355 included = self.changelog.hasnode
3356 3356 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
3357 3357 if not mqtags:
3358 3358 return result
3359 3359
3360 3360 mqtags.append((mqtags[-1][0], 'qtip'))
3361 3361 mqtags.append((mqtags[0][0], 'qbase'))
3362 3362 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
3363 3363 tags = result[0]
3364 3364 for patch in mqtags:
3365 3365 if patch[1] in tags:
3366 3366 self.ui.warn(_('tag %s overrides mq patch of the same '
3367 3367 'name\n') % patch[1])
3368 3368 else:
3369 3369 tags[patch[1]] = patch[0]
3370 3370
3371 3371 return result
3372 3372
3373 3373 if repo.local():
3374 3374 repo.__class__ = mqrepo
3375 3375
3376 3376 repo._phasedefaults.append(mqphasedefaults)
3377 3377
3378 3378 def mqimport(orig, ui, repo, *args, **kwargs):
3379 3379 if (util.safehasattr(repo, 'abortifwdirpatched')
3380 3380 and not kwargs.get('no_commit', False)):
3381 3381 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3382 3382 kwargs.get('force'))
3383 3383 return orig(ui, repo, *args, **kwargs)
3384 3384
3385 3385 def mqinit(orig, ui, *args, **kwargs):
3386 3386 mq = kwargs.pop('mq', None)
3387 3387
3388 3388 if not mq:
3389 3389 return orig(ui, *args, **kwargs)
3390 3390
3391 3391 if args:
3392 3392 repopath = args[0]
3393 3393 if not hg.islocal(repopath):
3394 3394 raise util.Abort(_('only a local queue repository '
3395 3395 'may be initialized'))
3396 3396 else:
3397 3397 repopath = cmdutil.findrepo(os.getcwd())
3398 3398 if not repopath:
3399 3399 raise util.Abort(_('there is no Mercurial repository here '
3400 3400 '(.hg not found)'))
3401 3401 repo = hg.repository(ui, repopath)
3402 3402 return qinit(ui, repo, True)
3403 3403
3404 3404 def mqcommand(orig, ui, repo, *args, **kwargs):
3405 3405 """Add --mq option to operate on patch repository instead of main"""
3406 3406
3407 3407 # some commands do not like getting unknown options
3408 3408 mq = kwargs.pop('mq', None)
3409 3409
3410 3410 if not mq:
3411 3411 return orig(ui, repo, *args, **kwargs)
3412 3412
3413 3413 q = repo.mq
3414 3414 r = q.qrepo()
3415 3415 if not r:
3416 3416 raise util.Abort(_('no queue repository'))
3417 3417 return orig(r.ui, r, *args, **kwargs)
3418 3418
3419 3419 def summaryhook(ui, repo):
3420 3420 q = repo.mq
3421 3421 m = []
3422 3422 a, u = len(q.applied), len(q.unapplied(repo))
3423 3423 if a:
3424 3424 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3425 3425 if u:
3426 3426 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3427 3427 if m:
3428 3428 # i18n: column positioning for "hg summary"
3429 3429 ui.write(_("mq: %s\n") % ', '.join(m))
3430 3430 else:
3431 3431 # i18n: column positioning for "hg summary"
3432 3432 ui.note(_("mq: (empty queue)\n"))
3433 3433
3434 3434 def revsetmq(repo, subset, x):
3435 3435 """``mq()``
3436 3436 Changesets managed by MQ.
3437 3437 """
3438 3438 revset.getargs(x, 0, 0, _("mq takes no arguments"))
3439 3439 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3440 3440 return revset.baseset([r for r in subset if r in applied])
3441 3441
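Once extsetup() below registers the symbol, the predicate combines with any other revset. A minimal hedged sketch, assuming repo is a repository object:

    secretmq = repo.revs('mq() and secret()')   # applied mq changesets still in the secret phase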
3442 3442 # tell hggettext to extract docstrings from these functions:
3443 3443 i18nfunctions = [revsetmq]
3444 3444
3445 3445 def extsetup(ui):
3446 3446 # Ensure mq wrappers are called first, regardless of extension load order by
3447 3447 # NOT wrapping in uisetup() and instead deferring to init stage two here.
3448 3448 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3449 3449
3450 3450 extensions.wrapcommand(commands.table, 'import', mqimport)
3451 3451 cmdutil.summaryhooks.add('mq', summaryhook)
3452 3452
3453 3453 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3454 3454 entry[1].extend(mqopt)
3455 3455
3456 3456 nowrap = set(commands.norepo.split(" "))
3457 3457
3458 3458 def dotable(cmdtable):
3459 3459 for cmd in cmdtable.keys():
3460 3460 cmd = cmdutil.parsealiases(cmd)[0]
3461 3461 if cmd in nowrap:
3462 3462 continue
3463 3463 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3464 3464 entry[1].extend(mqopt)
3465 3465
3466 3466 dotable(commands.table)
3467 3467
3468 3468 for extname, extmodule in extensions.extensions():
3469 3469 if extmodule.__file__ != __file__:
3470 3470 dotable(getattr(extmodule, 'cmdtable', {}))
3471 3471
3472 3472 revset.symbols['mq'] = revsetmq
3473 3473
3474 3474 colortable = {'qguard.negative': 'red',
3475 3475 'qguard.positive': 'yellow',
3476 3476 'qguard.unguarded': 'green',
3477 3477 'qseries.applied': 'blue bold underline',
3478 3478 'qseries.guarded': 'black bold',
3479 3479 'qseries.missing': 'red bold',
3480 3480 'qseries.unapplied': 'black bold'}
@@ -1,756 +1,756 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import weakref
9 9 from i18n import _
10 10 from node import nullrev, nullid, hex, short
11 11 import mdiff, util, dagutil
12 12 import struct, os, bz2, zlib, tempfile
13 13 import discovery, error, phases, branchmap
14 14
15 15 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
16 16
17 17 def readexactly(stream, n):
18 18 '''read n bytes from stream.read and abort if less was available'''
19 19 s = stream.read(n)
20 20 if len(s) < n:
21 21 raise util.Abort(_("stream ended unexpectedly"
22 22 " (got %d bytes, expected %d)")
23 23 % (len(s), n))
24 24 return s
25 25
26 26 def getchunk(stream):
27 27 """return the next chunk from stream as a string"""
28 28 d = readexactly(stream, 4)
29 29 l = struct.unpack(">l", d)[0]
30 30 if l <= 4:
31 31 if l:
32 32 raise util.Abort(_("invalid chunk length %d") % l)
33 33 return ""
34 34 return readexactly(stream, l - 4)
35 35
36 36 def chunkheader(length):
37 37 """return a changegroup chunk header (string)"""
38 38 return struct.pack(">l", length + 4)
39 39
40 40 def closechunk():
41 41 """return a changegroup chunk header (string) for a zero-length chunk"""
42 42 return struct.pack(">l", 0)
43 43
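The three helpers above define the wire framing of a chunk: a big-endian 4-byte length that counts its own header, with a zero length closing a group. A standalone sketch of the round trip (struct is already imported at the top of this module):

    payload = "hello"
    framed = chunkheader(len(payload)) + payload + closechunk()
    length = struct.unpack(">l", framed[:4])[0]                    # includes the 4 header bytes
    assert framed[4:length] == payload                             # what getchunk() returns
    assert struct.unpack(">l", framed[length:length + 4])[0] == 0  # end of the group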
44 44 class nocompress(object):
45 45 def compress(self, x):
46 46 return x
47 47 def flush(self):
48 48 return ""
49 49
50 50 bundletypes = {
51 51 "": ("", nocompress), # only when using unbundle on ssh and old http servers
52 52 # since the unification, ssh accepts a header but there
53 53 # is no capability signaling it.
54 54 "HG10UN": ("HG10UN", nocompress),
55 55 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
56 56 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
57 57 }
58 58
59 59 # hgweb uses this list to communicate its preferred type
60 60 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
61 61
62 62 def writebundle(cg, filename, bundletype, vfs=None):
63 63 """Write a bundle file and return its filename.
64 64
65 65 Existing files will not be overwritten.
66 66 If no filename is specified, a temporary file is created.
67 67 bz2 compression can be turned off.
68 68 The bundle file will be deleted in case of errors.
69 69 """
70 70
71 71 fh = None
72 72 cleanup = None
73 73 try:
74 74 if filename:
75 75 if vfs:
76 76 fh = vfs.open(filename, "wb")
77 77 else:
78 78 fh = open(filename, "wb")
79 79 else:
80 80 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
81 81 fh = os.fdopen(fd, "wb")
82 82 cleanup = filename
83 83
84 84 header, compressor = bundletypes[bundletype]
85 85 fh.write(header)
86 86 z = compressor()
87 87
88 88 # parse the changegroup data, otherwise we will block
89 89 # in case of sshrepo because we don't know the end of the stream
90 90
91 91 # an empty chunkgroup is the end of the changegroup
92 92 # a changegroup has at least 2 chunkgroups (changelog and manifest).
93 93 # after that, an empty chunkgroup is the end of the changegroup
94 94 for chunk in cg.getchunks():
95 95 fh.write(z.compress(chunk))
96 96 fh.write(z.flush())
97 97 cleanup = None
98 98 return filename
99 99 finally:
100 100 if fh is not None:
101 101 fh.close()
102 102 if cleanup is not None:
103 103 if filename and vfs:
104 104 vfs.unlink(cleanup)
105 105 else:
106 106 os.unlink(cleanup)
107 107
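A hedged usage sketch of writebundle() (repo and the file name are assumptions): bundling every changeset of a local repository into a bzip2-compressed file with the entry points of this module:

    cg = changegroup(repo, [nullid], 'bundle')          # changegroup() is defined further down
    bundlefile = writebundle(cg, 'backup.hg', 'HG10BZ')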
108 108 def decompressor(fh, alg):
109 109 if alg == 'UN':
110 110 return fh
111 111 elif alg == 'GZ':
112 112 def generator(f):
113 113 zd = zlib.decompressobj()
114 114 for chunk in util.filechunkiter(f):
115 115 yield zd.decompress(chunk)
116 116 elif alg == 'BZ':
117 117 def generator(f):
118 118 zd = bz2.BZ2Decompressor()
119 119 zd.decompress("BZ")
120 120 for chunk in util.filechunkiter(f, 4096):
121 121 yield zd.decompress(chunk)
122 122 else:
123 123 raise util.Abort("unknown bundle compression '%s'" % alg)
124 124 return util.chunkbuffer(generator(fh))
125 125
126 126 class unbundle10(object):
127 127 deltaheader = _BUNDLE10_DELTA_HEADER
128 128 deltaheadersize = struct.calcsize(deltaheader)
129 129 def __init__(self, fh, alg):
130 130 self._stream = decompressor(fh, alg)
131 131 self._type = alg
132 132 self.callback = None
133 133 def compressed(self):
134 134 return self._type != 'UN'
135 135 def read(self, l):
136 136 return self._stream.read(l)
137 137 def seek(self, pos):
138 138 return self._stream.seek(pos)
139 139 def tell(self):
140 140 return self._stream.tell()
141 141 def close(self):
142 142 return self._stream.close()
143 143
144 144 def chunklength(self):
145 145 d = readexactly(self._stream, 4)
146 146 l = struct.unpack(">l", d)[0]
147 147 if l <= 4:
148 148 if l:
149 149 raise util.Abort(_("invalid chunk length %d") % l)
150 150 return 0
151 151 if self.callback:
152 152 self.callback()
153 153 return l - 4
154 154
155 155 def changelogheader(self):
156 156 """v10 does not have a changelog header chunk"""
157 157 return {}
158 158
159 159 def manifestheader(self):
160 160 """v10 does not have a manifest header chunk"""
161 161 return {}
162 162
163 163 def filelogheader(self):
164 164 """return the header of the filelogs chunk, v10 only has the filename"""
165 165 l = self.chunklength()
166 166 if not l:
167 167 return {}
168 168 fname = readexactly(self._stream, l)
169 169 return {'filename': fname}
170 170
171 171 def _deltaheader(self, headertuple, prevnode):
172 172 node, p1, p2, cs = headertuple
173 173 if prevnode is None:
174 174 deltabase = p1
175 175 else:
176 176 deltabase = prevnode
177 177 return node, p1, p2, deltabase, cs
178 178
179 179 def deltachunk(self, prevnode):
180 180 l = self.chunklength()
181 181 if not l:
182 182 return {}
183 183 headerdata = readexactly(self._stream, self.deltaheadersize)
184 184 header = struct.unpack(self.deltaheader, headerdata)
185 185 delta = readexactly(self._stream, l - self.deltaheadersize)
186 186 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
187 187 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
188 188 'deltabase': deltabase, 'delta': delta}
189 189
190 190 def getchunks(self):
191 191 """returns all the chunks contains in the bundle
192 192
193 193 Used when you need to forward the binary stream to a file or another
194 194 network API. To do so, it parses the changegroup data; otherwise it would
195 195 block in the sshrepo case because it does not know the end of the stream.
196 196 """
197 197 # an empty chunkgroup is the end of the changegroup
198 198 # a changegroup has at least 2 chunkgroups (changelog and manifest).
199 199 # after that, an empty chunkgroup is the end of the changegroup
200 200 empty = False
201 201 count = 0
202 202 while not empty or count <= 2:
203 203 empty = True
204 204 count += 1
205 205 while True:
206 206 chunk = getchunk(self)
207 207 if not chunk:
208 208 break
209 209 empty = False
210 210 yield chunkheader(len(chunk))
211 211 pos = 0
212 212 while pos < len(chunk):
213 213 next = pos + 2**20
214 214 yield chunk[pos:next]
215 215 pos = next
216 216 yield closechunk()
217 217
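The matching read side, as a hedged sketch (the path is an assumption). The first six bytes of the file select the algorithm; 'HG10BZ' bundles store only 'HG10' on disk and the following 'BZ' is the start of the bzip2 magic, which is why decompressor() re-feeds it:

    fh = open('backup.hg', 'rb')
    magic = readexactly(fh, 6)                  # 'HG10UN', 'HG10GZ' or 'HG10BZ'
    cg = unbundle10(fh, magic[4:6])
    rawsize = sum(len(chunk) for chunk in cg.getchunks())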
218 218 class headerlessfixup(object):
219 219 def __init__(self, fh, h):
220 220 self._h = h
221 221 self._fh = fh
222 222 def read(self, n):
223 223 if self._h:
224 224 d, self._h = self._h[:n], self._h[n:]
225 225 if len(d) < n:
226 226 d += readexactly(self._fh, n - len(d))
227 227 return d
228 228 return readexactly(self._fh, n)
229 229
230 230 class bundle10(object):
231 231 deltaheader = _BUNDLE10_DELTA_HEADER
232 232 def __init__(self, repo, bundlecaps=None):
233 233 """Given a source repo, construct a bundler.
234 234
235 235 bundlecaps is optional and can be used to specify the set of
236 236 capabilities which can be used to build the bundle.
237 237 """
238 238 # Set of capabilities we can use to build the bundle.
239 239 if bundlecaps is None:
240 240 bundlecaps = set()
241 241 self._bundlecaps = bundlecaps
242 242 self._changelog = repo.changelog
243 243 self._manifest = repo.manifest
244 244 reorder = repo.ui.config('bundle', 'reorder', 'auto')
245 245 if reorder == 'auto':
246 246 reorder = None
247 247 else:
248 248 reorder = util.parsebool(reorder)
249 249 self._repo = repo
250 250 self._reorder = reorder
251 251 self._progress = repo.ui.progress
252 252 def close(self):
253 253 return closechunk()
254 254
255 255 def fileheader(self, fname):
256 256 return chunkheader(len(fname)) + fname
257 257
258 258 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
259 259 """Calculate a delta group, yielding a sequence of changegroup chunks
260 260 (strings).
261 261
262 262 Given a list of changeset revs, return a set of deltas and
263 263 metadata corresponding to nodes. The first delta is
264 264 first parent(nodelist[0]) -> nodelist[0], the receiver is
265 265 guaranteed to have this parent as it has all history before
266 266 these changesets. In the case the first parent is nullrev, the
267 267 changegroup starts with a full revision.
268 268
269 269 If units is not None, progress detail will be generated; units specifies
270 270 the type of revlog that is touched (changelog, manifest, etc.).
271 271 """
272 272 # if we don't have any revisions touched by these changesets, bail
273 273 if len(nodelist) == 0:
274 274 yield self.close()
275 275 return
276 276
277 277 # for generaldelta revlogs, we linearize the revs; this will both be
278 278 # much quicker and generate a much smaller bundle
279 279 if (revlog._generaldelta and reorder is not False) or reorder:
280 280 dag = dagutil.revlogdag(revlog)
281 281 revs = set(revlog.rev(n) for n in nodelist)
282 282 revs = dag.linearize(revs)
283 283 else:
284 284 revs = sorted([revlog.rev(n) for n in nodelist])
285 285
286 286 # add the parent of the first rev
287 287 p = revlog.parentrevs(revs[0])[0]
288 288 revs.insert(0, p)
289 289
290 290 # build deltas
291 291 total = len(revs) - 1
292 292 msgbundling = _('bundling')
293 293 for r in xrange(len(revs) - 1):
294 294 if units is not None:
295 295 self._progress(msgbundling, r + 1, unit=units, total=total)
296 296 prev, curr = revs[r], revs[r + 1]
297 297 linknode = lookup(revlog.node(curr))
298 298 for c in self.revchunk(revlog, curr, prev, linknode):
299 299 yield c
300 300
301 301 yield self.close()
302 302
303 303 # filter any nodes that claim to be part of the known set
304 304 def prune(self, revlog, missing, commonrevs, source):
305 305 rr, rl = revlog.rev, revlog.linkrev
306 306 return [n for n in missing if rl(rr(n)) not in commonrevs]
307 307
308 308 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
309 309 '''yield a sequence of changegroup chunks (strings)'''
310 310 repo = self._repo
311 311 cl = self._changelog
312 312 mf = self._manifest
313 313 reorder = self._reorder
314 314 progress = self._progress
315 315
316 316 # for progress output
317 317 msgbundling = _('bundling')
318 318
319 319 mfs = {} # needed manifests
320 320 fnodes = {} # needed file nodes
321 321 changedfiles = set()
322 322
323 323 # Callback for the changelog, used to collect changed files and manifest
324 324 # nodes.
325 325 # Returns the linkrev node (identity in the changelog case).
326 326 def lookupcl(x):
327 327 c = cl.read(x)
328 328 changedfiles.update(c[3])
329 329 # record the first changeset introducing this manifest version
330 330 mfs.setdefault(c[0], x)
331 331 return x
332 332
333 333 # Callback for the manifest, used to collect linkrevs for filelog
334 334 # revisions.
335 335 # Returns the linkrev node (collected in lookupcl).
336 336 def lookupmf(x):
337 337 clnode = mfs[x]
338 338 if not fastpathlinkrev:
339 339 mdata = mf.readfast(x)
340 340 for f, n in mdata.iteritems():
341 341 if f in changedfiles:
342 342 # record the first changeset introducing this filelog
343 343 # version
344 344 fnodes[f].setdefault(n, clnode)
345 345 return clnode
346 346
347 347 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
348 348 reorder=reorder):
349 349 yield chunk
350 350 progress(msgbundling, None)
351 351
352 352 for f in changedfiles:
353 353 fnodes[f] = {}
354 354 mfnodes = self.prune(mf, mfs, commonrevs, source)
355 355 for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
356 356 reorder=reorder):
357 357 yield chunk
358 358 progress(msgbundling, None)
359 359
360 360 mfs.clear()
361 361 needed = set(cl.rev(x) for x in clnodes)
362 362
363 363 def linknodes(filerevlog, fname):
364 364 if fastpathlinkrev:
365 365 llr = filerevlog.linkrev
366 366 def genfilenodes():
367 367 for r in filerevlog:
368 368 linkrev = llr(r)
369 369 if linkrev in needed:
370 370 yield filerevlog.node(r), cl.node(linkrev)
371 371 fnodes[fname] = dict(genfilenodes())
372 372 return fnodes.get(fname, {})
373 373
374 374 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
375 375 source):
376 376 yield chunk
377 377
378 378 yield self.close()
379 379 progress(msgbundling, None)
380 380
381 381 if clnodes:
382 382 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
383 383
384 384 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
385 385 repo = self._repo
386 386 progress = self._progress
387 387 reorder = self._reorder
388 388 msgbundling = _('bundling')
389 389
390 390 total = len(changedfiles)
391 391 # for progress output
392 392 msgfiles = _('files')
393 393 for i, fname in enumerate(sorted(changedfiles)):
394 394 filerevlog = repo.file(fname)
395 395 if not filerevlog:
396 396 raise util.Abort(_("empty or missing revlog for %s") % fname)
397 397
398 398 linkrevnodes = linknodes(filerevlog, fname)
399 399 # Lookup helper for filenodes; we collected the linkrev nodes above in the
400 400 # fastpath case and with lookupmf in the slowpath case.
401 401 def lookupfilelog(x):
402 402 return linkrevnodes[x]
403 403
404 404 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
405 405 if filenodes:
406 406 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
407 407 total=total)
408 408 yield self.fileheader(fname)
409 409 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
410 410 reorder=reorder):
411 411 yield chunk
412 412
413 413 def revchunk(self, revlog, rev, prev, linknode):
414 414 node = revlog.node(rev)
415 415 p1, p2 = revlog.parentrevs(rev)
416 416 base = prev
417 417
418 418 prefix = ''
419 419 if base == nullrev:
420 420 delta = revlog.revision(node)
421 421 prefix = mdiff.trivialdiffheader(len(delta))
422 422 else:
423 423 delta = revlog.revdiff(base, rev)
424 424 p1n, p2n = revlog.parents(node)
425 425 basenode = revlog.node(base)
426 426 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
427 427 meta += prefix
428 428 l = len(meta) + len(delta)
429 429 yield chunkheader(l)
430 430 yield meta
431 431 yield delta
432 432 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
433 433 # do nothing with basenode, it is implicitly the previous one in HG10
434 434 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
435 435
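For reference, the receiving side undoes this packing with the same format string (see _deltaheader() above); headerdata stands for the 80 bytes read from the stream and is an assumption here:

    node, p1, p2, cs = struct.unpack(_BUNDLE10_DELTA_HEADER, headerdata)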
436 436 def _changegroupinfo(repo, nodes, source):
437 437 if repo.ui.verbose or source == 'bundle':
438 438 repo.ui.status(_("%d changesets found\n") % len(nodes))
439 439 if repo.ui.debugflag:
440 440 repo.ui.debug("list of changesets:\n")
441 441 for node in nodes:
442 442 repo.ui.debug("%s\n" % hex(node))
443 443
444 444 def getsubset(repo, outgoing, bundler, source, fastpath=False):
445 445 repo = repo.unfiltered()
446 446 commonrevs = outgoing.common
447 447 csets = outgoing.missing
448 448 heads = outgoing.missingheads
449 449 # We go through the fast path if we get told to, or if all (unfiltered)
450 450 # heads have been requested (since we then know all linkrevs will be
451 451 # pulled by the client).
452 452 heads.sort()
453 453 fastpathlinkrev = fastpath or (
454 454 repo.filtername is None and heads == sorted(repo.heads()))
455 455
456 456 repo.hook('preoutgoing', throw=True, source=source)
457 457 _changegroupinfo(repo, csets, source)
458 458 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
459 459 return unbundle10(util.chunkbuffer(gengroup), 'UN')
460 460
461 461 def changegroupsubset(repo, roots, heads, source):
462 462 """Compute a changegroup consisting of all the nodes that are
463 463 descendants of any of the roots and ancestors of any of the heads.
464 464 Return a chunkbuffer object whose read() method will return
465 465 successive changegroup chunks.
466 466
467 467 It is fairly complex as determining which filenodes and which
468 468 manifest nodes need to be included for the changeset to be complete
469 469 is non-trivial.
470 470
471 471 Another wrinkle is doing the reverse, figuring out which changeset in
472 472 the changegroup a particular filenode or manifestnode belongs to.
473 473 """
474 474 cl = repo.changelog
475 475 if not roots:
476 476 roots = [nullid]
477 477 # TODO: remove call to nodesbetween.
478 478 csets, roots, heads = cl.nodesbetween(roots, heads)
479 479 discbases = []
480 480 for n in roots:
481 481 discbases.extend([p for p in cl.parents(n) if p != nullid])
482 482 outgoing = discovery.outgoing(cl, discbases, heads)
483 483 bundler = bundle10(repo)
484 484 return getsubset(repo, outgoing, bundler, source)
485 485
486 486 def getlocalbundle(repo, source, outgoing, bundlecaps=None):
487 487 """Like getbundle, but taking a discovery.outgoing as an argument.
488 488
489 489 This is only implemented for local repos and reuses potentially
490 490 precomputed sets in outgoing."""
491 491 if not outgoing.missing:
492 492 return None
493 493 bundler = bundle10(repo, bundlecaps)
494 494 return getsubset(repo, outgoing, bundler, source)
495 495
496 496 def _computeoutgoing(repo, heads, common):
497 497 """Computes which revs are outgoing given a set of common
498 498 and a set of heads.
499 499
500 500 This is a separate function so extensions can have access to
501 501 the logic.
502 502
503 503 Returns a discovery.outgoing object.
504 504 """
505 505 cl = repo.changelog
506 506 if common:
507 507 hasnode = cl.hasnode
508 508 common = [n for n in common if hasnode(n)]
509 509 else:
510 510 common = [nullid]
511 511 if not heads:
512 512 heads = cl.heads()
513 513 return discovery.outgoing(cl, common, heads)
514 514
515 515 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
516 516 """Like changegroupsubset, but returns the set difference between the
517 517 ancestors of heads and the ancestors common.
518 518
519 519 If heads is None, use the local heads. If common is None, use [nullid].
520 520
521 521 The nodes in common might not all be known locally due to the way the
522 522 current discovery protocol works.
523 523 """
524 524 outgoing = _computeoutgoing(repo, heads, common)
525 525 return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)
526 526
527 527 def changegroup(repo, basenodes, source):
528 528 # to avoid a race we use changegroupsubset() (issue1320)
529 529 return changegroupsubset(repo, basenodes, repo.heads(), source)
530 530
531 531 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
532 532 revisions = 0
533 533 files = 0
534 534 while True:
535 535 chunkdata = source.filelogheader()
536 536 if not chunkdata:
537 537 break
538 538 f = chunkdata["filename"]
539 539 repo.ui.debug("adding %s revisions\n" % f)
540 540 pr()
541 541 fl = repo.file(f)
542 542 o = len(fl)
543 543 if not fl.addgroup(source, revmap, trp):
544 544 raise util.Abort(_("received file revlog group is empty"))
545 545 revisions += len(fl) - o
546 546 files += 1
547 547 if f in needfiles:
548 548 needs = needfiles[f]
549 549 for new in xrange(o, len(fl)):
550 550 n = fl.node(new)
551 551 if n in needs:
552 552 needs.remove(n)
553 553 else:
554 554 raise util.Abort(
555 555 _("received spurious file revlog entry"))
556 556 if not needs:
557 557 del needfiles[f]
558 558 repo.ui.progress(_('files'), None)
559 559
560 560 for f, needs in needfiles.iteritems():
561 561 fl = repo.file(f)
562 562 for n in needs:
563 563 try:
564 564 fl.rev(n)
565 565 except error.LookupError:
566 566 raise util.Abort(
567 567 _('missing file data for %s:%s - run hg verify') %
568 568 (f, hex(n)))
569 569
570 570 return revisions, files
571 571
572 572 def addchangegroup(repo, source, srctype, url, emptyok=False,
573 573 targetphase=phases.draft):
574 574 """Add the changegroup returned by source.read() to this repo.
575 575 srctype is a string like 'push', 'pull', or 'unbundle'. url is
576 576 the URL of the repo where this changegroup is coming from.
577 577
578 578 Return an integer summarizing the change to this repo:
579 579 - nothing changed or no source: 0
580 580 - more heads than before: 1+added heads (2..n)
581 581 - fewer heads than before: -1-removed heads (-2..-n)
582 582 - number of heads stays the same: 1
583 583 """
584 584 repo = repo.unfiltered()
585 585 def csmap(x):
586 586 repo.ui.debug("add changeset %s\n" % short(x))
587 587 return len(cl)
588 588
589 589 def revmap(x):
590 590 return cl.rev(x)
591 591
592 592 if not source:
593 593 return 0
594 594
595 595 repo.hook('prechangegroup', throw=True, source=srctype, url=url)
596 596
597 597 changesets = files = revisions = 0
598 598 efiles = set()
599 599
600 600 # write changelog data to temp files so concurrent readers will not see
601 601 # an inconsistent view
602 602 cl = repo.changelog
603 603 cl.delayupdate()
604 604 oldheads = cl.heads()
605 605
606 606 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
607 607 try:
608 608 trp = weakref.proxy(tr)
609 609 # pull off the changeset group
610 610 repo.ui.status(_("adding changesets\n"))
611 611 clstart = len(cl)
612 612 class prog(object):
613 613 step = _('changesets')
614 614 count = 1
615 615 ui = repo.ui
616 616 total = None
617 617 def __call__(repo):
618 618 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
619 619 total=repo.total)
620 620 repo.count += 1
621 621 pr = prog()
622 622 source.callback = pr
623 623
624 624 source.changelogheader()
625 625 srccontent = cl.addgroup(source, csmap, trp)
626 626 if not (srccontent or emptyok):
627 627 raise util.Abort(_("received changelog group is empty"))
628 628 clend = len(cl)
629 629 changesets = clend - clstart
630 630 for c in xrange(clstart, clend):
631 631 efiles.update(repo[c].files())
632 632 efiles = len(efiles)
633 633 repo.ui.progress(_('changesets'), None)
634 634
635 635 # pull off the manifest group
636 636 repo.ui.status(_("adding manifests\n"))
637 637 pr.step = _('manifests')
638 638 pr.count = 1
639 639 pr.total = changesets # manifests <= changesets
640 640 # no need to check for empty manifest group here:
641 641 # if the result of the merge of 1 and 2 is the same in 3 and 4,
642 642 # no new manifest will be created and the manifest group will
643 643 # be empty during the pull
644 644 source.manifestheader()
645 645 repo.manifest.addgroup(source, revmap, trp)
646 646 repo.ui.progress(_('manifests'), None)
647 647
648 648 needfiles = {}
649 649 if repo.ui.configbool('server', 'validate', default=False):
650 650 # validate incoming csets have their manifests
651 651 for cset in xrange(clstart, clend):
652 652 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
653 653 mfest = repo.manifest.readdelta(mfest)
654 654 # store file nodes we must see
655 655 for f, n in mfest.iteritems():
656 656 needfiles.setdefault(f, set()).add(n)
657 657
658 658 # process the files
659 659 repo.ui.status(_("adding file changes\n"))
660 660 pr.step = _('files')
661 661 pr.count = 1
662 662 pr.total = efiles
663 663 source.callback = None
664 664
665 665 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
666 666 needfiles)
667 667 revisions += newrevs
668 668 files += newfiles
669 669
670 670 dh = 0
671 671 if oldheads:
672 672 heads = cl.heads()
673 673 dh = len(heads) - len(oldheads)
674 674 for h in heads:
675 675 if h not in oldheads and repo[h].closesbranch():
676 676 dh -= 1
677 677 htext = ""
678 678 if dh:
679 679 htext = _(" (%+d heads)") % dh
680 680
681 681 repo.ui.status(_("added %d changesets"
682 682 " with %d changes to %d files%s\n")
683 683 % (changesets, revisions, files, htext))
684 684 repo.invalidatevolatilesets()
685 685
686 686 if changesets > 0:
687 687 p = lambda: cl.writepending() and repo.root or ""
688 688 if 'node' not in tr.hookargs:
689 689 tr.hookargs['node'] = hex(cl.node(clstart))
690 690 repo.hook('pretxnchangegroup', throw=True, source=srctype,
691 691 url=url, pending=p, **tr.hookargs)
692 692
693 693 added = [cl.node(r) for r in xrange(clstart, clend)]
694 694 publishing = repo.ui.configbool('phases', 'publish', True)
695 695 if srctype in ('push', 'serve'):
696 696 # Old servers cannot push the boundary themselves.
697 697 # New servers won't push the boundary if the changeset already
698 698 # exists locally as secret.
699 699 #
700 700 # We should not use `added` here but the list of all changes in
701 701 # the bundle.
702 702 if publishing:
703 phases.advanceboundary(repo, phases.public, srccontent)
703 phases.advanceboundary(repo, tr, phases.public, srccontent)
704 704 else:
705 705 # Those changesets have been pushed from the outside, their
706 706 # phases are going to be pushed alongside. Therefore
707 707 # `targetphase` is ignored.
708 phases.advanceboundary(repo, phases.draft, srccontent)
708 phases.advanceboundary(repo, tr, phases.draft, srccontent)
709 709 phases.retractboundary(repo, phases.draft, added)
710 710 elif srctype != 'strip':
711 711 # publishing only alter behavior during push
712 712 #
713 713 # strip should not touch boundary at all
714 714 phases.retractboundary(repo, targetphase, added)
715 715
716 716 # make changelog see real files again
717 717 cl.finalize(trp)
718 718
719 719 tr.close()
720 720
721 721 if changesets > 0:
722 722 if srctype != 'strip':
723 723 # During strip, the branchcache is invalid but the coming call to
724 724 # `destroyed` will repair it.
725 725 # In other cases we can safely update the cache on disk.
726 726 branchmap.updatecache(repo.filtered('served'))
727 727 def runhooks():
728 728 # These hooks run when the lock releases, not when the
729 729 # transaction closes. So it's possible for the changelog
730 730 # to have changed since we last saw it.
731 731 if clstart >= len(repo):
732 732 return
733 733
734 734 # forcefully update the on-disk branch cache
735 735 repo.ui.debug("updating the branch cache\n")
736 736 repo.hook("changegroup", source=srctype, url=url,
737 737 **tr.hookargs)
738 738
739 739 for n in added:
740 740 repo.hook("incoming", node=hex(n), source=srctype,
741 741 url=url)
742 742
743 743 newheads = [h for h in repo.heads() if h not in oldheads]
744 744 repo.ui.log("incoming",
745 745 "%s incoming changes - new heads: %s\n",
746 746 len(added),
747 747 ', '.join([hex(c[:6]) for c in newheads]))
748 748 repo._afterlock(runhooks)
749 749
750 750 finally:
751 751 tr.release()
752 752 # never return 0 here:
753 753 if dh < 0:
754 754 return dh - 1
755 755 else:
756 756 return dh + 1
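The two calls changed above are the point of this changeset: the phase boundary is now advanced inside the transaction that adds the changesets, so rolling the transaction back also rolls back the phase movement. A hedged sketch of the calling convention (nodes is a placeholder for the changesets being published):

    tr = repo.transaction('addchangegroup')
    try:
        phases.advanceboundary(repo, tr, phases.public, nodes)
        tr.close()
    finally:
        tr.release()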
@@ -1,6061 +1,6061 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, bin, nullid, nullrev, short
9 9 from lock import release
10 10 from i18n import _
11 11 import os, re, difflib, time, tempfile, errno, shlex
12 12 import sys
13 13 import hg, scmutil, util, revlog, copies, error, bookmarks
14 14 import patch, help, encoding, templatekw, discovery
15 15 import archival, changegroup, cmdutil, hbisect
16 16 import sshserver, hgweb, commandserver
17 17 import extensions
18 18 from hgweb import server as hgweb_server
19 19 import merge as mergemod
20 20 import minirst, revset, fileset
21 21 import dagparser, context, simplemerge, graphmod
22 22 import random
23 23 import setdiscovery, treediscovery, dagutil, pvec, localrepo
24 24 import phases, obsolete, exchange
25 25
26 26 table = {}
27 27
28 28 command = cmdutil.command(table)
29 29
30 30 # Space delimited list of commands that don't require local repositories.
31 31 # This should be populated by passing norepo=True into the @command decorator.
32 32 norepo = ''
33 33 # Space delimited list of commands that optionally require local repositories.
34 34 # This should be populated by passing optionalrepo=True into the @command
35 35 # decorator.
36 36 optionalrepo = ''
37 37 # Space delimited list of commands that will examine arguments looking for
38 38 # a repository. This should be populated by passing inferrepo=True into the
39 39 # @command decorator.
40 40 inferrepo = ''
41 41
42 42 # common command options
43 43
44 44 globalopts = [
45 45 ('R', 'repository', '',
46 46 _('repository root directory or name of overlay bundle file'),
47 47 _('REPO')),
48 48 ('', 'cwd', '',
49 49 _('change working directory'), _('DIR')),
50 50 ('y', 'noninteractive', None,
51 51 _('do not prompt, automatically pick the first choice for all prompts')),
52 52 ('q', 'quiet', None, _('suppress output')),
53 53 ('v', 'verbose', None, _('enable additional output')),
54 54 ('', 'config', [],
55 55 _('set/override config option (use \'section.name=value\')'),
56 56 _('CONFIG')),
57 57 ('', 'debug', None, _('enable debugging output')),
58 58 ('', 'debugger', None, _('start debugger')),
59 59 ('', 'encoding', encoding.encoding, _('set the charset encoding'),
60 60 _('ENCODE')),
61 61 ('', 'encodingmode', encoding.encodingmode,
62 62 _('set the charset encoding mode'), _('MODE')),
63 63 ('', 'traceback', None, _('always print a traceback on exception')),
64 64 ('', 'time', None, _('time how long the command takes')),
65 65 ('', 'profile', None, _('print command execution profile')),
66 66 ('', 'version', None, _('output version information and exit')),
67 67 ('h', 'help', None, _('display help and exit')),
68 68 ('', 'hidden', False, _('consider hidden changesets')),
69 69 ]
70 70
71 71 dryrunopts = [('n', 'dry-run', None,
72 72 _('do not perform actions, just print output'))]
73 73
74 74 remoteopts = [
75 75 ('e', 'ssh', '',
76 76 _('specify ssh command to use'), _('CMD')),
77 77 ('', 'remotecmd', '',
78 78 _('specify hg command to run on the remote side'), _('CMD')),
79 79 ('', 'insecure', None,
80 80 _('do not verify server certificate (ignoring web.cacerts config)')),
81 81 ]
82 82
83 83 walkopts = [
84 84 ('I', 'include', [],
85 85 _('include names matching the given patterns'), _('PATTERN')),
86 86 ('X', 'exclude', [],
87 87 _('exclude names matching the given patterns'), _('PATTERN')),
88 88 ]
89 89
90 90 commitopts = [
91 91 ('m', 'message', '',
92 92 _('use text as commit message'), _('TEXT')),
93 93 ('l', 'logfile', '',
94 94 _('read commit message from file'), _('FILE')),
95 95 ]
96 96
97 97 commitopts2 = [
98 98 ('d', 'date', '',
99 99 _('record the specified date as commit date'), _('DATE')),
100 100 ('u', 'user', '',
101 101 _('record the specified user as committer'), _('USER')),
102 102 ]
103 103
104 104 templateopts = [
105 105 ('', 'style', '',
106 106 _('display using template map file (DEPRECATED)'), _('STYLE')),
107 107 ('T', 'template', '',
108 108 _('display with template'), _('TEMPLATE')),
109 109 ]
110 110
111 111 logopts = [
112 112 ('p', 'patch', None, _('show patch')),
113 113 ('g', 'git', None, _('use git extended diff format')),
114 114 ('l', 'limit', '',
115 115 _('limit number of changes displayed'), _('NUM')),
116 116 ('M', 'no-merges', None, _('do not show merges')),
117 117 ('', 'stat', None, _('output diffstat-style summary of changes')),
118 118 ('G', 'graph', None, _("show the revision DAG")),
119 119 ] + templateopts
120 120
121 121 diffopts = [
122 122 ('a', 'text', None, _('treat all files as text')),
123 123 ('g', 'git', None, _('use git extended diff format')),
124 124 ('', 'nodates', None, _('omit dates from diff headers'))
125 125 ]
126 126
127 127 diffwsopts = [
128 128 ('w', 'ignore-all-space', None,
129 129 _('ignore white space when comparing lines')),
130 130 ('b', 'ignore-space-change', None,
131 131 _('ignore changes in the amount of white space')),
132 132 ('B', 'ignore-blank-lines', None,
133 133 _('ignore changes whose lines are all blank')),
134 134 ]
135 135
136 136 diffopts2 = [
137 137 ('p', 'show-function', None, _('show which function each change is in')),
138 138 ('', 'reverse', None, _('produce a diff that undoes the changes')),
139 139 ] + diffwsopts + [
140 140 ('U', 'unified', '',
141 141 _('number of lines of context to show'), _('NUM')),
142 142 ('', 'stat', None, _('output diffstat-style summary of changes')),
143 143 ]
144 144
145 145 mergetoolopts = [
146 146 ('t', 'tool', '', _('specify merge tool')),
147 147 ]
148 148
149 149 similarityopts = [
150 150 ('s', 'similarity', '',
151 151 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
152 152 ]
153 153
154 154 subrepoopts = [
155 155 ('S', 'subrepos', None,
156 156 _('recurse into subrepositories'))
157 157 ]
158 158
159 159 # Commands start here, listed alphabetically
160 160
161 161 @command('^add',
162 162 walkopts + subrepoopts + dryrunopts,
163 163 _('[OPTION]... [FILE]...'),
164 164 inferrepo=True)
165 165 def add(ui, repo, *pats, **opts):
166 166 """add the specified files on the next commit
167 167
168 168 Schedule files to be version controlled and added to the
169 169 repository.
170 170
171 171 The files will be added to the repository at the next commit. To
172 172 undo an add before that, see :hg:`forget`.
173 173
174 174 If no names are given, add all files to the repository.
175 175
176 176 .. container:: verbose
177 177
178 178 An example showing how new (unknown) files are added
179 179 automatically by :hg:`add`::
180 180
181 181 $ ls
182 182 foo.c
183 183 $ hg status
184 184 ? foo.c
185 185 $ hg add
186 186 adding foo.c
187 187 $ hg status
188 188 A foo.c
189 189
190 190 Returns 0 if all files are successfully added.
191 191 """
192 192
193 193 m = scmutil.match(repo[None], pats, opts)
194 194 rejected = cmdutil.add(ui, repo, m, opts.get('dry_run'),
195 195 opts.get('subrepos'), prefix="", explicitonly=False)
196 196 return rejected and 1 or 0
197 197
198 198 @command('addremove',
199 199 similarityopts + walkopts + dryrunopts,
200 200 _('[OPTION]... [FILE]...'),
201 201 inferrepo=True)
202 202 def addremove(ui, repo, *pats, **opts):
203 203 """add all new files, delete all missing files
204 204
205 205 Add all new files and remove all missing files from the
206 206 repository.
207 207
208 208 New files are ignored if they match any of the patterns in
209 209 ``.hgignore``. As with add, these changes take effect at the next
210 210 commit.
211 211
212 212 Use the -s/--similarity option to detect renamed files. This
213 213 option takes a percentage between 0 (disabled) and 100 (files must
214 214 be identical) as its parameter. With a parameter greater than 0,
215 215 this compares every removed file with every added file and records
216 216 those similar enough as renames. Detecting renamed files this way
217 217 can be expensive. After using this option, :hg:`status -C` can be
218 218 used to check which files were identified as moved or renamed. If
219 219 not specified, -s/--similarity defaults to 100 and only renames of
220 220 identical files are detected.
221 221
222 222 Returns 0 if all files are successfully added.
223 223 """
224 224 try:
225 225 sim = float(opts.get('similarity') or 100)
226 226 except ValueError:
227 227 raise util.Abort(_('similarity must be a number'))
228 228 if sim < 0 or sim > 100:
229 229 raise util.Abort(_('similarity must be between 0 and 100'))
230 230 return scmutil.addremove(repo, pats, opts, similarity=sim / 100.0)
231 231
232 232 @command('^annotate|blame',
233 233 [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
234 234 ('', 'follow', None,
235 235 _('follow copies/renames and list the filename (DEPRECATED)')),
236 236 ('', 'no-follow', None, _("don't follow copies and renames")),
237 237 ('a', 'text', None, _('treat all files as text')),
238 238 ('u', 'user', None, _('list the author (long with -v)')),
239 239 ('f', 'file', None, _('list the filename')),
240 240 ('d', 'date', None, _('list the date (short with -q)')),
241 241 ('n', 'number', None, _('list the revision number (default)')),
242 242 ('c', 'changeset', None, _('list the changeset')),
243 243 ('l', 'line-number', None, _('show line number at the first appearance'))
244 244 ] + diffwsopts + walkopts,
245 245 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
246 246 inferrepo=True)
247 247 def annotate(ui, repo, *pats, **opts):
248 248 """show changeset information by line for each file
249 249
250 250 List changes in files, showing the revision id responsible for
251 251 each line
252 252
253 253 This command is useful for discovering when a change was made and
254 254 by whom.
255 255
256 256 Without the -a/--text option, annotate will avoid processing files
257 257 it detects as binary. With -a, annotate will annotate the file
258 258 anyway, although the results will probably be neither useful
259 259 nor desirable.
260 260
261 261 Returns 0 on success.
262 262 """
263 263 if opts.get('follow'):
264 264 # --follow is deprecated and now just an alias for -f/--file
265 265 # to mimic the behavior of Mercurial before version 1.5
266 266 opts['file'] = True
267 267
268 268 datefunc = ui.quiet and util.shortdate or util.datestr
269 269 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
270 270
271 271 if not pats:
272 272 raise util.Abort(_('at least one filename or pattern is required'))
273 273
274 274 hexfn = ui.debugflag and hex or short
275 275
276 276 opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())),
277 277 ('number', ' ', lambda x: str(x[0].rev())),
278 278 ('changeset', ' ', lambda x: hexfn(x[0].node())),
279 279 ('date', ' ', getdate),
280 280 ('file', ' ', lambda x: x[0].path()),
281 281 ('line_number', ':', lambda x: str(x[1])),
282 282 ]
283 283
284 284 if (not opts.get('user') and not opts.get('changeset')
285 285 and not opts.get('date') and not opts.get('file')):
286 286 opts['number'] = True
287 287
288 288 linenumber = opts.get('line_number') is not None
289 289 if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
290 290 raise util.Abort(_('at least one of -n/-c is required for -l'))
291 291
292 292 funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)]
293 293 funcmap[0] = (funcmap[0][0], '') # no separator in front of first column
294 294
295 295 def bad(x, y):
296 296 raise util.Abort("%s: %s" % (x, y))
297 297
298 298 ctx = scmutil.revsingle(repo, opts.get('rev'))
299 299 m = scmutil.match(ctx, pats, opts)
300 300 m.bad = bad
301 301 follow = not opts.get('no_follow')
302 302 diffopts = patch.diffopts(ui, opts, section='annotate')
303 303 for abs in ctx.walk(m):
304 304 fctx = ctx[abs]
305 305 if not opts.get('text') and util.binary(fctx.data()):
306 306 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
307 307 continue
308 308
309 309 lines = fctx.annotate(follow=follow, linenumber=linenumber,
310 310 diffopts=diffopts)
311 311 pieces = []
312 312
313 313 for f, sep in funcmap:
314 314 l = [f(n) for n, dummy in lines]
315 315 if l:
316 316 sized = [(x, encoding.colwidth(x)) for x in l]
317 317 ml = max([w for x, w in sized])
318 318 pieces.append(["%s%s%s" % (sep, ' ' * (ml - w), x)
319 319 for x, w in sized])
320 320
321 321 if pieces:
322 322 for p, l in zip(zip(*pieces), lines):
323 323 ui.write("%s: %s" % ("".join(p), l[1]))
324 324
325 325 if lines and not lines[-1][1].endswith('\n'):
326 326 ui.write('\n')
327 327
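A tiny sketch of the padding rule used in the loop above; the real code measures display width with encoding.colwidth() rather than len():

    vals = ['alice', 'bob']
    sized = [(x, len(x)) for x in vals]
    ml = max(w for x, w in sized)
    padded = ['%s%s' % (' ' * (ml - w), x) for x, w in sized]   # ['alice', '  bob']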
328 328 @command('archive',
329 329 [('', 'no-decode', None, _('do not pass files through decoders')),
330 330 ('p', 'prefix', '', _('directory prefix for files in archive'),
331 331 _('PREFIX')),
332 332 ('r', 'rev', '', _('revision to distribute'), _('REV')),
333 333 ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
334 334 ] + subrepoopts + walkopts,
335 335 _('[OPTION]... DEST'))
336 336 def archive(ui, repo, dest, **opts):
337 337 '''create an unversioned archive of a repository revision
338 338
339 339 By default, the revision used is the parent of the working
340 340 directory; use -r/--rev to specify a different revision.
341 341
342 342 The archive type is automatically detected based on file
343 343 extension (or override using -t/--type).
344 344
345 345 .. container:: verbose
346 346
347 347 Examples:
348 348
349 349 - create a zip file containing the 1.0 release::
350 350
351 351 hg archive -r 1.0 project-1.0.zip
352 352
353 353 - create a tarball excluding .hg files::
354 354
355 355 hg archive project.tar.gz -X ".hg*"
356 356
357 357 Valid types are:
358 358
359 359 :``files``: a directory full of files (default)
360 360 :``tar``: tar archive, uncompressed
361 361 :``tbz2``: tar archive, compressed using bzip2
362 362 :``tgz``: tar archive, compressed using gzip
363 363 :``uzip``: zip archive, uncompressed
364 364 :``zip``: zip archive, compressed using deflate
365 365
366 366 The exact name of the destination archive or directory is given
367 367 using a format string; see :hg:`help export` for details.
368 368
369 369 Each member added to an archive file has a directory prefix
370 370 prepended. Use -p/--prefix to specify a format string for the
371 371 prefix. The default is the basename of the archive, with suffixes
372 372 removed.
373 373
374 374 Returns 0 on success.
375 375 '''
376 376
377 377 ctx = scmutil.revsingle(repo, opts.get('rev'))
378 378 if not ctx:
379 379 raise util.Abort(_('no working directory: please specify a revision'))
380 380 node = ctx.node()
381 381 dest = cmdutil.makefilename(repo, dest, node)
382 382 if os.path.realpath(dest) == repo.root:
383 383 raise util.Abort(_('repository root cannot be destination'))
384 384
385 385 kind = opts.get('type') or archival.guesskind(dest) or 'files'
386 386 prefix = opts.get('prefix')
387 387
388 388 if dest == '-':
389 389 if kind == 'files':
390 390 raise util.Abort(_('cannot archive plain files to stdout'))
391 391 dest = cmdutil.makefileobj(repo, dest)
392 392 if not prefix:
393 393 prefix = os.path.basename(repo.root) + '-%h'
394 394
395 395 prefix = cmdutil.makefilename(repo, prefix, node)
396 396 matchfn = scmutil.match(ctx, [], opts)
397 397 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
398 398 matchfn, prefix, subrepos=opts.get('subrepos'))
399 399
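A hedged sketch mirroring the archival.archive() call above, producing a gzipped tarball of the working directory's parent; repo and the destination name are assumptions:

    ctx = repo['.']
    archival.archive(repo, 'snapshot.tar.gz', ctx.node(), 'tgz', True,
                     scmutil.match(ctx, [], {}), 'snapshot-', subrepos=False)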
400 400 @command('backout',
401 401 [('', 'merge', None, _('merge with old dirstate parent after backout')),
402 402 ('', 'parent', '',
403 403 _('parent to choose when backing out merge (DEPRECATED)'), _('REV')),
404 404 ('r', 'rev', '', _('revision to backout'), _('REV')),
405 405 ('e', 'edit', False, _('invoke editor on commit messages')),
406 406 ] + mergetoolopts + walkopts + commitopts + commitopts2,
407 407 _('[OPTION]... [-r] REV'))
408 408 def backout(ui, repo, node=None, rev=None, **opts):
409 409 '''reverse effect of earlier changeset
410 410
411 411 Prepare a new changeset with the effect of REV undone in the
412 412 current working directory.
413 413
414 414 If REV is the parent of the working directory, then this new changeset
415 415 is committed automatically. Otherwise, hg needs to merge the
416 416 changes and the merged result is left uncommitted.
417 417
418 418 .. note::
419 419
420 420 backout cannot be used to fix either an unwanted or an
421 421 incorrect merge.
422 422
423 423 .. container:: verbose
424 424
425 425 By default, the pending changeset will have one parent,
426 426 maintaining a linear history. With --merge, the pending
427 427 changeset will instead have two parents: the old parent of the
428 428 working directory and a new child of REV that simply undoes REV.
429 429
430 430 Before version 1.7, the behavior without --merge was equivalent
431 431 to specifying --merge followed by :hg:`update --clean .` to
432 432 cancel the merge and leave the child of REV as a head to be
433 433 merged separately.
434 434
435 435 See :hg:`help dates` for a list of formats valid for -d/--date.
436 436
437 437 Returns 0 on success, 1 if nothing to backout or there are unresolved
438 438 files.
439 439 '''
440 440 if rev and node:
441 441 raise util.Abort(_("please specify just one revision"))
442 442
443 443 if not rev:
444 444 rev = node
445 445
446 446 if not rev:
447 447 raise util.Abort(_("please specify a revision to backout"))
448 448
449 449 date = opts.get('date')
450 450 if date:
451 451 opts['date'] = util.parsedate(date)
452 452
453 453 cmdutil.checkunfinished(repo)
454 454 cmdutil.bailifchanged(repo)
455 455 node = scmutil.revsingle(repo, rev).node()
456 456
457 457 op1, op2 = repo.dirstate.parents()
458 458 if node not in repo.changelog.commonancestorsheads(op1, node):
459 459 raise util.Abort(_('cannot backout change that is not an ancestor'))
460 460
461 461 p1, p2 = repo.changelog.parents(node)
462 462 if p1 == nullid:
463 463 raise util.Abort(_('cannot backout a change with no parents'))
464 464 if p2 != nullid:
465 465 if not opts.get('parent'):
466 466 raise util.Abort(_('cannot backout a merge changeset'))
467 467 p = repo.lookup(opts['parent'])
468 468 if p not in (p1, p2):
469 469 raise util.Abort(_('%s is not a parent of %s') %
470 470 (short(p), short(node)))
471 471 parent = p
472 472 else:
473 473 if opts.get('parent'):
474 474 raise util.Abort(_('cannot use --parent on non-merge changeset'))
475 475 parent = p1
476 476
477 477 # the backout should appear on the same branch
478 478 wlock = repo.wlock()
479 479 try:
480 480 branch = repo.dirstate.branch()
481 481 bheads = repo.branchheads(branch)
482 482 rctx = scmutil.revsingle(repo, hex(parent))
483 483 if not opts.get('merge') and op1 != node:
484 484 try:
485 485 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
486 486 'backout')
487 487 stats = mergemod.update(repo, parent, True, True, False,
488 488 node, False)
489 489 repo.setparents(op1, op2)
490 490 hg._showstats(repo, stats)
491 491 if stats[3]:
492 492 repo.ui.status(_("use 'hg resolve' to retry unresolved "
493 493 "file merges\n"))
494 494 else:
495 495 msg = _("changeset %s backed out, "
496 496 "don't forget to commit.\n")
497 497 ui.status(msg % short(node))
498 498 return stats[3] > 0
499 499 finally:
500 500 ui.setconfig('ui', 'forcemerge', '', '')
501 501 else:
502 502 hg.clean(repo, node, show_stats=False)
503 503 repo.dirstate.setbranch(branch)
504 504 cmdutil.revert(ui, repo, rctx, repo.dirstate.parents())
505 505
506 506
507 507 def commitfunc(ui, repo, message, match, opts):
508 508 editform = 'backout'
509 509 e = cmdutil.getcommiteditor(editform=editform, **opts)
510 510 if not message:
511 511 # we don't translate commit messages
512 512 message = "Backed out changeset %s" % short(node)
513 513 e = cmdutil.getcommiteditor(edit=True, editform=editform)
514 514 return repo.commit(message, opts.get('user'), opts.get('date'),
515 515 match, editor=e)
516 516 newnode = cmdutil.commit(ui, repo, commitfunc, [], opts)
517 517 if not newnode:
518 518 ui.status(_("nothing changed\n"))
519 519 return 1
520 520 cmdutil.commitstatus(repo, newnode, branch, bheads)
521 521
522 522 def nice(node):
523 523 return '%d:%s' % (repo.changelog.rev(node), short(node))
524 524 ui.status(_('changeset %s backs out changeset %s\n') %
525 525 (nice(repo.changelog.tip()), nice(node)))
526 526 if opts.get('merge') and op1 != node:
527 527 hg.clean(repo, op1, show_stats=False)
528 528 ui.status(_('merging with changeset %s\n')
529 529 % nice(repo.changelog.tip()))
530 530 try:
531 531 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
532 532 'backout')
533 533 return hg.merge(repo, hex(repo.changelog.tip()))
534 534 finally:
535 535 ui.setconfig('ui', 'forcemerge', '', '')
536 536 finally:
537 537 wlock.release()
538 538 return 0
539 539
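# An illustrative walk-through of the modes described in the backout
# docstring above (revision numbers are made up):
#
#     hg backout -r .           # REV is the working directory parent, so the
#                               # backout changeset is committed automatically
#     hg backout -r 42          # older REV: the undone changes are merged
#                               # into the working directory and left
#                               # uncommitted for review
#     hg backout --merge -r 42  # commit a child of REV that undoes it, then
#                               # merge it with the old working directory
#                               # parent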
540 540 @command('bisect',
541 541 [('r', 'reset', False, _('reset bisect state')),
542 542 ('g', 'good', False, _('mark changeset good')),
543 543 ('b', 'bad', False, _('mark changeset bad')),
544 544 ('s', 'skip', False, _('skip testing changeset')),
545 545 ('e', 'extend', False, _('extend the bisect range')),
546 546 ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
547 547 ('U', 'noupdate', False, _('do not update to target'))],
548 548 _("[-gbsr] [-U] [-c CMD] [REV]"))
549 549 def bisect(ui, repo, rev=None, extra=None, command=None,
550 550 reset=None, good=None, bad=None, skip=None, extend=None,
551 551 noupdate=None):
552 552 """subdivision search of changesets
553 553
554 554 This command helps to find changesets which introduce problems. To
555 555 use, mark the earliest changeset you know exhibits the problem as
556 556 bad, then mark the latest changeset which is free from the problem
557 557 as good. Bisect will update your working directory to a revision
558 558 for testing (unless the -U/--noupdate option is specified). Once
559 559 you have performed tests, mark the working directory as good or
560 560 bad, and bisect will either update to another candidate changeset
561 561 or announce that it has found the bad revision.
562 562
563 563 As a shortcut, you can also use the revision argument to mark a
564 564 revision as good or bad without checking it out first.
565 565
566 566 If you supply a command, it will be used for automatic bisection.
567 567 The environment variable HG_NODE will contain the ID of the
568 568 changeset being tested. The exit status of the command will be
569 569 used to mark revisions as good or bad: status 0 means good, 125
570 570 means to skip the revision, 127 (command not found) will abort the
571 571 bisection, and any other non-zero exit status means the revision
572 572 is bad.
573 573
574 574 .. container:: verbose
575 575
576 576 Some examples:
577 577
578 578 - start a bisection with known bad revision 34, and good revision 12::
579 579
580 580 hg bisect --bad 34
581 581 hg bisect --good 12
582 582
583 583 - advance the current bisection by marking current revision as good or
584 584 bad::
585 585
586 586 hg bisect --good
587 587 hg bisect --bad
588 588
589 589 - mark the current revision, or a known revision, to be skipped (e.g. if
590 590 that revision is not usable because of another issue)::
591 591
592 592 hg bisect --skip
593 593 hg bisect --skip 23
594 594
595 595 - skip all revisions that do not touch directories ``foo`` or ``bar``::
596 596
597 597 hg bisect --skip "!( file('path:foo') & file('path:bar') )"
598 598
599 599 - forget the current bisection::
600 600
601 601 hg bisect --reset
602 602
603 603 - use 'make && make tests' to automatically find the first broken
604 604 revision::
605 605
606 606 hg bisect --reset
607 607 hg bisect --bad 34
608 608 hg bisect --good 12
609 609 hg bisect --command "make && make tests"
610 610
611 611 - see all changesets whose states are already known in the current
612 612 bisection::
613 613
614 614 hg log -r "bisect(pruned)"
615 615
616 616 - see the changeset currently being bisected (especially useful
617 617 if running with -U/--noupdate)::
618 618
619 619 hg log -r "bisect(current)"
620 620
621 621 - see all changesets that took part in the current bisection::
622 622
623 623 hg log -r "bisect(range)"
624 624
625 625 - you can even get a nice graph::
626 626
627 627 hg log --graph -r "bisect(range)"
628 628
629 629 See :hg:`help revsets` for more about the `bisect()` keyword.
630 630
631 631 Returns 0 on success.
632 632 """
633 633 def extendbisectrange(nodes, good):
634 634 # bisect is incomplete when it ends on a merge node and
635 635 # one of the parent was not checked.
636 636 parents = repo[nodes[0]].parents()
637 637 if len(parents) > 1:
638 638 side = good and state['bad'] or state['good']
639 639 num = len(set(i.node() for i in parents) & set(side))
640 640 if num == 1:
641 641 return parents[0].ancestor(parents[1])
642 642 return None
643 643
644 644 def print_result(nodes, good):
645 645 displayer = cmdutil.show_changeset(ui, repo, {})
646 646 if len(nodes) == 1:
647 647 # narrowed it down to a single revision
648 648 if good:
649 649 ui.write(_("The first good revision is:\n"))
650 650 else:
651 651 ui.write(_("The first bad revision is:\n"))
652 652 displayer.show(repo[nodes[0]])
653 653 extendnode = extendbisectrange(nodes, good)
654 654 if extendnode is not None:
655 655 ui.write(_('Not all ancestors of this changeset have been'
656 656 ' checked.\nUse bisect --extend to continue the '
657 657 'bisection from\nthe common ancestor, %s.\n')
658 658 % extendnode)
659 659 else:
660 660 # multiple possible revisions
661 661 if good:
662 662 ui.write(_("Due to skipped revisions, the first "
663 663 "good revision could be any of:\n"))
664 664 else:
665 665 ui.write(_("Due to skipped revisions, the first "
666 666 "bad revision could be any of:\n"))
667 667 for n in nodes:
668 668 displayer.show(repo[n])
669 669 displayer.close()
670 670
671 671 def check_state(state, interactive=True):
672 672 if not state['good'] or not state['bad']:
673 673 if (good or bad or skip or reset) and interactive:
674 674 return
675 675 if not state['good']:
676 676 raise util.Abort(_('cannot bisect (no known good revisions)'))
677 677 else:
678 678 raise util.Abort(_('cannot bisect (no known bad revisions)'))
679 679 return True
680 680
681 681 # backward compatibility
682 682 if rev in "good bad reset init".split():
683 683 ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
684 684 cmd, rev, extra = rev, extra, None
685 685 if cmd == "good":
686 686 good = True
687 687 elif cmd == "bad":
688 688 bad = True
689 689 else:
690 690 reset = True
691 691 elif extra or good + bad + skip + reset + extend + bool(command) > 1:
692 692 raise util.Abort(_('incompatible arguments'))
693 693
694 694 cmdutil.checkunfinished(repo)
695 695
696 696 if reset:
697 697 p = repo.join("bisect.state")
698 698 if os.path.exists(p):
699 699 os.unlink(p)
700 700 return
701 701
702 702 state = hbisect.load_state(repo)
703 703
704 704 if command:
705 705 changesets = 1
706 706 if noupdate:
707 707 try:
708 708 node = state['current'][0]
709 709 except LookupError:
710 710 raise util.Abort(_('current bisect revision is unknown - '
711 711 'start a new bisect to fix'))
712 712 else:
713 713 node, p2 = repo.dirstate.parents()
714 714 if p2 != nullid:
715 715 raise util.Abort(_('current bisect revision is a merge'))
716 716 try:
717 717 while changesets:
718 718 # update state
719 719 state['current'] = [node]
720 720 hbisect.save_state(repo, state)
721 721 status = util.system(command,
722 722 environ={'HG_NODE': hex(node)},
723 723 out=ui.fout)
724 724 if status == 125:
725 725 transition = "skip"
726 726 elif status == 0:
727 727 transition = "good"
728 728 # status < 0 means process was killed
729 729 elif status == 127:
730 730 raise util.Abort(_("failed to execute %s") % command)
731 731 elif status < 0:
732 732 raise util.Abort(_("%s killed") % command)
733 733 else:
734 734 transition = "bad"
735 735 ctx = scmutil.revsingle(repo, rev, node)
736 736 rev = None # clear for future iterations
737 737 state[transition].append(ctx.node())
738 738 ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition))
739 739 check_state(state, interactive=False)
740 740 # bisect
741 741 nodes, changesets, bgood = hbisect.bisect(repo.changelog, state)
742 742 # update to next check
743 743 node = nodes[0]
744 744 if not noupdate:
745 745 cmdutil.bailifchanged(repo)
746 746 hg.clean(repo, node, show_stats=False)
747 747 finally:
748 748 state['current'] = [node]
749 749 hbisect.save_state(repo, state)
750 750 print_result(nodes, bgood)
751 751 return
752 752
753 753 # update state
754 754
755 755 if rev:
756 756 nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])]
757 757 else:
758 758 nodes = [repo.lookup('.')]
759 759
760 760 if good or bad or skip:
761 761 if good:
762 762 state['good'] += nodes
763 763 elif bad:
764 764 state['bad'] += nodes
765 765 elif skip:
766 766 state['skip'] += nodes
767 767 hbisect.save_state(repo, state)
768 768
769 769 if not check_state(state):
770 770 return
771 771
772 772 # actually bisect
773 773 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
774 774 if extend:
775 775 if not changesets:
776 776 extendnode = extendbisectrange(nodes, good)
777 777 if extendnode is not None:
778 778 ui.write(_("Extending search to changeset %d:%s\n")
779 779 % (extendnode.rev(), extendnode))
780 780 state['current'] = [extendnode.node()]
781 781 hbisect.save_state(repo, state)
782 782 if noupdate:
783 783 return
784 784 cmdutil.bailifchanged(repo)
785 785 return hg.clean(repo, extendnode.node())
786 786 raise util.Abort(_("nothing to extend"))
787 787
788 788 if changesets == 0:
789 789 print_result(nodes, good)
790 790 else:
791 791 assert len(nodes) == 1 # only a single node can be tested next
792 792 node = nodes[0]
793 793 # compute the approximate number of remaining tests
794 794 tests, size = 0, 2
795 795 while size <= changesets:
796 796 tests, size = tests + 1, size * 2
797 797 rev = repo.changelog.rev(node)
798 798 ui.write(_("Testing changeset %d:%s "
799 799 "(%d changesets remaining, ~%d tests)\n")
800 800 % (rev, short(node), changesets, tests))
801 801 state['current'] = [node]
802 802 hbisect.save_state(repo, state)
803 803 if not noupdate:
804 804 cmdutil.bailifchanged(repo)
805 805 return hg.clean(repo, node)
806 806
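# A small standalone script for the --command protocol documented above
# (exit 0 = good, 125 = skip, 127 aborts, anything else = bad). The file name
# check.py and the 'make tests' invocation are illustrative only.
#
#     import subprocess, sys
#
#     try:
#         ok = subprocess.call(['make', 'tests']) == 0
#     except OSError:
#         sys.exit(125)              # toolchain unavailable: skip this rev
#     sys.exit(0 if ok else 1)       # 0 marks the revision good, 1 bad
#
# It would then drive an automatic bisection with:
#
#     hg bisect --command "python check.py"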
807 807 @command('bookmarks|bookmark',
808 808 [('f', 'force', False, _('force')),
809 809 ('r', 'rev', '', _('revision'), _('REV')),
810 810 ('d', 'delete', False, _('delete a given bookmark')),
811 811 ('m', 'rename', '', _('rename a given bookmark'), _('NAME')),
812 812 ('i', 'inactive', False, _('mark a bookmark inactive'))],
813 813 _('hg bookmarks [OPTIONS]... [NAME]...'))
814 814 def bookmark(ui, repo, *names, **opts):
815 815 '''create a new bookmark or list existing bookmarks
816 816
817 817 Bookmarks are labels on changesets to help track lines of development.
818 818 Bookmarks are unversioned and can be moved, renamed and deleted.
819 819 Deleting or moving a bookmark has no effect on the associated changesets.
820 820
821 821 Creating or updating to a bookmark causes it to be marked as 'active'.
822 822 Active bookmarks are indicated with a '*'.
823 823 When a commit is made, an active bookmark will advance to the new commit.
824 824 A plain :hg:`update` will also advance an active bookmark, if possible.
825 825 Updating away from a bookmark will cause it to be deactivated.
826 826
827 827 Bookmarks can be pushed and pulled between repositories (see
828 828 :hg:`help push` and :hg:`help pull`). If a shared bookmark has
829 829 diverged, a new 'divergent bookmark' of the form 'name@path' will
830 830 be created. Using :hg:`merge` will resolve the divergence.
831 831
832 832 A bookmark named '@' has the special property that :hg:`clone` will
833 833 check it out by default if it exists.
834 834
835 835 .. container:: verbose
836 836
837 837 Examples:
838 838
839 839 - create an active bookmark for a new line of development::
840 840
841 841 hg book new-feature
842 842
843 843 - create an inactive bookmark as a place marker::
844 844
845 845 hg book -i reviewed
846 846
847 847 - create an inactive bookmark on another changeset::
848 848
849 849 hg book -r .^ tested
850 850
851 851 - move the '@' bookmark from another branch::
852 852
853 853 hg book -f @
854 854 '''
855 855 force = opts.get('force')
856 856 rev = opts.get('rev')
857 857 delete = opts.get('delete')
858 858 rename = opts.get('rename')
859 859 inactive = opts.get('inactive')
860 860
861 861 def checkformat(mark):
862 862 mark = mark.strip()
863 863 if not mark:
864 864 raise util.Abort(_("bookmark names cannot consist entirely of "
865 865 "whitespace"))
866 866 scmutil.checknewlabel(repo, mark, 'bookmark')
867 867 return mark
868 868
869 869 def checkconflict(repo, mark, cur, force=False, target=None):
870 870 if mark in marks and not force:
871 871 if target:
872 872 if marks[mark] == target and target == cur:
873 873 # re-activating a bookmark
874 874 return
875 875 anc = repo.changelog.ancestors([repo[target].rev()])
876 876 bmctx = repo[marks[mark]]
877 877 divs = [repo[b].node() for b in marks
878 878 if b.split('@', 1)[0] == mark.split('@', 1)[0]]
879 879
880 880 # allow resolving a single divergent bookmark even if moving
881 881 # the bookmark across branches when a revision is specified
882 882 # that contains a divergent bookmark
883 883 if bmctx.rev() not in anc and target in divs:
884 884 bookmarks.deletedivergent(repo, [target], mark)
885 885 return
886 886
887 887 deletefrom = [b for b in divs
888 888 if repo[b].rev() in anc or b == target]
889 889 bookmarks.deletedivergent(repo, deletefrom, mark)
890 890 if bookmarks.validdest(repo, bmctx, repo[target]):
891 891 ui.status(_("moving bookmark '%s' forward from %s\n") %
892 892 (mark, short(bmctx.node())))
893 893 return
894 894 raise util.Abort(_("bookmark '%s' already exists "
895 895 "(use -f to force)") % mark)
896 896 if ((mark in repo.branchmap() or mark == repo.dirstate.branch())
897 897 and not force):
898 898 raise util.Abort(
899 899 _("a bookmark cannot have the name of an existing branch"))
900 900
901 901 if delete and rename:
902 902 raise util.Abort(_("--delete and --rename are incompatible"))
903 903 if delete and rev:
904 904 raise util.Abort(_("--rev is incompatible with --delete"))
905 905 if rename and rev:
906 906 raise util.Abort(_("--rev is incompatible with --rename"))
907 907 if not names and (delete or rev):
908 908 raise util.Abort(_("bookmark name required"))
909 909
910 910 if delete or rename or names or inactive:
911 911 wlock = repo.wlock()
912 912 try:
913 913 cur = repo.changectx('.').node()
914 914 marks = repo._bookmarks
915 915 if delete:
916 916 for mark in names:
917 917 if mark not in marks:
918 918 raise util.Abort(_("bookmark '%s' does not exist") %
919 919 mark)
920 920 if mark == repo._bookmarkcurrent:
921 921 bookmarks.unsetcurrent(repo)
922 922 del marks[mark]
923 923 marks.write()
924 924
925 925 elif rename:
926 926 if not names:
927 927 raise util.Abort(_("new bookmark name required"))
928 928 elif len(names) > 1:
929 929 raise util.Abort(_("only one new bookmark name allowed"))
930 930 mark = checkformat(names[0])
931 931 if rename not in marks:
932 932 raise util.Abort(_("bookmark '%s' does not exist") % rename)
933 933 checkconflict(repo, mark, cur, force)
934 934 marks[mark] = marks[rename]
935 935 if repo._bookmarkcurrent == rename and not inactive:
936 936 bookmarks.setcurrent(repo, mark)
937 937 del marks[rename]
938 938 marks.write()
939 939
940 940 elif names:
941 941 newact = None
942 942 for mark in names:
943 943 mark = checkformat(mark)
944 944 if newact is None:
945 945 newact = mark
946 946 if inactive and mark == repo._bookmarkcurrent:
947 947 bookmarks.unsetcurrent(repo)
948 948 return
949 949 tgt = cur
950 950 if rev:
951 951 tgt = scmutil.revsingle(repo, rev).node()
952 952 checkconflict(repo, mark, cur, force, tgt)
953 953 marks[mark] = tgt
954 954 if not inactive and cur == marks[newact] and not rev:
955 955 bookmarks.setcurrent(repo, newact)
956 956 elif cur != tgt and newact == repo._bookmarkcurrent:
957 957 bookmarks.unsetcurrent(repo)
958 958 marks.write()
959 959
960 960 elif inactive:
961 961 if len(marks) == 0:
962 962 ui.status(_("no bookmarks set\n"))
963 963 elif not repo._bookmarkcurrent:
964 964 ui.status(_("no active bookmark\n"))
965 965 else:
966 966 bookmarks.unsetcurrent(repo)
967 967 finally:
968 968 wlock.release()
969 969 else: # show bookmarks
970 970 hexfn = ui.debugflag and hex or short
971 971 marks = repo._bookmarks
972 972 if len(marks) == 0:
973 973 ui.status(_("no bookmarks set\n"))
974 974 else:
975 975 for bmark, n in sorted(marks.iteritems()):
976 976 current = repo._bookmarkcurrent
977 977 if bmark == current:
978 978 prefix, label = '*', 'bookmarks.current'
979 979 else:
980 980 prefix, label = ' ', ''
981 981
982 982 if ui.quiet:
983 983 ui.write("%s\n" % bmark, label=label)
984 984 else:
985 985 pad = " " * (25 - encoding.colwidth(bmark))
986 986 ui.write(" %s %s%s %d:%s\n" % (
987 987 prefix, bmark, pad, repo.changelog.rev(n), hexfn(n)),
988 988 label=label)
989 989
990 990 @command('branch',
991 991 [('f', 'force', None,
992 992 _('set branch name even if it shadows an existing branch')),
993 993 ('C', 'clean', None, _('reset branch name to parent branch name'))],
994 994 _('[-fC] [NAME]'))
995 995 def branch(ui, repo, label=None, **opts):
996 996 """set or show the current branch name
997 997
998 998 .. note::
999 999
1000 1000 Branch names are permanent and global. Use :hg:`bookmark` to create a
1001 1001 light-weight bookmark instead. See :hg:`help glossary` for more
1002 1002 information about named branches and bookmarks.
1003 1003
1004 1004 With no argument, show the current branch name. With one argument,
1005 1005 set the working directory branch name (the branch will not exist
1006 1006 in the repository until the next commit). Standard practice
1007 1007 recommends that primary development take place on the 'default'
1008 1008 branch.
1009 1009
1010 1010 Unless -f/--force is specified, branch will not let you set a
1011 1011 branch name that already exists, even if it's inactive.
1012 1012
1013 1013 Use -C/--clean to reset the working directory branch to that of
1014 1014 the parent of the working directory, negating a previous branch
1015 1015 change.
1016 1016
1017 1017 Use the command :hg:`update` to switch to an existing branch. Use
1018 1018 :hg:`commit --close-branch` to mark this branch as closed.
1019 1019
1020 1020 Returns 0 on success.
1021 1021 """
1022 1022 if label:
1023 1023 label = label.strip()
1024 1024
1025 1025 if not opts.get('clean') and not label:
1026 1026 ui.write("%s\n" % repo.dirstate.branch())
1027 1027 return
1028 1028
1029 1029 wlock = repo.wlock()
1030 1030 try:
1031 1031 if opts.get('clean'):
1032 1032 label = repo[None].p1().branch()
1033 1033 repo.dirstate.setbranch(label)
1034 1034 ui.status(_('reset working directory to branch %s\n') % label)
1035 1035 elif label:
1036 1036 if not opts.get('force') and label in repo.branchmap():
1037 1037 if label not in [p.branch() for p in repo.parents()]:
1038 1038 raise util.Abort(_('a branch of the same name already'
1039 1039 ' exists'),
1040 1040 # i18n: "it" refers to an existing branch
1041 1041 hint=_("use 'hg update' to switch to it"))
1042 1042 scmutil.checknewlabel(repo, label, 'branch')
1043 1043 repo.dirstate.setbranch(label)
1044 1044 ui.status(_('marked working directory as branch %s\n') % label)
1045 1045 ui.status(_('(branches are permanent and global, '
1046 1046 'did you want a bookmark?)\n'))
1047 1047 finally:
1048 1048 wlock.release()
1049 1049
1050 1050 @command('branches',
1051 1051 [('a', 'active', False, _('show only branches that have unmerged heads')),
1052 1052 ('c', 'closed', False, _('show normal and closed branches'))],
1053 1053 _('[-ac]'))
1054 1054 def branches(ui, repo, active=False, closed=False):
1055 1055 """list repository named branches
1056 1056
1057 1057 List the repository's named branches, indicating which ones are
1058 1058 inactive. If -c/--closed is specified, also list branches which have
1059 1059 been marked closed (see :hg:`commit --close-branch`).
1060 1060
1061 1061 If -a/--active is specified, only show active branches. A branch
1062 1062 is considered active if it contains repository heads.
1063 1063
1064 1064 Use the command :hg:`update` to switch to an existing branch.
1065 1065
1066 1066 Returns 0.
1067 1067 """
1068 1068
1069 1069 hexfunc = ui.debugflag and hex or short
1070 1070
1071 1071 allheads = set(repo.heads())
1072 1072 branches = []
1073 1073 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
1074 1074 isactive = not isclosed and bool(set(heads) & allheads)
1075 1075 branches.append((tag, repo[tip], isactive, not isclosed))
1076 1076 branches.sort(key=lambda i: (i[2], i[1].rev(), i[0], i[3]),
1077 1077 reverse=True)
1078 1078
1079 1079 for tag, ctx, isactive, isopen in branches:
1080 1080 if (not active) or isactive:
1081 1081 if isactive:
1082 1082 label = 'branches.active'
1083 1083 notice = ''
1084 1084 elif not isopen:
1085 1085 if not closed:
1086 1086 continue
1087 1087 label = 'branches.closed'
1088 1088 notice = _(' (closed)')
1089 1089 else:
1090 1090 label = 'branches.inactive'
1091 1091 notice = _(' (inactive)')
1092 1092 if tag == repo.dirstate.branch():
1093 1093 label = 'branches.current'
1094 1094 rev = str(ctx.rev()).rjust(31 - encoding.colwidth(tag))
1095 1095 rev = ui.label('%s:%s' % (rev, hexfunc(ctx.node())),
1096 1096 'log.changeset changeset.%s' % ctx.phasestr())
1097 1097 labeledtag = ui.label(tag, label)
1098 1098 if ui.quiet:
1099 1099 ui.write("%s\n" % labeledtag)
1100 1100 else:
1101 1101 ui.write("%s %s%s\n" % (labeledtag, rev, notice))
1102 1102
1103 1103 @command('bundle',
1104 1104 [('f', 'force', None, _('run even when the destination is unrelated')),
1105 1105 ('r', 'rev', [], _('a changeset intended to be added to the destination'),
1106 1106 _('REV')),
1107 1107 ('b', 'branch', [], _('a specific branch you would like to bundle'),
1108 1108 _('BRANCH')),
1109 1109 ('', 'base', [],
1110 1110 _('a base changeset assumed to be available at the destination'),
1111 1111 _('REV')),
1112 1112 ('a', 'all', None, _('bundle all changesets in the repository')),
1113 1113 ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
1114 1114 ] + remoteopts,
1115 1115 _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
1116 1116 def bundle(ui, repo, fname, dest=None, **opts):
1117 1117 """create a changegroup file
1118 1118
1119 1119 Generate a compressed changegroup file collecting changesets not
1120 1120 known to be in another repository.
1121 1121
1122 1122 If you omit the destination repository, then hg assumes the
1123 1123 destination will have all the nodes you specify with --base
1124 1124 parameters. To create a bundle containing all changesets, use
1125 1125 -a/--all (or --base null).
1126 1126
1127 1127 You can change the compression method with the -t/--type option.
1128 1128 The available compression methods are: none, bzip2, and
1129 1129 gzip (by default, bundles are compressed using bzip2).
1130 1130
1131 1131 The bundle file can then be transferred using conventional means
1132 1132 and applied to another repository with the unbundle or pull
1133 1133 command. This is useful when direct push and pull are not
1134 1134 available or when exporting an entire repository is undesirable.
1135 1135
1136 1136 Applying bundles preserves all changeset contents including
1137 1137 permissions, copy/rename information, and revision history.
1138 1138
1139 1139 Returns 0 on success, 1 if no changes found.
1140 1140 """
1141 1141 revs = None
1142 1142 if 'rev' in opts:
1143 1143 revs = scmutil.revrange(repo, opts['rev'])
1144 1144
1145 1145 bundletype = opts.get('type', 'bzip2').lower()
1146 1146 btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
1147 1147 bundletype = btypes.get(bundletype)
1148 1148 if bundletype not in changegroup.bundletypes:
1149 1149 raise util.Abort(_('unknown bundle type specified with --type'))
1150 1150
1151 1151 if opts.get('all'):
1152 1152 base = ['null']
1153 1153 else:
1154 1154 base = scmutil.revrange(repo, opts.get('base'))
1155 1155 # TODO: get desired bundlecaps from command line.
1156 1156 bundlecaps = None
1157 1157 if base:
1158 1158 if dest:
1159 1159 raise util.Abort(_("--base is incompatible with specifying "
1160 1160 "a destination"))
1161 1161 common = [repo.lookup(rev) for rev in base]
1162 1162 heads = revs and map(repo.lookup, revs) or revs
1163 1163 cg = changegroup.getbundle(repo, 'bundle', heads=heads, common=common,
1164 1164 bundlecaps=bundlecaps)
1165 1165 outgoing = None
1166 1166 else:
1167 1167 dest = ui.expandpath(dest or 'default-push', dest or 'default')
1168 1168 dest, branches = hg.parseurl(dest, opts.get('branch'))
1169 1169 other = hg.peer(repo, opts, dest)
1170 1170 revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
1171 1171 heads = revs and map(repo.lookup, revs) or revs
1172 1172 outgoing = discovery.findcommonoutgoing(repo, other,
1173 1173 onlyheads=heads,
1174 1174 force=opts.get('force'),
1175 1175 portable=True)
1176 1176 cg = changegroup.getlocalbundle(repo, 'bundle', outgoing, bundlecaps)
1177 1177 if not cg:
1178 1178 scmutil.nochangesfound(ui, repo, outgoing and outgoing.excluded)
1179 1179 return 1
1180 1180
1181 1181 changegroup.writebundle(cg, fname, bundletype)
1182 1182
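# An illustrative round trip with the bundle/unbundle pair described in the
# docstring above (file names and the base revision are made up):
#
#     hg bundle --base 1.0 new-work.hg    # changesets the 1.0 base lacks
#     hg bundle -a everything.hg          # the entire repository
#     # transfer the file by any conventional means, then on the other side:
#     hg unbundle new-work.hg
#
# --base names changesets the receiver is assumed to have already, so no
# destination repository has to be reachable when the bundle is created.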
1183 1183 @command('cat',
1184 1184 [('o', 'output', '',
1185 1185 _('print output to file with formatted name'), _('FORMAT')),
1186 1186 ('r', 'rev', '', _('print the given revision'), _('REV')),
1187 1187 ('', 'decode', None, _('apply any matching decode filter')),
1188 1188 ] + walkopts,
1189 1189 _('[OPTION]... FILE...'),
1190 1190 inferrepo=True)
1191 1191 def cat(ui, repo, file1, *pats, **opts):
1192 1192 """output the current or given revision of files
1193 1193
1194 1194 Print the specified files as they were at the given revision. If
1195 1195 no revision is given, the parent of the working directory is used.
1196 1196
1197 1197 Output may be to a file, in which case the name of the file is
1198 1198 given using a format string. The formatting rules are as follows:
1199 1199
1200 1200 :``%%``: literal "%" character
1201 1201 :``%s``: basename of file being printed
1202 1202 :``%d``: dirname of file being printed, or '.' if in repository root
1203 1203 :``%p``: root-relative path name of file being printed
1204 1204 :``%H``: changeset hash (40 hexadecimal digits)
1205 1205 :``%R``: changeset revision number
1206 1206 :``%h``: short-form changeset hash (12 hexadecimal digits)
1207 1207 :``%r``: zero-padded changeset revision number
1208 1208 :``%b``: basename of the exporting repository
1209 1209
1210 1210 Returns 0 on success.
1211 1211 """
1212 1212 ctx = scmutil.revsingle(repo, opts.get('rev'))
1213 1213 m = scmutil.match(ctx, (file1,) + pats, opts)
1214 1214
1215 1215 return cmdutil.cat(ui, repo, ctx, m, '', **opts)
1216 1216
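# An illustrative use of the -o format string documented above (the file name
# and revision are made up):
#
#     hg cat -r 1.0 -o 'export/%h/%p' setup.py
#
# writes the 1.0 revision of setup.py to export/<short hash>/setup.py, using
# the %h and %p keys from the list above.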
1217 1217 @command('^clone',
1218 1218 [('U', 'noupdate', None,
1219 1219 _('the clone will include an empty working copy (only a repository)')),
1220 1220 ('u', 'updaterev', '', _('revision, tag or branch to check out'), _('REV')),
1221 1221 ('r', 'rev', [], _('include the specified changeset'), _('REV')),
1222 1222 ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
1223 1223 ('', 'pull', None, _('use pull protocol to copy metadata')),
1224 1224 ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')),
1225 1225 ] + remoteopts,
1226 1226 _('[OPTION]... SOURCE [DEST]'),
1227 1227 norepo=True)
1228 1228 def clone(ui, source, dest=None, **opts):
1229 1229 """make a copy of an existing repository
1230 1230
1231 1231 Create a copy of an existing repository in a new directory.
1232 1232
1233 1233 If no destination directory name is specified, it defaults to the
1234 1234 basename of the source.
1235 1235
1236 1236 The location of the source is added to the new repository's
1237 1237 ``.hg/hgrc`` file, as the default to be used for future pulls.
1238 1238
1239 1239 Only local paths and ``ssh://`` URLs are supported as
1240 1240 destinations. For ``ssh://`` destinations, no working directory or
1241 1241 ``.hg/hgrc`` will be created on the remote side.
1242 1242
1243 1243 To pull only a subset of changesets, specify one or more revisions
1244 1244 identifiers with -r/--rev or branches with -b/--branch. The
1245 1245 resulting clone will contain only the specified changesets and
1246 1246 their ancestors. These options (or 'clone src#rev dest') imply
1247 1247 --pull, even for local source repositories. Note that specifying a
1248 1248 tag will include the tagged changeset but not the changeset
1249 1249 containing the tag.
1250 1250
1251 1251 If the source repository has a bookmark called '@' set, that
1252 1252 revision will be checked out in the new repository by default.
1253 1253
1254 1254 To check out a particular version, use -u/--update, or
1255 1255 -U/--noupdate to create a clone with no working directory.
1256 1256
1257 1257 .. container:: verbose
1258 1258
1259 1259 For efficiency, hardlinks are used for cloning whenever the
1260 1260 source and destination are on the same filesystem (note this
1261 1261 applies only to the repository data, not to the working
1262 1262 directory). Some filesystems, such as AFS, implement hardlinking
1263 1263 incorrectly, but do not report errors. In these cases, use the
1264 1264 --pull option to avoid hardlinking.
1265 1265
1266 1266 In some cases, you can clone repositories and the working
1267 1267 directory using full hardlinks with ::
1268 1268
1269 1269 $ cp -al REPO REPOCLONE
1270 1270
1271 1271 This is the fastest way to clone, but it is not always safe. The
1272 1272 operation is not atomic (making sure REPO is not modified during
1273 1273 the operation is up to you) and you have to make sure your
1274 1274 editor breaks hardlinks (Emacs and most Linux Kernel tools do
1275 1275 so). Also, this is not compatible with certain extensions that
1276 1276 place their metadata under the .hg directory, such as mq.
1277 1277
1278 1278 Mercurial will update the working directory to the first applicable
1279 1279 revision from this list:
1280 1280
1281 1281 a) null if -U or the source repository has no changesets
1282 1282 b) if -u . and the source repository is local, the first parent of
1283 1283 the source repository's working directory
1284 1284 c) the changeset specified with -u (if a branch name, this means the
1285 1285 latest head of that branch)
1286 1286 d) the changeset specified with -r
1287 1287 e) the tipmost head specified with -b
1288 1288 f) the tipmost head specified with the url#branch source syntax
1289 1289 g) the revision marked with the '@' bookmark, if present
1290 1290 h) the tipmost head of the default branch
1291 1291 i) tip
1292 1292
1293 1293 Examples:
1294 1294
1295 1295 - clone a remote repository to a new directory named hg/::
1296 1296
1297 1297 hg clone http://selenic.com/hg
1298 1298
1299 1299 - create a lightweight local clone::
1300 1300
1301 1301 hg clone project/ project-feature/
1302 1302
1303 1303 - clone from an absolute path on an ssh server (note double-slash)::
1304 1304
1305 1305 hg clone ssh://user@server//home/projects/alpha/
1306 1306
1307 1307 - do a high-speed clone over a LAN while checking out a
1308 1308 specified version::
1309 1309
1310 1310 hg clone --uncompressed http://server/repo -u 1.5
1311 1311
1312 1312 - create a repository without changesets after a particular revision::
1313 1313
1314 1314 hg clone -r 04e544 experimental/ good/
1315 1315
1316 1316 - clone (and track) a particular named branch::
1317 1317
1318 1318 hg clone http://selenic.com/hg#stable
1319 1319
1320 1320 See :hg:`help urls` for details on specifying URLs.
1321 1321
1322 1322 Returns 0 on success.
1323 1323 """
1324 1324 if opts.get('noupdate') and opts.get('updaterev'):
1325 1325 raise util.Abort(_("cannot specify both --noupdate and --updaterev"))
1326 1326
1327 1327 r = hg.clone(ui, opts, source, dest,
1328 1328 pull=opts.get('pull'),
1329 1329 stream=opts.get('uncompressed'),
1330 1330 rev=opts.get('rev'),
1331 1331 update=opts.get('updaterev') or not opts.get('noupdate'),
1332 1332 branch=opts.get('branch'))
1333 1333
1334 1334 return r is None
1335 1335
1336 1336 @command('^commit|ci',
1337 1337 [('A', 'addremove', None,
1338 1338 _('mark new/missing files as added/removed before committing')),
1339 1339 ('', 'close-branch', None,
1340 1340 _('mark a branch as closed, hiding it from the branch list')),
1341 1341 ('', 'amend', None, _('amend the parent of the working dir')),
1342 1342 ('s', 'secret', None, _('use the secret phase for committing')),
1343 1343 ('e', 'edit', None, _('invoke editor on commit messages')),
1344 1344 ] + walkopts + commitopts + commitopts2 + subrepoopts,
1345 1345 _('[OPTION]... [FILE]...'),
1346 1346 inferrepo=True)
1347 1347 def commit(ui, repo, *pats, **opts):
1348 1348 """commit the specified files or all outstanding changes
1349 1349
1350 1350 Commit changes to the given files into the repository. Unlike a
1351 1351 centralized SCM, this operation is a local operation. See
1352 1352 :hg:`push` for a way to actively distribute your changes.
1353 1353
1354 1354 If a list of files is omitted, all changes reported by :hg:`status`
1355 1355 will be committed.
1356 1356
1357 1357 If you are committing the result of a merge, do not provide any
1358 1358 filenames or -I/-X filters.
1359 1359
1360 1360 If no commit message is specified, Mercurial starts your
1361 1361 configured editor where you can enter a message. In case your
1362 1362 commit fails, you will find a backup of your message in
1363 1363 ``.hg/last-message.txt``.
1364 1364
1365 1365 The --amend flag can be used to amend the parent of the
1366 1366 working directory with a new commit that contains the changes
1367 1367 in the parent in addition to those currently reported by :hg:`status`,
1368 1368 if there are any. The old commit is stored in a backup bundle in
1369 1369 ``.hg/strip-backup`` (see :hg:`help bundle` and :hg:`help unbundle`
1370 1370 on how to restore it).
1371 1371
1372 1372 Message, user and date are taken from the amended commit unless
1373 1373 specified. When a message isn't specified on the command line,
1374 1374 the editor will open with the message of the amended commit.
1375 1375
1376 1376 It is not possible to amend public changesets (see :hg:`help phases`)
1377 1377 or changesets that have children.
1378 1378
1379 1379 See :hg:`help dates` for a list of formats valid for -d/--date.
1380 1380
1381 1381 Returns 0 on success, 1 if nothing changed.
1382 1382 """
1383 1383 if opts.get('subrepos'):
1384 1384 if opts.get('amend'):
1385 1385 raise util.Abort(_('cannot amend with --subrepos'))
1386 1386 # Let --subrepos on the command line override config setting.
1387 1387 ui.setconfig('ui', 'commitsubrepos', True, 'commit')
1388 1388
1389 1389 cmdutil.checkunfinished(repo, commit=True)
1390 1390
1391 1391 branch = repo[None].branch()
1392 1392 bheads = repo.branchheads(branch)
1393 1393
1394 1394 extra = {}
1395 1395 if opts.get('close_branch'):
1396 1396 extra['close'] = 1
1397 1397
1398 1398 if not bheads:
1399 1399 raise util.Abort(_('can only close branch heads'))
1400 1400 elif opts.get('amend'):
1401 1401 if repo.parents()[0].p1().branch() != branch and \
1402 1402 repo.parents()[0].p2().branch() != branch:
1403 1403 raise util.Abort(_('can only close branch heads'))
1404 1404
1405 1405 if opts.get('amend'):
1406 1406 if ui.configbool('ui', 'commitsubrepos'):
1407 1407 raise util.Abort(_('cannot amend with ui.commitsubrepos enabled'))
1408 1408
1409 1409 old = repo['.']
1410 1410 if old.phase() == phases.public:
1411 1411 raise util.Abort(_('cannot amend public changesets'))
1412 1412 if len(repo[None].parents()) > 1:
1413 1413 raise util.Abort(_('cannot amend while merging'))
1414 1414 if (not obsolete._enabled) and old.children():
1415 1415 raise util.Abort(_('cannot amend changeset with children'))
1416 1416
1417 1417 # commitfunc is used only for temporary amend commit by cmdutil.amend
1418 1418 def commitfunc(ui, repo, message, match, opts):
1419 1419 return repo.commit(message,
1420 1420 opts.get('user') or old.user(),
1421 1421 opts.get('date') or old.date(),
1422 1422 match,
1423 1423 extra=extra)
1424 1424
1425 1425 current = repo._bookmarkcurrent
1426 1426 marks = old.bookmarks()
1427 1427 node = cmdutil.amend(ui, repo, commitfunc, old, extra, pats, opts)
1428 1428 if node == old.node():
1429 1429 ui.status(_("nothing changed\n"))
1430 1430 return 1
1431 1431 elif marks:
1432 1432 ui.debug('moving bookmarks %r from %s to %s\n' %
1433 1433 (marks, old.hex(), hex(node)))
1434 1434 newmarks = repo._bookmarks
1435 1435 for bm in marks:
1436 1436 newmarks[bm] = node
1437 1437 if bm == current:
1438 1438 bookmarks.setcurrent(repo, bm)
1439 1439 newmarks.write()
1440 1440 else:
1441 1441 def commitfunc(ui, repo, message, match, opts):
1442 1442 backup = ui.backupconfig('phases', 'new-commit')
1443 1443 baseui = repo.baseui
1444 1444 basebackup = baseui.backupconfig('phases', 'new-commit')
1445 1445 try:
1446 1446 if opts.get('secret'):
1447 1447 ui.setconfig('phases', 'new-commit', 'secret', 'commit')
1448 1448 # Propagate to subrepos
1449 1449 baseui.setconfig('phases', 'new-commit', 'secret', 'commit')
1450 1450
1451 1451 editform = 'commit.normal'
1452 1452 editor = cmdutil.getcommiteditor(editform=editform, **opts)
1453 1453 return repo.commit(message, opts.get('user'), opts.get('date'),
1454 1454 match,
1455 1455 editor=editor,
1456 1456 extra=extra)
1457 1457 finally:
1458 1458 ui.restoreconfig(backup)
1459 1459 repo.baseui.restoreconfig(basebackup)
1460 1460
1461 1461
1462 1462 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
1463 1463
1464 1464 if not node:
1465 1465 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
1466 1466 if stat[3]:
1467 1467 ui.status(_("nothing changed (%d missing files, see "
1468 1468 "'hg status')\n") % len(stat[3]))
1469 1469 else:
1470 1470 ui.status(_("nothing changed\n"))
1471 1471 return 1
1472 1472
1473 1473 cmdutil.commitstatus(repo, node, branch, bheads, opts)
1474 1474
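# A condensed sketch of the config-override pattern commit() uses above for
# --secret: back up the phases setting, override it while the commit is
# created, then restore it. Illustrative only; it mirrors the backupconfig /
# setconfig / restoreconfig calls in commitfunc().
#
#     backup = ui.backupconfig('phases', 'new-commit')
#     try:
#         ui.setconfig('phases', 'new-commit', 'secret', 'commit')
#         # ... commit while new changesets default to the secret phase ...
#     finally:
#         ui.restoreconfig(backup)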
1475 1475 @command('config|showconfig|debugconfig',
1476 1476 [('u', 'untrusted', None, _('show untrusted configuration options')),
1477 1477 ('e', 'edit', None, _('edit user config')),
1478 1478 ('l', 'local', None, _('edit repository config')),
1479 1479 ('g', 'global', None, _('edit global config'))],
1480 1480 _('[-u] [NAME]...'),
1481 1481 optionalrepo=True)
1482 1482 def config(ui, repo, *values, **opts):
1483 1483 """show combined config settings from all hgrc files
1484 1484
1485 1485 With no arguments, print names and values of all config items.
1486 1486
1487 1487 With one argument of the form section.name, print just the value
1488 1488 of that config item.
1489 1489
1490 1490 With multiple arguments, print names and values of all config
1491 1491 items with matching section names.
1492 1492
1493 1493 With --edit, start an editor on the user-level config file. With
1494 1494 --global, edit the system-wide config file. With --local, edit the
1495 1495 repository-level config file.
1496 1496
1497 1497 With --debug, the source (filename and line number) is printed
1498 1498 for each config item.
1499 1499
1500 1500 See :hg:`help config` for more information about config files.
1501 1501
1502 1502 Returns 0 on success.
1503 1503
1504 1504 """
1505 1505
1506 1506 if opts.get('edit') or opts.get('local') or opts.get('global'):
1507 1507 if opts.get('local') and opts.get('global'):
1508 1508 raise util.Abort(_("can't use --local and --global together"))
1509 1509
1510 1510 if opts.get('local'):
1511 1511 if not repo:
1512 1512 raise util.Abort(_("can't use --local outside a repository"))
1513 1513 paths = [repo.join('hgrc')]
1514 1514 elif opts.get('global'):
1515 1515 paths = scmutil.systemrcpath()
1516 1516 else:
1517 1517 paths = scmutil.userrcpath()
1518 1518
1519 1519 for f in paths:
1520 1520 if os.path.exists(f):
1521 1521 break
1522 1522 else:
1523 1523 f = paths[0]
1524 1524 fp = open(f, "w")
1525 1525 fp.write(
1526 1526 '# example config (see "hg help config" for more info)\n'
1527 1527 '\n'
1528 1528 '[ui]\n'
1529 1529 '# name and email, e.g.\n'
1530 1530 '# username = Jane Doe <jdoe@example.com>\n'
1531 1531 'username =\n'
1532 1532 '\n'
1533 1533 '[extensions]\n'
1534 1534 '# uncomment these lines to enable some popular extensions\n'
1535 1535 '# (see "hg help extensions" for more info)\n'
1536 1536 '# pager =\n'
1537 1537 '# progress =\n'
1538 1538 '# color =\n')
1539 1539 fp.close()
1540 1540
1541 1541 editor = ui.geteditor()
1542 1542 util.system("%s \"%s\"" % (editor, f),
1543 1543 onerr=util.Abort, errprefix=_("edit failed"),
1544 1544 out=ui.fout)
1545 1545 return
1546 1546
1547 1547 for f in scmutil.rcpath():
1548 1548 ui.debug('read config from: %s\n' % f)
1549 1549 untrusted = bool(opts.get('untrusted'))
1550 1550 if values:
1551 1551 sections = [v for v in values if '.' not in v]
1552 1552 items = [v for v in values if '.' in v]
1553 1553 if len(items) > 1 or items and sections:
1554 1554 raise util.Abort(_('only one config item permitted'))
1555 1555 for section, name, value in ui.walkconfig(untrusted=untrusted):
1556 1556 value = str(value).replace('\n', '\\n')
1557 1557 sectname = section + '.' + name
1558 1558 if values:
1559 1559 for v in values:
1560 1560 if v == section:
1561 1561 ui.debug('%s: ' %
1562 1562 ui.configsource(section, name, untrusted))
1563 1563 ui.write('%s=%s\n' % (sectname, value))
1564 1564 elif v == sectname:
1565 1565 ui.debug('%s: ' %
1566 1566 ui.configsource(section, name, untrusted))
1567 1567 ui.write(value, '\n')
1568 1568 else:
1569 1569 ui.debug('%s: ' %
1570 1570 ui.configsource(section, name, untrusted))
1571 1571 ui.write('%s=%s\n' % (sectname, value))
1572 1572
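# Illustrative invocations of the lookup forms described in the docstring
# above:
#
#     hg config                 # every section.name=value pair
#     hg config ui              # all items whose section is [ui]
#     hg config ui.username     # just that single value
#     hg config --edit          # open the user-level config in an editor
#
# With --debug, each item is prefixed with the file and line it came from.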
1573 1573 @command('copy|cp',
1574 1574 [('A', 'after', None, _('record a copy that has already occurred')),
1575 1575 ('f', 'force', None, _('forcibly copy over an existing managed file')),
1576 1576 ] + walkopts + dryrunopts,
1577 1577 _('[OPTION]... [SOURCE]... DEST'))
1578 1578 def copy(ui, repo, *pats, **opts):
1579 1579 """mark files as copied for the next commit
1580 1580
1581 1581 Mark dest as having copies of source files. If dest is a
1582 1582 directory, copies are put in that directory. If dest is a file,
1583 1583 the source must be a single file.
1584 1584
1585 1585 By default, this command copies the contents of files as they
1586 1586 exist in the working directory. If invoked with -A/--after, the
1587 1587 operation is recorded, but no copying is performed.
1588 1588
1589 1589 This command takes effect with the next commit. To undo a copy
1590 1590 before that, see :hg:`revert`.
1591 1591
1592 1592 Returns 0 on success, 1 if errors are encountered.
1593 1593 """
1594 1594 wlock = repo.wlock(False)
1595 1595 try:
1596 1596 return cmdutil.copy(ui, repo, pats, opts)
1597 1597 finally:
1598 1598 wlock.release()
1599 1599
1600 1600 @command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
1601 1601 def debugancestor(ui, repo, *args):
1602 1602 """find the ancestor revision of two revisions in a given index"""
1603 1603 if len(args) == 3:
1604 1604 index, rev1, rev2 = args
1605 1605 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
1606 1606 lookup = r.lookup
1607 1607 elif len(args) == 2:
1608 1608 if not repo:
1609 1609 raise util.Abort(_("there is no Mercurial repository here "
1610 1610 "(.hg not found)"))
1611 1611 rev1, rev2 = args
1612 1612 r = repo.changelog
1613 1613 lookup = repo.lookup
1614 1614 else:
1615 1615 raise util.Abort(_('either two or three arguments required'))
1616 1616 a = r.ancestor(lookup(rev1), lookup(rev2))
1617 1617 ui.write("%d:%s\n" % (r.rev(a), hex(a)))
1618 1618
1619 1619 @command('debugbuilddag',
1620 1620 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
1621 1621 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
1622 1622 ('n', 'new-file', None, _('add new file at each rev'))],
1623 1623 _('[OPTION]... [TEXT]'))
1624 1624 def debugbuilddag(ui, repo, text=None,
1625 1625 mergeable_file=False,
1626 1626 overwritten_file=False,
1627 1627 new_file=False):
1628 1628 """builds a repo with a given DAG from scratch in the current empty repo
1629 1629
1630 1630 The description of the DAG is read from stdin if not given on the
1631 1631 command line.
1632 1632
1633 1633 Elements:
1634 1634
1635 1635 - "+n" is a linear run of n nodes based on the current default parent
1636 1636 - "." is a single node based on the current default parent
1637 1637 - "$" resets the default parent to null (implied at the start);
1638 1638 otherwise the default parent is always the last node created
1639 1639 - "<p" sets the default parent to the backref p
1640 1640 - "*p" is a fork at parent p, which is a backref
1641 1641 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
1642 1642 - "/p2" is a merge of the preceding node and p2
1643 1643 - ":tag" defines a local tag for the preceding node
1644 1644 - "@branch" sets the named branch for subsequent nodes
1645 1645 - "#...\\n" is a comment up to the end of the line
1646 1646
1647 1647 Whitespace between the above elements is ignored.
1648 1648
1649 1649 A backref is either
1650 1650
1651 1651 - a number n, which references the node curr-n, where curr is the current
1652 1652 node, or
1653 1653 - the name of a local tag you placed earlier using ":tag", or
1654 1654 - empty to denote the default parent.
1655 1655
1656 1656 All string-valued elements are either strictly alphanumeric, or must
1657 1657 be enclosed in double quotes ("..."), with "\\" as escape character.
1658 1658 """
1659 1659
1660 1660 if text is None:
1661 1661 ui.status(_("reading DAG from stdin\n"))
1662 1662 text = ui.fin.read()
1663 1663
1664 1664 cl = repo.changelog
1665 1665 if len(cl) > 0:
1666 1666 raise util.Abort(_('repository is not empty'))
1667 1667
1668 1668 # determine number of revs in DAG
1669 1669 total = 0
1670 1670 for type, data in dagparser.parsedag(text):
1671 1671 if type == 'n':
1672 1672 total += 1
1673 1673
1674 1674 if mergeable_file:
1675 1675 linesperrev = 2
1676 1676 # make a file with k lines per rev
1677 1677 initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
1678 1678 initialmergedlines.append("")
1679 1679
1680 1680 tags = []
1681 1681
1682 1682 lock = tr = None
1683 1683 try:
1684 1684 lock = repo.lock()
1685 1685 tr = repo.transaction("builddag")
1686 1686
1687 1687 at = -1
1688 1688 atbranch = 'default'
1689 1689 nodeids = []
1690 1690 id = 0
1691 1691 ui.progress(_('building'), id, unit=_('revisions'), total=total)
1692 1692 for type, data in dagparser.parsedag(text):
1693 1693 if type == 'n':
1694 1694 ui.note(('node %s\n' % str(data)))
1695 1695 id, ps = data
1696 1696
1697 1697 files = []
1698 1698 fctxs = {}
1699 1699
1700 1700 p2 = None
1701 1701 if mergeable_file:
1702 1702 fn = "mf"
1703 1703 p1 = repo[ps[0]]
1704 1704 if len(ps) > 1:
1705 1705 p2 = repo[ps[1]]
1706 1706 pa = p1.ancestor(p2)
1707 1707 base, local, other = [x[fn].data() for x in (pa, p1,
1708 1708 p2)]
1709 1709 m3 = simplemerge.Merge3Text(base, local, other)
1710 1710 ml = [l.strip() for l in m3.merge_lines()]
1711 1711 ml.append("")
1712 1712 elif at > 0:
1713 1713 ml = p1[fn].data().split("\n")
1714 1714 else:
1715 1715 ml = initialmergedlines
1716 1716 ml[id * linesperrev] += " r%i" % id
1717 1717 mergedtext = "\n".join(ml)
1718 1718 files.append(fn)
1719 1719 fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
1720 1720
1721 1721 if overwritten_file:
1722 1722 fn = "of"
1723 1723 files.append(fn)
1724 1724 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
1725 1725
1726 1726 if new_file:
1727 1727 fn = "nf%i" % id
1728 1728 files.append(fn)
1729 1729 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
1730 1730 if len(ps) > 1:
1731 1731 if not p2:
1732 1732 p2 = repo[ps[1]]
1733 1733 for fn in p2:
1734 1734 if fn.startswith("nf"):
1735 1735 files.append(fn)
1736 1736 fctxs[fn] = p2[fn]
1737 1737
1738 1738 def fctxfn(repo, cx, path):
1739 1739 return fctxs.get(path)
1740 1740
1741 1741 if len(ps) == 0 or ps[0] < 0:
1742 1742 pars = [None, None]
1743 1743 elif len(ps) == 1:
1744 1744 pars = [nodeids[ps[0]], None]
1745 1745 else:
1746 1746 pars = [nodeids[p] for p in ps]
1747 1747 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
1748 1748 date=(id, 0),
1749 1749 user="debugbuilddag",
1750 1750 extra={'branch': atbranch})
1751 1751 nodeid = repo.commitctx(cx)
1752 1752 nodeids.append(nodeid)
1753 1753 at = id
1754 1754 elif type == 'l':
1755 1755 id, name = data
1756 1756 ui.note(('tag %s\n' % name))
1757 1757 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
1758 1758 elif type == 'a':
1759 1759 ui.note(('branch %s\n' % data))
1760 1760 atbranch = data
1761 1761 ui.progress(_('building'), id, unit=_('revisions'), total=total)
1762 1762 tr.close()
1763 1763
1764 1764 if tags:
1765 1765 repo.opener.write("localtags", "".join(tags))
1766 1766 finally:
1767 1767 ui.progress(_('building'), None)
1768 1768 release(tr, lock)
1769 1769
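# A worked reading of the DAG notation documented above; the text itself is
# made up and the interpretation follows the element descriptions in the
# docstring.
#
#     hg debugbuilddag '+3 *2 +1 /2'
#
#     +3   three linear revisions r0, r1, r2
#     *2   a fork: r3 is parented on backref 2, i.e. r0
#     +1   r4 continues on top of r3
#     /2   r5 merges the preceding node r4 with backref 2, i.e. r2
#
# Run in an empty repository, this yields a small branchy history with one
# merge, which is convenient test input for log/graph commands.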
1770 1770 @command('debugbundle',
1771 1771 [('a', 'all', None, _('show all details'))],
1772 1772 _('FILE'),
1773 1773 norepo=True)
1774 1774 def debugbundle(ui, bundlepath, all=None, **opts):
1775 1775 """lists the contents of a bundle"""
1776 1776 f = hg.openpath(ui, bundlepath)
1777 1777 try:
1778 1778 gen = exchange.readbundle(ui, f, bundlepath)
1779 1779 if all:
1780 1780 ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n"))
1781 1781
1782 1782 def showchunks(named):
1783 1783 ui.write("\n%s\n" % named)
1784 1784 chain = None
1785 1785 while True:
1786 1786 chunkdata = gen.deltachunk(chain)
1787 1787 if not chunkdata:
1788 1788 break
1789 1789 node = chunkdata['node']
1790 1790 p1 = chunkdata['p1']
1791 1791 p2 = chunkdata['p2']
1792 1792 cs = chunkdata['cs']
1793 1793 deltabase = chunkdata['deltabase']
1794 1794 delta = chunkdata['delta']
1795 1795 ui.write("%s %s %s %s %s %s\n" %
1796 1796 (hex(node), hex(p1), hex(p2),
1797 1797 hex(cs), hex(deltabase), len(delta)))
1798 1798 chain = node
1799 1799
1800 1800 chunkdata = gen.changelogheader()
1801 1801 showchunks("changelog")
1802 1802 chunkdata = gen.manifestheader()
1803 1803 showchunks("manifest")
1804 1804 while True:
1805 1805 chunkdata = gen.filelogheader()
1806 1806 if not chunkdata:
1807 1807 break
1808 1808 fname = chunkdata['filename']
1809 1809 showchunks(fname)
1810 1810 else:
1811 1811 chunkdata = gen.changelogheader()
1812 1812 chain = None
1813 1813 while True:
1814 1814 chunkdata = gen.deltachunk(chain)
1815 1815 if not chunkdata:
1816 1816 break
1817 1817 node = chunkdata['node']
1818 1818 ui.write("%s\n" % hex(node))
1819 1819 chain = node
1820 1820 finally:
1821 1821 f.close()
1822 1822
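# Illustrative usage tying debugbundle to the bundle command above (the file
# name is made up):
#
#     hg bundle -a everything.hg
#     hg debugbundle everything.hg         # changelog node ids, one per line
#     hg debugbundle --all everything.hg   # id, p1, p2, cset, delta base and
#                                          # len(delta) for every chunk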
1823 1823 @command('debugcheckstate', [], '')
1824 1824 def debugcheckstate(ui, repo):
1825 1825 """validate the correctness of the current dirstate"""
1826 1826 parent1, parent2 = repo.dirstate.parents()
1827 1827 m1 = repo[parent1].manifest()
1828 1828 m2 = repo[parent2].manifest()
1829 1829 errors = 0
1830 1830 for f in repo.dirstate:
1831 1831 state = repo.dirstate[f]
1832 1832 if state in "nr" and f not in m1:
1833 1833 ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
1834 1834 errors += 1
1835 1835 if state in "a" and f in m1:
1836 1836 ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
1837 1837 errors += 1
1838 1838 if state in "m" and f not in m1 and f not in m2:
1839 1839 ui.warn(_("%s in state %s, but not in either manifest\n") %
1840 1840 (f, state))
1841 1841 errors += 1
1842 1842 for f in m1:
1843 1843 state = repo.dirstate[f]
1844 1844 if state not in "nrm":
1845 1845 ui.warn(_("%s in manifest1, but listed as state %s\n") % (f, state))
1846 1846 errors += 1
1847 1847 if errors:
1848 1848 error = _(".hg/dirstate inconsistent with current parent's manifest")
1849 1849 raise util.Abort(error)
1850 1850
1851 1851 @command('debugcommands', [], _('[COMMAND]'), norepo=True)
1852 1852 def debugcommands(ui, cmd='', *args):
1853 1853 """list all available commands and options"""
1854 1854 for cmd, vals in sorted(table.iteritems()):
1855 1855 cmd = cmd.split('|')[0].strip('^')
1856 1856 opts = ', '.join([i[1] for i in vals[1]])
1857 1857 ui.write('%s: %s\n' % (cmd, opts))
1858 1858
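# Illustrative sketch (not part of this file): how the command-table keys
# iterated above are normalised.  Keys look like '^diff' or
# 'debugdirstate|debugstate'; the primary name is the part before the first
# '|', with the leading '^' marker (used for "common" commands) stripped.
def primaryname(tablekey):
    return tablekey.split('|')[0].strip('^')

# primaryname('^diff') == 'diff'
# primaryname('debugdirstate|debugstate') == 'debugdirstate'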
1859 1859 @command('debugcomplete',
1860 1860 [('o', 'options', None, _('show the command options'))],
1861 1861 _('[-o] CMD'),
1862 1862 norepo=True)
1863 1863 def debugcomplete(ui, cmd='', **opts):
1864 1864 """returns the completion list associated with the given command"""
1865 1865
1866 1866 if opts.get('options'):
1867 1867 options = []
1868 1868 otables = [globalopts]
1869 1869 if cmd:
1870 1870 aliases, entry = cmdutil.findcmd(cmd, table, False)
1871 1871 otables.append(entry[1])
1872 1872 for t in otables:
1873 1873 for o in t:
1874 1874 if "(DEPRECATED)" in o[3]:
1875 1875 continue
1876 1876 if o[0]:
1877 1877 options.append('-%s' % o[0])
1878 1878 options.append('--%s' % o[1])
1879 1879 ui.write("%s\n" % "\n".join(options))
1880 1880 return
1881 1881
1882 1882 cmdlist = cmdutil.findpossible(cmd, table)
1883 1883 if ui.verbose:
1884 1884 cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
1885 1885 ui.write("%s\n" % "\n".join(sorted(cmdlist)))
1886 1886
1887 1887 @command('debugdag',
1888 1888 [('t', 'tags', None, _('use tags as labels')),
1889 1889 ('b', 'branches', None, _('annotate with branch names')),
1890 1890 ('', 'dots', None, _('use dots for runs')),
1891 1891 ('s', 'spaces', None, _('separate elements by spaces'))],
1892 1892 _('[OPTION]... [FILE [REV]...]'),
1893 1893 optionalrepo=True)
1894 1894 def debugdag(ui, repo, file_=None, *revs, **opts):
1895 1895 """format the changelog or an index DAG as a concise textual description
1896 1896
1897 1897 If you pass a revlog index, the revlog's DAG is emitted. If you list
1898 1898 revision numbers, they get labeled in the output as rN.
1899 1899
1900 1900 Otherwise, the changelog DAG of the current repo is emitted.
1901 1901 """
1902 1902 spaces = opts.get('spaces')
1903 1903 dots = opts.get('dots')
1904 1904 if file_:
1905 1905 rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
1906 1906 revs = set((int(r) for r in revs))
1907 1907 def events():
1908 1908 for r in rlog:
1909 1909 yield 'n', (r, list(set(p for p in rlog.parentrevs(r)
1910 1910 if p != -1)))
1911 1911 if r in revs:
1912 1912 yield 'l', (r, "r%i" % r)
1913 1913 elif repo:
1914 1914 cl = repo.changelog
1915 1915 tags = opts.get('tags')
1916 1916 branches = opts.get('branches')
1917 1917 if tags:
1918 1918 labels = {}
1919 1919 for l, n in repo.tags().items():
1920 1920 labels.setdefault(cl.rev(n), []).append(l)
1921 1921 def events():
1922 1922 b = "default"
1923 1923 for r in cl:
1924 1924 if branches:
1925 1925 newb = cl.read(cl.node(r))[5]['branch']
1926 1926 if newb != b:
1927 1927 yield 'a', newb
1928 1928 b = newb
1929 1929 yield 'n', (r, list(set(p for p in cl.parentrevs(r)
1930 1930 if p != -1)))
1931 1931 if tags:
1932 1932 ls = labels.get(r)
1933 1933 if ls:
1934 1934 for l in ls:
1935 1935 yield 'l', (r, l)
1936 1936 else:
1937 1937 raise util.Abort(_('need repo for changelog dag'))
1938 1938
1939 1939 for line in dagparser.dagtextlines(events(),
1940 1940 addspaces=spaces,
1941 1941 wraplabels=True,
1942 1942 wrapannotations=True,
1943 1943 wrapnonlinear=dots,
1944 1944 usedots=dots,
1945 1945 maxlinewidth=70):
1946 1946 ui.write(line)
1947 1947 ui.write("\n")
1948 1948
1949 1949 @command('debugdata',
1950 1950 [('c', 'changelog', False, _('open changelog')),
1951 1951 ('m', 'manifest', False, _('open manifest'))],
1952 1952 _('-c|-m|FILE REV'))
1953 1953 def debugdata(ui, repo, file_, rev=None, **opts):
1954 1954 """dump the contents of a data file revision"""
1955 1955 if opts.get('changelog') or opts.get('manifest'):
1956 1956 file_, rev = None, file_
1957 1957 elif rev is None:
1958 1958 raise error.CommandError('debugdata', _('invalid arguments'))
1959 1959 r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
1960 1960 try:
1961 1961 ui.write(r.revision(r.lookup(rev)))
1962 1962 except KeyError:
1963 1963 raise util.Abort(_('invalid revision identifier %s') % rev)
1964 1964
1965 1965 @command('debugdate',
1966 1966 [('e', 'extended', None, _('try extended date formats'))],
1967 1967 _('[-e] DATE [RANGE]'),
1968 1968 norepo=True, optionalrepo=True)
1969 1969 def debugdate(ui, date, range=None, **opts):
1970 1970 """parse and display a date"""
1971 1971 if opts["extended"]:
1972 1972 d = util.parsedate(date, util.extendeddateformats)
1973 1973 else:
1974 1974 d = util.parsedate(date)
1975 1975 ui.write(("internal: %s %s\n") % d)
1976 1976 ui.write(("standard: %s\n") % util.datestr(d))
1977 1977 if range:
1978 1978 m = util.matchdate(range)
1979 1979 ui.write(("match: %s\n") % m(d[0]))
1980 1980
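# Illustrative sketch (not part of this file): the "internal" form printed
# above is a (unixtime, tzoffset) pair, the offset being seconds west of
# UTC; the "standard" form renders that pair as local time plus a +/-HHMM
# zone suffix.  This is a rough stand-alone re-implementation for
# illustration only -- util.parsedate/util.datestr are the real converters.
import time

def render_internal_date(internal):
    unixtime, tzoffset = internal
    localtm = time.gmtime(unixtime - tzoffset)
    sign = '+' if tzoffset <= 0 else '-'
    hours, minutes = divmod(abs(tzoffset) // 60, 60)
    return '%s %s%02d%02d' % (time.strftime('%a %b %d %H:%M:%S %Y', localtm),
                              sign, hours, minutes)

# render_internal_date((0, 0)) == 'Thu Jan 01 00:00:00 1970 +0000'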
1981 1981 @command('debugdiscovery',
1982 1982 [('', 'old', None, _('use old-style discovery')),
1983 1983 ('', 'nonheads', None,
1984 1984 _('use old-style discovery with non-heads included')),
1985 1985 ] + remoteopts,
1986 1986 _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
1987 1987 def debugdiscovery(ui, repo, remoteurl="default", **opts):
1988 1988 """runs the changeset discovery protocol in isolation"""
1989 1989 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
1990 1990 opts.get('branch'))
1991 1991 remote = hg.peer(repo, opts, remoteurl)
1992 1992 ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
1993 1993
1994 1994 # make sure tests are repeatable
1995 1995 random.seed(12323)
1996 1996
1997 1997 def doit(localheads, remoteheads, remote=remote):
1998 1998 if opts.get('old'):
1999 1999 if localheads:
2000 2000 raise util.Abort('cannot use localheads with old style '
2001 2001 'discovery')
2002 2002 if not util.safehasattr(remote, 'branches'):
2003 2003 # enable in-client legacy support
2004 2004 remote = localrepo.locallegacypeer(remote.local())
2005 2005 common, _in, hds = treediscovery.findcommonincoming(repo, remote,
2006 2006 force=True)
2007 2007 common = set(common)
2008 2008 if not opts.get('nonheads'):
2009 2009 ui.write(("unpruned common: %s\n") %
2010 2010 " ".join(sorted(short(n) for n in common)))
2011 2011 dag = dagutil.revlogdag(repo.changelog)
2012 2012 all = dag.ancestorset(dag.internalizeall(common))
2013 2013 common = dag.externalizeall(dag.headsetofconnecteds(all))
2014 2014 else:
2015 2015 common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
2016 2016 common = set(common)
2017 2017 rheads = set(hds)
2018 2018 lheads = set(repo.heads())
2019 2019 ui.write(("common heads: %s\n") %
2020 2020 " ".join(sorted(short(n) for n in common)))
2021 2021 if lheads <= common:
2022 2022 ui.write(("local is subset\n"))
2023 2023 elif rheads <= common:
2024 2024 ui.write(("remote is subset\n"))
2025 2025
2026 2026 serverlogs = opts.get('serverlog')
2027 2027 if serverlogs:
2028 2028 for filename in serverlogs:
2029 2029 logfile = open(filename, 'r')
2030 2030 try:
2031 2031 line = logfile.readline()
2032 2032 while line:
2033 2033 parts = line.strip().split(';')
2034 2034 op = parts[1]
2035 2035 if op == 'cg':
2036 2036 pass
2037 2037 elif op == 'cgss':
2038 2038 doit(parts[2].split(' '), parts[3].split(' '))
2039 2039 elif op == 'unb':
2040 2040 doit(parts[3].split(' '), parts[2].split(' '))
2041 2041 line = logfile.readline()
2042 2042 finally:
2043 2043 logfile.close()
2044 2044
2045 2045 else:
2046 2046 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
2047 2047 opts.get('remote_head'))
2048 2048 localrevs = opts.get('local_head')
2049 2049 doit(localrevs, remoterevs)
2050 2050
2051 2051 @command('debugfileset',
2052 2052 [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
2053 2053 _('[-r REV] FILESPEC'))
2054 2054 def debugfileset(ui, repo, expr, **opts):
2055 2055 '''parse and apply a fileset specification'''
2056 2056 ctx = scmutil.revsingle(repo, opts.get('rev'), None)
2057 2057 if ui.verbose:
2058 2058 tree = fileset.parse(expr)[0]
2059 2059 ui.note(tree, "\n")
2060 2060
2061 2061 for f in ctx.getfileset(expr):
2062 2062 ui.write("%s\n" % f)
2063 2063
2064 2064 @command('debugfsinfo', [], _('[PATH]'), norepo=True)
2065 2065 def debugfsinfo(ui, path="."):
2066 2066 """show information detected about current filesystem"""
2067 2067 util.writefile('.debugfsinfo', '')
2068 2068 ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
2069 2069 ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
2070 2070 ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
2071 2071 ui.write(('case-sensitive: %s\n') % (util.checkcase('.debugfsinfo')
2072 2072 and 'yes' or 'no'))
2073 2073 os.unlink('.debugfsinfo')
2074 2074
2075 2075 @command('debuggetbundle',
2076 2076 [('H', 'head', [], _('id of head node'), _('ID')),
2077 2077 ('C', 'common', [], _('id of common node'), _('ID')),
2078 2078 ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
2079 2079 _('REPO FILE [-H|-C ID]...'),
2080 2080 norepo=True)
2081 2081 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
2082 2082 """retrieves a bundle from a repo
2083 2083
2084 2084 Every ID must be a full-length hex node id string. Saves the bundle to the
2085 2085 given file.
2086 2086 """
2087 2087 repo = hg.peer(ui, opts, repopath)
2088 2088 if not repo.capable('getbundle'):
2089 2089 raise util.Abort("getbundle() not supported by target repository")
2090 2090 args = {}
2091 2091 if common:
2092 2092 args['common'] = [bin(s) for s in common]
2093 2093 if head:
2094 2094 args['heads'] = [bin(s) for s in head]
2095 2095 # TODO: get desired bundlecaps from command line.
2096 2096 args['bundlecaps'] = None
2097 2097 bundle = repo.getbundle('debug', **args)
2098 2098
2099 2099 bundletype = opts.get('type', 'bzip2').lower()
2100 2100 btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
2101 2101 bundletype = btypes.get(bundletype)
2102 2102 if bundletype not in changegroup.bundletypes:
2103 2103 raise util.Abort(_('unknown bundle type specified with --type'))
2104 2104 changegroup.writebundle(bundle, bundlepath, bundletype)
2105 2105
2106 2106 @command('debugignore', [], '')
2107 2107 def debugignore(ui, repo, *values, **opts):
2108 2108 """display the combined ignore pattern"""
2109 2109 ignore = repo.dirstate._ignore
2110 2110 includepat = getattr(ignore, 'includepat', None)
2111 2111 if includepat is not None:
2112 2112 ui.write("%s\n" % includepat)
2113 2113 else:
2114 2114 raise util.Abort(_("no ignore patterns found"))
2115 2115
2116 2116 @command('debugindex',
2117 2117 [('c', 'changelog', False, _('open changelog')),
2118 2118 ('m', 'manifest', False, _('open manifest')),
2119 2119 ('f', 'format', 0, _('revlog format'), _('FORMAT'))],
2120 2120 _('[-f FORMAT] -c|-m|FILE'),
2121 2121 optionalrepo=True)
2122 2122 def debugindex(ui, repo, file_=None, **opts):
2123 2123 """dump the contents of an index file"""
2124 2124 r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
2125 2125 format = opts.get('format', 0)
2126 2126 if format not in (0, 1):
2127 2127 raise util.Abort(_("unknown format %d") % format)
2128 2128
2129 2129 generaldelta = r.version & revlog.REVLOGGENERALDELTA
2130 2130 if generaldelta:
2131 2131 basehdr = ' delta'
2132 2132 else:
2133 2133 basehdr = ' base'
2134 2134
2135 2135 if format == 0:
2136 2136 ui.write(" rev offset length " + basehdr + " linkrev"
2137 2137 " nodeid p1 p2\n")
2138 2138 elif format == 1:
2139 2139 ui.write(" rev flag offset length"
2140 2140 " size " + basehdr + " link p1 p2"
2141 2141 " nodeid\n")
2142 2142
2143 2143 for i in r:
2144 2144 node = r.node(i)
2145 2145 if generaldelta:
2146 2146 base = r.deltaparent(i)
2147 2147 else:
2148 2148 base = r.chainbase(i)
2149 2149 if format == 0:
2150 2150 try:
2151 2151 pp = r.parents(node)
2152 2152 except Exception:
2153 2153 pp = [nullid, nullid]
2154 2154 ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
2155 2155 i, r.start(i), r.length(i), base, r.linkrev(i),
2156 2156 short(node), short(pp[0]), short(pp[1])))
2157 2157 elif format == 1:
2158 2158 pr = r.parentrevs(i)
2159 2159 ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
2160 2160 i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
2161 2161 base, r.linkrev(i), pr[0], pr[1], short(node)))
2162 2162
2163 2163 @command('debugindexdot', [], _('FILE'), optionalrepo=True)
2164 2164 def debugindexdot(ui, repo, file_):
2165 2165 """dump an index DAG as a graphviz dot file"""
2166 2166 r = None
2167 2167 if repo:
2168 2168 filelog = repo.file(file_)
2169 2169 if len(filelog):
2170 2170 r = filelog
2171 2171 if not r:
2172 2172 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
2173 2173 ui.write(("digraph G {\n"))
2174 2174 for i in r:
2175 2175 node = r.node(i)
2176 2176 pp = r.parents(node)
2177 2177 ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
2178 2178 if pp[1] != nullid:
2179 2179 ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
2180 2180 ui.write("}\n")
2181 2181
2182 2182 @command('debuginstall', [], '', norepo=True)
2183 2183 def debuginstall(ui):
2184 2184 '''test Mercurial installation
2185 2185
2186 2186 Returns 0 on success.
2187 2187 '''
2188 2188
2189 2189 def writetemp(contents):
2190 2190 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
2191 2191 f = os.fdopen(fd, "wb")
2192 2192 f.write(contents)
2193 2193 f.close()
2194 2194 return name
2195 2195
2196 2196 problems = 0
2197 2197
2198 2198 # encoding
2199 2199 ui.status(_("checking encoding (%s)...\n") % encoding.encoding)
2200 2200 try:
2201 2201 encoding.fromlocal("test")
2202 2202 except util.Abort, inst:
2203 2203 ui.write(" %s\n" % inst)
2204 2204 ui.write(_(" (check that your locale is properly set)\n"))
2205 2205 problems += 1
2206 2206
2207 2207 # Python
2208 2208 ui.status(_("checking Python executable (%s)\n") % sys.executable)
2209 2209 ui.status(_("checking Python version (%s)\n")
2210 2210 % ("%s.%s.%s" % sys.version_info[:3]))
2211 2211 ui.status(_("checking Python lib (%s)...\n")
2212 2212 % os.path.dirname(os.__file__))
2213 2213
2214 2214 # compiled modules
2215 2215 ui.status(_("checking installed modules (%s)...\n")
2216 2216 % os.path.dirname(__file__))
2217 2217 try:
2218 2218 import bdiff, mpatch, base85, osutil
2219 2219 dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
2220 2220 except Exception, inst:
2221 2221 ui.write(" %s\n" % inst)
2222 2222 ui.write(_(" One or more extensions could not be found"))
2223 2223 ui.write(_(" (check that you compiled the extensions)\n"))
2224 2224 problems += 1
2225 2225
2226 2226 # templates
2227 2227 import templater
2228 2228 p = templater.templatepath()
2229 2229 ui.status(_("checking templates (%s)...\n") % ' '.join(p))
2230 2230 if p:
2231 2231 m = templater.templatepath("map-cmdline.default")
2232 2232 if m:
2233 2233 # template found, check if it is working
2234 2234 try:
2235 2235 templater.templater(m)
2236 2236 except Exception, inst:
2237 2237 ui.write(" %s\n" % inst)
2238 2238 p = None
2239 2239 else:
2240 2240 ui.write(_(" template 'default' not found\n"))
2241 2241 p = None
2242 2242 else:
2243 2243 ui.write(_(" no template directories found\n"))
2244 2244 if not p:
2245 2245 ui.write(_(" (templates seem to have been installed incorrectly)\n"))
2246 2246 problems += 1
2247 2247
2248 2248 # editor
2249 2249 ui.status(_("checking commit editor...\n"))
2250 2250 editor = ui.geteditor()
2251 2251 cmdpath = util.findexe(shlex.split(editor)[0])
2252 2252 if not cmdpath:
2253 2253 if editor == 'vi':
2254 2254 ui.write(_(" No commit editor set and can't find vi in PATH\n"))
2255 2255 ui.write(_(" (specify a commit editor in your configuration"
2256 2256 " file)\n"))
2257 2257 else:
2258 2258 ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
2259 2259 ui.write(_(" (specify a commit editor in your configuration"
2260 2260 " file)\n"))
2261 2261 problems += 1
2262 2262
2263 2263 # check username
2264 2264 ui.status(_("checking username...\n"))
2265 2265 try:
2266 2266 ui.username()
2267 2267 except util.Abort, e:
2268 2268 ui.write(" %s\n" % e)
2269 2269 ui.write(_(" (specify a username in your configuration file)\n"))
2270 2270 problems += 1
2271 2271
2272 2272 if not problems:
2273 2273 ui.status(_("no problems detected\n"))
2274 2274 else:
2275 2275 ui.write(_("%s problems detected,"
2276 2276 " please check your install!\n") % problems)
2277 2277
2278 2278 return problems
2279 2279
2280 2280 @command('debugknown', [], _('REPO ID...'), norepo=True)
2281 2281 def debugknown(ui, repopath, *ids, **opts):
2282 2282 """test whether node ids are known to a repo
2283 2283
2284 2284 Every ID must be a full-length hex node id string. Returns a list of 0s
2285 2285 and 1s indicating unknown/known.
2286 2286 """
2287 2287 repo = hg.peer(ui, opts, repopath)
2288 2288 if not repo.capable('known'):
2289 2289 raise util.Abort("known() not supported by target repository")
2290 2290 flags = repo.known([bin(s) for s in ids])
2291 2291 ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
2292 2292
2293 2293 @command('debuglabelcomplete', [], _('LABEL...'))
2294 2294 def debuglabelcomplete(ui, repo, *args):
2295 2295 '''complete "labels" - tags, open branch names, bookmark names'''
2296 2296
2297 2297 labels = set()
2298 2298 labels.update(t[0] for t in repo.tagslist())
2299 2299 labels.update(repo._bookmarks.keys())
2300 2300 labels.update(tag for (tag, heads, tip, closed)
2301 2301 in repo.branchmap().iterbranches() if not closed)
2302 2302 completions = set()
2303 2303 if not args:
2304 2304 args = ['']
2305 2305 for a in args:
2306 2306 completions.update(l for l in labels if l.startswith(a))
2307 2307 ui.write('\n'.join(sorted(completions)))
2308 2308 ui.write('\n')
2309 2309
2310 2310 @command('debugobsolete',
2311 2311 [('', 'flags', 0, _('markers flag')),
2312 2312 ] + commitopts2,
2313 2313 _('[OBSOLETED [REPLACEMENT] [REPL... ]'))
2314 2314 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2315 2315 """create arbitrary obsolete marker
2316 2316
2317 2317 With no arguments, displays the list of obsolescence markers."""
2318 2318
2319 2319 def parsenodeid(s):
2320 2320 try:
2321 2321 # We do not use revsingle/revrange functions here to accept
2322 2322 # arbitrary node identifiers, possibly not present in the
2323 2323 # local repository.
2324 2324 n = bin(s)
2325 2325 if len(n) != len(nullid):
2326 2326 raise TypeError()
2327 2327 return n
2328 2328 except TypeError:
2329 2329 raise util.Abort('changeset references must be full hexadecimal '
2330 2330 'node identifiers')
2331 2331
2332 2332 if precursor is not None:
2333 2333 metadata = {}
2334 2334 if 'date' in opts:
2335 2335 metadata['date'] = opts['date']
2336 2336 metadata['user'] = opts['user'] or ui.username()
2337 2337 succs = tuple(parsenodeid(succ) for succ in successors)
2338 2338 l = repo.lock()
2339 2339 try:
2340 2340 tr = repo.transaction('debugobsolete')
2341 2341 try:
2342 2342 repo.obsstore.create(tr, parsenodeid(precursor), succs,
2343 2343 opts['flags'], metadata)
2344 2344 tr.close()
2345 2345 finally:
2346 2346 tr.release()
2347 2347 finally:
2348 2348 l.release()
2349 2349 else:
2350 2350 for m in obsolete.allmarkers(repo):
2351 2351 cmdutil.showmarker(ui, m)
2352 2352
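# Illustrative sketch (not part of this file): the full-length node check
# that parsenodeid above performs, written against plain strings so it can
# run standalone.  binascii.unhexlify stands in for mercurial.node.bin; a
# node is 20 bytes, i.e. 40 hexadecimal digits.
import binascii

def parse_full_node(s, nodelen=20):
    try:
        n = binascii.unhexlify(s)
    except (TypeError, ValueError, binascii.Error):
        n = None
    if n is None or len(n) != nodelen:
        raise ValueError('changeset references must be full hexadecimal '
                         'node identifiers')
    return n

# parse_full_node('ff' * 20) is accepted; parse_full_node('abc123') raises.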
2353 2353 @command('debugpathcomplete',
2354 2354 [('f', 'full', None, _('complete an entire path')),
2355 2355 ('n', 'normal', None, _('show only normal files')),
2356 2356 ('a', 'added', None, _('show only added files')),
2357 2357 ('r', 'removed', None, _('show only removed files'))],
2358 2358 _('FILESPEC...'))
2359 2359 def debugpathcomplete(ui, repo, *specs, **opts):
2360 2360 '''complete part or all of a tracked path
2361 2361
2362 2362 This command supports shells that offer path name completion. It
2363 2363 currently completes only files already known to the dirstate.
2364 2364
2365 2365 Completion extends only to the next path segment unless
2366 2366 --full is specified, in which case entire paths are used.'''
2367 2367
2368 2368 def complete(path, acceptable):
2369 2369 dirstate = repo.dirstate
2370 2370 spec = os.path.normpath(os.path.join(os.getcwd(), path))
2371 2371 rootdir = repo.root + os.sep
2372 2372 if spec != repo.root and not spec.startswith(rootdir):
2373 2373 return [], []
2374 2374 if os.path.isdir(spec):
2375 2375 spec += '/'
2376 2376 spec = spec[len(rootdir):]
2377 2377 fixpaths = os.sep != '/'
2378 2378 if fixpaths:
2379 2379 spec = spec.replace(os.sep, '/')
2380 2380 speclen = len(spec)
2381 2381 fullpaths = opts['full']
2382 2382 files, dirs = set(), set()
2383 2383 adddir, addfile = dirs.add, files.add
2384 2384 for f, st in dirstate.iteritems():
2385 2385 if f.startswith(spec) and st[0] in acceptable:
2386 2386 if fixpaths:
2387 2387 f = f.replace('/', os.sep)
2388 2388 if fullpaths:
2389 2389 addfile(f)
2390 2390 continue
2391 2391 s = f.find(os.sep, speclen)
2392 2392 if s >= 0:
2393 2393 adddir(f[:s])
2394 2394 else:
2395 2395 addfile(f)
2396 2396 return files, dirs
2397 2397
2398 2398 acceptable = ''
2399 2399 if opts['normal']:
2400 2400 acceptable += 'nm'
2401 2401 if opts['added']:
2402 2402 acceptable += 'a'
2403 2403 if opts['removed']:
2404 2404 acceptable += 'r'
2405 2405 cwd = repo.getcwd()
2406 2406 if not specs:
2407 2407 specs = ['.']
2408 2408
2409 2409 files, dirs = set(), set()
2410 2410 for spec in specs:
2411 2411 f, d = complete(spec, acceptable or 'nmar')
2412 2412 files.update(f)
2413 2413 dirs.update(d)
2414 2414 files.update(dirs)
2415 2415 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
2416 2416 ui.write('\n')
2417 2417
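# Illustrative sketch (not part of this file): the "complete only to the
# next path segment" behaviour described in the docstring above, over a
# plain list of tracked repo-relative paths instead of a real dirstate
# (and using '/' separators only, unlike the os.sep handling above).
def next_segment_completions(tracked, prefix, full=False):
    files, dirs = set(), set()
    for f in tracked:
        if not f.startswith(prefix):
            continue
        if full:
            files.add(f)
            continue
        cut = f.find('/', len(prefix))
        if cut >= 0:
            dirs.add(f[:cut])      # stop at the next path separator
        else:
            files.add(f)
    return sorted(files | dirs)

# next_segment_completions(['a/b/c', 'a/b/d', 'a/e'], 'a/') == ['a/b', 'a/e']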
2418 2418 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2419 2419 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2420 2420 '''access the pushkey key/value protocol
2421 2421
2422 2422 With two args, list the keys in the given namespace.
2423 2423
2424 2424 With five args, set a key to new if it currently is set to old.
2425 2425 Reports success or failure.
2426 2426 '''
2427 2427
2428 2428 target = hg.peer(ui, {}, repopath)
2429 2429 if keyinfo:
2430 2430 key, old, new = keyinfo
2431 2431 r = target.pushkey(namespace, key, old, new)
2432 2432 ui.status(str(r) + '\n')
2433 2433 return not r
2434 2434 else:
2435 2435 for k, v in sorted(target.listkeys(namespace).iteritems()):
2436 2436 ui.write("%s\t%s\n" % (k.encode('string-escape'),
2437 2437 v.encode('string-escape')))
2438 2438
2439 2439 @command('debugpvec', [], _('A B'))
2440 2440 def debugpvec(ui, repo, a, b=None):
2441 2441 ca = scmutil.revsingle(repo, a)
2442 2442 cb = scmutil.revsingle(repo, b)
2443 2443 pa = pvec.ctxpvec(ca)
2444 2444 pb = pvec.ctxpvec(cb)
2445 2445 if pa == pb:
2446 2446 rel = "="
2447 2447 elif pa > pb:
2448 2448 rel = ">"
2449 2449 elif pa < pb:
2450 2450 rel = "<"
2451 2451 elif pa | pb:
2452 2452 rel = "|"
2453 2453 ui.write(_("a: %s\n") % pa)
2454 2454 ui.write(_("b: %s\n") % pb)
2455 2455 ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2456 2456 ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
2457 2457 (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
2458 2458 pa.distance(pb), rel))
2459 2459
2460 2460 @command('debugrebuilddirstate|debugrebuildstate',
2461 2461 [('r', 'rev', '', _('revision to rebuild to'), _('REV'))],
2462 2462 _('[-r REV]'))
2463 2463 def debugrebuilddirstate(ui, repo, rev):
2464 2464 """rebuild the dirstate as it would look like for the given revision
2465 2465
2466 2466 If no revision is specified, the first parent of the working directory will be used.
2467 2467
2468 2468 The dirstate will be set to the files of the given revision.
2469 2469 The actual working directory content or existing dirstate
2470 2470 information such as adds or removes is not considered.
2471 2471
2472 2472 One use of this command is to make the next :hg:`status` invocation
2473 2473 check the actual file content.
2474 2474 """
2475 2475 ctx = scmutil.revsingle(repo, rev)
2476 2476 wlock = repo.wlock()
2477 2477 try:
2478 2478 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
2479 2479 finally:
2480 2480 wlock.release()
2481 2481
2482 2482 @command('debugrename',
2483 2483 [('r', 'rev', '', _('revision to debug'), _('REV'))],
2484 2484 _('[-r REV] FILE'))
2485 2485 def debugrename(ui, repo, file1, *pats, **opts):
2486 2486 """dump rename information"""
2487 2487
2488 2488 ctx = scmutil.revsingle(repo, opts.get('rev'))
2489 2489 m = scmutil.match(ctx, (file1,) + pats, opts)
2490 2490 for abs in ctx.walk(m):
2491 2491 fctx = ctx[abs]
2492 2492 o = fctx.filelog().renamed(fctx.filenode())
2493 2493 rel = m.rel(abs)
2494 2494 if o:
2495 2495 ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
2496 2496 else:
2497 2497 ui.write(_("%s not renamed\n") % rel)
2498 2498
2499 2499 @command('debugrevlog',
2500 2500 [('c', 'changelog', False, _('open changelog')),
2501 2501 ('m', 'manifest', False, _('open manifest')),
2502 2502 ('d', 'dump', False, _('dump index data'))],
2503 2503 _('-c|-m|FILE'),
2504 2504 optionalrepo=True)
2505 2505 def debugrevlog(ui, repo, file_=None, **opts):
2506 2506 """show data and statistics about a revlog"""
2507 2507 r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
2508 2508
2509 2509 if opts.get("dump"):
2510 2510 numrevs = len(r)
2511 2511 ui.write("# rev p1rev p2rev start end deltastart base p1 p2"
2512 2512 " rawsize totalsize compression heads\n")
2513 2513 ts = 0
2514 2514 heads = set()
2515 2515 for rev in xrange(numrevs):
2516 2516 dbase = r.deltaparent(rev)
2517 2517 if dbase == -1:
2518 2518 dbase = rev
2519 2519 cbase = r.chainbase(rev)
2520 2520 p1, p2 = r.parentrevs(rev)
2521 2521 rs = r.rawsize(rev)
2522 2522 ts = ts + rs
2523 2523 heads -= set(r.parentrevs(rev))
2524 2524 heads.add(rev)
2525 2525 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d %11d %5d\n" %
2526 2526 (rev, p1, p2, r.start(rev), r.end(rev),
2527 2527 r.start(dbase), r.start(cbase),
2528 2528 r.start(p1), r.start(p2),
2529 2529 rs, ts, ts / r.end(rev), len(heads)))
2530 2530 return 0
2531 2531
2532 2532 v = r.version
2533 2533 format = v & 0xFFFF
2534 2534 flags = []
2535 2535 gdelta = False
2536 2536 if v & revlog.REVLOGNGINLINEDATA:
2537 2537 flags.append('inline')
2538 2538 if v & revlog.REVLOGGENERALDELTA:
2539 2539 gdelta = True
2540 2540 flags.append('generaldelta')
2541 2541 if not flags:
2542 2542 flags = ['(none)']
2543 2543
2544 2544 nummerges = 0
2545 2545 numfull = 0
2546 2546 numprev = 0
2547 2547 nump1 = 0
2548 2548 nump2 = 0
2549 2549 numother = 0
2550 2550 nump1prev = 0
2551 2551 nump2prev = 0
2552 2552 chainlengths = []
2553 2553
2554 2554 datasize = [None, 0, 0L]
2555 2555 fullsize = [None, 0, 0L]
2556 2556 deltasize = [None, 0, 0L]
2557 2557
2558 2558 def addsize(size, l):
2559 2559 if l[0] is None or size < l[0]:
2560 2560 l[0] = size
2561 2561 if size > l[1]:
2562 2562 l[1] = size
2563 2563 l[2] += size
2564 2564
2565 2565 numrevs = len(r)
2566 2566 for rev in xrange(numrevs):
2567 2567 p1, p2 = r.parentrevs(rev)
2568 2568 delta = r.deltaparent(rev)
2569 2569 if format > 0:
2570 2570 addsize(r.rawsize(rev), datasize)
2571 2571 if p2 != nullrev:
2572 2572 nummerges += 1
2573 2573 size = r.length(rev)
2574 2574 if delta == nullrev:
2575 2575 chainlengths.append(0)
2576 2576 numfull += 1
2577 2577 addsize(size, fullsize)
2578 2578 else:
2579 2579 chainlengths.append(chainlengths[delta] + 1)
2580 2580 addsize(size, deltasize)
2581 2581 if delta == rev - 1:
2582 2582 numprev += 1
2583 2583 if delta == p1:
2584 2584 nump1prev += 1
2585 2585 elif delta == p2:
2586 2586 nump2prev += 1
2587 2587 elif delta == p1:
2588 2588 nump1 += 1
2589 2589 elif delta == p2:
2590 2590 nump2 += 1
2591 2591 elif delta != nullrev:
2592 2592 numother += 1
2593 2593
2594 2594 # Adjust size min value for empty cases
2595 2595 for size in (datasize, fullsize, deltasize):
2596 2596 if size[0] is None:
2597 2597 size[0] = 0
2598 2598
2599 2599 numdeltas = numrevs - numfull
2600 2600 numoprev = numprev - nump1prev - nump2prev
2601 2601 totalrawsize = datasize[2]
2602 2602 datasize[2] /= numrevs
2603 2603 fulltotal = fullsize[2]
2604 2604 fullsize[2] /= numfull
2605 2605 deltatotal = deltasize[2]
2606 2606 if numrevs - numfull > 0:
2607 2607 deltasize[2] /= numrevs - numfull
2608 2608 totalsize = fulltotal + deltatotal
2609 2609 avgchainlen = sum(chainlengths) / numrevs
2610 2610 compratio = totalrawsize / totalsize
2611 2611
2612 2612 basedfmtstr = '%%%dd\n'
2613 2613 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
2614 2614
2615 2615 def dfmtstr(max):
2616 2616 return basedfmtstr % len(str(max))
2617 2617 def pcfmtstr(max, padding=0):
2618 2618 return basepcfmtstr % (len(str(max)), ' ' * padding)
2619 2619
2620 2620 def pcfmt(value, total):
2621 2621 return (value, 100 * float(value) / total)
2622 2622
2623 2623 ui.write(('format : %d\n') % format)
2624 2624 ui.write(('flags : %s\n') % ', '.join(flags))
2625 2625
2626 2626 ui.write('\n')
2627 2627 fmt = pcfmtstr(totalsize)
2628 2628 fmt2 = dfmtstr(totalsize)
2629 2629 ui.write(('revisions : ') + fmt2 % numrevs)
2630 2630 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
2631 2631 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
2632 2632 ui.write(('revisions : ') + fmt2 % numrevs)
2633 2633 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
2634 2634 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
2635 2635 ui.write(('revision size : ') + fmt2 % totalsize)
2636 2636 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
2637 2637 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
2638 2638
2639 2639 ui.write('\n')
2640 2640 fmt = dfmtstr(max(avgchainlen, compratio))
2641 2641 ui.write(('avg chain length : ') + fmt % avgchainlen)
2642 2642 ui.write(('compression ratio : ') + fmt % compratio)
2643 2643
2644 2644 if format > 0:
2645 2645 ui.write('\n')
2646 2646 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
2647 2647 % tuple(datasize))
2648 2648 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
2649 2649 % tuple(fullsize))
2650 2650 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
2651 2651 % tuple(deltasize))
2652 2652
2653 2653 if numdeltas > 0:
2654 2654 ui.write('\n')
2655 2655 fmt = pcfmtstr(numdeltas)
2656 2656 fmt2 = pcfmtstr(numdeltas, 4)
2657 2657 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
2658 2658 if numprev > 0:
2659 2659 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
2660 2660 numprev))
2661 2661 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
2662 2662 numprev))
2663 2663 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
2664 2664 numprev))
2665 2665 if gdelta:
2666 2666 ui.write(('deltas against p1 : ')
2667 2667 + fmt % pcfmt(nump1, numdeltas))
2668 2668 ui.write(('deltas against p2 : ')
2669 2669 + fmt % pcfmt(nump2, numdeltas))
2670 2670 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
2671 2671 numdeltas))
2672 2672
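# Illustrative sketch (not part of this file): how the "avg chain length"
# figure above is derived -- a revision stored as a full text has chain
# length 0, while a delta adds one to the chain length of its delta parent.
def chain_lengths(deltaparents):
    """deltaparents[i] is the delta base of rev i, or -1 for a full text."""
    lengths = []
    for base in deltaparents:
        lengths.append(0 if base == -1 else lengths[base] + 1)
    return lengths

# chain_lengths([-1, 0, 1, -1, 3]) == [0, 1, 2, 0, 1]
# The average above is sum/len; debugrevlog's integer division would report
# this example as 0 rather than 0.8.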
2673 2673 @command('debugrevspec',
2674 2674 [('', 'optimize', None, _('print parsed tree after optimizing'))],
2675 2675 ('REVSPEC'))
2676 2676 def debugrevspec(ui, repo, expr, **opts):
2677 2677 """parse and apply a revision specification
2678 2678
2679 2679 Use --verbose to print the parsed tree before and after aliases
2680 2680 expansion.
2681 2681 """
2682 2682 if ui.verbose:
2683 2683 tree = revset.parse(expr)[0]
2684 2684 ui.note(revset.prettyformat(tree), "\n")
2685 2685 newtree = revset.findaliases(ui, tree)
2686 2686 if newtree != tree:
2687 2687 ui.note(revset.prettyformat(newtree), "\n")
2688 2688 if opts["optimize"]:
2689 2689 weight, optimizedtree = revset.optimize(newtree, True)
2690 2690 ui.note("* optimized:\n", revset.prettyformat(optimizedtree), "\n")
2691 2691 func = revset.match(ui, expr)
2692 2692 for c in func(repo, revset.spanset(repo)):
2693 2693 ui.write("%s\n" % c)
2694 2694
2695 2695 @command('debugsetparents', [], _('REV1 [REV2]'))
2696 2696 def debugsetparents(ui, repo, rev1, rev2=None):
2697 2697 """manually set the parents of the current working directory
2698 2698
2699 2699 This is useful for writing repository conversion tools, but should
2700 2700 be used with care.
2701 2701
2702 2702 Returns 0 on success.
2703 2703 """
2704 2704
2705 2705 r1 = scmutil.revsingle(repo, rev1).node()
2706 2706 r2 = scmutil.revsingle(repo, rev2, 'null').node()
2707 2707
2708 2708 wlock = repo.wlock()
2709 2709 try:
2710 2710 repo.setparents(r1, r2)
2711 2711 finally:
2712 2712 wlock.release()
2713 2713
2714 2714 @command('debugdirstate|debugstate',
2715 2715 [('', 'nodates', None, _('do not display the saved mtime')),
2716 2716 ('', 'datesort', None, _('sort by saved mtime'))],
2717 2717 _('[OPTION]...'))
2718 2718 def debugstate(ui, repo, nodates=None, datesort=None):
2719 2719 """show the contents of the current dirstate"""
2720 2720 timestr = ""
2721 2721 showdate = not nodates
2722 2722 if datesort:
2723 2723 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
2724 2724 else:
2725 2725 keyfunc = None # sort by filename
2726 2726 for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
2727 2727 if showdate:
2728 2728 if ent[3] == -1:
2729 2729 # Pad or slice to locale representation
2730 2730 locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
2731 2731 time.localtime(0)))
2732 2732 timestr = 'unset'
2733 2733 timestr = (timestr[:locale_len] +
2734 2734 ' ' * (locale_len - len(timestr)))
2735 2735 else:
2736 2736 timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
2737 2737 time.localtime(ent[3]))
2738 2738 if ent[1] & 020000:
2739 2739 mode = 'lnk'
2740 2740 else:
2741 2741 mode = '%3o' % (ent[1] & 0777 & ~util.umask)
2742 2742 ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
2743 2743 for f in repo.dirstate.copies():
2744 2744 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
2745 2745
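# Illustrative sketch (not part of this file): decoding the dirstate mode
# field the way the loop above does -- bit 0o20000 marks a symlink,
# otherwise the permission bits are shown, masked by the umask (the 0o022
# default here is an assumption for illustration; the real code uses the
# process umask via util.umask).
def describe_mode(mode, umask=0o022):
    if mode & 0o20000:
        return 'lnk'
    return '%3o' % (mode & 0o777 & ~umask)

# describe_mode(0o100755) == '755'   (regular executable file)
# describe_mode(0o120777) == 'lnk'   (symbolic link)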
2746 2746 @command('debugsub',
2747 2747 [('r', 'rev', '',
2748 2748 _('revision to check'), _('REV'))],
2749 2749 _('[-r REV] [REV]'))
2750 2750 def debugsub(ui, repo, rev=None):
2751 2751 ctx = scmutil.revsingle(repo, rev, None)
2752 2752 for k, v in sorted(ctx.substate.items()):
2753 2753 ui.write(('path %s\n') % k)
2754 2754 ui.write((' source %s\n') % v[0])
2755 2755 ui.write((' revision %s\n') % v[1])
2756 2756
2757 2757 @command('debugsuccessorssets',
2758 2758 [],
2759 2759 _('[REV]'))
2760 2760 def debugsuccessorssets(ui, repo, *revs):
2761 2761 """show set of successors for revision
2762 2762
2763 2763 A successors set of changeset A is a consistent group of revisions that
2764 2764 succeed A. It contains non-obsolete changesets only.
2765 2765
2766 2766 In most cases a changeset A has a single successors set containing a single
2767 2767 successor (changeset A replaced by A').
2768 2768
2769 2769 A changeset that is made obsolete with no successors is called "pruned".
2770 2770 Such changesets have no successors sets at all.
2771 2771
2772 2772 A changeset that has been "split" will have a successors set containing
2773 2773 more than one successor.
2774 2774
2775 2775 A changeset that has been rewritten in multiple different ways is called
2776 2776 "divergent". Such changesets have multiple successor sets (each of which
2777 2777 may also be split, i.e. have multiple successors).
2778 2778
2779 2779 Results are displayed as follows::
2780 2780
2781 2781 <rev1>
2782 2782 <successors-1A>
2783 2783 <rev2>
2784 2784 <successors-2A>
2785 2785 <successors-2B1> <successors-2B2> <successors-2B3>
2786 2786
2787 2787 Here rev2 has two possible (i.e. divergent) successors sets. The first
2788 2788 holds one element, whereas the second holds three (i.e. the changeset has
2789 2789 been split).
2790 2790 """
2791 2791 # passed to successorssets caching computation from one call to another
2792 2792 cache = {}
2793 2793 ctx2str = str
2794 2794 node2str = short
2795 2795 if ui.debug():
2796 2796 def ctx2str(ctx):
2797 2797 return ctx.hex()
2798 2798 node2str = hex
2799 2799 for rev in scmutil.revrange(repo, revs):
2800 2800 ctx = repo[rev]
2801 2801 ui.write('%s\n'% ctx2str(ctx))
2802 2802 for succsset in obsolete.successorssets(repo, ctx.node(), cache):
2803 2803 if succsset:
2804 2804 ui.write(' ')
2805 2805 ui.write(node2str(succsset[0]))
2806 2806 for node in succsset[1:]:
2807 2807 ui.write(' ')
2808 2808 ui.write(node2str(node))
2809 2809 ui.write('\n')
2810 2810
2811 2811 @command('debugwalk', walkopts, _('[OPTION]... [FILE]...'), inferrepo=True)
2812 2812 def debugwalk(ui, repo, *pats, **opts):
2813 2813 """show how files match on given patterns"""
2814 2814 m = scmutil.match(repo[None], pats, opts)
2815 2815 items = list(repo.walk(m))
2816 2816 if not items:
2817 2817 return
2818 2818 f = lambda fn: fn
2819 2819 if ui.configbool('ui', 'slash') and os.sep != '/':
2820 2820 f = lambda fn: util.normpath(fn)
2821 2821 fmt = 'f %%-%ds %%-%ds %%s' % (
2822 2822 max([len(abs) for abs in items]),
2823 2823 max([len(m.rel(abs)) for abs in items]))
2824 2824 for abs in items:
2825 2825 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2826 2826 ui.write("%s\n" % line.rstrip())
2827 2827
2828 2828 @command('debugwireargs',
2829 2829 [('', 'three', '', 'three'),
2830 2830 ('', 'four', '', 'four'),
2831 2831 ('', 'five', '', 'five'),
2832 2832 ] + remoteopts,
2833 2833 _('REPO [OPTIONS]... [ONE [TWO]]'),
2834 2834 norepo=True)
2835 2835 def debugwireargs(ui, repopath, *vals, **opts):
2836 2836 repo = hg.peer(ui, opts, repopath)
2837 2837 for opt in remoteopts:
2838 2838 del opts[opt[1]]
2839 2839 args = {}
2840 2840 for k, v in opts.iteritems():
2841 2841 if v:
2842 2842 args[k] = v
2843 2843 # run twice to check that we don't mess up the stream for the next command
2844 2844 res1 = repo.debugwireargs(*vals, **args)
2845 2845 res2 = repo.debugwireargs(*vals, **args)
2846 2846 ui.write("%s\n" % res1)
2847 2847 if res1 != res2:
2848 2848 ui.warn("%s\n" % res2)
2849 2849
2850 2850 @command('^diff',
2851 2851 [('r', 'rev', [], _('revision'), _('REV')),
2852 2852 ('c', 'change', '', _('change made by revision'), _('REV'))
2853 2853 ] + diffopts + diffopts2 + walkopts + subrepoopts,
2854 2854 _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'),
2855 2855 inferrepo=True)
2856 2856 def diff(ui, repo, *pats, **opts):
2857 2857 """diff repository (or selected files)
2858 2858
2859 2859 Show differences between revisions for the specified files.
2860 2860
2861 2861 Differences between files are shown using the unified diff format.
2862 2862
2863 2863 .. note::
2864 2864
2865 2865 diff may generate unexpected results for merges, as it will
2866 2866 default to comparing against the working directory's first
2867 2867 parent changeset if no revisions are specified.
2868 2868
2869 2869 When two revision arguments are given, then changes are shown
2870 2870 between those revisions. If only one revision is specified then
2871 2871 that revision is compared to the working directory, and, when no
2872 2872 revisions are specified, the working directory files are compared
2873 2873 to its parent.
2874 2874
2875 2875 Alternatively you can specify -c/--change with a revision to see
2876 2876 the changes in that changeset relative to its first parent.
2877 2877
2878 2878 Without the -a/--text option, diff will avoid generating diffs of
2879 2879 files it detects as binary. With -a, diff will generate a diff
2880 2880 anyway, probably with undesirable results.
2881 2881
2882 2882 Use the -g/--git option to generate diffs in the git extended diff
2883 2883 format. For more information, read :hg:`help diffs`.
2884 2884
2885 2885 .. container:: verbose
2886 2886
2887 2887 Examples:
2888 2888
2889 2889 - compare a file in the current working directory to its parent::
2890 2890
2891 2891 hg diff foo.c
2892 2892
2893 2893 - compare two historical versions of a directory, with rename info::
2894 2894
2895 2895 hg diff --git -r 1.0:1.2 lib/
2896 2896
2897 2897 - get change stats relative to the last change on some date::
2898 2898
2899 2899 hg diff --stat -r "date('may 2')"
2900 2900
2901 2901 - diff all newly-added files that contain a keyword::
2902 2902
2903 2903 hg diff "set:added() and grep(GNU)"
2904 2904
2905 2905 - compare a revision and its parents::
2906 2906
2907 2907 hg diff -c 9353 # compare against first parent
2908 2908 hg diff -r 9353^:9353 # same using revset syntax
2909 2909 hg diff -r 9353^2:9353 # compare against the second parent
2910 2910
2911 2911 Returns 0 on success.
2912 2912 """
2913 2913
2914 2914 revs = opts.get('rev')
2915 2915 change = opts.get('change')
2916 2916 stat = opts.get('stat')
2917 2917 reverse = opts.get('reverse')
2918 2918
2919 2919 if revs and change:
2920 2920 msg = _('cannot specify --rev and --change at the same time')
2921 2921 raise util.Abort(msg)
2922 2922 elif change:
2923 2923 node2 = scmutil.revsingle(repo, change, None).node()
2924 2924 node1 = repo[node2].p1().node()
2925 2925 else:
2926 2926 node1, node2 = scmutil.revpair(repo, revs)
2927 2927
2928 2928 if reverse:
2929 2929 node1, node2 = node2, node1
2930 2930
2931 2931 diffopts = patch.diffopts(ui, opts)
2932 2932 m = scmutil.match(repo[node2], pats, opts)
2933 2933 cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
2934 2934 listsubrepos=opts.get('subrepos'))
2935 2935
2936 2936 @command('^export',
2937 2937 [('o', 'output', '',
2938 2938 _('print output to file with formatted name'), _('FORMAT')),
2939 2939 ('', 'switch-parent', None, _('diff against the second parent')),
2940 2940 ('r', 'rev', [], _('revisions to export'), _('REV')),
2941 2941 ] + diffopts,
2942 2942 _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'))
2943 2943 def export(ui, repo, *changesets, **opts):
2944 2944 """dump the header and diffs for one or more changesets
2945 2945
2946 2946 Print the changeset header and diffs for one or more revisions.
2947 2947 If no revision is given, the parent of the working directory is used.
2948 2948
2949 2949 The information shown in the changeset header is: author, date,
2950 2950 branch name (if non-default), changeset hash, parent(s) and commit
2951 2951 comment.
2952 2952
2953 2953 .. note::
2954 2954
2955 2955 export may generate unexpected diff output for merge
2956 2956 changesets, as it will compare the merge changeset against its
2957 2957 first parent only.
2958 2958
2959 2959 Output may be to a file, in which case the name of the file is
2960 2960 given using a format string. The formatting rules are as follows:
2961 2961
2962 2962 :``%%``: literal "%" character
2963 2963 :``%H``: changeset hash (40 hexadecimal digits)
2964 2964 :``%N``: number of patches being generated
2965 2965 :``%R``: changeset revision number
2966 2966 :``%b``: basename of the exporting repository
2967 2967 :``%h``: short-form changeset hash (12 hexadecimal digits)
2968 2968 :``%m``: first line of the commit message (only alphanumeric characters)
2969 2969 :``%n``: zero-padded sequence number, starting at 1
2970 2970 :``%r``: zero-padded changeset revision number
2971 2971
2972 2972 Without the -a/--text option, export will avoid generating diffs
2973 2973 of files it detects as binary. With -a, export will generate a
2974 2974 diff anyway, probably with undesirable results.
2975 2975
2976 2976 Use the -g/--git option to generate diffs in the git extended diff
2977 2977 format. See :hg:`help diffs` for more information.
2978 2978
2979 2979 With the --switch-parent option, the diff will be against the
2980 2980 second parent. This can be useful for reviewing a merge.
2981 2981
2982 2982 .. container:: verbose
2983 2983
2984 2984 Examples:
2985 2985
2986 2986 - use export and import to transplant a bugfix to the current
2987 2987 branch::
2988 2988
2989 2989 hg export -r 9353 | hg import -
2990 2990
2991 2991 - export all the changesets between two revisions to a file with
2992 2992 rename information::
2993 2993
2994 2994 hg export --git -r 123:150 > changes.txt
2995 2995
2996 2996 - split outgoing changes into a series of patches with
2997 2997 descriptive names::
2998 2998
2999 2999 hg export -r "outgoing()" -o "%n-%m.patch"
3000 3000
3001 3001 Returns 0 on success.
3002 3002 """
3003 3003 changesets += tuple(opts.get('rev', []))
3004 3004 if not changesets:
3005 3005 changesets = ['.']
3006 3006 revs = scmutil.revrange(repo, changesets)
3007 3007 if not revs:
3008 3008 raise util.Abort(_("export requires at least one changeset"))
3009 3009 if len(revs) > 1:
3010 3010 ui.note(_('exporting patches:\n'))
3011 3011 else:
3012 3012 ui.note(_('exporting patch:\n'))
3013 3013 cmdutil.export(repo, revs, template=opts.get('output'),
3014 3014 switch_parent=opts.get('switch_parent'),
3015 3015 opts=patch.diffopts(ui, opts))
3016 3016
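# Illustrative sketch (not part of this file): the substitution model behind
# the export output-filename keys documented above.  Only a subset of keys
# is handled and the zero-padding widths are assumptions for illustration;
# cmdutil.makefilename implements the real expansion.
def expand_export_pattern(pat, seqno, total, rev, node):
    repl = {'%': '%',
            'N': '%d' % total,        # number of patches being generated
            'n': '%02d' % seqno,      # zero-padded sequence number
            'R': '%d' % rev,          # changeset revision number
            'r': '%03d' % rev,        # zero-padded revision number
            'H': node,                # full 40-digit changeset hash
            'h': node[:12]}           # short-form changeset hash
    out, i = [], 0
    while i < len(pat):
        if pat[i] == '%' and i + 1 < len(pat) and pat[i + 1] in repl:
            out.append(repl[pat[i + 1]])
            i += 2
        else:
            out.append(pat[i])
            i += 1
    return ''.join(out)

# expand_export_pattern('%n-%h.patch', 2, 5, 9353, 'f' * 40)
# == '02-ffffffffffff.patch'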
3017 3017 @command('^forget', walkopts, _('[OPTION]... FILE...'), inferrepo=True)
3018 3018 def forget(ui, repo, *pats, **opts):
3019 3019 """forget the specified files on the next commit
3020 3020
3021 3021 Mark the specified files so they will no longer be tracked
3022 3022 after the next commit.
3023 3023
3024 3024 This only removes files from the current branch, not from the
3025 3025 entire project history, and it does not delete them from the
3026 3026 working directory.
3027 3027
3028 3028 To undo a forget before the next commit, see :hg:`add`.
3029 3029
3030 3030 .. container:: verbose
3031 3031
3032 3032 Examples:
3033 3033
3034 3034 - forget newly-added binary files::
3035 3035
3036 3036 hg forget "set:added() and binary()"
3037 3037
3038 3038 - forget files that would be excluded by .hgignore::
3039 3039
3040 3040 hg forget "set:hgignore()"
3041 3041
3042 3042 Returns 0 on success.
3043 3043 """
3044 3044
3045 3045 if not pats:
3046 3046 raise util.Abort(_('no files specified'))
3047 3047
3048 3048 m = scmutil.match(repo[None], pats, opts)
3049 3049 rejected = cmdutil.forget(ui, repo, m, prefix="", explicitonly=False)[0]
3050 3050 return rejected and 1 or 0
3051 3051
3052 3052 @command(
3053 3053 'graft',
3054 3054 [('r', 'rev', [], _('revisions to graft'), _('REV')),
3055 3055 ('c', 'continue', False, _('resume interrupted graft')),
3056 3056 ('e', 'edit', False, _('invoke editor on commit messages')),
3057 3057 ('', 'log', None, _('append graft info to log message')),
3058 3058 ('f', 'force', False, _('force graft')),
3059 3059 ('D', 'currentdate', False,
3060 3060 _('record the current date as commit date')),
3061 3061 ('U', 'currentuser', False,
3062 3062 _('record the current user as committer'))]
3063 3063 + commitopts2 + mergetoolopts + dryrunopts,
3064 3064 _('[OPTION]... [-r] REV...'))
3065 3065 def graft(ui, repo, *revs, **opts):
3066 3066 '''copy changes from other branches onto the current branch
3067 3067
3068 3068 This command uses Mercurial's merge logic to copy individual
3069 3069 changes from other branches without merging branches in the
3070 3070 history graph. This is sometimes known as 'backporting' or
3071 3071 'cherry-picking'. By default, graft will copy user, date, and
3072 3072 description from the source changesets.
3073 3073
3074 3074 Changesets that are ancestors of the current revision, that have
3075 3075 already been grafted, or that are merges will be skipped.
3076 3076
3077 3077 If --log is specified, log messages will have a comment appended
3078 3078 of the form::
3079 3079
3080 3080 (grafted from CHANGESETHASH)
3081 3081
3082 3082 If --force is specified, revisions will be grafted even if they
3083 3083 are already ancestors of or have been grafted to the destination.
3084 3084 This is useful when the revisions have since been backed out.
3085 3085
3086 3086 If a graft merge results in conflicts, the graft process is
3087 3087 interrupted so that the current merge can be manually resolved.
3088 3088 Once all conflicts are addressed, the graft process can be
3089 3089 continued with the -c/--continue option.
3090 3090
3091 3091 .. note::
3092 3092
3093 3093 The -c/--continue option does not reapply earlier options, except
3094 3094 for --force.
3095 3095
3096 3096 .. container:: verbose
3097 3097
3098 3098 Examples:
3099 3099
3100 3100 - copy a single change to the stable branch and edit its description::
3101 3101
3102 3102 hg update stable
3103 3103 hg graft --edit 9393
3104 3104
3105 3105 - graft a range of changesets with one exception, updating dates::
3106 3106
3107 3107 hg graft -D "2085::2093 and not 2091"
3108 3108
3109 3109 - continue a graft after resolving conflicts::
3110 3110
3111 3111 hg graft -c
3112 3112
3113 3113 - show the source of a grafted changeset::
3114 3114
3115 3115 hg log --debug -r .
3116 3116
3117 3117 See :hg:`help revisions` and :hg:`help revsets` for more about
3118 3118 specifying revisions.
3119 3119
3120 3120 Returns 0 on successful completion.
3121 3121 '''
3122 3122
3123 3123 revs = list(revs)
3124 3124 revs.extend(opts['rev'])
3125 3125
3126 3126 if not opts.get('user') and opts.get('currentuser'):
3127 3127 opts['user'] = ui.username()
3128 3128 if not opts.get('date') and opts.get('currentdate'):
3129 3129 opts['date'] = "%d %d" % util.makedate()
3130 3130
3131 3131 editor = cmdutil.getcommiteditor(editform='graft', **opts)
3132 3132
3133 3133 cont = False
3134 3134 if opts['continue']:
3135 3135 cont = True
3136 3136 if revs:
3137 3137 raise util.Abort(_("can't specify --continue and revisions"))
3138 3138 # read in unfinished revisions
3139 3139 try:
3140 3140 nodes = repo.opener.read('graftstate').splitlines()
3141 3141 revs = [repo[node].rev() for node in nodes]
3142 3142 except IOError, inst:
3143 3143 if inst.errno != errno.ENOENT:
3144 3144 raise
3145 3145 raise util.Abort(_("no graft state found, can't continue"))
3146 3146 else:
3147 3147 cmdutil.checkunfinished(repo)
3148 3148 cmdutil.bailifchanged(repo)
3149 3149 if not revs:
3150 3150 raise util.Abort(_('no revisions specified'))
3151 3151 revs = scmutil.revrange(repo, revs)
3152 3152
3153 3153 # check for merges
3154 3154 for rev in repo.revs('%ld and merge()', revs):
3155 3155 ui.warn(_('skipping ungraftable merge revision %s\n') % rev)
3156 3156 revs.remove(rev)
3157 3157 if not revs:
3158 3158 return -1
3159 3159
3160 3160 # Don't check in the --continue case, in effect retaining --force across
3161 3161 # --continues. That's because without --force, any revisions we decided to
3162 3162 # skip would have been filtered out here, so they wouldn't have made their
3163 3163 # way to the graftstate. With --force, any revisions we would have otherwise
3164 3164 # skipped would not have been filtered out, and if they hadn't been applied
3165 3165 # already, they'd have been in the graftstate.
3166 3166 if not (cont or opts.get('force')):
3167 3167 # check for ancestors of dest branch
3168 3168 crev = repo['.'].rev()
3169 3169 ancestors = repo.changelog.ancestors([crev], inclusive=True)
3170 3170 # Cannot use x.remove(y) on smart set, this has to be a list.
3171 3171 # XXX make this lazy in the future
3172 3172 revs = list(revs)
3173 3173 # don't mutate while iterating, create a copy
3174 3174 for rev in list(revs):
3175 3175 if rev in ancestors:
3176 3176 ui.warn(_('skipping ancestor revision %s\n') % rev)
3177 3177 # XXX remove on list is slow
3178 3178 revs.remove(rev)
3179 3179 if not revs:
3180 3180 return -1
3181 3181
3182 3182 # analyze revs for earlier grafts
3183 3183 ids = {}
3184 3184 for ctx in repo.set("%ld", revs):
3185 3185 ids[ctx.hex()] = ctx.rev()
3186 3186 n = ctx.extra().get('source')
3187 3187 if n:
3188 3188 ids[n] = ctx.rev()
3189 3189
3190 3190 # check ancestors for earlier grafts
3191 3191 ui.debug('scanning for duplicate grafts\n')
3192 3192
3193 3193 for rev in repo.changelog.findmissingrevs(revs, [crev]):
3194 3194 ctx = repo[rev]
3195 3195 n = ctx.extra().get('source')
3196 3196 if n in ids:
3197 3197 r = repo[n].rev()
3198 3198 if r in revs:
3199 3199 ui.warn(_('skipping revision %s (already grafted to %s)\n')
3200 3200 % (r, rev))
3201 3201 revs.remove(r)
3202 3202 elif ids[n] in revs:
3203 3203 ui.warn(_('skipping already grafted revision %s '
3204 3204 '(%s also has origin %d)\n') % (ids[n], rev, r))
3205 3205 revs.remove(ids[n])
3206 3206 elif ctx.hex() in ids:
3207 3207 r = ids[ctx.hex()]
3208 3208 ui.warn(_('skipping already grafted revision %s '
3209 3209 '(was grafted from %d)\n') % (r, rev))
3210 3210 revs.remove(r)
3211 3211 if not revs:
3212 3212 return -1
3213 3213
3214 3214 wlock = repo.wlock()
3215 3215 try:
3216 3216 current = repo['.']
3217 3217 for pos, ctx in enumerate(repo.set("%ld", revs)):
3218 3218
3219 3219 ui.status(_('grafting revision %s\n') % ctx.rev())
3220 3220 if opts.get('dry_run'):
3221 3221 continue
3222 3222
3223 3223 source = ctx.extra().get('source')
3224 3224 if not source:
3225 3225 source = ctx.hex()
3226 3226 extra = {'source': source}
3227 3227 user = ctx.user()
3228 3228 if opts.get('user'):
3229 3229 user = opts['user']
3230 3230 date = ctx.date()
3231 3231 if opts.get('date'):
3232 3232 date = opts['date']
3233 3233 message = ctx.description()
3234 3234 if opts.get('log'):
3235 3235 message += '\n(grafted from %s)' % ctx.hex()
3236 3236
3237 3237 # we don't merge the first commit when continuing
3238 3238 if not cont:
3239 3239 # perform the graft merge with p1(rev) as 'ancestor'
3240 3240 try:
3241 3241 # ui.forcemerge is an internal variable, do not document
3242 3242 repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
3243 3243 'graft')
3244 3244 stats = mergemod.update(repo, ctx.node(), True, True, False,
3245 3245 ctx.p1().node(),
3246 3246 labels=['local', 'graft'])
3247 3247 finally:
3248 3248 repo.ui.setconfig('ui', 'forcemerge', '', 'graft')
3249 3249 # report any conflicts
3250 3250 if stats and stats[3] > 0:
3251 3251 # write out state for --continue
3252 3252 nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]]
3253 3253 repo.opener.write('graftstate', ''.join(nodelines))
3254 3254 raise util.Abort(
3255 3255 _("unresolved conflicts, can't continue"),
3256 3256 hint=_('use hg resolve and hg graft --continue'))
3257 3257 else:
3258 3258 cont = False
3259 3259
3260 3260 # drop the second merge parent
3261 3261 repo.setparents(current.node(), nullid)
3262 3262 repo.dirstate.write()
3263 3263 # fix up dirstate for copies and renames
3264 3264 cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())
3265 3265
3266 3266 # commit
3267 3267 node = repo.commit(text=message, user=user,
3268 3268 date=date, extra=extra, editor=editor)
3269 3269 if node is None:
3270 3270 ui.status(_('graft for revision %s is empty\n') % ctx.rev())
3271 3271 else:
3272 3272 current = repo[node]
3273 3273 finally:
3274 3274 wlock.release()
3275 3275
3276 3276 # remove state when we complete successfully
3277 3277 if not opts.get('dry_run'):
3278 3278 util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
3279 3279
3280 3280 return 0
3281 3281
3282 3282 @command('grep',
3283 3283 [('0', 'print0', None, _('end fields with NUL')),
3284 3284 ('', 'all', None, _('print all revisions that match')),
3285 3285 ('a', 'text', None, _('treat all files as text')),
3286 3286 ('f', 'follow', None,
3287 3287 _('follow changeset history,'
3288 3288 ' or file history across copies and renames')),
3289 3289 ('i', 'ignore-case', None, _('ignore case when matching')),
3290 3290 ('l', 'files-with-matches', None,
3291 3291 _('print only filenames and revisions that match')),
3292 3292 ('n', 'line-number', None, _('print matching line numbers')),
3293 3293 ('r', 'rev', [],
3294 3294 _('only search files changed within revision range'), _('REV')),
3295 3295 ('u', 'user', None, _('list the author (long with -v)')),
3296 3296 ('d', 'date', None, _('list the date (short with -q)')),
3297 3297 ] + walkopts,
3298 3298 _('[OPTION]... PATTERN [FILE]...'),
3299 3299 inferrepo=True)
3300 3300 def grep(ui, repo, pattern, *pats, **opts):
3301 3301 """search for a pattern in specified files and revisions
3302 3302
3303 3303 Search revisions of files for a regular expression.
3304 3304
3305 3305 This command behaves differently than Unix grep. It only accepts
3306 3306 Python/Perl regexps. It searches repository history, not the
3307 3307 working directory. It always prints the revision number in which a
3308 3308 match appears.
3309 3309
3310 3310 By default, grep only prints output for the first revision of a
3311 3311 file in which it finds a match. To get it to print every revision
3312 3312 that contains a change in match status ("-" for a match that
3313 3313 becomes a non-match, or "+" for a non-match that becomes a match),
3314 3314 use the --all flag.
3315 3315
3316 3316 Returns 0 if a match is found, 1 otherwise.
3317 3317 """
3318 3318 reflags = re.M
3319 3319 if opts.get('ignore_case'):
3320 3320 reflags |= re.I
3321 3321 try:
3322 3322 regexp = util.re.compile(pattern, reflags)
3323 3323 except re.error, inst:
3324 3324 ui.warn(_("grep: invalid match pattern: %s\n") % inst)
3325 3325 return 1
3326 3326 sep, eol = ':', '\n'
3327 3327 if opts.get('print0'):
3328 3328 sep = eol = '\0'
3329 3329
3330 3330 getfile = util.lrucachefunc(repo.file)
3331 3331
3332 3332 def matchlines(body):
3333 3333 begin = 0
3334 3334 linenum = 0
3335 3335 while begin < len(body):
3336 3336 match = regexp.search(body, begin)
3337 3337 if not match:
3338 3338 break
3339 3339 mstart, mend = match.span()
3340 3340 linenum += body.count('\n', begin, mstart) + 1
3341 3341 lstart = body.rfind('\n', begin, mstart) + 1 or begin
3342 3342 begin = body.find('\n', mend) + 1 or len(body) + 1
3343 3343 lend = begin - 1
3344 3344 yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
3345 3345
3346 3346 class linestate(object):
3347 3347 def __init__(self, line, linenum, colstart, colend):
3348 3348 self.line = line
3349 3349 self.linenum = linenum
3350 3350 self.colstart = colstart
3351 3351 self.colend = colend
3352 3352
3353 3353 def __hash__(self):
3354 3354 return hash((self.linenum, self.line))
3355 3355
3356 3356 def __eq__(self, other):
3357 3357 return self.line == other.line
3358 3358
3359 3359 def __iter__(self):
3360 3360 yield (self.line[:self.colstart], '')
3361 3361 yield (self.line[self.colstart:self.colend], 'grep.match')
3362 3362 rest = self.line[self.colend:]
3363 3363 while rest != '':
3364 3364 match = regexp.search(rest)
3365 3365 if not match:
3366 3366 yield (rest, '')
3367 3367 break
3368 3368 mstart, mend = match.span()
3369 3369 yield (rest[:mstart], '')
3370 3370 yield (rest[mstart:mend], 'grep.match')
3371 3371 rest = rest[mend:]
3372 3372
3373 3373 matches = {}
3374 3374 copies = {}
3375 3375 def grepbody(fn, rev, body):
3376 3376 matches[rev].setdefault(fn, [])
3377 3377 m = matches[rev][fn]
3378 3378 for lnum, cstart, cend, line in matchlines(body):
3379 3379 s = linestate(line, lnum, cstart, cend)
3380 3380 m.append(s)
3381 3381
3382 3382 def difflinestates(a, b):
3383 3383 sm = difflib.SequenceMatcher(None, a, b)
3384 3384 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3385 3385 if tag == 'insert':
3386 3386 for i in xrange(blo, bhi):
3387 3387 yield ('+', b[i])
3388 3388 elif tag == 'delete':
3389 3389 for i in xrange(alo, ahi):
3390 3390 yield ('-', a[i])
3391 3391 elif tag == 'replace':
3392 3392 for i in xrange(alo, ahi):
3393 3393 yield ('-', a[i])
3394 3394 for i in xrange(blo, bhi):
3395 3395 yield ('+', b[i])
3396 3396
3397 3397 def display(fn, ctx, pstates, states):
3398 3398 rev = ctx.rev()
3399 3399 datefunc = ui.quiet and util.shortdate or util.datestr
3400 3400 found = False
3401 3401 @util.cachefunc
3402 3402 def binary():
3403 3403 flog = getfile(fn)
3404 3404 return util.binary(flog.read(ctx.filenode(fn)))
3405 3405
3406 3406 if opts.get('all'):
3407 3407 iter = difflinestates(pstates, states)
3408 3408 else:
3409 3409 iter = [('', l) for l in states]
3410 3410 for change, l in iter:
3411 3411 cols = [(fn, 'grep.filename'), (str(rev), 'grep.rev')]
3412 3412
3413 3413 if opts.get('line_number'):
3414 3414 cols.append((str(l.linenum), 'grep.linenumber'))
3415 3415 if opts.get('all'):
3416 3416 cols.append((change, 'grep.change'))
3417 3417 if opts.get('user'):
3418 3418 cols.append((ui.shortuser(ctx.user()), 'grep.user'))
3419 3419 if opts.get('date'):
3420 3420 cols.append((datefunc(ctx.date()), 'grep.date'))
3421 3421 for col, label in cols[:-1]:
3422 3422 ui.write(col, label=label)
3423 3423 ui.write(sep, label='grep.sep')
3424 3424 ui.write(cols[-1][0], label=cols[-1][1])
3425 3425 if not opts.get('files_with_matches'):
3426 3426 ui.write(sep, label='grep.sep')
3427 3427 if not opts.get('text') and binary():
3428 3428 ui.write(" Binary file matches")
3429 3429 else:
3430 3430 for s, label in l:
3431 3431 ui.write(s, label=label)
3432 3432 ui.write(eol)
3433 3433 found = True
3434 3434 if opts.get('files_with_matches'):
3435 3435 break
3436 3436 return found
3437 3437
3438 3438 skip = {}
3439 3439 revfiles = {}
3440 3440 matchfn = scmutil.match(repo[None], pats, opts)
3441 3441 found = False
3442 3442 follow = opts.get('follow')
3443 3443
3444 3444 def prep(ctx, fns):
3445 3445 rev = ctx.rev()
3446 3446 pctx = ctx.p1()
3447 3447 parent = pctx.rev()
3448 3448 matches.setdefault(rev, {})
3449 3449 matches.setdefault(parent, {})
3450 3450 files = revfiles.setdefault(rev, [])
3451 3451 for fn in fns:
3452 3452 flog = getfile(fn)
3453 3453 try:
3454 3454 fnode = ctx.filenode(fn)
3455 3455 except error.LookupError:
3456 3456 continue
3457 3457
3458 3458 copied = flog.renamed(fnode)
3459 3459 copy = follow and copied and copied[0]
3460 3460 if copy:
3461 3461 copies.setdefault(rev, {})[fn] = copy
3462 3462 if fn in skip:
3463 3463 if copy:
3464 3464 skip[copy] = True
3465 3465 continue
3466 3466 files.append(fn)
3467 3467
3468 3468 if fn not in matches[rev]:
3469 3469 grepbody(fn, rev, flog.read(fnode))
3470 3470
3471 3471 pfn = copy or fn
3472 3472 if pfn not in matches[parent]:
3473 3473 try:
3474 3474 fnode = pctx.filenode(pfn)
3475 3475 grepbody(pfn, parent, flog.read(fnode))
3476 3476 except error.LookupError:
3477 3477 pass
3478 3478
3479 3479 for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
3480 3480 rev = ctx.rev()
3481 3481 parent = ctx.p1().rev()
3482 3482 for fn in sorted(revfiles.get(rev, [])):
3483 3483 states = matches[rev][fn]
3484 3484 copy = copies.get(rev, {}).get(fn)
3485 3485 if fn in skip:
3486 3486 if copy:
3487 3487 skip[copy] = True
3488 3488 continue
3489 3489 pstates = matches.get(parent, {}).get(copy or fn, [])
3490 3490 if pstates or states:
3491 3491 r = display(fn, ctx, pstates, states)
3492 3492 found = found or r
3493 3493 if r and not opts.get('all'):
3494 3494 skip[fn] = True
3495 3495 if copy:
3496 3496 skip[copy] = True
3497 3497 del matches[rev]
3498 3498 del revfiles[rev]
3499 3499
3500 3500 return not found
3501 3501
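# A minimal standalone sketch of the scan done by matchlines() inside grep
# above: walk a body with regexp.search, tracking the line number and the
# column span of the first hit on each line. Pure standard library and purely
# illustrative -- the helper name is made up, it is not a Mercurial API.
import re

def _scan_matches(body, pattern, flags=re.M):
    regexp = re.compile(pattern, flags)
    begin = 0
    linenum = 0
    while begin < len(body):
        match = regexp.search(body, begin)
        if not match:
            break
        mstart, mend = match.span()
        linenum += body.count('\n', begin, mstart) + 1
        lstart = body.rfind('\n', begin, mstart) + 1 or begin
        begin = body.find('\n', mend) + 1 or len(body) + 1
        lend = begin - 1
        yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]

# list(_scan_matches("foo\nbar baz\nbat", "ba."))
# -> [(2, 0, 3, 'bar baz'), (3, 0, 3, 'bat')]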
3502 3502 @command('heads',
3503 3503 [('r', 'rev', '',
3504 3504 _('show only heads which are descendants of STARTREV'), _('STARTREV')),
3505 3505 ('t', 'topo', False, _('show topological heads only')),
3506 3506 ('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
3507 3507 ('c', 'closed', False, _('show normal and closed branch heads')),
3508 3508 ] + templateopts,
3509 3509 _('[-ct] [-r STARTREV] [REV]...'))
3510 3510 def heads(ui, repo, *branchrevs, **opts):
3511 3511 """show branch heads
3512 3512
3513 3513 With no arguments, show all open branch heads in the repository.
3514 3514 Branch heads are changesets that have no descendants on the
3515 3515 same branch. They are where development generally takes place and
3516 3516 are the usual targets for update and merge operations.
3517 3517
3518 3518 If one or more REVs are given, only open branch heads on the
3519 3519 branches associated with the specified changesets are shown. This
3520 3520 means that you can use :hg:`heads .` to see the heads on the
3521 3521 currently checked-out branch.
3522 3522
3523 3523 If -c/--closed is specified, also show branch heads marked closed
3524 3524 (see :hg:`commit --close-branch`).
3525 3525
3526 3526 If STARTREV is specified, only those heads that are descendants of
3527 3527 STARTREV will be displayed.
3528 3528
3529 3529 If -t/--topo is specified, named branch mechanics will be ignored and only
3530 3530 topological heads (changesets with no children) will be shown.
3531 3531
3532 3532 Returns 0 if matching heads are found, 1 if not.
3533 3533 """
3534 3534
3535 3535 start = None
3536 3536 if 'rev' in opts:
3537 3537 start = scmutil.revsingle(repo, opts['rev'], None).node()
3538 3538
3539 3539 if opts.get('topo'):
3540 3540 heads = [repo[h] for h in repo.heads(start)]
3541 3541 else:
3542 3542 heads = []
3543 3543 for branch in repo.branchmap():
3544 3544 heads += repo.branchheads(branch, start, opts.get('closed'))
3545 3545 heads = [repo[h] for h in heads]
3546 3546
3547 3547 if branchrevs:
3548 3548 branches = set(repo[br].branch() for br in branchrevs)
3549 3549 heads = [h for h in heads if h.branch() in branches]
3550 3550
3551 3551 if opts.get('active') and branchrevs:
3552 3552 dagheads = repo.heads(start)
3553 3553 heads = [h for h in heads if h.node() in dagheads]
3554 3554
3555 3555 if branchrevs:
3556 3556 haveheads = set(h.branch() for h in heads)
3557 3557 if branches - haveheads:
3558 3558 headless = ', '.join(b for b in branches - haveheads)
3559 3559 msg = _('no open branch heads found on branches %s')
3560 3560 if opts.get('rev'):
3561 3561 msg += _(' (started at %s)') % opts['rev']
3562 3562 ui.warn((msg + '\n') % headless)
3563 3563
3564 3564 if not heads:
3565 3565 return 1
3566 3566
3567 3567 heads = sorted(heads, key=lambda x: -x.rev())
3568 3568 displayer = cmdutil.show_changeset(ui, repo, opts)
3569 3569 for ctx in heads:
3570 3570 displayer.show(ctx)
3571 3571 displayer.close()
3572 3572
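# Illustrative sketch (not Mercurial code) of the --topo notion described in
# the heads docstring above: topological heads are the nodes of a DAG that
# have no children. The dict-of-parents representation is hypothetical.
def _topo_heads(parents):
    # parents maps node -> list of parent nodes
    has_child = set()
    for ps in parents.values():
        has_child.update(ps)
    return sorted(n for n in parents if n not in has_child)

# _topo_heads({0: [], 1: [0], 2: [1], 3: [1]}) -> [2, 3]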
3573 3573 @command('help',
3574 3574 [('e', 'extension', None, _('show only help for extensions')),
3575 3575 ('c', 'command', None, _('show only help for commands')),
3576 3576 ('k', 'keyword', '', _('show topics matching keyword')),
3577 3577 ],
3578 3578 _('[-ec] [TOPIC]'),
3579 3579 norepo=True)
3580 3580 def help_(ui, name=None, **opts):
3581 3581 """show help for a given topic or a help overview
3582 3582
3583 3583 With no arguments, print a list of commands with short help messages.
3584 3584
3585 3585 Given a topic, extension, or command name, print help for that
3586 3586 topic.
3587 3587
3588 3588 Returns 0 if successful.
3589 3589 """
3590 3590
3591 3591 textwidth = min(ui.termwidth(), 80) - 2
3592 3592
3593 3593 keep = ui.verbose and ['verbose'] or []
3594 3594 text = help.help_(ui, name, **opts)
3595 3595
3596 3596 formatted, pruned = minirst.format(text, textwidth, keep=keep)
3597 3597 if 'verbose' in pruned:
3598 3598 keep.append('omitted')
3599 3599 else:
3600 3600 keep.append('notomitted')
3601 3601 formatted, pruned = minirst.format(text, textwidth, keep=keep)
3602 3602 ui.write(formatted)
3603 3603
3604 3604
3605 3605 @command('identify|id',
3606 3606 [('r', 'rev', '',
3607 3607 _('identify the specified revision'), _('REV')),
3608 3608 ('n', 'num', None, _('show local revision number')),
3609 3609 ('i', 'id', None, _('show global revision id')),
3610 3610 ('b', 'branch', None, _('show branch')),
3611 3611 ('t', 'tags', None, _('show tags')),
3612 3612 ('B', 'bookmarks', None, _('show bookmarks')),
3613 3613 ] + remoteopts,
3614 3614 _('[-nibtB] [-r REV] [SOURCE]'),
3615 3615 optionalrepo=True)
3616 3616 def identify(ui, repo, source=None, rev=None,
3617 3617 num=None, id=None, branch=None, tags=None, bookmarks=None, **opts):
3618 3618 """identify the working copy or specified revision
3619 3619
3620 3620 Print a summary identifying the repository state at REV using one or
3621 3621 two parent hash identifiers, followed by a "+" if the working
3622 3622 directory has uncommitted changes, the branch name (if not default),
3623 3623 a list of tags, and a list of bookmarks.
3624 3624
3625 3625 When REV is not given, print a summary of the current state of the
3626 3626 repository.
3627 3627
3628 3628 Specifying a path to a repository root or Mercurial bundle will
3629 3629 cause lookup to operate on that repository/bundle.
3630 3630
3631 3631 .. container:: verbose
3632 3632
3633 3633 Examples:
3634 3634
3635 3635 - generate a build identifier for the working directory::
3636 3636
3637 3637 hg id --id > build-id.dat
3638 3638
3639 3639 - find the revision corresponding to a tag::
3640 3640
3641 3641 hg id -n -r 1.3
3642 3642
3643 3643 - check the most recent revision of a remote repository::
3644 3644
3645 3645 hg id -r tip http://selenic.com/hg/
3646 3646
3647 3647 Returns 0 if successful.
3648 3648 """
3649 3649
3650 3650 if not repo and not source:
3651 3651 raise util.Abort(_("there is no Mercurial repository here "
3652 3652 "(.hg not found)"))
3653 3653
3654 3654 hexfunc = ui.debugflag and hex or short
3655 3655 default = not (num or id or branch or tags or bookmarks)
3656 3656 output = []
3657 3657 revs = []
3658 3658
3659 3659 if source:
3660 3660 source, branches = hg.parseurl(ui.expandpath(source))
3661 3661 peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
3662 3662 repo = peer.local()
3663 3663 revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
3664 3664
3665 3665 if not repo:
3666 3666 if num or branch or tags:
3667 3667 raise util.Abort(
3668 3668 _("can't query remote revision number, branch, or tags"))
3669 3669 if not rev and revs:
3670 3670 rev = revs[0]
3671 3671 if not rev:
3672 3672 rev = "tip"
3673 3673
3674 3674 remoterev = peer.lookup(rev)
3675 3675 if default or id:
3676 3676 output = [hexfunc(remoterev)]
3677 3677
3678 3678 def getbms():
3679 3679 bms = []
3680 3680
3681 3681 if 'bookmarks' in peer.listkeys('namespaces'):
3682 3682 hexremoterev = hex(remoterev)
3683 3683 bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems()
3684 3684 if bmr == hexremoterev]
3685 3685
3686 3686 return sorted(bms)
3687 3687
3688 3688 if bookmarks:
3689 3689 output.extend(getbms())
3690 3690 elif default and not ui.quiet:
3691 3691 # multiple bookmarks for a single parent separated by '/'
3692 3692 bm = '/'.join(getbms())
3693 3693 if bm:
3694 3694 output.append(bm)
3695 3695 else:
3696 3696 if not rev:
3697 3697 ctx = repo[None]
3698 3698 parents = ctx.parents()
3699 3699 changed = ""
3700 3700 if default or id or num:
3701 3701 if (util.any(repo.status())
3702 3702 or util.any(ctx.sub(s).dirty() for s in ctx.substate)):
3703 3703 changed = '+'
3704 3704 if default or id:
3705 3705 output = ["%s%s" %
3706 3706 ('+'.join([hexfunc(p.node()) for p in parents]), changed)]
3707 3707 if num:
3708 3708 output.append("%s%s" %
3709 3709 ('+'.join([str(p.rev()) for p in parents]), changed))
3710 3710 else:
3711 3711 ctx = scmutil.revsingle(repo, rev)
3712 3712 if default or id:
3713 3713 output = [hexfunc(ctx.node())]
3714 3714 if num:
3715 3715 output.append(str(ctx.rev()))
3716 3716
3717 3717 if default and not ui.quiet:
3718 3718 b = ctx.branch()
3719 3719 if b != 'default':
3720 3720 output.append("(%s)" % b)
3721 3721
3722 3722 # multiple tags for a single parent separated by '/'
3723 3723 t = '/'.join(ctx.tags())
3724 3724 if t:
3725 3725 output.append(t)
3726 3726
3727 3727 # multiple bookmarks for a single parent separated by '/'
3728 3728 bm = '/'.join(ctx.bookmarks())
3729 3729 if bm:
3730 3730 output.append(bm)
3731 3731 else:
3732 3732 if branch:
3733 3733 output.append(ctx.branch())
3734 3734
3735 3735 if tags:
3736 3736 output.extend(ctx.tags())
3737 3737
3738 3738 if bookmarks:
3739 3739 output.extend(ctx.bookmarks())
3740 3740
3741 3741 ui.write("%s\n" % ' '.join(output))
3742 3742
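# Sketch of how the identify output above is assembled for a working copy:
# joined parent hashes, a '+' when the working directory is dirty, then the
# non-default branch, tags and bookmarks. Values here are hypothetical; the
# real command reads them from repo/ctx objects.
def _format_id(parent_hashes, dirty, branch=None, tags=(), bookmarks=()):
    out = ['%s%s' % ('+'.join(parent_hashes), dirty and '+' or '')]
    if branch and branch != 'default':
        out.append('(%s)' % branch)
    if tags:
        out.append('/'.join(tags))
    if bookmarks:
        out.append('/'.join(bookmarks))
    return ' '.join(out)

# _format_id(['d047485b3896'], True, 'stable', ['tip'])
# -> 'd047485b3896+ (stable) tip'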
3743 3743 @command('import|patch',
3744 3744 [('p', 'strip', 1,
3745 3745 _('directory strip option for patch. This has the same '
3746 3746 'meaning as the corresponding patch option'), _('NUM')),
3747 3747 ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')),
3748 3748 ('e', 'edit', False, _('invoke editor on commit messages')),
3749 3749 ('f', 'force', None,
3750 3750 _('skip check for outstanding uncommitted changes (DEPRECATED)')),
3751 3751 ('', 'no-commit', None,
3752 3752 _("don't commit, just update the working directory")),
3753 3753 ('', 'bypass', None,
3754 3754 _("apply patch without touching the working directory")),
3755 3755 ('', 'partial', None,
3756 3756 _('commit even if some hunks fail')),
3757 3757 ('', 'exact', None,
3758 3758 _('apply patch to the nodes from which it was generated')),
3759 3759 ('', 'import-branch', None,
3760 3760 _('use any branch information in patch (implied by --exact)'))] +
3761 3761 commitopts + commitopts2 + similarityopts,
3762 3762 _('[OPTION]... PATCH...'))
3763 3763 def import_(ui, repo, patch1=None, *patches, **opts):
3764 3764 """import an ordered set of patches
3765 3765
3766 3766 Import a list of patches and commit them individually (unless
3767 3767 --no-commit is specified).
3768 3768
3769 3769 Because import first applies changes to the working directory,
3770 3770 import will abort if there are outstanding changes.
3771 3771
3772 3772 You can import a patch straight from a mail message. Even patches
3773 3773 as attachments work (to use the body part, it must have type
3774 3774 text/plain or text/x-patch). From and Subject headers of email
 3775 3775 text/plain or text/x-patch). The From and Subject headers of the
 3776 3776 email message are used as the default committer and commit message.
 3777 3777 All text/plain body parts before the first diff are added to the
 3778 3778 commit message.
3779 3779 If the imported patch was generated by :hg:`export`, user and
3780 3780 description from patch override values from message headers and
 3781 3781 body. Values given on the command line with -m/--message and -u/--user
3782 3782 override these.
3783 3783
3784 3784 If --exact is specified, import will set the working directory to
3785 3785 the parent of each patch before applying it, and will abort if the
3786 3786 resulting changeset has a different ID than the one recorded in
3787 3787 the patch. This may happen due to character set problems or other
3788 3788 deficiencies in the text patch format.
3789 3789
3790 3790 Use --bypass to apply and commit patches directly to the
3791 3791 repository, not touching the working directory. Without --exact,
3792 3792 patches will be applied on top of the working directory parent
3793 3793 revision.
3794 3794
3795 3795 With -s/--similarity, hg will attempt to discover renames and
3796 3796 copies in the patch in the same way as :hg:`addremove`.
3797 3797
3798 3798 Use --partial to ensure a changeset will be created from the patch
3799 3799 even if some hunks fail to apply. Hunks that fail to apply will be
3800 3800 written to a <target-file>.rej file. Conflicts can then be resolved
3801 3801 by hand before :hg:`commit --amend` is run to update the created
3802 3802 changeset. This flag exists to let people import patches that
3803 3803 partially apply without losing the associated metadata (author,
 3804 3804 date, description, ...). Note that when none of the hunks apply
3805 3805 cleanly, :hg:`import --partial` will create an empty changeset,
3806 3806 importing only the patch metadata.
3807 3807
3808 3808 To read a patch from standard input, use "-" as the patch name. If
3809 3809 a URL is specified, the patch will be downloaded from it.
3810 3810 See :hg:`help dates` for a list of formats valid for -d/--date.
3811 3811
3812 3812 .. container:: verbose
3813 3813
3814 3814 Examples:
3815 3815
3816 3816 - import a traditional patch from a website and detect renames::
3817 3817
3818 3818 hg import -s 80 http://example.com/bugfix.patch
3819 3819
3820 3820 - import a changeset from an hgweb server::
3821 3821
3822 3822 hg import http://www.selenic.com/hg/rev/5ca8c111e9aa
3823 3823
 3824 3824 - import all the patches in a Unix-style mbox::
3825 3825
3826 3826 hg import incoming-patches.mbox
3827 3827
3828 3828 - attempt to exactly restore an exported changeset (not always
3829 3829 possible)::
3830 3830
3831 3831 hg import --exact proposed-fix.patch
3832 3832
3833 3833 Returns 0 on success, 1 on partial success (see --partial).
3834 3834 """
3835 3835
3836 3836 if not patch1:
3837 3837 raise util.Abort(_('need at least one patch to import'))
3838 3838
3839 3839 patches = (patch1,) + patches
3840 3840
3841 3841 date = opts.get('date')
3842 3842 if date:
3843 3843 opts['date'] = util.parsedate(date)
3844 3844
3845 3845 update = not opts.get('bypass')
3846 3846 if not update and opts.get('no_commit'):
3847 3847 raise util.Abort(_('cannot use --no-commit with --bypass'))
3848 3848 try:
3849 3849 sim = float(opts.get('similarity') or 0)
3850 3850 except ValueError:
3851 3851 raise util.Abort(_('similarity must be a number'))
3852 3852 if sim < 0 or sim > 100:
3853 3853 raise util.Abort(_('similarity must be between 0 and 100'))
3854 3854 if sim and not update:
3855 3855 raise util.Abort(_('cannot use --similarity with --bypass'))
3856 3856
3857 3857 if update:
3858 3858 cmdutil.checkunfinished(repo)
3859 3859 if (opts.get('exact') or not opts.get('force')) and update:
3860 3860 cmdutil.bailifchanged(repo)
3861 3861
3862 3862 base = opts["base"]
3863 3863 wlock = lock = tr = None
3864 3864 msgs = []
3865 3865 ret = 0
3866 3866
3867 3867
3868 3868 try:
3869 3869 try:
3870 3870 wlock = repo.wlock()
3871 3871 if not opts.get('no_commit'):
3872 3872 lock = repo.lock()
3873 3873 tr = repo.transaction('import')
3874 3874 parents = repo.parents()
3875 3875 for patchurl in patches:
3876 3876 if patchurl == '-':
3877 3877 ui.status(_('applying patch from stdin\n'))
3878 3878 patchfile = ui.fin
3879 3879 patchurl = 'stdin' # for error message
3880 3880 else:
3881 3881 patchurl = os.path.join(base, patchurl)
3882 3882 ui.status(_('applying %s\n') % patchurl)
3883 3883 patchfile = hg.openpath(ui, patchurl)
3884 3884
3885 3885 haspatch = False
3886 3886 for hunk in patch.split(patchfile):
3887 3887 (msg, node, rej) = cmdutil.tryimportone(ui, repo, hunk,
3888 3888 parents, opts,
3889 3889 msgs, hg.clean)
3890 3890 if msg:
3891 3891 haspatch = True
3892 3892 ui.note(msg + '\n')
3893 3893 if update or opts.get('exact'):
3894 3894 parents = repo.parents()
3895 3895 else:
3896 3896 parents = [repo[node]]
3897 3897 if rej:
3898 3898 ui.write_err(_("patch applied partially\n"))
3899 3899 ui.write_err(_("(fix the .rej files and run "
3900 3900 "`hg commit --amend`)\n"))
3901 3901 ret = 1
3902 3902 break
3903 3903
3904 3904 if not haspatch:
3905 3905 raise util.Abort(_('%s: no diffs found') % patchurl)
3906 3906
3907 3907 if tr:
3908 3908 tr.close()
3909 3909 if msgs:
3910 3910 repo.savecommitmessage('\n* * *\n'.join(msgs))
3911 3911 return ret
3912 3912 except: # re-raises
3913 3913 # wlock.release() indirectly calls dirstate.write(): since
3914 3914 # we're crashing, we do not want to change the working dir
3915 3915 # parent after all, so make sure it writes nothing
3916 3916 repo.dirstate.invalidate()
3917 3917 raise
3918 3918 finally:
3919 3919 if tr:
3920 3920 tr.release()
3921 3921 release(lock, wlock)
3922 3922
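# Toy sketch of the transaction discipline used in import_ above: close()
# commits, and release() in the finally block discards anything that was
# never closed. This is a standalone illustration, not the real
# mercurial.transaction API.
class _ToyTransaction(object):
    def __init__(self):
        self.entries = []
        self.committed = False
    def add(self, entry):
        self.entries.append(entry)
    def close(self):
        self.committed = True
    def release(self):
        if not self.committed:
            self.entries = []   # roll back uncommitted work

def _apply_with_transaction(patches):
    tr = _ToyTransaction()
    try:
        for p in patches:
            tr.add(p)           # the real command applies a patch here
        tr.close()              # reached only if every patch applied
        return tr.entries
    finally:
        tr.release()            # no-op after close(), rollback otherwise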
3923 3923 @command('incoming|in',
3924 3924 [('f', 'force', None,
3925 3925 _('run even if remote repository is unrelated')),
3926 3926 ('n', 'newest-first', None, _('show newest record first')),
3927 3927 ('', 'bundle', '',
3928 3928 _('file to store the bundles into'), _('FILE')),
3929 3929 ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
3930 3930 ('B', 'bookmarks', False, _("compare bookmarks")),
3931 3931 ('b', 'branch', [],
3932 3932 _('a specific branch you would like to pull'), _('BRANCH')),
3933 3933 ] + logopts + remoteopts + subrepoopts,
3934 3934 _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
3935 3935 def incoming(ui, repo, source="default", **opts):
3936 3936 """show new changesets found in source
3937 3937
3938 3938 Show new changesets found in the specified path/URL or the default
3939 3939 pull location. These are the changesets that would have been pulled
 3940 3940 if a pull had been requested at the time you issued this command.
3941 3941
 3942 3942 For a remote repository, using --bundle avoids downloading the
 3943 3943 changesets twice if the incoming command is followed by a pull.
3944 3944
3945 3945 See pull for valid source format details.
3946 3946
3947 3947 .. container:: verbose
3948 3948
3949 3949 Examples:
3950 3950
3951 3951 - show incoming changes with patches and full description::
3952 3952
3953 3953 hg incoming -vp
3954 3954
3955 3955 - show incoming changes excluding merges, store a bundle::
3956 3956
3957 3957 hg in -vpM --bundle incoming.hg
3958 3958 hg pull incoming.hg
3959 3959
3960 3960 - briefly list changes inside a bundle::
3961 3961
3962 3962 hg in changes.hg -T "{desc|firstline}\\n"
3963 3963
3964 3964 Returns 0 if there are incoming changes, 1 otherwise.
3965 3965 """
3966 3966 if opts.get('graph'):
3967 3967 cmdutil.checkunsupportedgraphflags([], opts)
3968 3968 def display(other, chlist, displayer):
3969 3969 revdag = cmdutil.graphrevs(other, chlist, opts)
3970 3970 showparents = [ctx.node() for ctx in repo[None].parents()]
3971 3971 cmdutil.displaygraph(ui, revdag, displayer, showparents,
3972 3972 graphmod.asciiedges)
3973 3973
3974 3974 hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
3975 3975 return 0
3976 3976
3977 3977 if opts.get('bundle') and opts.get('subrepos'):
3978 3978 raise util.Abort(_('cannot combine --bundle and --subrepos'))
3979 3979
3980 3980 if opts.get('bookmarks'):
3981 3981 source, branches = hg.parseurl(ui.expandpath(source),
3982 3982 opts.get('branch'))
3983 3983 other = hg.peer(repo, opts, source)
3984 3984 if 'bookmarks' not in other.listkeys('namespaces'):
3985 3985 ui.warn(_("remote doesn't support bookmarks\n"))
3986 3986 return 0
3987 3987 ui.status(_('comparing with %s\n') % util.hidepassword(source))
3988 3988 return bookmarks.diff(ui, repo, other)
3989 3989
3990 3990 repo._subtoppath = ui.expandpath(source)
3991 3991 try:
3992 3992 return hg.incoming(ui, repo, source, opts)
3993 3993 finally:
3994 3994 del repo._subtoppath
3995 3995
3996 3996
3997 3997 @command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'),
3998 3998 norepo=True)
3999 3999 def init(ui, dest=".", **opts):
4000 4000 """create a new repository in the given directory
4001 4001
4002 4002 Initialize a new repository in the given directory. If the given
4003 4003 directory does not exist, it will be created.
4004 4004
4005 4005 If no directory is given, the current directory is used.
4006 4006
4007 4007 It is possible to specify an ``ssh://`` URL as the destination.
4008 4008 See :hg:`help urls` for more information.
4009 4009
4010 4010 Returns 0 on success.
4011 4011 """
4012 4012 hg.peer(ui, opts, ui.expandpath(dest), create=True)
4013 4013
4014 4014 @command('locate',
4015 4015 [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
4016 4016 ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
4017 4017 ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
4018 4018 ] + walkopts,
4019 4019 _('[OPTION]... [PATTERN]...'))
4020 4020 def locate(ui, repo, *pats, **opts):
4021 4021 """locate files matching specific patterns
4022 4022
4023 4023 Print files under Mercurial control in the working directory whose
4024 4024 names match the given patterns.
4025 4025
4026 4026 By default, this command searches all directories in the working
4027 4027 directory. To search just the current directory and its
4028 4028 subdirectories, use "--include .".
4029 4029
4030 4030 If no patterns are given to match, this command prints the names
4031 4031 of all files under Mercurial control in the working directory.
4032 4032
4033 4033 If you want to feed the output of this command into the "xargs"
4034 4034 command, use the -0 option to both this command and "xargs". This
4035 4035 will avoid the problem of "xargs" treating single filenames that
4036 4036 contain whitespace as multiple filenames.
4037 4037
4038 4038 Returns 0 if a match is found, 1 otherwise.
4039 4039 """
4040 4040 end = opts.get('print0') and '\0' or '\n'
4041 4041 rev = scmutil.revsingle(repo, opts.get('rev'), None).node()
4042 4042
4043 4043 ret = 1
4044 4044 ctx = repo[rev]
4045 4045 m = scmutil.match(ctx, pats, opts, default='relglob')
4046 4046 m.bad = lambda x, y: False
4047 4047
4048 4048 for abs in ctx.matches(m):
4049 4049 if opts.get('fullpath'):
4050 4050 ui.write(repo.wjoin(abs), end)
4051 4051 else:
4052 4052 ui.write(((pats and m.rel(abs)) or abs), end)
4053 4053 ret = 0
4054 4054
4055 4055 return ret
4056 4056
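# Small sketch of why -0/--print0 above pairs with "xargs -0": NUL cannot
# occur in a filename, so names containing whitespace survive intact.
# The file names are made up for illustration.
_names = ['plain.txt', 'with space.txt']
_stream = '\0'.join(_names) + '\0'
assert _stream.split('\0')[:-1] == _names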
4057 4057 @command('^log|history',
4058 4058 [('f', 'follow', None,
4059 4059 _('follow changeset history, or file history across copies and renames')),
4060 4060 ('', 'follow-first', None,
4061 4061 _('only follow the first parent of merge changesets (DEPRECATED)')),
4062 4062 ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
4063 4063 ('C', 'copies', None, _('show copied files')),
4064 4064 ('k', 'keyword', [],
4065 4065 _('do case-insensitive search for a given text'), _('TEXT')),
4066 4066 ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
4067 4067 ('', 'removed', None, _('include revisions where files were removed')),
4068 4068 ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
4069 4069 ('u', 'user', [], _('revisions committed by user'), _('USER')),
4070 4070 ('', 'only-branch', [],
4071 4071 _('show only changesets within the given named branch (DEPRECATED)'),
4072 4072 _('BRANCH')),
4073 4073 ('b', 'branch', [],
4074 4074 _('show changesets within the given named branch'), _('BRANCH')),
4075 4075 ('P', 'prune', [],
4076 4076 _('do not display revision or any of its ancestors'), _('REV')),
4077 4077 ] + logopts + walkopts,
4078 4078 _('[OPTION]... [FILE]'),
4079 4079 inferrepo=True)
4080 4080 def log(ui, repo, *pats, **opts):
4081 4081 """show revision history of entire repository or files
4082 4082
4083 4083 Print the revision history of the specified files or the entire
4084 4084 project.
4085 4085
4086 4086 If no revision range is specified, the default is ``tip:0`` unless
4087 4087 --follow is set, in which case the working directory parent is
4088 4088 used as the starting revision.
4089 4089
4090 4090 File history is shown without following rename or copy history of
4091 4091 files. Use -f/--follow with a filename to follow history across
4092 4092 renames and copies. --follow without a filename will only show
4093 4093 ancestors or descendants of the starting revision.
4094 4094
4095 4095 By default this command prints revision number and changeset id,
4096 4096 tags, non-trivial parents, user, date and time, and a summary for
4097 4097 each commit. When the -v/--verbose switch is used, the list of
4098 4098 changed files and full commit message are shown.
4099 4099
4100 4100 With --graph the revisions are shown as an ASCII art DAG with the most
4101 4101 recent changeset at the top.
4102 4102 'o' is a changeset, '@' is a working directory parent, 'x' is obsolete,
4103 4103 and '+' represents a fork where the changeset from the lines below is a
4104 4104 parent of the 'o' merge on the same line.
4105 4105
4106 4106 .. note::
4107 4107
4108 4108 log -p/--patch may generate unexpected diff output for merge
4109 4109 changesets, as it will only compare the merge changeset against
4110 4110 its first parent. Also, only files different from BOTH parents
4111 4111 will appear in files:.
4112 4112
4113 4113 .. note::
4114 4114
4115 4115 for performance reasons, log FILE may omit duplicate changes
4116 4116 made on branches and will not show deletions. To see all
4117 4117 changes including duplicates and deletions, use the --removed
4118 4118 switch.
4119 4119
4120 4120 .. container:: verbose
4121 4121
4122 4122 Some examples:
4123 4123
4124 4124 - changesets with full descriptions and file lists::
4125 4125
4126 4126 hg log -v
4127 4127
4128 4128 - changesets ancestral to the working directory::
4129 4129
4130 4130 hg log -f
4131 4131
4132 4132 - last 10 commits on the current branch::
4133 4133
4134 4134 hg log -l 10 -b .
4135 4135
4136 4136 - changesets showing all modifications of a file, including removals::
4137 4137
4138 4138 hg log --removed file.c
4139 4139
4140 4140 - all changesets that touch a directory, with diffs, excluding merges::
4141 4141
4142 4142 hg log -Mp lib/
4143 4143
4144 4144 - all revision numbers that match a keyword::
4145 4145
4146 4146 hg log -k bug --template "{rev}\\n"
4147 4147
4148 4148 - list available log templates::
4149 4149
4150 4150 hg log -T list
4151 4151
 4152 4152 - check if a given changeset is included in a tagged release::
4153 4153
4154 4154 hg log -r "a21ccf and ancestor(1.9)"
4155 4155
4156 4156 - find all changesets by some user in a date range::
4157 4157
4158 4158 hg log -k alice -d "may 2008 to jul 2008"
4159 4159
4160 4160 - summary of all changesets after the last tag::
4161 4161
4162 4162 hg log -r "last(tagged())::" --template "{desc|firstline}\\n"
4163 4163
4164 4164 See :hg:`help dates` for a list of formats valid for -d/--date.
4165 4165
4166 4166 See :hg:`help revisions` and :hg:`help revsets` for more about
4167 4167 specifying revisions.
4168 4168
4169 4169 See :hg:`help templates` for more about pre-packaged styles and
4170 4170 specifying custom templates.
4171 4171
4172 4172 Returns 0 on success.
4173 4173 """
4174 4174 if opts.get('graph'):
4175 4175 return cmdutil.graphlog(ui, repo, *pats, **opts)
4176 4176
4177 4177 revs, expr, filematcher = cmdutil.getlogrevs(repo, pats, opts)
4178 4178 limit = cmdutil.loglimit(opts)
4179 4179 count = 0
4180 4180
4181 4181 getrenamed = None
4182 4182 if opts.get('copies'):
4183 4183 endrev = None
4184 4184 if opts.get('rev'):
4185 4185 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
4186 4186 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
4187 4187
4188 4188 displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
4189 4189 for rev in revs:
4190 4190 if count == limit:
4191 4191 break
4192 4192 ctx = repo[rev]
4193 4193 copies = None
4194 4194 if getrenamed is not None and rev:
4195 4195 copies = []
4196 4196 for fn in ctx.files():
4197 4197 rename = getrenamed(fn, rev)
4198 4198 if rename:
4199 4199 copies.append((fn, rename[0]))
4200 4200 revmatchfn = filematcher and filematcher(ctx.rev()) or None
4201 4201 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
4202 4202 if displayer.flush(rev):
4203 4203 count += 1
4204 4204
4205 4205 displayer.close()
4206 4206
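# Sketch of the --copies collection in the log loop above: for each file
# touched by a changeset, record a (file, source) pair when a rename source
# is known. The callable passed in stands in for the getrenamed helper built
# above; the lambda data below is made up.
def _collect_copies(files, rev, getrenamed):
    copies = []
    for fn in files:
        rename = getrenamed(fn, rev)
        if rename:
            copies.append((fn, rename[0]))
    return copies

# _collect_copies(['new.txt'], 5, lambda fn, rev: ('old.txt', None))
# -> [('new.txt', 'old.txt')]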
4207 4207 @command('manifest',
4208 4208 [('r', 'rev', '', _('revision to display'), _('REV')),
4209 4209 ('', 'all', False, _("list files from all revisions"))],
4210 4210 _('[-r REV]'))
4211 4211 def manifest(ui, repo, node=None, rev=None, **opts):
4212 4212 """output the current or given revision of the project manifest
4213 4213
4214 4214 Print a list of version controlled files for the given revision.
4215 4215 If no revision is given, the first parent of the working directory
4216 4216 is used, or the null revision if no revision is checked out.
4217 4217
4218 4218 With -v, print file permissions, symlink and executable bits.
4219 4219 With --debug, print file revision hashes.
4220 4220
4221 4221 If option --all is specified, the list of all files from all revisions
4222 4222 is printed. This includes deleted and renamed files.
4223 4223
4224 4224 Returns 0 on success.
4225 4225 """
4226 4226
4227 4227 fm = ui.formatter('manifest', opts)
4228 4228
4229 4229 if opts.get('all'):
4230 4230 if rev or node:
4231 4231 raise util.Abort(_("can't specify a revision with --all"))
4232 4232
4233 4233 res = []
4234 4234 prefix = "data/"
4235 4235 suffix = ".i"
4236 4236 plen = len(prefix)
4237 4237 slen = len(suffix)
4238 4238 lock = repo.lock()
4239 4239 try:
4240 4240 for fn, b, size in repo.store.datafiles():
4241 4241 if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix:
4242 4242 res.append(fn[plen:-slen])
4243 4243 finally:
4244 4244 lock.release()
4245 4245 for f in res:
4246 4246 fm.startitem()
4247 4247 fm.write("path", '%s\n', f)
4248 4248 fm.end()
4249 4249 return
4250 4250
4251 4251 if rev and node:
4252 4252 raise util.Abort(_("please specify just one revision"))
4253 4253
4254 4254 if not node:
4255 4255 node = rev
4256 4256
4257 4257 char = {'l': '@', 'x': '*', '': ''}
4258 4258 mode = {'l': '644', 'x': '755', '': '644'}
4259 4259 ctx = scmutil.revsingle(repo, node)
4260 4260 mf = ctx.manifest()
4261 4261 for f in ctx:
4262 4262 fm.startitem()
4263 4263 fl = ctx[f].flags()
4264 4264 fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f]))
4265 4265 fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl])
4266 4266 fm.write('path', '%s\n', f)
4267 4267 fm.end()
4268 4268
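# Standalone sketch of the --all branch of manifest above: tracked file names
# are recovered from non-empty store entries of the form "data/<path>.i".
# The list of store files here is made up for illustration.
_prefix, _suffix = 'data/', '.i'
_datafiles = ['data/README.i', 'data/src/app.py.i', '00changelog.i']
_tracked = [fn[len(_prefix):-len(_suffix)]
            for fn in _datafiles
            if fn.startswith(_prefix) and fn.endswith(_suffix)]
# _tracked == ['README', 'src/app.py']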
4269 4269 @command('^merge',
4270 4270 [('f', 'force', None,
4271 4271 _('force a merge including outstanding changes (DEPRECATED)')),
4272 4272 ('r', 'rev', '', _('revision to merge'), _('REV')),
4273 4273 ('P', 'preview', None,
4274 4274 _('review revisions to merge (no merge is performed)'))
4275 4275 ] + mergetoolopts,
4276 4276 _('[-P] [-f] [[-r] REV]'))
4277 4277 def merge(ui, repo, node=None, **opts):
4278 4278 """merge working directory with another revision
4279 4279
4280 4280 The current working directory is updated with all changes made in
4281 4281 the requested revision since the last common predecessor revision.
4282 4282
4283 4283 Files that changed between either parent are marked as changed for
4284 4284 the next commit and a commit must be performed before any further
4285 4285 updates to the repository are allowed. The next commit will have
4286 4286 two parents.
4287 4287
4288 4288 ``--tool`` can be used to specify the merge tool used for file
4289 4289 merges. It overrides the HGMERGE environment variable and your
4290 4290 configuration files. See :hg:`help merge-tools` for options.
4291 4291
4292 4292 If no revision is specified, the working directory's parent is a
4293 4293 head revision, and the current branch contains exactly one other
4294 4294 head, the other head is merged with by default. Otherwise, an
 4295 4295 explicit revision with which to merge must be provided.
4296 4296
4297 4297 :hg:`resolve` must be used to resolve unresolved files.
4298 4298
4299 4299 To undo an uncommitted merge, use :hg:`update --clean .` which
4300 4300 will check out a clean copy of the original merge parent, losing
4301 4301 all changes.
4302 4302
4303 4303 Returns 0 on success, 1 if there are unresolved files.
4304 4304 """
4305 4305
4306 4306 if opts.get('rev') and node:
4307 4307 raise util.Abort(_("please specify just one revision"))
4308 4308 if not node:
4309 4309 node = opts.get('rev')
4310 4310
4311 4311 if node:
4312 4312 node = scmutil.revsingle(repo, node).node()
4313 4313
4314 4314 if not node and repo._bookmarkcurrent:
4315 4315 bmheads = repo.bookmarkheads(repo._bookmarkcurrent)
4316 4316 curhead = repo[repo._bookmarkcurrent].node()
4317 4317 if len(bmheads) == 2:
4318 4318 if curhead == bmheads[0]:
4319 4319 node = bmheads[1]
4320 4320 else:
4321 4321 node = bmheads[0]
4322 4322 elif len(bmheads) > 2:
4323 4323 raise util.Abort(_("multiple matching bookmarks to merge - "
4324 4324 "please merge with an explicit rev or bookmark"),
4325 4325 hint=_("run 'hg heads' to see all heads"))
4326 4326 elif len(bmheads) <= 1:
4327 4327 raise util.Abort(_("no matching bookmark to merge - "
4328 4328 "please merge with an explicit rev or bookmark"),
4329 4329 hint=_("run 'hg heads' to see all heads"))
4330 4330
4331 4331 if not node and not repo._bookmarkcurrent:
4332 4332 branch = repo[None].branch()
4333 4333 bheads = repo.branchheads(branch)
4334 4334 nbhs = [bh for bh in bheads if not repo[bh].bookmarks()]
4335 4335
4336 4336 if len(nbhs) > 2:
4337 4337 raise util.Abort(_("branch '%s' has %d heads - "
4338 4338 "please merge with an explicit rev")
4339 4339 % (branch, len(bheads)),
4340 4340 hint=_("run 'hg heads .' to see heads"))
4341 4341
4342 4342 parent = repo.dirstate.p1()
4343 4343 if len(nbhs) <= 1:
4344 4344 if len(bheads) > 1:
4345 4345 raise util.Abort(_("heads are bookmarked - "
4346 4346 "please merge with an explicit rev"),
4347 4347 hint=_("run 'hg heads' to see all heads"))
4348 4348 if len(repo.heads()) > 1:
4349 4349 raise util.Abort(_("branch '%s' has one head - "
4350 4350 "please merge with an explicit rev")
4351 4351 % branch,
4352 4352 hint=_("run 'hg heads' to see all heads"))
4353 4353 msg, hint = _('nothing to merge'), None
4354 4354 if parent != repo.lookup(branch):
4355 4355 hint = _("use 'hg update' instead")
4356 4356 raise util.Abort(msg, hint=hint)
4357 4357
4358 4358 if parent not in bheads:
4359 4359 raise util.Abort(_('working directory not at a head revision'),
4360 4360 hint=_("use 'hg update' or merge with an "
4361 4361 "explicit revision"))
4362 4362 if parent == nbhs[0]:
4363 4363 node = nbhs[-1]
4364 4364 else:
4365 4365 node = nbhs[0]
4366 4366
4367 4367 if opts.get('preview'):
4368 4368 # find nodes that are ancestors of p2 but not of p1
4369 4369 p1 = repo.lookup('.')
4370 4370 p2 = repo.lookup(node)
4371 4371 nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
4372 4372
4373 4373 displayer = cmdutil.show_changeset(ui, repo, opts)
4374 4374 for node in nodes:
4375 4375 displayer.show(repo[node])
4376 4376 displayer.close()
4377 4377 return 0
4378 4378
4379 4379 try:
4380 4380 # ui.forcemerge is an internal variable, do not document
4381 4381 repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'merge')
4382 4382 return hg.merge(repo, node, force=opts.get('force'))
4383 4383 finally:
4384 4384 ui.setconfig('ui', 'forcemerge', '', 'merge')
4385 4385
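# Sketch of the default-head selection in merge above: with exactly two
# candidate heads, merge with whichever one is not the current parent.
# The hashes below are placeholders.
def _pick_other_head(heads, current):
    assert len(heads) == 2
    if heads[0] == current:
        return heads[1]
    return heads[0]

# _pick_other_head(['a1b2', 'c3d4'], 'a1b2') -> 'c3d4'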
4386 4386 @command('outgoing|out',
4387 4387 [('f', 'force', None, _('run even when the destination is unrelated')),
4388 4388 ('r', 'rev', [],
4389 4389 _('a changeset intended to be included in the destination'), _('REV')),
4390 4390 ('n', 'newest-first', None, _('show newest record first')),
4391 4391 ('B', 'bookmarks', False, _('compare bookmarks')),
4392 4392 ('b', 'branch', [], _('a specific branch you would like to push'),
4393 4393 _('BRANCH')),
4394 4394 ] + logopts + remoteopts + subrepoopts,
4395 4395 _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
4396 4396 def outgoing(ui, repo, dest=None, **opts):
4397 4397 """show changesets not found in the destination
4398 4398
4399 4399 Show changesets not found in the specified destination repository
4400 4400 or the default push location. These are the changesets that would
4401 4401 be pushed if a push was requested.
4402 4402
4403 4403 See pull for details of valid destination formats.
4404 4404
4405 4405 Returns 0 if there are outgoing changes, 1 otherwise.
4406 4406 """
4407 4407 if opts.get('graph'):
4408 4408 cmdutil.checkunsupportedgraphflags([], opts)
4409 4409 o, other = hg._outgoing(ui, repo, dest, opts)
4410 4410 if not o:
4411 4411 cmdutil.outgoinghooks(ui, repo, other, opts, o)
4412 4412 return
4413 4413
4414 4414 revdag = cmdutil.graphrevs(repo, o, opts)
4415 4415 displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
4416 4416 showparents = [ctx.node() for ctx in repo[None].parents()]
4417 4417 cmdutil.displaygraph(ui, revdag, displayer, showparents,
4418 4418 graphmod.asciiedges)
4419 4419 cmdutil.outgoinghooks(ui, repo, other, opts, o)
4420 4420 return 0
4421 4421
4422 4422 if opts.get('bookmarks'):
4423 4423 dest = ui.expandpath(dest or 'default-push', dest or 'default')
4424 4424 dest, branches = hg.parseurl(dest, opts.get('branch'))
4425 4425 other = hg.peer(repo, opts, dest)
4426 4426 if 'bookmarks' not in other.listkeys('namespaces'):
4427 4427 ui.warn(_("remote doesn't support bookmarks\n"))
4428 4428 return 0
4429 4429 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
4430 4430 return bookmarks.diff(ui, other, repo)
4431 4431
4432 4432 repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default')
4433 4433 try:
4434 4434 return hg.outgoing(ui, repo, dest, opts)
4435 4435 finally:
4436 4436 del repo._subtoppath
4437 4437
4438 4438 @command('parents',
4439 4439 [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
4440 4440 ] + templateopts,
4441 4441 _('[-r REV] [FILE]'),
4442 4442 inferrepo=True)
4443 4443 def parents(ui, repo, file_=None, **opts):
4444 4444 """show the parents of the working directory or revision
4445 4445
4446 4446 Print the working directory's parent revisions. If a revision is
4447 4447 given via -r/--rev, the parent of that revision will be printed.
4448 4448 If a file argument is given, the revision in which the file was
4449 4449 last changed (before the working directory revision or the
4450 4450 argument to --rev if given) is printed.
4451 4451
4452 4452 Returns 0 on success.
4453 4453 """
4454 4454
4455 4455 ctx = scmutil.revsingle(repo, opts.get('rev'), None)
4456 4456
4457 4457 if file_:
4458 4458 m = scmutil.match(ctx, (file_,), opts)
4459 4459 if m.anypats() or len(m.files()) != 1:
4460 4460 raise util.Abort(_('can only specify an explicit filename'))
4461 4461 file_ = m.files()[0]
4462 4462 filenodes = []
4463 4463 for cp in ctx.parents():
4464 4464 if not cp:
4465 4465 continue
4466 4466 try:
4467 4467 filenodes.append(cp.filenode(file_))
4468 4468 except error.LookupError:
4469 4469 pass
4470 4470 if not filenodes:
4471 4471 raise util.Abort(_("'%s' not found in manifest!") % file_)
4472 4472 p = []
4473 4473 for fn in filenodes:
4474 4474 fctx = repo.filectx(file_, fileid=fn)
4475 4475 p.append(fctx.node())
4476 4476 else:
4477 4477 p = [cp.node() for cp in ctx.parents()]
4478 4478
4479 4479 displayer = cmdutil.show_changeset(ui, repo, opts)
4480 4480 for n in p:
4481 4481 if n != nullid:
4482 4482 displayer.show(repo[n])
4483 4483 displayer.close()
4484 4484
4485 4485 @command('paths', [], _('[NAME]'), optionalrepo=True)
4486 4486 def paths(ui, repo, search=None):
4487 4487 """show aliases for remote repositories
4488 4488
4489 4489 Show definition of symbolic path name NAME. If no name is given,
4490 4490 show definition of all available names.
4491 4491
4492 4492 Option -q/--quiet suppresses all output when searching for NAME
4493 4493 and shows only the path names when listing all definitions.
4494 4494
4495 4495 Path names are defined in the [paths] section of your
4496 4496 configuration file and in ``/etc/mercurial/hgrc``. If run inside a
4497 4497 repository, ``.hg/hgrc`` is used, too.
4498 4498
4499 4499 The path names ``default`` and ``default-push`` have a special
4500 4500 meaning. When performing a push or pull operation, they are used
4501 4501 as fallbacks if no location is specified on the command-line.
4502 4502 When ``default-push`` is set, it will be used for push and
4503 4503 ``default`` will be used for pull; otherwise ``default`` is used
4504 4504 as the fallback for both. When cloning a repository, the clone
4505 4505 source is written as ``default`` in ``.hg/hgrc``. Note that
4506 4506 ``default`` and ``default-push`` apply to all inbound (e.g.
4507 4507 :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` and
4508 4508 :hg:`bundle`) operations.
4509 4509
4510 4510 See :hg:`help urls` for more information.
4511 4511
4512 4512 Returns 0 on success.
4513 4513 """
4514 4514 if search:
4515 4515 for name, path in ui.configitems("paths"):
4516 4516 if name == search:
4517 4517 ui.status("%s\n" % util.hidepassword(path))
4518 4518 return
4519 4519 if not ui.quiet:
4520 4520 ui.warn(_("not found!\n"))
4521 4521 return 1
4522 4522 else:
4523 4523 for name, path in ui.configitems("paths"):
4524 4524 if ui.quiet:
4525 4525 ui.write("%s\n" % name)
4526 4526 else:
4527 4527 ui.write("%s = %s\n" % (name, util.hidepassword(path)))
4528 4528
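# Sketch of the default/default-push fallback rule described in the paths
# docstring above, as a plain dict lookup (the real code reads the [paths]
# section of the configuration; the dict here is illustrative).
def _lookup_path(paths, pushing):
    if pushing and 'default-push' in paths:
        return paths['default-push']
    return paths.get('default')

# _lookup_path({'default': 'http://a', 'default-push': 'ssh://b'}, True)
# -> 'ssh://b'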
4529 4529 @command('phase',
4530 4530 [('p', 'public', False, _('set changeset phase to public')),
4531 4531 ('d', 'draft', False, _('set changeset phase to draft')),
4532 4532 ('s', 'secret', False, _('set changeset phase to secret')),
4533 4533 ('f', 'force', False, _('allow to move boundary backward')),
4534 4534 ('r', 'rev', [], _('target revision'), _('REV')),
4535 4535 ],
4536 4536 _('[-p|-d|-s] [-f] [-r] REV...'))
4537 4537 def phase(ui, repo, *revs, **opts):
4538 4538 """set or show the current phase name
4539 4539
4540 4540 With no argument, show the phase name of specified revisions.
4541 4541
4542 4542 With one of -p/--public, -d/--draft or -s/--secret, change the
4543 4543 phase value of the specified revisions.
4544 4544
 4545 4545 Unless -f/--force is specified, :hg:`phase` won't move changesets from a
 4546 4546 lower phase to a higher phase. Phases are ordered as follows::
4547 4547
4548 4548 public < draft < secret
4549 4549
4550 4550 Returns 0 on success, 1 if no phases were changed or some could not
4551 4551 be changed.
4552 4552 """
4553 4553 # search for a unique phase argument
4554 4554 targetphase = None
4555 4555 for idx, name in enumerate(phases.phasenames):
4556 4556 if opts[name]:
4557 4557 if targetphase is not None:
4558 4558 raise util.Abort(_('only one phase can be specified'))
4559 4559 targetphase = idx
4560 4560
4561 4561 # look for specified revision
4562 4562 revs = list(revs)
4563 4563 revs.extend(opts['rev'])
4564 4564 if not revs:
4565 4565 raise util.Abort(_('no revisions specified'))
4566 4566
4567 4567 revs = scmutil.revrange(repo, revs)
4568 4568
4569 4569 lock = None
4570 4570 ret = 0
4571 4571 if targetphase is None:
4572 4572 # display
4573 4573 for r in revs:
4574 4574 ctx = repo[r]
4575 4575 ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr()))
4576 4576 else:
4577 4577 tr = None
4578 4578 lock = repo.lock()
4579 4579 try:
4580 4580 tr = repo.transaction("phase")
4581 4581 # set phase
4582 4582 if not revs:
4583 4583 raise util.Abort(_('empty revision set'))
4584 4584 nodes = [repo[r].node() for r in revs]
4585 4585 olddata = repo._phasecache.getphaserevs(repo)[:]
4586 phases.advanceboundary(repo, targetphase, nodes)
4586 phases.advanceboundary(repo, tr, targetphase, nodes)
4587 4587 if opts['force']:
4588 4588 phases.retractboundary(repo, targetphase, nodes)
4589 4589 tr.close()
4590 4590 finally:
4591 4591 if tr is not None:
4592 4592 tr.release()
4593 4593 lock.release()
 4594 4594 # moving revisions from public to draft may hide them
4595 4595 # We have to check result on an unfiltered repository
4596 4596 unfi = repo.unfiltered()
4597 4597 newdata = repo._phasecache.getphaserevs(unfi)
4598 4598 changes = sum(o != newdata[i] for i, o in enumerate(olddata))
4599 4599 cl = unfi.changelog
4600 4600 rejected = [n for n in nodes
4601 4601 if newdata[cl.rev(n)] < targetphase]
4602 4602 if rejected:
4603 4603 ui.warn(_('cannot move %i changesets to a higher '
4604 4604 'phase, use --force\n') % len(rejected))
4605 4605 ret = 1
4606 4606 if changes:
4607 4607 msg = _('phase changed for %i changesets\n') % changes
4608 4608 if ret:
4609 4609 ui.status(msg)
4610 4610 else:
4611 4611 ui.note(msg)
4612 4612 else:
4613 4613 ui.warn(_('no phases changed\n'))
4614 4614 ret = 1
4615 4615 return ret
4616 4616
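# Sketch of the rejection check in phase above: phases are indexed
# public=0, draft=1, secret=2, and without --force a changeset only moves
# towards a lower index (e.g. draft -> public). Nodes already in a lower
# phase than the target are rejected. The node -> phase mapping is made up.
def _rejected_moves(current_phases, targetphase):
    return sorted(n for n, p in current_phases.items() if p < targetphase)

# _rejected_moves({'n1': 0, 'n2': 2}, 1) -> ['n1']
# (public -> draft needs --force)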
4617 4617 def postincoming(ui, repo, modheads, optupdate, checkout):
4618 4618 if modheads == 0:
4619 4619 return
4620 4620 if optupdate:
4621 4621 checkout, movemarkfrom = bookmarks.calculateupdate(ui, repo, checkout)
4622 4622 try:
4623 4623 ret = hg.update(repo, checkout)
4624 4624 except util.Abort, inst:
4625 4625 ui.warn(_("not updating: %s\n") % str(inst))
4626 4626 if inst.hint:
4627 4627 ui.warn(_("(%s)\n") % inst.hint)
4628 4628 return 0
4629 4629 if not ret and not checkout:
4630 4630 if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
4631 4631 ui.status(_("updating bookmark %s\n") % repo._bookmarkcurrent)
4632 4632 return ret
4633 4633 if modheads > 1:
4634 4634 currentbranchheads = len(repo.branchheads())
4635 4635 if currentbranchheads == modheads:
4636 4636 ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
4637 4637 elif currentbranchheads > 1:
4638 4638 ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to "
4639 4639 "merge)\n"))
4640 4640 else:
4641 4641 ui.status(_("(run 'hg heads' to see heads)\n"))
4642 4642 else:
4643 4643 ui.status(_("(run 'hg update' to get a working copy)\n"))
4644 4644
4645 4645 @command('^pull',
4646 4646 [('u', 'update', None,
4647 4647 _('update to new branch head if changesets were pulled')),
4648 4648 ('f', 'force', None, _('run even when remote repository is unrelated')),
4649 4649 ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
4650 4650 ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
4651 4651 ('b', 'branch', [], _('a specific branch you would like to pull'),
4652 4652 _('BRANCH')),
4653 4653 ] + remoteopts,
4654 4654 _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
4655 4655 def pull(ui, repo, source="default", **opts):
4656 4656 """pull changes from the specified source
4657 4657
4658 4658 Pull changes from a remote repository to a local one.
4659 4659
4660 4660 This finds all changes from the repository at the specified path
4661 4661 or URL and adds them to a local repository (the current one unless
4662 4662 -R is specified). By default, this does not update the copy of the
4663 4663 project in the working directory.
4664 4664
4665 4665 Use :hg:`incoming` if you want to see what would have been added
4666 4666 by a pull at the time you issued this command. If you then decide
4667 4667 to add those changes to the repository, you should use :hg:`pull
4668 4668 -r X` where ``X`` is the last changeset listed by :hg:`incoming`.
4669 4669
4670 4670 If SOURCE is omitted, the 'default' path will be used.
4671 4671 See :hg:`help urls` for more information.
4672 4672
4673 4673 Returns 0 on success, 1 if an update had unresolved files.
4674 4674 """
4675 4675 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
4676 4676 other = hg.peer(repo, opts, source)
4677 4677 try:
4678 4678 ui.status(_('pulling from %s\n') % util.hidepassword(source))
4679 4679 revs, checkout = hg.addbranchrevs(repo, other, branches,
4680 4680 opts.get('rev'))
4681 4681
4682 4682 remotebookmarks = other.listkeys('bookmarks')
4683 4683
4684 4684 if opts.get('bookmark'):
4685 4685 if not revs:
4686 4686 revs = []
4687 4687 for b in opts['bookmark']:
4688 4688 if b not in remotebookmarks:
4689 4689 raise util.Abort(_('remote bookmark %s not found!') % b)
4690 4690 revs.append(remotebookmarks[b])
4691 4691
4692 4692 if revs:
4693 4693 try:
4694 4694 revs = [other.lookup(rev) for rev in revs]
4695 4695 except error.CapabilityError:
4696 4696 err = _("other repository doesn't support revision lookup, "
4697 4697 "so a rev cannot be specified.")
4698 4698 raise util.Abort(err)
4699 4699
4700 4700 modheads = repo.pull(other, heads=revs, force=opts.get('force'))
4701 4701 bookmarks.updatefromremote(ui, repo, remotebookmarks, source)
4702 4702 if checkout:
4703 4703 checkout = str(repo.changelog.rev(other.lookup(checkout)))
4704 4704 repo._subtoppath = source
4705 4705 try:
4706 4706 ret = postincoming(ui, repo, modheads, opts.get('update'), checkout)
4707 4707
4708 4708 finally:
4709 4709 del repo._subtoppath
4710 4710
4711 4711 # update specified bookmarks
4712 4712 if opts.get('bookmark'):
4713 4713 marks = repo._bookmarks
4714 4714 for b in opts['bookmark']:
4715 4715 # explicit pull overrides local bookmark if any
4716 4716 ui.status(_("importing bookmark %s\n") % b)
4717 4717 marks[b] = repo[remotebookmarks[b]].node()
4718 4718 marks.write()
4719 4719 finally:
4720 4720 other.close()
4721 4721 return ret
4722 4722
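# Sketch of the -B handling in pull above: every requested bookmark must
# exist on the remote, and each one contributes its target changeset to the
# set of revs to pull. Plain dicts stand in for the remote listkeys data.
def _bookmark_revs(requested, remotebookmarks):
    revs = []
    for b in requested:
        if b not in remotebookmarks:
            raise KeyError('remote bookmark %s not found!' % b)
        revs.append(remotebookmarks[b])
    return revs

# _bookmark_revs(['stable'], {'stable': 'a1b2c3'}) -> ['a1b2c3']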
4723 4723 @command('^push',
4724 4724 [('f', 'force', None, _('force push')),
4725 4725 ('r', 'rev', [],
4726 4726 _('a changeset intended to be included in the destination'),
4727 4727 _('REV')),
4728 4728 ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
4729 4729 ('b', 'branch', [],
4730 4730 _('a specific branch you would like to push'), _('BRANCH')),
4731 4731 ('', 'new-branch', False, _('allow pushing a new branch')),
4732 4732 ] + remoteopts,
4733 4733 _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
4734 4734 def push(ui, repo, dest=None, **opts):
4735 4735 """push changes to the specified destination
4736 4736
4737 4737 Push changesets from the local repository to the specified
4738 4738 destination.
4739 4739
4740 4740 This operation is symmetrical to pull: it is identical to a pull
4741 4741 in the destination repository from the current one.
4742 4742
4743 4743 By default, push will not allow creation of new heads at the
4744 4744 destination, since multiple heads would make it unclear which head
4745 4745 to use. In this situation, it is recommended to pull and merge
4746 4746 before pushing.
4747 4747
4748 4748 Use --new-branch if you want to allow push to create a new named
4749 4749 branch that is not present at the destination. This allows you to
4750 4750 only create a new branch without forcing other changes.
4751 4751
4752 4752 .. note::
4753 4753
4754 4754 Extra care should be taken with the -f/--force option,
4755 4755 which will push all new heads on all branches, an action which will
4756 4756 almost always cause confusion for collaborators.
4757 4757
4758 4758 If -r/--rev is used, the specified revision and all its ancestors
4759 4759 will be pushed to the remote repository.
4760 4760
4761 4761 If -B/--bookmark is used, the specified bookmarked revision, its
4762 4762 ancestors, and the bookmark will be pushed to the remote
4763 4763 repository.
4764 4764
4765 4765 Please see :hg:`help urls` for important details about ``ssh://``
4766 4766 URLs. If DESTINATION is omitted, a default path will be used.
4767 4767
4768 4768 Returns 0 if push was successful, 1 if nothing to push.
4769 4769 """
4770 4770
4771 4771 if opts.get('bookmark'):
4772 4772 ui.setconfig('bookmarks', 'pushing', opts['bookmark'], 'push')
4773 4773 for b in opts['bookmark']:
4774 4774 # translate -B options to -r so changesets get pushed
4775 4775 if b in repo._bookmarks:
4776 4776 opts.setdefault('rev', []).append(b)
4777 4777 else:
4778 4778 # if we try to push a deleted bookmark, translate it to null
4779 4779 # this lets simultaneous -r, -b options continue working
4780 4780 opts.setdefault('rev', []).append("null")
4781 4781
4782 4782 dest = ui.expandpath(dest or 'default-push', dest or 'default')
4783 4783 dest, branches = hg.parseurl(dest, opts.get('branch'))
4784 4784 ui.status(_('pushing to %s\n') % util.hidepassword(dest))
4785 4785 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
4786 4786 try:
4787 4787 other = hg.peer(repo, opts, dest)
4788 4788 except error.RepoError:
4789 4789 if dest == "default-push":
4790 4790 raise util.Abort(_("default repository not configured!"),
4791 4791 hint=_('see the "path" section in "hg help config"'))
4792 4792 else:
4793 4793 raise
4794 4794
4795 4795 if revs:
4796 4796 revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)]
4797 4797
4798 4798 repo._subtoppath = dest
4799 4799 try:
4800 4800 # push subrepos depth-first for coherent ordering
4801 4801 c = repo['']
4802 4802 subs = c.substate # only repos that are committed
4803 4803 for s in sorted(subs):
4804 4804 result = c.sub(s).push(opts)
4805 4805 if result == 0:
4806 4806 return not result
4807 4807 finally:
4808 4808 del repo._subtoppath
4809 4809 result = repo.push(other, opts.get('force'), revs=revs,
4810 4810 newbranch=opts.get('new_branch'))
4811 4811
4812 4812 result = not result
4813 4813
4814 4814 if opts.get('bookmark'):
4815 4815 bresult = bookmarks.pushtoremote(ui, repo, other, opts['bookmark'])
4816 4816 if bresult == 2:
4817 4817 return 2
4818 4818 if not result and bresult:
4819 4819 result = 2
4820 4820
4821 4821 return result
4822 4822
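# Editorial note: a minimal standalone sketch (hypothetical helper, not
# Mercurial API) of the -B/--bookmark handling in push() above: a bookmark
# that still exists locally is translated into a -r revision so its changesets
# are pushed, while a locally deleted bookmark is translated to "null" so the
# deletion can still be propagated alongside any other -r/-b options.
def _example_bookmarks_to_revs(bookmarks, localmarks):
    revs = []
    for b in bookmarks:
        # existing bookmark: push the changeset it points to
        # deleted bookmark: push "null" so the removal is still sent
        revs.append(b if b in localmarks else "null")
    return revs

assert _example_bookmarks_to_revs(['stable', 'gone'], {'stable': 'n1'}) == \
    ['stable', 'null']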
4823 4823 @command('recover', [])
4824 4824 def recover(ui, repo):
4825 4825 """roll back an interrupted transaction
4826 4826
4827 4827 Recover from an interrupted commit or pull.
4828 4828
4829 4829 This command tries to fix the repository status after an
4830 4830 interrupted operation. It should only be necessary when Mercurial
4831 4831 suggests it.
4832 4832
4833 4833 Returns 0 if successful, 1 if nothing to recover or verify fails.
4834 4834 """
4835 4835 if repo.recover():
4836 4836 return hg.verify(repo)
4837 4837 return 1
4838 4838
4839 4839 @command('^remove|rm',
4840 4840 [('A', 'after', None, _('record delete for missing files')),
4841 4841 ('f', 'force', None,
4842 4842 _('remove (and delete) file even if added or modified')),
4843 4843 ] + walkopts,
4844 4844 _('[OPTION]... FILE...'),
4845 4845 inferrepo=True)
4846 4846 def remove(ui, repo, *pats, **opts):
4847 4847 """remove the specified files on the next commit
4848 4848
4849 4849 Schedule the indicated files for removal from the current branch.
4850 4850
4851 4851 This command schedules the files to be removed at the next commit.
4852 4852 To undo a remove before that, see :hg:`revert`. To undo added
4853 4853 files, see :hg:`forget`.
4854 4854
4855 4855 .. container:: verbose
4856 4856
4857 4857 -A/--after can be used to remove only files that have already
4858 4858 been deleted, -f/--force can be used to force deletion, and -Af
4859 4859 can be used to remove files from the next revision without
4860 4860 deleting them from the working directory.
4861 4861
4862 4862 The following table details the behavior of remove for different
4863 4863 file states (columns) and option combinations (rows). The file
4864 4864 states are Added [A], Clean [C], Modified [M] and Missing [!]
4865 4865 (as reported by :hg:`status`). The actions are Warn, Remove
4866 4866 (from branch) and Delete (from disk):
4867 4867
4868 4868 ========= == == == ==
4869 4869 opt/state A C M !
4870 4870 ========= == == == ==
4871 4871 none W RD W R
4872 4872 -f R RD RD R
4873 4873 -A W W W R
4874 4874 -Af R R R R
4875 4875 ========= == == == ==
4876 4876
4877 4877 Note that remove never deletes files in Added [A] state from the
4878 4878 working directory, not even if option --force is specified.
4879 4879
4880 4880 Returns 0 on success, 1 if any warnings encountered.
4881 4881 """
4882 4882
4883 4883 ret = 0
4884 4884 after, force = opts.get('after'), opts.get('force')
4885 4885 if not pats and not after:
4886 4886 raise util.Abort(_('no files specified'))
4887 4887
4888 4888 m = scmutil.match(repo[None], pats, opts)
4889 4889 s = repo.status(match=m, clean=True)
4890 4890 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
4891 4891
4892 4892 # warn about failure to delete explicit files/dirs
4893 4893 wctx = repo[None]
4894 4894 for f in m.files():
4895 4895 if f in repo.dirstate or f in wctx.dirs():
4896 4896 continue
4897 4897 if os.path.exists(m.rel(f)):
4898 4898 if os.path.isdir(m.rel(f)):
4899 4899 ui.warn(_('not removing %s: no tracked files\n') % m.rel(f))
4900 4900 else:
4901 4901 ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
4902 4902 # missing files will generate a warning elsewhere
4903 4903 ret = 1
4904 4904
4905 4905 if force:
4906 4906 list = modified + deleted + clean + added
4907 4907 elif after:
4908 4908 list = deleted
4909 4909 for f in modified + added + clean:
4910 4910 ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
4911 4911 ret = 1
4912 4912 else:
4913 4913 list = deleted + clean
4914 4914 for f in modified:
4915 4915 ui.warn(_('not removing %s: file is modified (use -f'
4916 4916 ' to force removal)\n') % m.rel(f))
4917 4917 ret = 1
4918 4918 for f in added:
4919 4919 ui.warn(_('not removing %s: file has been marked for add'
4920 4920 ' (use forget to undo)\n') % m.rel(f))
4921 4921 ret = 1
4922 4922
4923 4923 for f in sorted(list):
4924 4924 if ui.verbose or not m.exact(f):
4925 4925 ui.status(_('removing %s\n') % m.rel(f))
4926 4926
4927 4927 wlock = repo.wlock()
4928 4928 try:
4929 4929 if not after:
4930 4930 for f in list:
4931 4931 if f in added:
4932 4932 continue # we never unlink added files on remove
4933 4933 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
4934 4934 repo[None].forget(list)
4935 4935 finally:
4936 4936 wlock.release()
4937 4937
4938 4938 return ret
4939 4939
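# Editorial note: the option/state table from the remove docstring above,
# restated as a small lookup purely for illustration (not Mercurial API).
# 'W' = warn, 'R' = remove from branch, 'D' = delete from disk; the file
# states are Added, Clean, Modified and Missing as reported by "hg status".
_example_remove_actions = {
    #        Added      Clean       Modified    Missing
    'none': {'A': 'W',  'C': 'RD',  'M': 'W',   '!': 'R'},
    '-f':   {'A': 'R',  'C': 'RD',  'M': 'RD',  '!': 'R'},
    '-A':   {'A': 'W',  'C': 'W',   'M': 'W',   '!': 'R'},
    '-Af':  {'A': 'R',  'C': 'R',   'M': 'R',   '!': 'R'},
}

# plain "hg remove" on a modified file only warns; -f removes and deletes it
assert _example_remove_actions['none']['M'] == 'W'
assert _example_remove_actions['-f']['M'] == 'RD'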
4940 4940 @command('rename|move|mv',
4941 4941 [('A', 'after', None, _('record a rename that has already occurred')),
4942 4942 ('f', 'force', None, _('forcibly copy over an existing managed file')),
4943 4943 ] + walkopts + dryrunopts,
4944 4944 _('[OPTION]... SOURCE... DEST'))
4945 4945 def rename(ui, repo, *pats, **opts):
4946 4946 """rename files; equivalent of copy + remove
4947 4947
4948 4948 Mark dest as copies of sources; mark sources for deletion. If dest
4949 4949 is a directory, copies are put in that directory. If dest is a
4950 4950 file, there can only be one source.
4951 4951
4952 4952 By default, this command copies the contents of files as they
4953 4953 exist in the working directory. If invoked with -A/--after, the
4954 4954 operation is recorded, but no copying is performed.
4955 4955
4956 4956 This command takes effect at the next commit. To undo a rename
4957 4957 before that, see :hg:`revert`.
4958 4958
4959 4959 Returns 0 on success, 1 if errors are encountered.
4960 4960 """
4961 4961 wlock = repo.wlock(False)
4962 4962 try:
4963 4963 return cmdutil.copy(ui, repo, pats, opts, rename=True)
4964 4964 finally:
4965 4965 wlock.release()
4966 4966
4967 4967 @command('resolve',
4968 4968 [('a', 'all', None, _('select all unresolved files')),
4969 4969 ('l', 'list', None, _('list state of files needing merge')),
4970 4970 ('m', 'mark', None, _('mark files as resolved')),
4971 4971 ('u', 'unmark', None, _('mark files as unresolved')),
4972 4972 ('n', 'no-status', None, _('hide status prefix'))]
4973 4973 + mergetoolopts + walkopts,
4974 4974 _('[OPTION]... [FILE]...'),
4975 4975 inferrepo=True)
4976 4976 def resolve(ui, repo, *pats, **opts):
4977 4977 """redo merges or set/view the merge status of files
4978 4978
4979 4979 Merges with unresolved conflicts are often the result of
4980 4980 non-interactive merging using the ``internal:merge`` configuration
4981 4981 setting, or a command-line merge tool like ``diff3``. The resolve
4982 4982 command is used to manage the files involved in a merge, after
4983 4983 :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
4984 4984 working directory must have two parents). See :hg:`help
4985 4985 merge-tools` for information on configuring merge tools.
4986 4986
4987 4987 The resolve command can be used in the following ways:
4988 4988
4989 4989 - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
4990 4990 files, discarding any previous merge attempts. Re-merging is not
4991 4991 performed for files already marked as resolved. Use ``--all/-a``
4992 4992 to select all unresolved files. ``--tool`` can be used to specify
4993 4993 the merge tool used for the given files. It overrides the HGMERGE
4994 4994 environment variable and your configuration files. Previous file
4995 4995 contents are saved with a ``.orig`` suffix.
4996 4996
4997 4997 - :hg:`resolve -m [FILE]`: mark a file as having been resolved
4998 4998 (e.g. after having manually fixed-up the files). The default is
4999 4999 to mark all unresolved files.
5000 5000
5001 5001 - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
5002 5002 default is to mark all resolved files.
5003 5003
5004 5004 - :hg:`resolve -l`: list files which had or still have conflicts.
5005 5005 In the printed list, ``U`` = unresolved and ``R`` = resolved.
5006 5006
5007 5007 Note that Mercurial will not let you commit files with unresolved
5008 5008 merge conflicts. You must use :hg:`resolve -m ...` before you can
5009 5009 commit after a conflicting merge.
5010 5010
5011 5011 Returns 0 on success, 1 if any files fail a resolve attempt.
5012 5012 """
5013 5013
5014 5014 all, mark, unmark, show, nostatus = \
5015 5015 [opts.get(o) for o in 'all mark unmark list no_status'.split()]
5016 5016
5017 5017 if (show and (mark or unmark)) or (mark and unmark):
5018 5018 raise util.Abort(_("too many options specified"))
5019 5019 if pats and all:
5020 5020 raise util.Abort(_("can't specify --all and patterns"))
5021 5021 if not (all or pats or show or mark or unmark):
5022 5022 raise util.Abort(_('no files or directories specified'),
5023 5023 hint=('use --all to remerge all files'))
5024 5024
5025 5025 wlock = repo.wlock()
5026 5026 try:
5027 5027 ms = mergemod.mergestate(repo)
5028 5028
5029 5029 if not ms.active() and not show:
5030 5030 raise util.Abort(
5031 5031 _('resolve command not applicable when not merging'))
5032 5032
5033 5033 m = scmutil.match(repo[None], pats, opts)
5034 5034 ret = 0
5035 5035 didwork = False
5036 5036
5037 5037 for f in ms:
5038 5038 if not m(f):
5039 5039 continue
5040 5040
5041 5041 didwork = True
5042 5042
5043 5043 if show:
5044 5044 if nostatus:
5045 5045 ui.write("%s\n" % f)
5046 5046 else:
5047 5047 ui.write("%s %s\n" % (ms[f].upper(), f),
5048 5048 label='resolve.' +
5049 5049 {'u': 'unresolved', 'r': 'resolved'}[ms[f]])
5050 5050 elif mark:
5051 5051 ms.mark(f, "r")
5052 5052 elif unmark:
5053 5053 ms.mark(f, "u")
5054 5054 else:
5055 5055 wctx = repo[None]
5056 5056
5057 5057 # backup pre-resolve (merge uses .orig for its own purposes)
5058 5058 a = repo.wjoin(f)
5059 5059 util.copyfile(a, a + ".resolve")
5060 5060
5061 5061 try:
5062 5062 # resolve file
5063 5063 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
5064 5064 'resolve')
5065 5065 if ms.resolve(f, wctx):
5066 5066 ret = 1
5067 5067 finally:
5068 5068 ui.setconfig('ui', 'forcemerge', '', 'resolve')
5069 5069 ms.commit()
5070 5070
5071 5071 # replace filemerge's .orig file with our resolve file
5072 5072 util.rename(a + ".resolve", a + ".orig")
5073 5073
5074 5074 ms.commit()
5075 5075
5076 5076 if not didwork and pats:
5077 5077 ui.warn(_("arguments do not match paths that need resolving\n"))
5078 5078
5079 5079 finally:
5080 5080 wlock.release()
5081 5081
5082 5082 # Nudge users into finishing an unfinished operation. We don't print
5083 5083 # this with the list/show operation because we want list/show to remain
5084 5084 # machine readable.
5085 5085 if not list(ms.unresolved()) and not show:
5086 5086 ui.status(_('(no more unresolved files)\n'))
5087 5087
5088 5088 return ret
5089 5089
5090 5090 @command('revert',
5091 5091 [('a', 'all', None, _('revert all changes when no arguments given')),
5092 5092 ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
5093 5093 ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
5094 5094 ('C', 'no-backup', None, _('do not save backup copies of files')),
5095 5095 ] + walkopts + dryrunopts,
5096 5096 _('[OPTION]... [-r REV] [NAME]...'))
5097 5097 def revert(ui, repo, *pats, **opts):
5098 5098 """restore files to their checkout state
5099 5099
5100 5100 .. note::
5101 5101
5102 5102 To check out earlier revisions, you should use :hg:`update REV`.
5103 5103 To cancel an uncommitted merge (and lose your changes),
5104 5104 use :hg:`update --clean .`.
5105 5105
5106 5106 With no revision specified, revert the specified files or directories
5107 5107 to the contents they had in the parent of the working directory.
5108 5108 This restores the contents of files to an unmodified
5109 5109 state and unschedules adds, removes, copies, and renames. If the
5110 5110 working directory has two parents, you must explicitly specify a
5111 5111 revision.
5112 5112
5113 5113 Using the -r/--rev or -d/--date options, revert the given files or
5114 5114 directories to their states as of a specific revision. Because
5115 5115 revert does not change the working directory parents, this will
5116 5116 cause these files to appear modified. This can be helpful to "back
5117 5117 out" some or all of an earlier change. See :hg:`backout` for a
5118 5118 related method.
5119 5119
5120 5120 Modified files are saved with a .orig suffix before reverting.
5121 5121 To disable these backups, use --no-backup.
5122 5122
5123 5123 See :hg:`help dates` for a list of formats valid for -d/--date.
5124 5124
5125 5125 Returns 0 on success.
5126 5126 """
5127 5127
5128 5128 if opts.get("date"):
5129 5129 if opts.get("rev"):
5130 5130 raise util.Abort(_("you can't specify a revision and a date"))
5131 5131 opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
5132 5132
5133 5133 parent, p2 = repo.dirstate.parents()
5134 5134 if not opts.get('rev') and p2 != nullid:
5135 5135 # revert after merge is a trap for new users (issue2915)
5136 5136 raise util.Abort(_('uncommitted merge with no revision specified'),
5137 5137 hint=_('use "hg update" or see "hg help revert"'))
5138 5138
5139 5139 ctx = scmutil.revsingle(repo, opts.get('rev'))
5140 5140
5141 5141 if not pats and not opts.get('all'):
5142 5142 msg = _("no files or directories specified")
5143 5143 if p2 != nullid:
5144 5144 hint = _("uncommitted merge, use --all to discard all changes,"
5145 5145 " or 'hg update -C .' to abort the merge")
5146 5146 raise util.Abort(msg, hint=hint)
5147 5147 dirty = util.any(repo.status())
5148 5148 node = ctx.node()
5149 5149 if node != parent:
5150 5150 if dirty:
5151 5151 hint = _("uncommitted changes, use --all to discard all"
5152 5152 " changes, or 'hg update %s' to update") % ctx.rev()
5153 5153 else:
5154 5154 hint = _("use --all to revert all files,"
5155 5155 " or 'hg update %s' to update") % ctx.rev()
5156 5156 elif dirty:
5157 5157 hint = _("uncommitted changes, use --all to discard all changes")
5158 5158 else:
5159 5159 hint = _("use --all to revert all files")
5160 5160 raise util.Abort(msg, hint=hint)
5161 5161
5162 5162 return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats, **opts)
5163 5163
5164 5164 @command('rollback', dryrunopts +
5165 5165 [('f', 'force', False, _('ignore safety measures'))])
5166 5166 def rollback(ui, repo, **opts):
5167 5167 """roll back the last transaction (DANGEROUS) (DEPRECATED)
5168 5168
5169 5169 Please use :hg:`commit --amend` instead of rollback to correct
5170 5170 mistakes in the last commit.
5171 5171
5172 5172 This command should be used with care. There is only one level of
5173 5173 rollback, and there is no way to undo a rollback. It will also
5174 5174 restore the dirstate at the time of the last transaction, losing
5175 5175 any dirstate changes since that time. This command does not alter
5176 5176 the working directory.
5177 5177
5178 5178 Transactions are used to encapsulate the effects of all commands
5179 5179 that create new changesets or propagate existing changesets into a
5180 5180 repository.
5181 5181
5182 5182 .. container:: verbose
5183 5183
5184 5184 For example, the following commands are transactional, and their
5185 5185 effects can be rolled back:
5186 5186
5187 5187 - commit
5188 5188 - import
5189 5189 - pull
5190 5190 - push (with this repository as the destination)
5191 5191 - unbundle
5192 5192
5193 5193     To avoid permanent data loss, rollback will refuse to roll back a
5194 5194 commit transaction if it isn't checked out. Use --force to
5195 5195 override this protection.
5196 5196
5197 5197 This command is not intended for use on public repositories. Once
5198 5198 changes are visible for pull by other users, rolling a transaction
5199 5199 back locally is ineffective (someone else may already have pulled
5200 5200 the changes). Furthermore, a race is possible with readers of the
5201 5201 repository; for example an in-progress pull from the repository
5202 5202 may fail if a rollback is performed.
5203 5203
5204 5204 Returns 0 on success, 1 if no rollback data is available.
5205 5205 """
5206 5206 return repo.rollback(dryrun=opts.get('dry_run'),
5207 5207 force=opts.get('force'))
5208 5208
5209 5209 @command('root', [])
5210 5210 def root(ui, repo):
5211 5211 """print the root (top) of the current working directory
5212 5212
5213 5213 Print the root directory of the current repository.
5214 5214
5215 5215 Returns 0 on success.
5216 5216 """
5217 5217 ui.write(repo.root + "\n")
5218 5218
5219 5219 @command('^serve',
5220 5220 [('A', 'accesslog', '', _('name of access log file to write to'),
5221 5221 _('FILE')),
5222 5222 ('d', 'daemon', None, _('run server in background')),
5223 5223 ('', 'daemon-pipefds', '', _('used internally by daemon mode'), _('NUM')),
5224 5224 ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
5225 5225 # use string type, then we can check if something was passed
5226 5226 ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
5227 5227 ('a', 'address', '', _('address to listen on (default: all interfaces)'),
5228 5228 _('ADDR')),
5229 5229 ('', 'prefix', '', _('prefix path to serve from (default: server root)'),
5230 5230 _('PREFIX')),
5231 5231 ('n', 'name', '',
5232 5232 _('name to show in web pages (default: working directory)'), _('NAME')),
5233 5233 ('', 'web-conf', '',
5234 5234 _('name of the hgweb config file (see "hg help hgweb")'), _('FILE')),
5235 5235 ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
5236 5236 _('FILE')),
5237 5237 ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
5238 5238 ('', 'stdio', None, _('for remote clients')),
5239 5239 ('', 'cmdserver', '', _('for remote clients'), _('MODE')),
5240 5240 ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
5241 5241 ('', 'style', '', _('template style to use'), _('STYLE')),
5242 5242 ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
5243 5243 ('', 'certificate', '', _('SSL certificate file'), _('FILE'))],
5244 5244 _('[OPTION]...'),
5245 5245 optionalrepo=True)
5246 5246 def serve(ui, repo, **opts):
5247 5247 """start stand-alone webserver
5248 5248
5249 5249 Start a local HTTP repository browser and pull server. You can use
5250 5250 this for ad-hoc sharing and browsing of repositories. It is
5251 5251 recommended to use a real web server to serve a repository for
5252 5252 longer periods of time.
5253 5253
5254 5254 Please note that the server does not implement access control.
5255 5255     This means that, by default, anybody can read from the server and
5256 5256     nobody can write to it. Set the ``web.allow_push``
5257 5257 option to ``*`` to allow everybody to push to the server. You
5258 5258 should use a real web server if you need to authenticate users.
5259 5259
5260 5260 By default, the server logs accesses to stdout and errors to
5261 5261 stderr. Use the -A/--accesslog and -E/--errorlog options to log to
5262 5262 files.
5263 5263
5264 5264 To have the server choose a free port number to listen on, specify
5265 5265 a port number of 0; in this case, the server will print the port
5266 5266 number it uses.
5267 5267
5268 5268 Returns 0 on success.
5269 5269 """
5270 5270
5271 5271 if opts["stdio"] and opts["cmdserver"]:
5272 5272 raise util.Abort(_("cannot use --stdio with --cmdserver"))
5273 5273
5274 5274 if opts["stdio"]:
5275 5275 if repo is None:
5276 5276 raise error.RepoError(_("there is no Mercurial repository here"
5277 5277 " (.hg not found)"))
5278 5278 s = sshserver.sshserver(ui, repo)
5279 5279 s.serve_forever()
5280 5280
5281 5281 if opts["cmdserver"]:
5282 5282 s = commandserver.server(ui, repo, opts["cmdserver"])
5283 5283 return s.serve()
5284 5284
5285 5285 # this way we can check if something was given in the command-line
5286 5286 if opts.get('port'):
5287 5287 opts['port'] = util.getport(opts.get('port'))
5288 5288
5289 5289 baseui = repo and repo.baseui or ui
5290 5290 optlist = ("name templates style address port prefix ipv6"
5291 5291 " accesslog errorlog certificate encoding")
5292 5292 for o in optlist.split():
5293 5293 val = opts.get(o, '')
5294 5294 if val in (None, ''): # should check against default options instead
5295 5295 continue
5296 5296 baseui.setconfig("web", o, val, 'serve')
5297 5297 if repo and repo.ui != baseui:
5298 5298 repo.ui.setconfig("web", o, val, 'serve')
5299 5299
5300 5300 o = opts.get('web_conf') or opts.get('webdir_conf')
5301 5301 if not o:
5302 5302 if not repo:
5303 5303 raise error.RepoError(_("there is no Mercurial repository"
5304 5304 " here (.hg not found)"))
5305 5305 o = repo
5306 5306
5307 5307 app = hgweb.hgweb(o, baseui=baseui)
5308 5308 service = httpservice(ui, app, opts)
5309 5309 cmdutil.service(opts, initfn=service.init, runfn=service.run)
5310 5310
5311 5311 class httpservice(object):
5312 5312 def __init__(self, ui, app, opts):
5313 5313 self.ui = ui
5314 5314 self.app = app
5315 5315 self.opts = opts
5316 5316
5317 5317 def init(self):
5318 5318 util.setsignalhandler()
5319 5319 self.httpd = hgweb_server.create_server(self.ui, self.app)
5320 5320
5321 5321 if self.opts['port'] and not self.ui.verbose:
5322 5322 return
5323 5323
5324 5324 if self.httpd.prefix:
5325 5325 prefix = self.httpd.prefix.strip('/') + '/'
5326 5326 else:
5327 5327 prefix = ''
5328 5328
5329 5329 port = ':%d' % self.httpd.port
5330 5330 if port == ':80':
5331 5331 port = ''
5332 5332
5333 5333 bindaddr = self.httpd.addr
5334 5334 if bindaddr == '0.0.0.0':
5335 5335 bindaddr = '*'
5336 5336 elif ':' in bindaddr: # IPv6
5337 5337 bindaddr = '[%s]' % bindaddr
5338 5338
5339 5339 fqaddr = self.httpd.fqaddr
5340 5340 if ':' in fqaddr:
5341 5341 fqaddr = '[%s]' % fqaddr
5342 5342 if self.opts['port']:
5343 5343 write = self.ui.status
5344 5344 else:
5345 5345 write = self.ui.write
5346 5346 write(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
5347 5347 (fqaddr, port, prefix, bindaddr, self.httpd.port))
5348 5348 self.ui.flush() # avoid buffering of status message
5349 5349
5350 5350 def run(self):
5351 5351 self.httpd.serve_forever()
5352 5352
5353 5353
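# Editorial note: a self-contained sketch (illustration only, not Mercurial
# API) of how httpservice.init() above formats its "listening at" banner:
# port 80 is omitted from the URL, a 0.0.0.0 bind address is shown as '*',
# and IPv6 literals are bracketed.
def _example_listen_banner(fqaddr, port, prefix, bindaddr):
    if ':' in fqaddr:                       # IPv6 literal in the URL
        fqaddr = '[%s]' % fqaddr
    shownport = '' if port == 80 else ':%d' % port
    if bindaddr == '0.0.0.0':
        bindaddr = '*'
    elif ':' in bindaddr:                   # IPv6 bind address
        bindaddr = '[%s]' % bindaddr
    prefix = prefix.strip('/') + '/' if prefix else ''
    return 'listening at http://%s%s/%s (bound to %s:%d)' % (
        fqaddr, shownport, prefix, bindaddr, port)

assert (_example_listen_banner('localhost', 8000, '', '0.0.0.0') ==
        'listening at http://localhost:8000/ (bound to *:8000)')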
5354 5354 @command('^status|st',
5355 5355 [('A', 'all', None, _('show status of all files')),
5356 5356 ('m', 'modified', None, _('show only modified files')),
5357 5357 ('a', 'added', None, _('show only added files')),
5358 5358 ('r', 'removed', None, _('show only removed files')),
5359 5359 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
5360 5360 ('c', 'clean', None, _('show only files without changes')),
5361 5361 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
5362 5362 ('i', 'ignored', None, _('show only ignored files')),
5363 5363 ('n', 'no-status', None, _('hide status prefix')),
5364 5364 ('C', 'copies', None, _('show source of copied files')),
5365 5365 ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
5366 5366 ('', 'rev', [], _('show difference from revision'), _('REV')),
5367 5367 ('', 'change', '', _('list the changed files of a revision'), _('REV')),
5368 5368 ] + walkopts + subrepoopts,
5369 5369 _('[OPTION]... [FILE]...'),
5370 5370 inferrepo=True)
5371 5371 def status(ui, repo, *pats, **opts):
5372 5372 """show changed files in the working directory
5373 5373
5374 5374 Show status of files in the repository. If names are given, only
5375 5375 files that match are shown. Files that are clean or ignored or
5376 5376     the source of a copy/move operation are not listed unless
5377 5377 -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
5378 5378 Unless options described with "show only ..." are given, the
5379 5379 options -mardu are used.
5380 5380
5381 5381 Option -q/--quiet hides untracked (unknown and ignored) files
5382 5382 unless explicitly requested with -u/--unknown or -i/--ignored.
5383 5383
5384 5384 .. note::
5385 5385
5386 5386 status may appear to disagree with diff if permissions have
5387 5387 changed or a merge has occurred. The standard diff format does
5388 5388 not report permission changes and diff only reports changes
5389 5389 relative to one merge parent.
5390 5390
5391 5391 If one revision is given, it is used as the base revision.
5392 5392 If two revisions are given, the differences between them are
5393 5393 shown. The --change option can also be used as a shortcut to list
5394 5394 the changed files of a revision from its first parent.
5395 5395
5396 5396 The codes used to show the status of files are::
5397 5397
5398 5398 M = modified
5399 5399 A = added
5400 5400 R = removed
5401 5401 C = clean
5402 5402 ! = missing (deleted by non-hg command, but still tracked)
5403 5403 ? = not tracked
5404 5404 I = ignored
5405 5405 = origin of the previous file (with --copies)
5406 5406
5407 5407 .. container:: verbose
5408 5408
5409 5409 Examples:
5410 5410
5411 5411 - show changes in the working directory relative to a
5412 5412 changeset::
5413 5413
5414 5414 hg status --rev 9353
5415 5415
5416 5416 - show all changes including copies in an existing changeset::
5417 5417
5418 5418 hg status --copies --change 9353
5419 5419
5420 5420 - get a NUL separated list of added files, suitable for xargs::
5421 5421
5422 5422 hg status -an0
5423 5423
5424 5424 Returns 0 on success.
5425 5425 """
5426 5426
5427 5427 revs = opts.get('rev')
5428 5428 change = opts.get('change')
5429 5429
5430 5430 if revs and change:
5431 5431 msg = _('cannot specify --rev and --change at the same time')
5432 5432 raise util.Abort(msg)
5433 5433 elif change:
5434 5434 node2 = scmutil.revsingle(repo, change, None).node()
5435 5435 node1 = repo[node2].p1().node()
5436 5436 else:
5437 5437 node1, node2 = scmutil.revpair(repo, revs)
5438 5438
5439 5439 cwd = (pats and repo.getcwd()) or ''
5440 5440 end = opts.get('print0') and '\0' or '\n'
5441 5441 copy = {}
5442 5442 states = 'modified added removed deleted unknown ignored clean'.split()
5443 5443 show = [k for k in states if opts.get(k)]
5444 5444 if opts.get('all'):
5445 5445 show += ui.quiet and (states[:4] + ['clean']) or states
5446 5446 if not show:
5447 5447 show = ui.quiet and states[:4] or states[:5]
5448 5448
5449 5449 stat = repo.status(node1, node2, scmutil.match(repo[node2], pats, opts),
5450 5450 'ignored' in show, 'clean' in show, 'unknown' in show,
5451 5451 opts.get('subrepos'))
5452 5452 changestates = zip(states, 'MAR!?IC', stat)
5453 5453
5454 5454 if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
5455 5455 copy = copies.pathcopies(repo[node1], repo[node2])
5456 5456
5457 5457 fm = ui.formatter('status', opts)
5458 5458 fmt = '%s' + end
5459 5459 showchar = not opts.get('no_status')
5460 5460
5461 5461 for state, char, files in changestates:
5462 5462 if state in show:
5463 5463 label = 'status.' + state
5464 5464 for f in files:
5465 5465 fm.startitem()
5466 5466 fm.condwrite(showchar, 'status', '%s ', char, label=label)
5467 5467 fm.write('path', fmt, repo.pathto(f, cwd), label=label)
5468 5468 if f in copy:
5469 5469 fm.write("copy", ' %s' + end, repo.pathto(copy[f], cwd),
5470 5470 label='status.copied')
5471 5471 fm.end()
5472 5472
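# Editorial note: an illustrative sketch (not Mercurial API) of how status()
# above decides which states to display: explicit "show only" flags win,
# --all adds every state, and otherwise the default is modified/added/removed/
# deleted plus unknown (-mardu), with --quiet dropping the unknown files.
def _example_states_to_show(opts, quiet=False):
    states = 'modified added removed deleted unknown ignored clean'.split()
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        show += states[:4] + ['clean'] if quiet else states
    if not show:
        show = states[:4] if quiet else states[:5]
    return show

# with no flags, plain "hg status" reports the -mardu states
assert _example_states_to_show({}) == \
    ['modified', 'added', 'removed', 'deleted', 'unknown']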
5473 5473 @command('^summary|sum',
5474 5474 [('', 'remote', None, _('check for push and pull'))], '[--remote]')
5475 5475 def summary(ui, repo, **opts):
5476 5476 """summarize working directory state
5477 5477
5478 5478 This generates a brief summary of the working directory state,
5479 5479 including parents, branch, commit status, and available updates.
5480 5480
5481 5481 With the --remote option, this will check the default paths for
5482 5482 incoming and outgoing changes. This can be time-consuming.
5483 5483
5484 5484 Returns 0 on success.
5485 5485 """
5486 5486
5487 5487 ctx = repo[None]
5488 5488 parents = ctx.parents()
5489 5489 pnode = parents[0].node()
5490 5490 marks = []
5491 5491
5492 5492 for p in parents:
5493 5493 # label with log.changeset (instead of log.parent) since this
5494 5494 # shows a working directory parent *changeset*:
5495 5495 # i18n: column positioning for "hg summary"
5496 5496 ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
5497 5497 label='log.changeset changeset.%s' % p.phasestr())
5498 5498 ui.write(' '.join(p.tags()), label='log.tag')
5499 5499 if p.bookmarks():
5500 5500 marks.extend(p.bookmarks())
5501 5501 if p.rev() == -1:
5502 5502 if not len(repo):
5503 5503 ui.write(_(' (empty repository)'))
5504 5504 else:
5505 5505 ui.write(_(' (no revision checked out)'))
5506 5506 ui.write('\n')
5507 5507 if p.description():
5508 5508 ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
5509 5509 label='log.summary')
5510 5510
5511 5511 branch = ctx.branch()
5512 5512 bheads = repo.branchheads(branch)
5513 5513 # i18n: column positioning for "hg summary"
5514 5514 m = _('branch: %s\n') % branch
5515 5515 if branch != 'default':
5516 5516 ui.write(m, label='log.branch')
5517 5517 else:
5518 5518 ui.status(m, label='log.branch')
5519 5519
5520 5520 if marks:
5521 5521 current = repo._bookmarkcurrent
5522 5522 # i18n: column positioning for "hg summary"
5523 5523 ui.write(_('bookmarks:'), label='log.bookmark')
5524 5524 if current is not None:
5525 5525 if current in marks:
5526 5526 ui.write(' *' + current, label='bookmarks.current')
5527 5527 marks.remove(current)
5528 5528 else:
5529 5529 ui.write(' [%s]' % current, label='bookmarks.current')
5530 5530 for m in marks:
5531 5531 ui.write(' ' + m, label='log.bookmark')
5532 5532 ui.write('\n', label='log.bookmark')
5533 5533
5534 5534 st = list(repo.status(unknown=True))[:6]
5535 5535
5536 5536 c = repo.dirstate.copies()
5537 5537 copied, renamed = [], []
5538 5538 for d, s in c.iteritems():
5539 5539 if s in st[2]:
5540 5540 st[2].remove(s)
5541 5541 renamed.append(d)
5542 5542 else:
5543 5543 copied.append(d)
5544 5544 if d in st[1]:
5545 5545 st[1].remove(d)
5546 5546 st.insert(3, renamed)
5547 5547 st.insert(4, copied)
5548 5548
5549 5549 ms = mergemod.mergestate(repo)
5550 5550 st.append([f for f in ms if ms[f] == 'u'])
5551 5551
5552 5552 subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
5553 5553 st.append(subs)
5554 5554
5555 5555 labels = [ui.label(_('%d modified'), 'status.modified'),
5556 5556 ui.label(_('%d added'), 'status.added'),
5557 5557 ui.label(_('%d removed'), 'status.removed'),
5558 5558 ui.label(_('%d renamed'), 'status.copied'),
5559 5559 ui.label(_('%d copied'), 'status.copied'),
5560 5560 ui.label(_('%d deleted'), 'status.deleted'),
5561 5561 ui.label(_('%d unknown'), 'status.unknown'),
5562 5562 ui.label(_('%d ignored'), 'status.ignored'),
5563 5563 ui.label(_('%d unresolved'), 'resolve.unresolved'),
5564 5564 ui.label(_('%d subrepos'), 'status.modified')]
5565 5565 t = []
5566 5566 for s, l in zip(st, labels):
5567 5567 if s:
5568 5568 t.append(l % len(s))
5569 5569
5570 5570 t = ', '.join(t)
5571 5571 cleanworkdir = False
5572 5572
5573 5573 if repo.vfs.exists('updatestate'):
5574 5574 t += _(' (interrupted update)')
5575 5575 elif len(parents) > 1:
5576 5576 t += _(' (merge)')
5577 5577 elif branch != parents[0].branch():
5578 5578 t += _(' (new branch)')
5579 5579 elif (parents[0].closesbranch() and
5580 5580 pnode in repo.branchheads(branch, closed=True)):
5581 5581 t += _(' (head closed)')
5582 5582 elif not (st[0] or st[1] or st[2] or st[3] or st[4] or st[9]):
5583 5583 t += _(' (clean)')
5584 5584 cleanworkdir = True
5585 5585 elif pnode not in bheads:
5586 5586 t += _(' (new branch head)')
5587 5587
5588 5588 if cleanworkdir:
5589 5589 # i18n: column positioning for "hg summary"
5590 5590 ui.status(_('commit: %s\n') % t.strip())
5591 5591 else:
5592 5592 # i18n: column positioning for "hg summary"
5593 5593 ui.write(_('commit: %s\n') % t.strip())
5594 5594
5595 5595 # all ancestors of branch heads - all ancestors of parent = new csets
5596 5596 new = len(repo.changelog.findmissing([ctx.node() for ctx in parents],
5597 5597 bheads))
5598 5598
5599 5599 if new == 0:
5600 5600 # i18n: column positioning for "hg summary"
5601 5601 ui.status(_('update: (current)\n'))
5602 5602 elif pnode not in bheads:
5603 5603 # i18n: column positioning for "hg summary"
5604 5604 ui.write(_('update: %d new changesets (update)\n') % new)
5605 5605 else:
5606 5606 # i18n: column positioning for "hg summary"
5607 5607 ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
5608 5608 (new, len(bheads)))
5609 5609
5610 5610 cmdutil.summaryhooks(ui, repo)
5611 5611
5612 5612 if opts.get('remote'):
5613 5613 needsincoming, needsoutgoing = True, True
5614 5614 else:
5615 5615 needsincoming, needsoutgoing = False, False
5616 5616 for i, o in cmdutil.summaryremotehooks(ui, repo, opts, None):
5617 5617 if i:
5618 5618 needsincoming = True
5619 5619 if o:
5620 5620 needsoutgoing = True
5621 5621 if not needsincoming and not needsoutgoing:
5622 5622 return
5623 5623
5624 5624 def getincoming():
5625 5625 source, branches = hg.parseurl(ui.expandpath('default'))
5626 5626 sbranch = branches[0]
5627 5627 try:
5628 5628 other = hg.peer(repo, {}, source)
5629 5629 except error.RepoError:
5630 5630 if opts.get('remote'):
5631 5631 raise
5632 5632 return source, sbranch, None, None, None
5633 5633 revs, checkout = hg.addbranchrevs(repo, other, branches, None)
5634 5634 if revs:
5635 5635 revs = [other.lookup(rev) for rev in revs]
5636 5636 ui.debug('comparing with %s\n' % util.hidepassword(source))
5637 5637 repo.ui.pushbuffer()
5638 5638 commoninc = discovery.findcommonincoming(repo, other, heads=revs)
5639 5639 repo.ui.popbuffer()
5640 5640 return source, sbranch, other, commoninc, commoninc[1]
5641 5641
5642 5642 if needsincoming:
5643 5643 source, sbranch, sother, commoninc, incoming = getincoming()
5644 5644 else:
5645 5645 source = sbranch = sother = commoninc = incoming = None
5646 5646
5647 5647 def getoutgoing():
5648 5648 dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
5649 5649 dbranch = branches[0]
5650 5650 revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
5651 5651 if source != dest:
5652 5652 try:
5653 5653 dother = hg.peer(repo, {}, dest)
5654 5654 except error.RepoError:
5655 5655 if opts.get('remote'):
5656 5656 raise
5657 5657 return dest, dbranch, None, None
5658 5658 ui.debug('comparing with %s\n' % util.hidepassword(dest))
5659 5659 elif sother is None:
5660 5660 # there is no explicit destination peer, but source one is invalid
5661 5661 return dest, dbranch, None, None
5662 5662 else:
5663 5663 dother = sother
5664 5664 if (source != dest or (sbranch is not None and sbranch != dbranch)):
5665 5665 common = None
5666 5666 else:
5667 5667 common = commoninc
5668 5668 if revs:
5669 5669 revs = [repo.lookup(rev) for rev in revs]
5670 5670 repo.ui.pushbuffer()
5671 5671 outgoing = discovery.findcommonoutgoing(repo, dother, onlyheads=revs,
5672 5672 commoninc=common)
5673 5673 repo.ui.popbuffer()
5674 5674 return dest, dbranch, dother, outgoing
5675 5675
5676 5676 if needsoutgoing:
5677 5677 dest, dbranch, dother, outgoing = getoutgoing()
5678 5678 else:
5679 5679 dest = dbranch = dother = outgoing = None
5680 5680
5681 5681 if opts.get('remote'):
5682 5682 t = []
5683 5683 if incoming:
5684 5684 t.append(_('1 or more incoming'))
5685 5685 o = outgoing.missing
5686 5686 if o:
5687 5687 t.append(_('%d outgoing') % len(o))
5688 5688 other = dother or sother
5689 5689 if 'bookmarks' in other.listkeys('namespaces'):
5690 5690 lmarks = repo.listkeys('bookmarks')
5691 5691 rmarks = other.listkeys('bookmarks')
5692 5692 diff = set(rmarks) - set(lmarks)
5693 5693 if len(diff) > 0:
5694 5694 t.append(_('%d incoming bookmarks') % len(diff))
5695 5695 diff = set(lmarks) - set(rmarks)
5696 5696 if len(diff) > 0:
5697 5697 t.append(_('%d outgoing bookmarks') % len(diff))
5698 5698
5699 5699 if t:
5700 5700 # i18n: column positioning for "hg summary"
5701 5701 ui.write(_('remote: %s\n') % (', '.join(t)))
5702 5702 else:
5703 5703 # i18n: column positioning for "hg summary"
5704 5704 ui.status(_('remote: (synced)\n'))
5705 5705
5706 5706 cmdutil.summaryremotehooks(ui, repo, opts,
5707 5707 ((source, sbranch, sother, commoninc),
5708 5708 (dest, dbranch, dother, outgoing)))
5709 5709
5710 5710 @command('tag',
5711 5711 [('f', 'force', None, _('force tag')),
5712 5712 ('l', 'local', None, _('make the tag local')),
5713 5713 ('r', 'rev', '', _('revision to tag'), _('REV')),
5714 5714 ('', 'remove', None, _('remove a tag')),
5715 5715 # -l/--local is already there, commitopts cannot be used
5716 5716 ('e', 'edit', None, _('invoke editor on commit messages')),
5717 5717 ('m', 'message', '', _('use text as commit message'), _('TEXT')),
5718 5718 ] + commitopts2,
5719 5719 _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
5720 5720 def tag(ui, repo, name1, *names, **opts):
5721 5721 """add one or more tags for the current or given revision
5722 5722
5723 5723 Name a particular revision using <name>.
5724 5724
5725 5725 Tags are used to name particular revisions of the repository and are
5726 5726 very useful to compare different revisions, to go back to significant
5727 5727 earlier versions or to mark branch points as releases, etc. Changing
5728 5728 an existing tag is normally disallowed; use -f/--force to override.
5729 5729
5730 5730 If no revision is given, the parent of the working directory is
5731 5731 used.
5732 5732
5733 5733 To facilitate version control, distribution, and merging of tags,
5734 5734 they are stored as a file named ".hgtags" which is managed similarly
5735 5735 to other project files and can be hand-edited if necessary. This
5736 5736 also means that tagging creates a new commit. The file
5737 5737 ".hg/localtags" is used for local tags (not shared among
5738 5738 repositories).
5739 5739
5740 5740 Tag commits are usually made at the head of a branch. If the parent
5741 5741 of the working directory is not a branch head, :hg:`tag` aborts; use
5742 5742 -f/--force to force the tag commit to be based on a non-head
5743 5743 changeset.
5744 5744
5745 5745 See :hg:`help dates` for a list of formats valid for -d/--date.
5746 5746
5747 5747 Since tag names have priority over branch names during revision
5748 5748 lookup, using an existing branch name as a tag name is discouraged.
5749 5749
5750 5750 Returns 0 on success.
5751 5751 """
5752 5752 wlock = lock = None
5753 5753 try:
5754 5754 wlock = repo.wlock()
5755 5755 lock = repo.lock()
5756 5756 rev_ = "."
5757 5757 names = [t.strip() for t in (name1,) + names]
5758 5758 if len(names) != len(set(names)):
5759 5759 raise util.Abort(_('tag names must be unique'))
5760 5760 for n in names:
5761 5761 scmutil.checknewlabel(repo, n, 'tag')
5762 5762 if not n:
5763 5763 raise util.Abort(_('tag names cannot consist entirely of '
5764 5764 'whitespace'))
5765 5765 if opts.get('rev') and opts.get('remove'):
5766 5766 raise util.Abort(_("--rev and --remove are incompatible"))
5767 5767 if opts.get('rev'):
5768 5768 rev_ = opts['rev']
5769 5769 message = opts.get('message')
5770 5770 if opts.get('remove'):
5771 5771 expectedtype = opts.get('local') and 'local' or 'global'
5772 5772 for n in names:
5773 5773 if not repo.tagtype(n):
5774 5774 raise util.Abort(_("tag '%s' does not exist") % n)
5775 5775 if repo.tagtype(n) != expectedtype:
5776 5776 if expectedtype == 'global':
5777 5777 raise util.Abort(_("tag '%s' is not a global tag") % n)
5778 5778 else:
5779 5779 raise util.Abort(_("tag '%s' is not a local tag") % n)
5780 5780 rev_ = nullid
5781 5781 if not message:
5782 5782 # we don't translate commit messages
5783 5783 message = 'Removed tag %s' % ', '.join(names)
5784 5784 elif not opts.get('force'):
5785 5785 for n in names:
5786 5786 if n in repo.tags():
5787 5787 raise util.Abort(_("tag '%s' already exists "
5788 5788 "(use -f to force)") % n)
5789 5789 if not opts.get('local'):
5790 5790 p1, p2 = repo.dirstate.parents()
5791 5791 if p2 != nullid:
5792 5792 raise util.Abort(_('uncommitted merge'))
5793 5793 bheads = repo.branchheads()
5794 5794 if not opts.get('force') and bheads and p1 not in bheads:
5795 5795 raise util.Abort(_('not at a branch head (use -f to force)'))
5796 5796 r = scmutil.revsingle(repo, rev_).node()
5797 5797
5798 5798 if not message:
5799 5799 # we don't translate commit messages
5800 5800 message = ('Added tag %s for changeset %s' %
5801 5801 (', '.join(names), short(r)))
5802 5802
5803 5803 date = opts.get('date')
5804 5804 if date:
5805 5805 date = util.parsedate(date)
5806 5806
5807 5807 if opts.get('remove'):
5808 5808 editform = 'tag.remove'
5809 5809 else:
5810 5810 editform = 'tag.add'
5811 5811 editor = cmdutil.getcommiteditor(editform=editform, **opts)
5812 5812
5813 5813 # don't allow tagging the null rev
5814 5814 if (not opts.get('remove') and
5815 5815 scmutil.revsingle(repo, rev_).rev() == nullrev):
5816 5816 raise util.Abort(_("cannot tag null revision"))
5817 5817
5818 5818 repo.tag(names, r, message, opts.get('local'), opts.get('user'), date,
5819 5819 editor=editor)
5820 5820 finally:
5821 5821 release(lock, wlock)
5822 5822
5823 5823 @command('tags', [], '')
5824 5824 def tags(ui, repo, **opts):
5825 5825 """list repository tags
5826 5826
5827 5827 This lists both regular and local tags. When the -v/--verbose
5828 5828 switch is used, a third column "local" is printed for local tags.
5829 5829
5830 5830 Returns 0 on success.
5831 5831 """
5832 5832
5833 5833 fm = ui.formatter('tags', opts)
5834 5834 hexfunc = ui.debugflag and hex or short
5835 5835 tagtype = ""
5836 5836
5837 5837 for t, n in reversed(repo.tagslist()):
5838 5838 hn = hexfunc(n)
5839 5839 label = 'tags.normal'
5840 5840 tagtype = ''
5841 5841 if repo.tagtype(t) == 'local':
5842 5842 label = 'tags.local'
5843 5843 tagtype = 'local'
5844 5844
5845 5845 fm.startitem()
5846 5846 fm.write('tag', '%s', t, label=label)
5847 5847 fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s'
5848 5848 fm.condwrite(not ui.quiet, 'rev id', fmt,
5849 5849 repo.changelog.rev(n), hn, label=label)
5850 5850 fm.condwrite(ui.verbose and tagtype, 'type', ' %s',
5851 5851 tagtype, label=label)
5852 5852 fm.plain('\n')
5853 5853 fm.end()
5854 5854
5855 5855 @command('tip',
5856 5856 [('p', 'patch', None, _('show patch')),
5857 5857 ('g', 'git', None, _('use git extended diff format')),
5858 5858 ] + templateopts,
5859 5859 _('[-p] [-g]'))
5860 5860 def tip(ui, repo, **opts):
5861 5861 """show the tip revision (DEPRECATED)
5862 5862
5863 5863 The tip revision (usually just called the tip) is the changeset
5864 5864 most recently added to the repository (and therefore the most
5865 5865 recently changed head).
5866 5866
5867 5867 If you have just made a commit, that commit will be the tip. If
5868 5868 you have just pulled changes from another repository, the tip of
5869 5869 that repository becomes the current tip. The "tip" tag is special
5870 5870 and cannot be renamed or assigned to a different changeset.
5871 5871
5872 5872 This command is deprecated, please use :hg:`heads` instead.
5873 5873
5874 5874 Returns 0 on success.
5875 5875 """
5876 5876 displayer = cmdutil.show_changeset(ui, repo, opts)
5877 5877 displayer.show(repo['tip'])
5878 5878 displayer.close()
5879 5879
5880 5880 @command('unbundle',
5881 5881 [('u', 'update', None,
5882 5882 _('update to new branch head if changesets were unbundled'))],
5883 5883 _('[-u] FILE...'))
5884 5884 def unbundle(ui, repo, fname1, *fnames, **opts):
5885 5885 """apply one or more changegroup files
5886 5886
5887 5887 Apply one or more compressed changegroup files generated by the
5888 5888 bundle command.
5889 5889
5890 5890 Returns 0 on success, 1 if an update has unresolved files.
5891 5891 """
5892 5892 fnames = (fname1,) + fnames
5893 5893
5894 5894 lock = repo.lock()
5895 5895 wc = repo['.']
5896 5896 try:
5897 5897 for fname in fnames:
5898 5898 f = hg.openpath(ui, fname)
5899 5899 gen = exchange.readbundle(ui, f, fname)
5900 5900 modheads = changegroup.addchangegroup(repo, gen, 'unbundle',
5901 5901 'bundle:' + fname)
5902 5902 finally:
5903 5903 lock.release()
5904 5904 bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch())
5905 5905 return postincoming(ui, repo, modheads, opts.get('update'), None)
5906 5906
5907 5907 @command('^update|up|checkout|co',
5908 5908 [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
5909 5909 ('c', 'check', None,
5910 5910 _('update across branches if no uncommitted changes')),
5911 5911 ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
5912 5912 ('r', 'rev', '', _('revision'), _('REV'))
5913 5913 ] + mergetoolopts,
5914 5914 _('[-c] [-C] [-d DATE] [[-r] REV]'))
5915 5915 def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False,
5916 5916 tool=None):
5917 5917 """update working directory (or switch revisions)
5918 5918
5919 5919 Update the repository's working directory to the specified
5920 5920 changeset. If no changeset is specified, update to the tip of the
5921 5921 current named branch and move the current bookmark (see :hg:`help
5922 5922 bookmarks`).
5923 5923
5924 5924 Update sets the working directory's parent revision to the specified
5925 5925 changeset (see :hg:`help parents`).
5926 5926
5927 5927 If the changeset is not a descendant or ancestor of the working
5928 5928 directory's parent, the update is aborted. With the -c/--check
5929 5929 option, the working directory is checked for uncommitted changes; if
5930 5930 none are found, the working directory is updated to the specified
5931 5931 changeset.
5932 5932
5933 5933 .. container:: verbose
5934 5934
5935 5935 The following rules apply when the working directory contains
5936 5936 uncommitted changes:
5937 5937
5938 5938 1. If neither -c/--check nor -C/--clean is specified, and if
5939 5939 the requested changeset is an ancestor or descendant of
5940 5940 the working directory's parent, the uncommitted changes
5941 5941 are merged into the requested changeset and the merged
5942 5942 result is left uncommitted. If the requested changeset is
5943 5943 not an ancestor or descendant (that is, it is on another
5944 5944 branch), the update is aborted and the uncommitted changes
5945 5945 are preserved.
5946 5946
5947 5947 2. With the -c/--check option, the update is aborted and the
5948 5948 uncommitted changes are preserved.
5949 5949
5950 5950 3. With the -C/--clean option, uncommitted changes are discarded and
5951 5951 the working directory is updated to the requested changeset.
5952 5952
5953 5953 To cancel an uncommitted merge (and lose your changes), use
5954 5954 :hg:`update --clean .`.
5955 5955
5956 5956 Use null as the changeset to remove the working directory (like
5957 5957 :hg:`clone -U`).
5958 5958
5959 5959 If you want to revert just one file to an older revision, use
5960 5960 :hg:`revert [-r REV] NAME`.
5961 5961
5962 5962 See :hg:`help dates` for a list of formats valid for -d/--date.
5963 5963
5964 5964 Returns 0 on success, 1 if there are unresolved files.
5965 5965 """
5966 5966 if rev and node:
5967 5967 raise util.Abort(_("please specify just one revision"))
5968 5968
5969 5969 if rev is None or rev == '':
5970 5970 rev = node
5971 5971
5972 5972 cmdutil.clearunfinished(repo)
5973 5973
5974 5974 # with no argument, we also move the current bookmark, if any
5975 5975 rev, movemarkfrom = bookmarks.calculateupdate(ui, repo, rev)
5976 5976
5977 5977 # if we defined a bookmark, we have to remember the original bookmark name
5978 5978 brev = rev
5979 5979 rev = scmutil.revsingle(repo, rev, rev).rev()
5980 5980
5981 5981 if check and clean:
5982 5982 raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))
5983 5983
5984 5984 if date:
5985 5985 if rev is not None:
5986 5986 raise util.Abort(_("you can't specify a revision and a date"))
5987 5987 rev = cmdutil.finddate(ui, repo, date)
5988 5988
5989 5989 if check:
5990 5990 c = repo[None]
5991 5991 if c.dirty(merge=False, branch=False, missing=True):
5992 5992 raise util.Abort(_("uncommitted changes"))
5993 5993 if rev is None:
5994 5994 rev = repo[repo[None].branch()].rev()
5995 5995 mergemod._checkunknown(repo, repo[None], repo[rev])
5996 5996
5997 5997 repo.ui.setconfig('ui', 'forcemerge', tool, 'update')
5998 5998
5999 5999 if clean:
6000 6000 ret = hg.clean(repo, rev)
6001 6001 else:
6002 6002 ret = hg.update(repo, rev)
6003 6003
6004 6004 if not ret and movemarkfrom:
6005 6005 if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
6006 6006 ui.status(_("updating bookmark %s\n") % repo._bookmarkcurrent)
6007 6007 elif brev in repo._bookmarks:
6008 6008 bookmarks.setcurrent(repo, brev)
6009 6009 ui.status(_("(activating bookmark %s)\n") % brev)
6010 6010 elif brev:
6011 6011 if repo._bookmarkcurrent:
6012 6012 ui.status(_("(leaving bookmark %s)\n") %
6013 6013 repo._bookmarkcurrent)
6014 6014 bookmarks.unsetcurrent(repo)
6015 6015
6016 6016 return ret
6017 6017
6018 6018 @command('verify', [])
6019 6019 def verify(ui, repo):
6020 6020 """verify the integrity of the repository
6021 6021
6022 6022 Verify the integrity of the current repository.
6023 6023
6024 6024 This will perform an extensive check of the repository's
6025 6025 integrity, validating the hashes and checksums of each entry in
6026 6026 the changelog, manifest, and tracked files, as well as the
6027 6027 integrity of their crosslinks and indices.
6028 6028
6029 6029 Please see http://mercurial.selenic.com/wiki/RepositoryCorruption
6030 6030 for more information about recovery from corruption of the
6031 6031 repository.
6032 6032
6033 6033 Returns 0 on success, 1 if errors are encountered.
6034 6034 """
6035 6035 return hg.verify(repo)
6036 6036
6037 6037 @command('version', [], norepo=True)
6038 6038 def version_(ui):
6039 6039 """output version and copyright information"""
6040 6040 ui.write(_("Mercurial Distributed SCM (version %s)\n")
6041 6041 % util.version())
6042 6042 ui.status(_(
6043 6043 "(see http://mercurial.selenic.com for more information)\n"
6044 6044 "\nCopyright (C) 2005-2014 Matt Mackall and others\n"
6045 6045 "This is free software; see the source for copying conditions. "
6046 6046 "There is NO\nwarranty; "
6047 6047 "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
6048 6048 ))
6049 6049
6050 6050 ui.note(_("\nEnabled extensions:\n\n"))
6051 6051 if ui.verbose:
6052 6052 # format names and versions into columns
6053 6053 names = []
6054 6054 vers = []
6055 6055 for name, module in extensions.extensions():
6056 6056 names.append(name)
6057 6057 vers.append(extensions.moduleversion(module))
6058 6058 if names:
6059 6059 maxnamelen = max(len(n) for n in names)
6060 6060 for i, name in enumerate(names):
6061 6061 ui.write(" %-*s %s\n" % (maxnamelen, name, vers[i]))
@@ -1,981 +1,983 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13 13
14 14 def readbundle(ui, fh, fname, vfs=None):
15 15 header = changegroup.readexactly(fh, 4)
16 16
17 17 alg = None
18 18 if not fname:
19 19 fname = "stream"
20 20 if not header.startswith('HG') and header.startswith('\0'):
21 21 fh = changegroup.headerlessfixup(fh, header)
22 22 header = "HG10"
23 23 alg = 'UN'
24 24 elif vfs:
25 25 fname = vfs.join(fname)
26 26
27 27 magic, version = header[0:2], header[2:4]
28 28
29 29 if magic != 'HG':
30 30 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
31 31 if version == '10':
32 32 if alg is None:
33 33 alg = changegroup.readexactly(fh, 2)
34 34 return changegroup.unbundle10(fh, alg)
35 35 elif version == '2X':
36 36 return bundle2.unbundle20(ui, fh, header=magic + version)
37 37 else:
38 38 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
40 40
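# Editorial note: a minimal sketch of the header layout readbundle() expects,
# for illustration only: two bytes of magic ('HG'), a two-byte version, and
# for version '10' a two-byte compression algorithm (UN, BZ or GZ). Headerless
# streams are wrapped and treated as uncompressed HG10 by the code above; the
# helper below just decodes an already-read header string.
def _example_parse_bundle_header(header):
    magic, version = header[0:2], header[2:4]
    if magic != 'HG':
        raise ValueError('not a Mercurial bundle')
    if version == '10':
        return 'changegroup-1', header[4:6]      # compression algorithm
    elif version == '2X':
        return 'bundle2-experimental', None      # handled by bundle2.unbundle20
    raise ValueError('unknown bundle version %s' % version)

assert _example_parse_bundle_header('HG10UN') == ('changegroup-1', 'UN')
assert _example_parse_bundle_header('HG2X')[0] == 'bundle2-experimental'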
41 41 class pushoperation(object):
42 42     """An object that represents a single push operation
43 43
44 44     Its purpose is to carry push-related state and very common operations.
45 45
46 46     A new one should be created at the beginning of each push and discarded
47 47     afterward.
48 48 """
49 49
50 50 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
51 51 # repo we push from
52 52 self.repo = repo
53 53 self.ui = repo.ui
54 54 # repo we push to
55 55 self.remote = remote
56 56 # force option provided
57 57 self.force = force
58 58 # revs to be pushed (None is "all")
59 59 self.revs = revs
60 60 # allow push of new branch
61 61 self.newbranch = newbranch
62 62 # did a local lock get acquired?
63 63 self.locallocked = None
64 64         # steps already performed
65 65         # (used to check what steps have already been performed through bundle2)
66 66 self.stepsdone = set()
67 67 # Integer version of the push result
68 68 # - None means nothing to push
69 69 # - 0 means HTTP error
70 70 # - 1 means we pushed and remote head count is unchanged *or*
71 71 # we have outgoing changesets but refused to push
72 72 # - other values as described by addchangegroup()
73 73 self.ret = None
74 74         # discovery.outgoing object (contains common and outgoing data)
75 75 self.outgoing = None
76 76 # all remote heads before the push
77 77 self.remoteheads = None
78 78 # testable as a boolean indicating if any nodes are missing locally.
79 79 self.incoming = None
80 80 # phases changes that must be pushed along side the changesets
81 81 self.outdatedphases = None
82 82 # phases changes that must be pushed if changeset push fails
83 83 self.fallbackoutdatedphases = None
84 84 # outgoing obsmarkers
85 85 self.outobsmarkers = set()
86 86
87 87 @util.propertycache
88 88 def futureheads(self):
89 89 """future remote heads if the changeset push succeeds"""
90 90 return self.outgoing.missingheads
91 91
92 92 @util.propertycache
93 93 def fallbackheads(self):
94 94 """future remote heads if the changeset push fails"""
95 95 if self.revs is None:
96 96             # no target to push, all common heads are relevant
97 97 return self.outgoing.commonheads
98 98 unfi = self.repo.unfiltered()
99 99 # I want cheads = heads(::missingheads and ::commonheads)
100 100 # (missingheads is revs with secret changeset filtered out)
101 101 #
102 102 # This can be expressed as:
103 103 # cheads = ( (missingheads and ::commonheads)
104 104 # + (commonheads and ::missingheads))"
105 105 # )
106 106 #
107 107 # while trying to push we already computed the following:
108 108 # common = (::commonheads)
109 109 # missing = ((commonheads::missingheads) - commonheads)
110 110 #
111 111 # We can pick:
112 112 # * missingheads part of common (::commonheads)
113 113 common = set(self.outgoing.common)
114 114 nm = self.repo.changelog.nodemap
115 115 cheads = [node for node in self.revs if nm[node] in common]
116 116 # and
117 117 # * commonheads parents on missing
118 118 revset = unfi.set('%ln and parents(roots(%ln))',
119 119 self.outgoing.commonheads,
120 120 self.outgoing.missing)
121 121 cheads.extend(c.node() for c in revset)
122 122 return cheads
123 123
124 124 @property
125 125 def commonheads(self):
126 126 """set of all common heads after changeset bundle push"""
127 127 if self.ret:
128 128 return self.futureheads
129 129 else:
130 130 return self.fallbackheads
131 131
132 132 def push(repo, remote, force=False, revs=None, newbranch=False):
133 133 '''Push outgoing changesets (limited by revs) from a local
134 134 repository to remote. Return an integer:
135 135 - None means nothing to push
136 136 - 0 means HTTP error
137 137 - 1 means we pushed and remote head count is unchanged *or*
138 138 we have outgoing changesets but refused to push
139 139 - other values as described by addchangegroup()
140 140 '''
141 141 pushop = pushoperation(repo, remote, force, revs, newbranch)
142 142 if pushop.remote.local():
143 143 missing = (set(pushop.repo.requirements)
144 144 - pushop.remote.local().supported)
145 145 if missing:
146 146 msg = _("required features are not"
147 147 " supported in the destination:"
148 148 " %s") % (', '.join(sorted(missing)))
149 149 raise util.Abort(msg)
150 150
151 151 # there are two ways to push to remote repo:
152 152 #
153 153 # addchangegroup assumes local user can lock remote
154 154 # repo (local filesystem, old ssh servers).
155 155 #
156 156 # unbundle assumes local user cannot lock remote repo (new ssh
157 157 # servers, http servers).
158 158
159 159 if not pushop.remote.canpush():
160 160 raise util.Abort(_("destination does not support push"))
161 161 # get local lock as we might write phase data
162 162 locallock = None
163 163 try:
164 164 locallock = pushop.repo.lock()
165 165 pushop.locallocked = True
166 166 except IOError, err:
167 167 pushop.locallocked = False
168 168 if err.errno != errno.EACCES:
169 169 raise
170 170 # source repo cannot be locked.
171 171 # We do not abort the push, but just disable the local phase
172 172 # synchronisation.
173 173 msg = 'cannot lock source repository: %s\n' % err
174 174 pushop.ui.debug(msg)
175 175 try:
176 176 pushop.repo.checkpush(pushop)
177 177 lock = None
178 178 unbundle = pushop.remote.capable('unbundle')
179 179 if not unbundle:
180 180 lock = pushop.remote.lock()
181 181 try:
182 182 _pushdiscovery(pushop)
183 183 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
184 184 False)
185 185 and pushop.remote.capable('bundle2-exp')):
186 186 _pushbundle2(pushop)
187 187 _pushchangeset(pushop)
188 188 _pushsyncphase(pushop)
189 189 _pushobsolete(pushop)
190 190 finally:
191 191 if lock is not None:
192 192 lock.release()
193 193 finally:
194 194 if locallock is not None:
195 195 locallock.release()
196 196
197 197 _pushbookmark(pushop)
198 198 return pushop.ret
199 199
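As a hedged illustration of this entry point, an extension command could drive push() roughly as below; ``mypush`` and the ``dest`` argument are hypothetical, while hg.peer() and exchange.push() are the functions defined in Mercurial and in this module::

    from mercurial import hg, exchange

    def mypush(ui, repo, dest):
        remote = hg.peer(repo, {}, dest)   # open a peer for the destination
        # push everything; returns the integer described in the docstring above
        return exchange.push(repo, remote, force=False, revs=None,
                             newbranch=False)
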
200 200 # list of steps to perform discovery before push
201 201 pushdiscoveryorder = []
202 202
203 203 # Mapping between step name and function
204 204 #
205 205 # This exists to help extensions wrap steps if necessary
206 206 pushdiscoverymapping = {}
207 207
208 208 def pushdiscovery(stepname):
209 209 """decorator for functions performing discovery before push
210 210
211 211 The function is added to the step -> function mapping and appended to the
212 212 list of steps. Beware that decorated functions will be added in order (this
213 213 may matter).
214 214
215 215 You can only use this decorator for a new step; if you want to wrap a step
216 216 from an extension, change the pushdiscoverymapping dictionary directly."""
217 217 def dec(func):
218 218 assert stepname not in pushdiscoverymapping
219 219 pushdiscoverymapping[stepname] = func
220 220 pushdiscoveryorder.append(stepname)
221 221 return func
222 222 return dec
223 223
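As an illustrative sketch of the registration pattern described in the decorator's docstring, an extension could add its own discovery step as below; the step name 'mydata', the 'myext' config section and the attribute stored on pushop are all made up::

    from mercurial import exchange

    @exchange.pushdiscovery('mydata')
    def _pushdiscoverymydata(pushop):
        # stash extension-specific state on the push operation for later steps
        pushop.mydata = pushop.repo.ui.config('myext', 'data')
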
224 224 def _pushdiscovery(pushop):
225 225 """Run all discovery steps"""
226 226 for stepname in pushdiscoveryorder:
227 227 step = pushdiscoverymapping[stepname]
228 228 step(pushop)
229 229
230 230 @pushdiscovery('changeset')
231 231 def _pushdiscoverychangeset(pushop):
232 232 """discover the changesets that need to be pushed"""
233 233 unfi = pushop.repo.unfiltered()
234 234 fci = discovery.findcommonincoming
235 235 commoninc = fci(unfi, pushop.remote, force=pushop.force)
236 236 common, inc, remoteheads = commoninc
237 237 fco = discovery.findcommonoutgoing
238 238 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
239 239 commoninc=commoninc, force=pushop.force)
240 240 pushop.outgoing = outgoing
241 241 pushop.remoteheads = remoteheads
242 242 pushop.incoming = inc
243 243
244 244 @pushdiscovery('phase')
245 245 def _pushdiscoveryphase(pushop):
246 246 """discover the phases that need to be pushed
247 247
248 248 (computed for both the success and failure cases of the changeset push)"""
249 249 outgoing = pushop.outgoing
250 250 unfi = pushop.repo.unfiltered()
251 251 remotephases = pushop.remote.listkeys('phases')
252 252 publishing = remotephases.get('publishing', False)
253 253 ana = phases.analyzeremotephases(pushop.repo,
254 254 pushop.fallbackheads,
255 255 remotephases)
256 256 pheads, droots = ana
257 257 extracond = ''
258 258 if not publishing:
259 259 extracond = ' and public()'
260 260 revset = 'heads((%%ln::%%ln) %s)' % extracond
261 261 # Get the list of all revs that are draft on remote but public here.
262 262 # XXX Beware that the revset breaks if droots is not strictly
263 263 # XXX roots; we may want to ensure it is, but that is costly
264 264 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
265 265 if not outgoing.missing:
266 266 future = fallback
267 267 else:
268 268 # add the changesets we are going to push as draft
269 269 #
270 270 # should not be necessary for a publishing server, but because of an
271 271 # issue fixed in xxxxx we have to do it anyway.
272 272 fdroots = list(unfi.set('roots(%ln + %ln::)',
273 273 outgoing.missing, droots))
274 274 fdroots = [f.node() for f in fdroots]
275 275 future = list(unfi.set(revset, fdroots, pushop.futureheads))
276 276 pushop.outdatedphases = future
277 277 pushop.fallbackoutdatedphases = fallback
278 278
279 279 @pushdiscovery('obsmarker')
280 280 def _pushdiscoveryobsmarkers(pushop):
281 281 pushop.outobsmarkers = pushop.repo.obsstore
282 282
283 283 def _pushcheckoutgoing(pushop):
284 284 outgoing = pushop.outgoing
285 285 unfi = pushop.repo.unfiltered()
286 286 if not outgoing.missing:
287 287 # nothing to push
288 288 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
289 289 return False
290 290 # something to push
291 291 if not pushop.force:
292 292 # if repo.obsstore is empty --> no obsolete changesets,
293 293 # so we can save the iteration
294 294 if unfi.obsstore:
295 295 # these messages are here for 80-char limit reasons
296 296 mso = _("push includes obsolete changeset: %s!")
297 297 mst = "push includes %s changeset: %s!"
298 298 # plain versions for i18n tool to detect them
299 299 _("push includes unstable changeset: %s!")
300 300 _("push includes bumped changeset: %s!")
301 301 _("push includes divergent changeset: %s!")
302 302 # If we are going to push and there is at least one
303 303 # obsolete or unstable changeset in missing, at
304 304 # least one of the missing heads will be obsolete or
305 305 # unstable. So checking heads only is ok.
306 306 for node in outgoing.missingheads:
307 307 ctx = unfi[node]
308 308 if ctx.obsolete():
309 309 raise util.Abort(mso % ctx)
310 310 elif ctx.troubled():
311 311 raise util.Abort(_(mst)
312 312 % (ctx.troubles()[0],
313 313 ctx))
314 314 newbm = pushop.ui.configlist('bookmarks', 'pushing')
315 315 discovery.checkheads(unfi, pushop.remote, outgoing,
316 316 pushop.remoteheads,
317 317 pushop.newbranch,
318 318 bool(pushop.incoming),
319 319 newbm)
320 320 return True
321 321
322 322 # List of names of steps to perform for an outgoing bundle2, order matters.
323 323 b2partsgenorder = []
324 324
325 325 # Mapping between step name and function
326 326 #
327 327 # This exists to help extensions wrap steps if necessary
328 328 b2partsgenmapping = {}
329 329
330 330 def b2partsgenerator(stepname):
331 331 """decorator for function generating bundle2 part
332 332
333 333 The function is added to the step -> function mapping and appended to the
334 334 list of steps. Beware that decorated functions will be added in order
335 335 (this may matter).
336 336
337 337 You can only use this decorator for new steps; if you want to wrap a step
338 338 from an extension, change the b2partsgenmapping dictionary directly."""
339 339 def dec(func):
340 340 assert stepname not in b2partsgenmapping
341 341 b2partsgenmapping[stepname] = func
342 342 b2partsgenorder.append(stepname)
343 343 return func
344 344 return dec
345 345
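For illustration only, an extension-defined part generator following the same pattern as _pushb2ctx and _pushb2phases below might look like this; the 'mypart' step name and 'b2x:mypart' part type are invented, and the server would need a matching part handler::

    from mercurial import exchange

    @exchange.b2partsgenerator('mypart')
    def _pushb2mypart(pushop, bundler):
        if 'mypart' in pushop.stepsdone:
            return
        pushop.stepsdone.add('mypart')
        part = bundler.newpart('b2x:mypart', data='payload')
        def handlereply(op):
            # inspect op.records.getreplies(part.id) for the server's answer
            pass
        return handlereply
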
346 346 @b2partsgenerator('changeset')
347 347 def _pushb2ctx(pushop, bundler):
348 348 """handle changegroup push through bundle2
349 349
350 350 addchangegroup result is stored in the ``pushop.ret`` attribute.
351 351 """
352 352 if 'changesets' in pushop.stepsdone:
353 353 return
354 354 pushop.stepsdone.add('changesets')
355 355 # Send known heads to the server for race detection
356 356 # (done via the B2X:CHECK:HEADS part below).
357 357 if not _pushcheckoutgoing(pushop):
358 358 return
359 359 pushop.repo.prepushoutgoinghooks(pushop.repo,
360 360 pushop.remote,
361 361 pushop.outgoing)
362 362 if not pushop.force:
363 363 bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
364 364 cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
365 365 cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
366 366 def handlereply(op):
367 367 """extract addchangegroup returns from server reply"""
368 368 cgreplies = op.records.getreplies(cgpart.id)
369 369 assert len(cgreplies['changegroup']) == 1
370 370 pushop.ret = cgreplies['changegroup'][0]['return']
371 371 return handlereply
372 372
373 373 @b2partsgenerator('phase')
374 374 def _pushb2phases(pushop, bundler):
375 375 """handle phase push through bundle2"""
376 376 if 'phases' in pushop.stepsdone:
377 377 return
378 378 b2caps = bundle2.bundle2caps(pushop.remote)
379 379 if 'b2x:pushkey' not in b2caps:
380 380 return
381 381 pushop.stepsdone.add('phases')
382 382 part2node = []
383 383 enc = pushkey.encode
384 384 for newremotehead in pushop.outdatedphases:
385 385 part = bundler.newpart('b2x:pushkey')
386 386 part.addparam('namespace', enc('phases'))
387 387 part.addparam('key', enc(newremotehead.hex()))
388 388 part.addparam('old', enc(str(phases.draft)))
389 389 part.addparam('new', enc(str(phases.public)))
390 390 part2node.append((part.id, newremotehead))
391 391 def handlereply(op):
392 392 for partid, node in part2node:
393 393 partrep = op.records.getreplies(partid)
394 394 results = partrep['pushkey']
395 395 assert len(results) <= 1
396 396 msg = None
397 397 if not results:
398 398 msg = _('server ignored update of %s to public!\n') % node
399 399 elif not int(results[0]['return']):
400 400 msg = _('updating %s to public failed!\n') % node
401 401 if msg is not None:
402 402 pushop.ui.warn(msg)
403 403 return handlereply
404 404
405 405 def _pushbundle2(pushop):
406 406 """push data to the remote using bundle2
407 407
408 408 The only currently supported type of data is changegroup but this will
409 409 evolve in the future."""
410 410 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
411 411 # create reply capability
412 412 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
413 413 bundler.newpart('b2x:replycaps', data=capsblob)
414 414 replyhandlers = []
415 415 for partgenname in b2partsgenorder:
416 416 partgen = b2partsgenmapping[partgenname]
417 417 ret = partgen(pushop, bundler)
418 418 if callable(ret):
419 419 replyhandlers.append(ret)
420 420 # do not push if nothing to push
421 421 if bundler.nbparts <= 1:
422 422 return
423 423 stream = util.chunkbuffer(bundler.getchunks())
424 424 try:
425 425 reply = pushop.remote.unbundle(stream, ['force'], 'push')
426 426 except error.BundleValueError, exc:
427 427 raise util.Abort('missing support for %s' % exc)
428 428 try:
429 429 op = bundle2.processbundle(pushop.repo, reply)
430 430 except error.BundleValueError, exc:
431 431 raise util.Abort('missing support for %s' % exc)
432 432 for rephand in replyhandlers:
433 433 rephand(op)
434 434
435 435 def _pushchangeset(pushop):
436 436 """Make the actual push of changeset bundle to remote repo"""
437 437 if 'changesets' in pushop.stepsdone:
438 438 return
439 439 pushop.stepsdone.add('changesets')
440 440 if not _pushcheckoutgoing(pushop):
441 441 return
442 442 pushop.repo.prepushoutgoinghooks(pushop.repo,
443 443 pushop.remote,
444 444 pushop.outgoing)
445 445 outgoing = pushop.outgoing
446 446 unbundle = pushop.remote.capable('unbundle')
447 447 # TODO: get bundlecaps from remote
448 448 bundlecaps = None
449 449 # create a changegroup from local
450 450 if pushop.revs is None and not (outgoing.excluded
451 451 or pushop.repo.changelog.filteredrevs):
452 452 # push everything,
453 453 # use the fast path, no race possible on push
454 454 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
455 455 cg = changegroup.getsubset(pushop.repo,
456 456 outgoing,
457 457 bundler,
458 458 'push',
459 459 fastpath=True)
460 460 else:
461 461 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
462 462 bundlecaps)
463 463
464 464 # apply changegroup to remote
465 465 if unbundle:
466 466 # local repo finds heads on server, finds out what
467 467 # revs it must push. once revs transferred, if server
468 468 # finds it has different heads (someone else won
469 469 # commit/push race), server aborts.
470 470 if pushop.force:
471 471 remoteheads = ['force']
472 472 else:
473 473 remoteheads = pushop.remoteheads
474 474 # ssh: return remote's addchangegroup()
475 475 # http: return remote's addchangegroup() or 0 for error
476 476 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
477 477 pushop.repo.url())
478 478 else:
479 479 # we return an integer indicating remote head count
480 480 # change
481 481 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
482 482
483 483 def _pushsyncphase(pushop):
484 484 """synchronise phase information locally and remotely"""
485 485 cheads = pushop.commonheads
486 486 # even when we don't push, exchanging phase data is useful
487 487 remotephases = pushop.remote.listkeys('phases')
488 488 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
489 489 and remotephases # server supports phases
490 490 and pushop.ret is None # nothing was pushed
491 491 and remotephases.get('publishing', False)):
492 492 # When:
493 493 # - this is a subrepo push
494 494 # - and the remote supports phases
495 495 # - and no changeset was pushed
496 496 # - and the remote is publishing
497 497 # We may be in the issue 3871 case!
498 498 # We drop the possible phase synchronisation done by
499 499 # courtesy to publish changesets possibly locally draft
500 500 # on the remote.
501 501 remotephases = {'publishing': 'True'}
502 502 if not remotephases: # old server or public only reply from non-publishing
503 503 _localphasemove(pushop, cheads)
504 504 # don't push any phase data as there is nothing to push
505 505 else:
506 506 ana = phases.analyzeremotephases(pushop.repo, cheads,
507 507 remotephases)
508 508 pheads, droots = ana
509 509 ### Apply remote phase on local
510 510 if remotephases.get('publishing', False):
511 511 _localphasemove(pushop, cheads)
512 512 else: # publish = False
513 513 _localphasemove(pushop, pheads)
514 514 _localphasemove(pushop, cheads, phases.draft)
515 515 ### Apply local phase on remote
516 516
517 517 if pushop.ret:
518 518 if 'phases' in pushop.stepsdone:
519 519 # phases already pushed through bundle2
520 520 return
521 521 outdated = pushop.outdatedphases
522 522 else:
523 523 outdated = pushop.fallbackoutdatedphases
524 524
525 525 pushop.stepsdone.add('phases')
526 526
527 527 # filter heads already turned public by the push
528 528 outdated = [c for c in outdated if c.node() not in pheads]
529 529 b2caps = bundle2.bundle2caps(pushop.remote)
530 530 if 'b2x:pushkey' in b2caps:
531 531 # server supports bundle2, let's do a batched push through it
532 532 #
533 533 # This will eventually be unified with the changesets bundle2 push
534 534 bundler = bundle2.bundle20(pushop.ui, b2caps)
535 535 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
536 536 bundler.newpart('b2x:replycaps', data=capsblob)
537 537 part2node = []
538 538 enc = pushkey.encode
539 539 for newremotehead in outdated:
540 540 part = bundler.newpart('b2x:pushkey')
541 541 part.addparam('namespace', enc('phases'))
542 542 part.addparam('key', enc(newremotehead.hex()))
543 543 part.addparam('old', enc(str(phases.draft)))
544 544 part.addparam('new', enc(str(phases.public)))
545 545 part2node.append((part.id, newremotehead))
546 546 stream = util.chunkbuffer(bundler.getchunks())
547 547 try:
548 548 reply = pushop.remote.unbundle(stream, ['force'], 'push')
549 549 op = bundle2.processbundle(pushop.repo, reply)
550 550 except error.BundleValueError, exc:
551 551 raise util.Abort('missing support for %s' % exc)
552 552 for partid, node in part2node:
553 553 partrep = op.records.getreplies(partid)
554 554 results = partrep['pushkey']
555 555 assert len(results) <= 1
556 556 msg = None
557 557 if not results:
558 558 msg = _('server ignored update of %s to public!\n') % node
559 559 elif not int(results[0]['return']):
560 560 msg = _('updating %s to public failed!\n') % node
561 561 if msg is not None:
562 562 pushop.ui.warn(msg)
563 563
564 564 else:
565 565 # fall back to independent pushkey command
566 566 for newremotehead in outdated:
567 567 r = pushop.remote.pushkey('phases',
568 568 newremotehead.hex(),
569 569 str(phases.draft),
570 570 str(phases.public))
571 571 if not r:
572 572 pushop.ui.warn(_('updating %s to public failed!\n')
573 573 % newremotehead)
574 574
575 575 def _localphasemove(pushop, nodes, phase=phases.public):
576 576 """move <nodes> to <phase> in the local source repo"""
577 577 if pushop.locallocked:
578 578 tr = pushop.repo.transaction('push-phase-sync')
579 579 try:
580 phases.advanceboundary(pushop.repo, phase, nodes)
580 phases.advanceboundary(pushop.repo, tr, phase, nodes)
581 581 tr.close()
582 582 finally:
583 583 tr.release()
584 584 else:
585 585 # repo is not locked, do not change any phases!
586 586 # Informs the user that phases should have been moved when
587 587 # applicable.
588 588 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
589 589 phasestr = phases.phasenames[phase]
590 590 if actualmoves:
591 591 pushop.ui.status(_('cannot lock source repo, skipping '
592 592 'local %s phase update\n') % phasestr)
593 593
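The function above uses the calling convention introduced by this changeset: advanceboundary() now runs under an explicit transaction. A minimal standalone sketch of the same pattern, with ``makepublic`` as a hypothetical helper::

    from mercurial import phases

    def makepublic(repo, nodes):
        lock = repo.lock()
        try:
            tr = repo.transaction('make-public')
            try:
                phases.advanceboundary(repo, tr, phases.public, nodes)
                tr.close()
            finally:
                tr.release()
        finally:
            lock.release()
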
594 594 def _pushobsolete(pushop):
595 595 """utility function to push obsolete markers to a remote"""
596 596 if 'obsmarkers' in pushop.stepsdone:
597 597 return
598 598 pushop.ui.debug('try to push obsolete markers to remote\n')
599 599 repo = pushop.repo
600 600 remote = pushop.remote
601 601 pushop.stepsdone.add('obsmarkers')
602 602 if (obsolete._enabled and repo.obsstore and
603 603 'obsolete' in remote.listkeys('namespaces')):
604 604 rslts = []
605 605 remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
606 606 for key in sorted(remotedata, reverse=True):
607 607 # reverse sort to ensure we end with dump0
608 608 data = remotedata[key]
609 609 rslts.append(remote.pushkey('obsolete', key, '', data))
610 610 if [r for r in rslts if not r]:
611 611 msg = _('failed to push some obsolete markers!\n')
612 612 repo.ui.warn(msg)
613 613
614 614 def _pushbookmark(pushop):
615 615 """Update bookmark position on remote"""
616 616 ui = pushop.ui
617 617 repo = pushop.repo.unfiltered()
618 618 remote = pushop.remote
619 619 ui.debug("checking for updated bookmarks\n")
620 620 revnums = map(repo.changelog.rev, pushop.revs or [])
621 621 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
622 622 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
623 623 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
624 624 srchex=hex)
625 625
626 626 for b, scid, dcid in advsrc:
627 627 if ancestors and repo[scid].rev() not in ancestors:
628 628 continue
629 629 if remote.pushkey('bookmarks', b, dcid, scid):
630 630 ui.status(_("updating bookmark %s\n") % b)
631 631 else:
632 632 ui.warn(_('updating bookmark %s failed!\n') % b)
633 633
634 634 class pulloperation(object):
635 635 """An object that represents a single pull operation
636 636
637 637 Its purpose is to carry pull-related state and very common operations.
638 638
639 639 A new one should be created at the beginning of each pull and discarded
640 640 afterward.
641 641 """
642 642
643 643 def __init__(self, repo, remote, heads=None, force=False):
644 644 # repo we pull into
645 645 self.repo = repo
646 646 # repo we pull from
647 647 self.remote = remote
648 648 # revision we try to pull (None is "all")
649 649 self.heads = heads
650 650 # do we force pull?
651 651 self.force = force
652 652 # the name of the pull transaction
653 653 self._trname = 'pull\n' + util.hidepassword(remote.url())
654 654 # hold the transaction once created
655 655 self._tr = None
656 656 # set of common changesets between local and remote before pull
657 657 self.common = None
658 658 # set of pulled heads
659 659 self.rheads = None
660 660 # list of missing changesets to fetch remotely
661 661 self.fetch = None
662 662 # result of changegroup pulling (used as return code by pull)
663 663 self.cgresult = None
664 664 # list of steps remaining to do (related to future bundle2 usage)
665 665 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
666 666
667 667 @util.propertycache
668 668 def pulledsubset(self):
669 669 """heads of the set of changesets targeted by the pull"""
670 670 # compute target subset
671 671 if self.heads is None:
672 672 # We pulled everything possible
673 673 # sync on everything common
674 674 c = set(self.common)
675 675 ret = list(self.common)
676 676 for n in self.rheads:
677 677 if n not in c:
678 678 ret.append(n)
679 679 return ret
680 680 else:
681 681 # We pulled a specific subset
682 682 # sync on this subset
683 683 return self.heads
684 684
685 685 def gettransaction(self):
686 686 """get appropriate pull transaction, creating it if needed"""
687 687 if self._tr is None:
688 688 self._tr = self.repo.transaction(self._trname)
689 689 return self._tr
690 690
691 691 def closetransaction(self):
692 692 """close transaction if created"""
693 693 if self._tr is not None:
694 694 self._tr.close()
695 695
696 696 def releasetransaction(self):
697 697 """release transaction if created"""
698 698 if self._tr is not None:
699 699 self._tr.release()
700 700
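The transaction helpers above are meant to be used lazily by the individual pull steps: a step only opens the transaction once it actually has something to write, and pull() below closes or releases it. A hypothetical step following that pattern ('mynamespace' is a made-up listkeys namespace)::

    def _pullmydata(pullop):
        data = pullop.remote.listkeys('mynamespace')
        if data:
            # created on first use, then reused; closed/released by pull()
            tr = pullop.gettransaction()
            # ... write the received data under tr ...
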
701 701 def pull(repo, remote, heads=None, force=False):
702 702 pullop = pulloperation(repo, remote, heads, force)
703 703 if pullop.remote.local():
704 704 missing = set(pullop.remote.requirements) - pullop.repo.supported
705 705 if missing:
706 706 msg = _("required features are not"
707 707 " supported in the destination:"
708 708 " %s") % (', '.join(sorted(missing)))
709 709 raise util.Abort(msg)
710 710
711 711 lock = pullop.repo.lock()
712 712 try:
713 713 _pulldiscovery(pullop)
714 714 if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
715 715 and pullop.remote.capable('bundle2-exp')):
716 716 _pullbundle2(pullop)
717 717 if 'changegroup' in pullop.todosteps:
718 718 _pullchangeset(pullop)
719 719 if 'phases' in pullop.todosteps:
720 720 _pullphase(pullop)
721 721 if 'obsmarkers' in pullop.todosteps:
722 722 _pullobsolete(pullop)
723 723 pullop.closetransaction()
724 724 finally:
725 725 pullop.releasetransaction()
726 726 lock.release()
727 727
728 728 return pullop.cgresult
729 729
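Mirroring the push example earlier, a hedged sketch of driving pull() from an extension; ``mypull`` and the ``source`` argument are hypothetical::

    from mercurial import hg, exchange

    def mypull(ui, repo, source):
        remote = hg.peer(repo, {}, source)   # open a peer for the source
        # pull everything available; returns the changegroup result code
        return exchange.pull(repo, remote, heads=None, force=False)
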
730 730 def _pulldiscovery(pullop):
731 731 """discovery phase for the pull
732 732
733 733 Currently handles changeset discovery only; will change to handle all
734 734 discovery at some point."""
735 735 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
736 736 pullop.remote,
737 737 heads=pullop.heads,
738 738 force=pullop.force)
739 739 pullop.common, pullop.fetch, pullop.rheads = tmp
740 740
741 741 def _pullbundle2(pullop):
742 742 """pull data using bundle2
743 743
744 744 For now, the only supported data is the changegroup."""
745 745 remotecaps = bundle2.bundle2caps(pullop.remote)
746 746 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
747 747 # pulling changegroup
748 748 pullop.todosteps.remove('changegroup')
749 749
750 750 kwargs['common'] = pullop.common
751 751 kwargs['heads'] = pullop.heads or pullop.rheads
752 752 if 'b2x:listkeys' in remotecaps:
753 753 kwargs['listkeys'] = ['phase']
754 754 if not pullop.fetch:
755 755 pullop.repo.ui.status(_("no changes found\n"))
756 756 pullop.cgresult = 0
757 757 else:
758 758 if pullop.heads is None and list(pullop.common) == [nullid]:
759 759 pullop.repo.ui.status(_("requesting all changes\n"))
760 760 _pullbundle2extraprepare(pullop, kwargs)
761 761 if kwargs.keys() == ['format']:
762 762 return # nothing to pull
763 763 bundle = pullop.remote.getbundle('pull', **kwargs)
764 764 try:
765 765 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
766 766 except error.BundleValueError, exc:
767 767 raise util.Abort('missing support for %s' % exc)
768 768
769 769 if pullop.fetch:
770 770 assert len(op.records['changegroup']) == 1
771 771 pullop.cgresult = op.records['changegroup'][0]['return']
772 772
773 773 # processing phases change
774 774 for namespace, value in op.records['listkeys']:
775 775 if namespace == 'phases':
776 776 _pullapplyphases(pullop, value)
777 777
778 778 def _pullbundle2extraprepare(pullop, kwargs):
779 779 """hook function so that extensions can extend the getbundle call"""
780 780 pass
781 781
782 782 def _pullchangeset(pullop):
783 783 """pull changeset from unbundle into the local repo"""
784 784 # We delay opening the transaction as late as possible so we
785 785 # don't open a transaction for nothing and don't break future useful
786 786 # rollback calls
787 787 pullop.todosteps.remove('changegroup')
788 788 if not pullop.fetch:
789 789 pullop.repo.ui.status(_("no changes found\n"))
790 790 pullop.cgresult = 0
791 791 return
792 792 pullop.gettransaction()
793 793 if pullop.heads is None and list(pullop.common) == [nullid]:
794 794 pullop.repo.ui.status(_("requesting all changes\n"))
795 795 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
796 796 # issue1320, avoid a race if remote changed after discovery
797 797 pullop.heads = pullop.rheads
798 798
799 799 if pullop.remote.capable('getbundle'):
800 800 # TODO: get bundlecaps from remote
801 801 cg = pullop.remote.getbundle('pull', common=pullop.common,
802 802 heads=pullop.heads or pullop.rheads)
803 803 elif pullop.heads is None:
804 804 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
805 805 elif not pullop.remote.capable('changegroupsubset'):
806 806 raise util.Abort(_("partial pull cannot be done because "
807 807 "other repository doesn't support "
808 808 "changegroupsubset."))
809 809 else:
810 810 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
811 811 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
812 812 pullop.remote.url())
813 813
814 814 def _pullphase(pullop):
815 815 # Get remote phases data from remote
816 816 remotephases = pullop.remote.listkeys('phases')
817 817 _pullapplyphases(pullop, remotephases)
818 818
819 819 def _pullapplyphases(pullop, remotephases):
820 820 """apply phase movement from observed remote state"""
821 821 pullop.todosteps.remove('phases')
822 822 publishing = bool(remotephases.get('publishing', False))
823 823 if remotephases and not publishing:
824 824 # remote is new and non-publishing
825 825 pheads, _dr = phases.analyzeremotephases(pullop.repo,
826 826 pullop.pulledsubset,
827 827 remotephases)
828 828 dheads = pullop.pulledsubset
829 829 else:
830 830 # Remote is old or publishing; all common changesets
831 831 # should be seen as public
832 832 pheads = pullop.pulledsubset
833 833 dheads = []
834 834 unfi = pullop.repo.unfiltered()
835 835 phase = unfi._phasecache.phase
836 836 rev = unfi.changelog.nodemap.get
837 837 public = phases.public
838 838 draft = phases.draft
839 839
840 840 # exclude changesets already public locally and update the others
841 841 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
842 842 if pheads:
843 phases.advanceboundary(pullop.repo, public, pheads)
843 tr = pullop.gettransaction()
844 phases.advanceboundary(pullop.repo, tr, public, pheads)
844 845
845 846 # exclude changesets already draft locally and update the others
846 847 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
847 848 if dheads:
848 phases.advanceboundary(pullop.repo, draft, dheads)
849 tr = pullop.gettransaction()
850 phases.advanceboundary(pullop.repo, tr, draft, dheads)
849 851
850 852 def _pullobsolete(pullop):
851 853 """utility function to pull obsolete markers from a remote
852 854
853 855 `gettransaction` is a function that returns the pull transaction, creating
854 856 one if necessary. We return the transaction to inform the calling code that
855 857 a new transaction has been created (when applicable).
856 858
857 859 Exists mostly to allow overriding for experimentation purposes."""
858 860 pullop.todosteps.remove('obsmarkers')
859 861 tr = None
860 862 if obsolete._enabled:
861 863 pullop.repo.ui.debug('fetching remote obsolete markers\n')
862 864 remoteobs = pullop.remote.listkeys('obsolete')
863 865 if 'dump0' in remoteobs:
864 866 tr = pullop.gettransaction()
865 867 for key in sorted(remoteobs, reverse=True):
866 868 if key.startswith('dump'):
867 869 data = base85.b85decode(remoteobs[key])
868 870 pullop.repo.obsstore.mergemarkers(tr, data)
869 871 pullop.repo.invalidatevolatilesets()
870 872 return tr
871 873
872 874 def caps20to10(repo):
873 875 """return a set with appropriate options to use bundle20 during getbundle"""
874 876 caps = set(['HG2X'])
875 877 capsblob = bundle2.encodecaps(repo.bundle2caps)
876 878 caps.add('bundle2=' + urllib.quote(capsblob))
877 879 return caps
878 880
879 881 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
880 882 **kwargs):
881 883 """return a full bundle (with potentially multiple kinds of parts)
882 884
883 885 Could be a bundle HG10 or a bundle HG2X depending on the bundlecaps
884 886 passed. For now, the bundle can contain only a changegroup, but this will
885 887 change when more part types become available for bundle2.
886 888
887 889 This is different from changegroup.getbundle, which only returns an HG10
888 890 changegroup bundle. They may eventually get reunited in the future when we
889 891 have a clearer idea of the API we want for querying different data.
890 892
891 893 The implementation is at a very early stage and will get massive rework
892 894 when the API of bundle is refined.
893 895 """
894 896 cg = None
895 897 if kwargs.get('cg', True):
896 898 # build changegroup bundle here.
897 899 cg = changegroup.getbundle(repo, source, heads=heads,
898 900 common=common, bundlecaps=bundlecaps)
899 901 elif 'HG2X' not in bundlecaps:
900 902 raise ValueError(_('request for bundle10 must include changegroup'))
901 903 if bundlecaps is None or 'HG2X' not in bundlecaps:
902 904 if kwargs:
903 905 raise ValueError(_('unsupported getbundle arguments: %s')
904 906 % ', '.join(sorted(kwargs.keys())))
905 907 return cg
906 908 # very crude first implementation,
907 909 # the bundle API will change and the generation will be done lazily.
908 910 b2caps = {}
909 911 for bcaps in bundlecaps:
910 912 if bcaps.startswith('bundle2='):
911 913 blob = urllib.unquote(bcaps[len('bundle2='):])
912 914 b2caps.update(bundle2.decodecaps(blob))
913 915 bundler = bundle2.bundle20(repo.ui, b2caps)
914 916 if cg:
915 917 bundler.newpart('b2x:changegroup', data=cg.getchunks())
916 918 listkeys = kwargs.get('listkeys', ())
917 919 for namespace in listkeys:
918 920 part = bundler.newpart('b2x:listkeys')
919 921 part.addparam('namespace', namespace)
920 922 keys = repo.listkeys(namespace).items()
921 923 part.data = pushkey.encodekeys(keys)
922 924 _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
923 925 bundlecaps=bundlecaps, **kwargs)
924 926 return util.chunkbuffer(bundler.getchunks())
925 927
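Seen from the client side, _pullbundle2 above assembles its request roughly as in this sketch, assuming an existing `repo` and a `remote` peer that advertises b2x:listkeys; the values shown simply request everything::

    from mercurial import exchange
    from mercurial.node import nullid

    kwargs = {'bundlecaps': exchange.caps20to10(repo),
              'common': [nullid],
              'heads': remote.heads(),
              'listkeys': ['phase']}
    bundle = remote.getbundle('pull', **kwargs)
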
926 928 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
927 929 bundlecaps=None, **kwargs):
928 930 """hook function to let extensions add parts to the requested bundle"""
929 931 pass
930 932
931 933 def check_heads(repo, their_heads, context):
932 934 """check if the heads of a repo have been modified
933 935
934 936 Used by peer for unbundling.
935 937 """
936 938 heads = repo.heads()
937 939 heads_hash = util.sha1(''.join(sorted(heads))).digest()
938 940 if not (their_heads == ['force'] or their_heads == heads or
939 941 their_heads == ['hashed', heads_hash]):
940 942 # someone else committed/pushed/unbundled while we
941 943 # were transferring data
942 944 raise error.PushRaced('repository changed while %s - '
943 945 'please try again' % context)
944 946
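For reference, the 'hashed' form accepted above can be produced as follows; ``hashedheads`` is a purely illustrative helper that mirrors the digest computed in check_heads::

    from mercurial import util

    def hashedheads(repo):
        heads = repo.heads()
        return ['hashed', util.sha1(''.join(sorted(heads))).digest()]
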
945 947 def unbundle(repo, cg, heads, source, url):
946 948 """Apply a bundle to a repo.
947 949
948 950 This function makes sure the repo is locked during the application and has
949 951 a mechanism to check that no push race occurred between the creation of the
950 952 bundle and its application.
951 953
952 954 If the push was raced, a PushRaced exception is raised."""
953 955 r = 0
954 956 # need a transaction when processing a bundle2 stream
955 957 tr = None
956 958 lock = repo.lock()
957 959 try:
958 960 check_heads(repo, heads, 'uploading changes')
959 961 # push can proceed
960 962 if util.safehasattr(cg, 'params'):
961 963 try:
962 964 tr = repo.transaction('unbundle')
963 965 tr.hookargs['bundle2-exp'] = '1'
964 966 r = bundle2.processbundle(repo, cg, lambda: tr).reply
965 967 cl = repo.unfiltered().changelog
966 968 p = cl.writepending() and repo.root or ""
967 969 repo.hook('b2x-pretransactionclose', throw=True, source=source,
968 970 url=url, pending=p, **tr.hookargs)
969 971 tr.close()
970 972 repo.hook('b2x-transactionclose', source=source, url=url,
971 973 **tr.hookargs)
972 974 except Exception, exc:
973 975 exc.duringunbundle2 = True
974 976 raise
975 977 else:
976 978 r = changegroup.addchangegroup(repo, cg, source, url)
977 979 finally:
978 980 if tr is not None:
979 981 tr.release()
980 982 lock.release()
981 983 return r
@@ -1,415 +1,415 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase is described
22 22 below, here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset can not be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set, we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of facts/rules defines the exchange of phases:
50 50
51 51 * old client never changes server states
52 52 * pull never changes server states
53 53 * changesets on publishing and old servers are seen as public by the client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60 server
61 61 old publish non-publish
62 62 N X N D P N D P
63 63 old client
64 64 pull
65 65 N - X/X - X/D X/P - X/D X/P
66 66 X - X/X - X/D X/P - X/D X/P
67 67 push
68 68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 69 new client
70 70 pull
71 71 N - P/X - P/D P/P - D/D P/P
72 72 D - P/X - P/D P/P - D/D P/P
73 73 P - P/X - P/D P/P - P/D P/P
74 74 push
75 75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 76 P P/X P/X P/P P/P P/P P/P P/P P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: old clients behave as a publishing server with draft-only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
103 103 import errno
104 104 from node import nullid, nullrev, bin, hex, short
105 105 from i18n import _
106 106 import util, error
107 107
108 108 allphases = public, draft, secret = range(3)
109 109 trackedphases = allphases[1:]
110 110 phasenames = ['public', 'draft', 'secret']
111 111
112 112 def _readroots(repo, phasedefaults=None):
113 113 """Read phase roots from disk
114 114
115 115 phasedefaults is a list of fn(repo, roots) callables, which are
116 116 executed if the phase roots file does not exist. When phases are
117 117 being initialized on an existing repository, this could be used to
118 118 set selected changesets' phase to something other than public.
119 119
120 120 Return (roots, dirty) where dirty is true if roots differ from
121 121 what is being stored.
122 122 """
123 123 repo = repo.unfiltered()
124 124 dirty = False
125 125 roots = [set() for i in allphases]
126 126 try:
127 127 f = repo.sopener('phaseroots')
128 128 try:
129 129 for line in f:
130 130 phase, nh = line.split()
131 131 roots[int(phase)].add(bin(nh))
132 132 finally:
133 133 f.close()
134 134 except IOError, inst:
135 135 if inst.errno != errno.ENOENT:
136 136 raise
137 137 if phasedefaults:
138 138 for f in phasedefaults:
139 139 roots = f(repo, roots)
140 140 dirty = True
141 141 return roots, dirty
142 142
143 143 class phasecache(object):
144 144 def __init__(self, repo, phasedefaults, _load=True):
145 145 if _load:
146 146 # Cheap trick to allow shallow-copy without copy module
147 147 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
148 148 self._phaserevs = None
149 149 self.filterunknown(repo)
150 150 self.opener = repo.sopener
151 151
152 152 def copy(self):
153 153 # Shallow copy meant to ensure isolation in
154 154 # advance/retractboundary(), nothing more.
155 155 ph = phasecache(None, None, _load=False)
156 156 ph.phaseroots = self.phaseroots[:]
157 157 ph.dirty = self.dirty
158 158 ph.opener = self.opener
159 159 ph._phaserevs = self._phaserevs
160 160 return ph
161 161
162 162 def replace(self, phcache):
163 163 for a in 'phaseroots dirty opener _phaserevs'.split():
164 164 setattr(self, a, getattr(phcache, a))
165 165
166 166 def getphaserevs(self, repo, rebuild=False):
167 167 if rebuild or self._phaserevs is None:
168 168 repo = repo.unfiltered()
169 169 revs = [public] * len(repo.changelog)
170 170 for phase in trackedphases:
171 171 roots = map(repo.changelog.rev, self.phaseroots[phase])
172 172 if roots:
173 173 for rev in roots:
174 174 revs[rev] = phase
175 175 for rev in repo.changelog.descendants(roots):
176 176 revs[rev] = phase
177 177 self._phaserevs = revs
178 178 return self._phaserevs
179 179
180 180 def phase(self, repo, rev):
181 181 # We need a repo argument here to be able to build _phaserevs
182 182 # if necessary. The repository instance is not stored in
183 183 # phasecache to avoid reference cycles. The changelog instance
184 184 # is not stored because it is a filecache() property and can
185 185 # be replaced without us being notified.
186 186 if rev == nullrev:
187 187 return public
188 188 if rev < nullrev:
189 189 raise ValueError(_('cannot lookup negative revision'))
190 190 if self._phaserevs is None or rev >= len(self._phaserevs):
191 191 self._phaserevs = self.getphaserevs(repo, rebuild=True)
192 192 return self._phaserevs[rev]
193 193
194 194 def write(self):
195 195 if not self.dirty:
196 196 return
197 197 f = self.opener('phaseroots', 'w', atomictemp=True)
198 198 try:
199 199 for phase, roots in enumerate(self.phaseroots):
200 200 for h in roots:
201 201 f.write('%i %s\n' % (phase, hex(h)))
202 202 finally:
203 203 f.close()
204 204 self.dirty = False
205 205
206 206 def _updateroots(self, phase, newroots):
207 207 self.phaseroots[phase] = newroots
208 208 self._phaserevs = None
209 209 self.dirty = True
210 210
211 def advanceboundary(self, repo, targetphase, nodes):
211 def advanceboundary(self, repo, tr, targetphase, nodes):
212 212 # Be careful to preserve shallow-copied values: do not update
213 213 # phaseroots values, replace them.
214 214
215 215 repo = repo.unfiltered()
216 216 delroots = [] # set of root deleted by this path
217 217 for phase in xrange(targetphase + 1, len(allphases)):
218 218 # filter nodes that are not in a compatible phase already
219 219 nodes = [n for n in nodes
220 220 if self.phase(repo, repo[n].rev()) >= phase]
221 221 if not nodes:
222 222 break # no roots to move anymore
223 223 olds = self.phaseroots[phase]
224 224 roots = set(ctx.node() for ctx in repo.set(
225 225 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
226 226 if olds != roots:
227 227 self._updateroots(phase, roots)
228 228 # some roots may need to be declared for lower phases
229 229 delroots.extend(olds - roots)
230 230 # declare deleted root in the target phase
231 231 if targetphase != 0:
232 232 self.retractboundary(repo, targetphase, delroots)
233 233 repo.invalidatevolatilesets()
234 234
235 235 def retractboundary(self, repo, targetphase, nodes):
236 236 # Be careful to preserve shallow-copied values: do not update
237 237 # phaseroots values, replace them.
238 238
239 239 repo = repo.unfiltered()
240 240 currentroots = self.phaseroots[targetphase]
241 241 newroots = [n for n in nodes
242 242 if self.phase(repo, repo[n].rev()) < targetphase]
243 243 if newroots:
244 244 if nullid in newroots:
245 245 raise util.Abort(_('cannot change null revision phase'))
246 246 currentroots = currentroots.copy()
247 247 currentroots.update(newroots)
248 248 ctxs = repo.set('roots(%ln::)', currentroots)
249 249 currentroots.intersection_update(ctx.node() for ctx in ctxs)
250 250 self._updateroots(targetphase, currentroots)
251 251 repo.invalidatevolatilesets()
252 252
253 253 def filterunknown(self, repo):
254 254 """remove unknown nodes from the phase boundary
255 255
256 256 Nothing is lost as unknown nodes only hold data for their descendants.
257 257 """
258 258 filtered = False
259 259 nodemap = repo.changelog.nodemap # to filter unknown nodes
260 260 for phase, nodes in enumerate(self.phaseroots):
261 261 missing = sorted(node for node in nodes if node not in nodemap)
262 262 if missing:
263 263 for mnode in missing:
264 264 repo.ui.debug(
265 265 'removing unknown node %s from %i-phase boundary\n'
266 266 % (short(mnode), phase))
267 267 nodes.symmetric_difference_update(missing)
268 268 filtered = True
269 269 if filtered:
270 270 self.dirty = True
271 271 # filterunknown is called by repo.destroyed; we may have no changes in
272 272 # roots, but the phaserevs contents are certainly invalid (or at least we
273 273 # have no proper way to check that). Related to issue 3858.
274 274 #
275 275 # The other caller is __init__, which has no _phaserevs initialized
276 276 # anyway. If this changes, we should consider adding a dedicated
277 277 # "destroyed" function to phasecache or a proper cache key mechanism
278 278 # (see the branchmap one)
279 279 self._phaserevs = None
280 280
281 def advanceboundary(repo, targetphase, nodes):
281 def advanceboundary(repo, tr, targetphase, nodes):
282 282 """Add nodes to a phase, changing other nodes' phases if necessary.
283 283
284 284 This function moves the boundary *forward*; this means that all nodes
285 285 are set in the target phase or kept in a *lower* phase.
286 286
287 287 Simplify the boundary to contain phase roots only."""
288 288 phcache = repo._phasecache.copy()
289 phcache.advanceboundary(repo, targetphase, nodes)
289 phcache.advanceboundary(repo, tr, targetphase, nodes)
290 290 repo._phasecache.replace(phcache)
291 291
292 292 def retractboundary(repo, targetphase, nodes):
293 293 """Set nodes back to a phase, changing other nodes' phases if
294 294 necessary.
295 295
296 296 This function moves the boundary *backward*; this means that all nodes
297 297 are set in the target phase or kept in a *higher* phase.
298 298
299 299 Simplify the boundary to contain phase roots only."""
300 300 phcache = repo._phasecache.copy()
301 301 phcache.retractboundary(repo, targetphase, nodes)
302 302 repo._phasecache.replace(phcache)
303 303
304 304 def listphases(repo):
305 305 """List phase roots for serialization over pushkey"""
306 306 keys = {}
307 307 value = '%i' % draft
308 308 for root in repo._phasecache.phaseroots[draft]:
309 309 keys[hex(root)] = value
310 310
311 311 if repo.ui.configbool('phases', 'publish', True):
312 312 # Add an extra entry to let the remote know we are a publishing
313 313 # repo. Publishing repos can't just pretend they are old repos.
314 314 # When pushing to a publishing repo, the client still needs to
315 315 # push the phase boundary.
316 316 #
317 317 # Push does not only push changesets. It also pushes phase data.
318 318 # New phase data may apply to common changesets which won't be
319 319 # pushed (as they are common). Here is a very simple example:
320 320 #
321 321 # 1) repo A pushes changeset X as draft to repo B
322 322 # 2) repo B makes changeset X public
323 323 # 3) repo B pushes to repo A. X is not pushed but the data that
324 324 # X is now public should be.
325 325 #
326 326 # The server can't handle it on its own as it has no idea of
327 327 # the client's phase data.
328 328 keys['publishing'] = 'True'
329 329 return keys
330 330
331 331 def pushphase(repo, nhex, oldphasestr, newphasestr):
332 332 """update the phase of a single node as requested over pushkey"""
333 333 repo = repo.unfiltered()
334 334 tr = None
335 335 lock = repo.lock()
336 336 try:
337 337 currentphase = repo[nhex].phase()
338 338 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
339 339 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
340 340 if currentphase == oldphase and newphase < oldphase:
341 341 tr = repo.transaction('pushkey-phase')
342 advanceboundary(repo, newphase, [bin(nhex)])
342 advanceboundary(repo, tr, newphase, [bin(nhex)])
343 343 tr.close()
344 344 return 1
345 345 elif currentphase == newphase:
346 346 # raced, but got correct result
347 347 return 1
348 348 else:
349 349 return 0
350 350 finally:
351 351 if tr:
352 352 tr.release()
353 353 lock.release()
354 354
355 355 def analyzeremotephases(repo, subset, roots):
356 356 """Compute phase heads and roots in a subset of nodes from a root dict
357 357
358 358 * subset is the heads of the subset
359 359 * roots is a {<nodeid> => phase} mapping. key and value are strings.
360 360
361 361 Accepts unknown elements in the input
362 362 """
363 363 repo = repo.unfiltered()
364 364 # build list from dictionary
365 365 draftroots = []
366 366 nodemap = repo.changelog.nodemap # to filter unknown nodes
367 367 for nhex, phase in roots.iteritems():
368 368 if nhex == 'publishing': # ignore data related to publish option
369 369 continue
370 370 node = bin(nhex)
371 371 phase = int(phase)
372 372 if phase == 0:
373 373 if node != nullid:
374 374 repo.ui.warn(_('ignoring inconsistent public root'
375 375 ' from remote: %s\n') % nhex)
376 376 elif phase == 1:
377 377 if node in nodemap:
378 378 draftroots.append(node)
379 379 else:
380 380 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
381 381 % (phase, nhex))
382 382 # compute heads
383 383 publicheads = newheads(repo, subset, draftroots)
384 384 return publicheads, draftroots
385 385
386 386 def newheads(repo, heads, roots):
387 387 """compute the new heads of a subset minus another
388 388
389 389 * `heads`: defines the first subset
390 390 * `roots`: defines the second one, which we subtract from the first"""
391 391 repo = repo.unfiltered()
392 392 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
393 393 heads, roots, roots, heads)
394 394 return [c.node() for c in revset]
395 395
396 396
397 397 def newcommitphase(ui):
398 398 """helper to get the target phase of a new commit
399 399
400 400 Handles all possible values for the phases.new-commit option.
401 401
402 402 """
403 403 v = ui.config('phases', 'new-commit', draft)
404 404 try:
405 405 return phasenames.index(v)
406 406 except ValueError:
407 407 try:
408 408 return int(v)
409 409 except ValueError:
410 410 msg = _("phases.new-commit: not a valid phase name ('%s')")
411 411 raise error.ConfigError(msg % v)
412 412
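A small illustrative check of how the option above is resolved, assuming an existing `repo` object; valid values are 'public', 'draft', 'secret', or an integer index::

    from mercurial import phases

    repo.ui.setconfig('phases', 'new-commit', 'secret')
    assert phases.newcommitphase(repo.ui) == phases.secret
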
413 413 def hassecret(repo):
414 414 """utility function that checks if a repo has any secret changesets."""
415 415 return bool(repo._phasecache.phaseroots[2])