##// END OF EJS Templates
localrepo: move the addchangegroup method in changegroup module...
Pierre-Yves David -
r20933:d3775db7 default
parent child Browse files
Show More
@@ -1,695 +1,696 b''
1 1 # shelve.py - save/restore working directory state
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """save and restore changes to the working directory
9 9
10 10 The "hg shelve" command saves changes made to the working directory
11 11 and reverts those changes, resetting the working directory to a clean
12 12 state.
13 13
14 14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 15 shelve". Changes can be restored even after updating to a different
16 16 parent, in which case Mercurial's merge machinery will resolve any
17 17 conflicts if necessary.
18 18
19 19 You can have more than one shelved change outstanding at a time; each
20 20 shelved change has a distinct name. For details, see the help for "hg
21 21 shelve".
22 22 """
23 23
24 24 from mercurial.i18n import _
25 25 from mercurial.node import nullid, nullrev, bin, hex
26 26 from mercurial import changegroup, cmdutil, scmutil, phases, commands
27 27 from mercurial import error, hg, mdiff, merge, patch, repair, util
28 28 from mercurial import templatefilters, changegroup
29 29 from mercurial import lock as lockmod
30 30 from hgext import rebase
31 31 import errno
32 32
33 33 cmdtable = {}
34 34 command = cmdutil.command(cmdtable)
35 35 testedwith = 'internal'
36 36
class shelvedfile(object):
    """Helper for the file storing a single shelve

    Handles common functions on shelve files (.hg/.files/.patch) using
    the vfs layer"""
    def __init__(self, repo, name, filetype=None):
        # repo: the local repository the shelve belongs to
        # name: base name of the shelve
        # filetype: optional suffix ('hg', 'files' or 'patch'); when absent,
        # `name` is already the full file name (see cleanupcmd/listshelves)
        self.repo = repo
        self.name = name
        # all shelve files live under .hg/shelved/
        self.vfs = scmutil.vfs(repo.join('shelved'))
        if filetype:
            self.fname = name + '.' + filetype
        else:
            self.fname = name

    def exists(self):
        return self.vfs.exists(self.fname)

    def filename(self):
        # absolute path of the backing file
        return self.vfs.join(self.fname)

    def unlink(self):
        util.unlink(self.filename())

    def stat(self):
        return self.vfs.stat(self.fname)

    def opener(self, mode='rb'):
        """Open the backing file; abort with a user-facing message when the
        shelve does not exist, re-raise any other I/O error."""
        try:
            return self.vfs(self.fname, mode)
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            raise util.Abort(_("shelved change '%s' not found") % self.name)
class shelvedstate(object):
    """Handle persistence during unshelving operations.

    Handles saving and restoring a shelved state. Ensures that different
    versions of a shelved state are possible and handles them appropriately.

    On-disk format (.hg/shelvedstate), one field per line:
    version, shelve name, original wctx hex node, pending ctx hex node,
    space-separated dirstate parent hex nodes, space-separated strip nodes.
    """
    _version = 1
    _filename = 'shelvedstate'

    @classmethod
    def load(cls, repo):
        """Read the state file and return a populated shelvedstate object.

        Raises util.Abort on a version mismatch; propagates IOError if the
        file is missing (callers turn ENOENT into their own message).
        """
        fp = repo.opener(cls._filename)
        try:
            version = int(fp.readline().strip())

            if version != cls._version:
                raise util.Abort(_('this version of shelve is incompatible '
                                   'with the version used in this repo'))
            name = fp.readline().strip()
            wctx = fp.readline().strip()
            pendingctx = fp.readline().strip()
            parents = [bin(h) for h in fp.readline().split()]
            stripnodes = [bin(h) for h in fp.readline().split()]
        finally:
            fp.close()

        obj = cls()
        obj.name = name
        # resolve the stored hex nodes to changectx objects
        obj.wctx = repo[bin(wctx)]
        obj.pendingctx = repo[bin(pendingctx)]
        obj.parents = parents
        obj.stripnodes = stripnodes

        return obj

    @classmethod
    def save(cls, repo, name, originalwctx, pendingctx, stripnodes):
        """Write the interrupted-unshelve state file (format above)."""
        fp = repo.opener(cls._filename, 'wb')
        fp.write('%i\n' % cls._version)
        fp.write('%s\n' % name)
        fp.write('%s\n' % hex(originalwctx.node()))
        fp.write('%s\n' % hex(pendingctx.node()))
        fp.write('%s\n' % ' '.join([hex(p) for p in repo.dirstate.parents()]))
        fp.write('%s\n' % ' '.join([hex(n) for n in stripnodes]))
        fp.close()

    @classmethod
    def clear(cls, repo):
        # missing file is fine: clearing is idempotent
        util.unlinkpath(repo.join(cls._filename), ignoremissing=True)
120 120
def createcmd(ui, repo, pats, opts):
    """subcommand that creates a new shelve

    Commits the dirty working directory state as a secret changeset,
    writes it out as a bundle plus a patch and file list under
    .hg/shelved/, then aborts the transaction so the commit never
    becomes part of the repository. Returns 1 when nothing changed.
    """

    def publicancestors(ctx):
        """Compute the public ancestors of a commit.

        Much faster than the revset ancestors(ctx) & draft()"""
        seen = set([nullrev])
        visit = util.deque()
        visit.append(ctx)
        while visit:
            ctx = visit.popleft()
            yield ctx.node()
            for parent in ctx.parents():
                rev = parent.rev()
                if rev not in seen:
                    seen.add(rev)
                    # stop descending at the first public (immutable) ancestor
                    if parent.mutable():
                        visit.append(parent)

    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise util.Abort(_('cannot shelve while merging'))
    parent = parents[0]

    # we never need the user, so we use a generic user for all shelve operations
    user = 'shelve@localhost'
    label = repo._bookmarkcurrent or parent.branch() or 'default'

    # slashes aren't allowed in filenames, therefore we rename it
    origlabel, label = label, label.replace('/', '_')

    def gennames():
        # candidate shelve names: the label itself, then label-01 .. label-99
        yield label
        for i in xrange(1, 100):
            yield '%s-%02d' % (label, i)

    shelvedfiles = []

    def commitfunc(ui, repo, message, match, opts):
        # check modified, added, removed, deleted only
        for flist in repo.status(match=match)[:4]:
            shelvedfiles.extend(flist)
        # temporarily disable mq's "patches applied" guard if present
        hasmq = util.safehasattr(repo, 'mq')
        if hasmq:
            saved, repo.mq.checkapplied = repo.mq.checkapplied, False
        try:
            return repo.commit(message, user, opts.get('date'), match)
        finally:
            if hasmq:
                repo.mq.checkapplied = saved

    if parent.node() != nullid:
        desc = "changes to '%s'" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts['message']:
        opts['message'] = desc

    name = opts['name']

    wlock = lock = tr = bms = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        bms = repo._bookmarks.copy()
        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)

        if name:
            if shelvedfile(repo, name, 'hg').exists():
                raise util.Abort(_("a shelved change named '%s' already exists")
                                 % name)
        else:
            # pick the first free auto-generated name
            for n in gennames():
                if not shelvedfile(repo, n, 'hg').exists():
                    name = n
                    break
            else:
                raise util.Abort(_("too many shelved changes named '%s'") %
                                 label)

        # ensure we are not creating a subdirectory or a hidden file
        if '/' in name or '\\' in name:
            raise util.Abort(_('shelved change names may not contain slashes'))
        if name.startswith('.'):
            raise util.Abort(_("shelved change names may not start with '.'"))

        node = cmdutil.commit(ui, repo, commitfunc, pats, opts)

        if not node:
            # nothing was committed; report why and bail with status 1
            stat = repo.status(match=scmutil.match(repo[None], pats, opts))
            if stat[3]:
                ui.status(_("nothing changed (%d missing files, see "
                            "'hg status')\n") % len(stat[3]))
            else:
                ui.status(_("nothing changed\n"))
            return 1

        # keep the temporary commit out of sight of other commands
        phases.retractboundary(repo, phases.secret, [node])

        fp = shelvedfile(repo, name, 'files').opener('wb')
        fp.write('\0'.join(shelvedfiles))

        bases = list(publicancestors(repo[node]))
        cg = changegroup.changegroupsubset(repo, bases, [node], 'shelve')
        changegroup.writebundle(cg, shelvedfile(repo, name, 'hg').filename(),
                                'HG10UN')
        cmdutil.export(repo, [node],
                       fp=shelvedfile(repo, name, 'patch').opener('wb'),
                       opts=mdiff.diffopts(git=True))


        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)
        # revert the working directory to the pre-shelve parent
        hg.update(repo, parent.node())
    finally:
        if bms:
            # restore old bookmarks
            repo._bookmarks.update(bms)
            repo._bookmarks.write()
        if tr:
            # aborting the transaction discards the temporary commit
            tr.abort()
        lockmod.release(lock, wlock)
250 250
def cleanupcmd(ui, repo):
    """subcommand that deletes all shelves"""

    wlock = None
    try:
        wlock = repo.wlock()
        # NOTE(review): the tuple unpacking shadows the gettext `_` locally;
        # harmless here since `_` is not used below, but worth renaming.
        for (name, _) in repo.vfs.readdir('shelved'):
            # `name` already carries its suffix, so shelvedfile is built
            # without a filetype argument
            suffix = name.rsplit('.', 1)[-1]
            if suffix in ('hg', 'files', 'patch'):
                shelvedfile(repo, name).unlink()
    finally:
        lockmod.release(wlock)
263 263
def deletecmd(ui, repo, pats):
    """subcommand that deletes a specific shelve

    `pats` is the list of shelve names to remove; all three backing
    files (.hg/.files/.patch) are unlinked for each name. A missing
    shelve aborts with a user-facing message.
    """
    if not pats:
        raise util.Abort(_('no shelved changes specified!'))
    wlock = None
    try:
        wlock = repo.wlock()
        try:
            for name in pats:
                for suffix in 'hg files patch'.split():
                    shelvedfile(repo, name, suffix).unlink()
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            raise util.Abort(_("shelved change '%s' not found") % name)
    finally:
        lockmod.release(wlock)
281 281
def listshelves(repo):
    """return all shelves in repo as list of (time, filename)"""
    try:
        names = repo.vfs.readdir('shelved')
    except OSError, err:
        if err.errno != errno.ENOENT:
            raise
        # no shelved/ directory yet means no shelves
        return []
    info = []
    # NOTE(review): `_` here shadows gettext's `_` locally (unused below).
    for (name, _) in names:
        pfx, sfx = name.rsplit('.', 1)
        # use the .patch file's mtime as the shelve's timestamp
        if not pfx or sfx != 'patch':
            continue
        st = shelvedfile(repo, name).stat()
        info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
    # newest first
    return sorted(info, reverse=True)
298 298
def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves

    Prints name, age and first description line for each shelve (newest
    first, with a distinct label), optionally followed by the patch
    and/or a diffstat when --patch/--stat are given.
    """
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    # the first (newest) entry gets a different label for styling
    namelabel = 'shelve.newest'
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        namelabel = 'shelve.name'
        if ui.quiet:
            ui.write('\n')
            continue
        # fixed-width columns: 16 chars for the name, 12 for the age
        ui.write(' ' * (16 - len(sname)))
        used = 16
        age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
        ui.write(age, label='shelve.age')
        ui.write(' ' * (12 - len(age)))
        used += 12
        fp = open(name + '.patch', 'rb')
        try:
            # first non-comment line of the patch is the description
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith('#'):
                    desc = line.rstrip()
                    if ui.formatted():
                        desc = util.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write('\n')
            if not (opts['patch'] or opts['stat']):
                continue
            difflines = fp.readlines()
            if opts['patch']:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.write(chunk, label=label)
            if opts['stat']:
                for chunk, label in patch.diffstatui(difflines, width=width,
                                                     git=True):
                    ui.write(chunk, label=label)
        finally:
            fp.close()
346 346
def checkparents(repo, state):
    """check parent while resuming an unshelve"""
    recorded = state.parents
    current = repo.dirstate.parents()
    if recorded != current:
        raise util.Abort(_('working directory parents do not match unshelve '
                           'state'))
352 352
def pathtofiles(repo, files):
    """Translate repo-relative paths in `files` to cwd-relative paths."""
    base = repo.getcwd()
    translated = []
    for f in files:
        translated.append(repo.pathto(f, base))
    return translated
356 356
def unshelveabort(ui, repo, state, opts):
    """subcommand that abort an in-progress unshelve

    Aborts the underlying rebase, restores the user's pre-unshelve
    working copy, strips the temporary commits and clears the state
    file. The shelve itself is kept.
    """
    wlock = repo.wlock()
    lock = None
    try:
        checkparents(repo, state)

        # hand our saved rebase state back to the rebase extension so
        # `rebase --abort` can operate; restore the rename on failure
        util.rename(repo.join('unshelverebasestate'),
                    repo.join('rebasestate'))
        try:
            rebase.rebase(ui, repo, **{
                'abort' : True
            })
        except Exception:
            util.rename(repo.join('rebasestate'),
                        repo.join('unshelverebasestate'))
            raise

        lock = repo.lock()

        mergefiles(ui, repo, state.wctx, state.pendingctx)

        # remove the temporary commits created during the unshelve
        repair.strip(ui, repo, state.stripnodes, backup='none', topic='shelve')
        shelvedstate.clear(repo)
        ui.warn(_("unshelve of '%s' aborted\n") % state.name)
    finally:
        lockmod.release(lock, wlock)
384 384
def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate.

    Runs quietly: the user already saw the relevant status output.
    """
    oldquiet = ui.quiet
    try:
        ui.quiet = True
        hg.update(repo, wctx.node())
        files = []
        files.extend(shelvectx.files())
        files.extend(shelvectx.parents()[0].files())

        # revert will overwrite unknown files, so move them out of the way
        m, a, r, d, u = repo.status(unknown=True)[:5]
        for file in u:
            if file in files:
                util.rename(file, file + ".orig")
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
                       *pathtofiles(repo, files),
                       **{'no_backup': True})
    finally:
        ui.quiet = oldquiet
406 406
def unshelvecleanup(ui, repo, name, opts):
    """remove related files after an unshelve"""
    if opts['keep']:
        return
    for filetype in ('hg', 'files', 'patch'):
        shelvedfile(repo, name, filetype).unlink()
412 412
def unshelvecontinue(ui, repo, state, opts):
    """subcommand to continue an in-progress unshelve"""
    # We're finishing off a merge. First parent is our original
    # parent, second is the temporary "fake" commit we're unshelving.
    wlock = repo.wlock()
    lock = None
    try:
        checkparents(repo, state)
        ms = merge.mergestate(repo)
        # refuse to continue while any file is still unresolved
        if [f for f in ms if ms[f] == 'u']:
            raise util.Abort(
                _("unresolved conflicts, can't continue"),
                hint=_("see 'hg resolve', then 'hg unshelve --continue'"))

        lock = repo.lock()

        # hand our saved rebase state back so `rebase --continue` can run;
        # restore the rename if it blows up
        util.rename(repo.join('unshelverebasestate'),
                    repo.join('rebasestate'))
        try:
            rebase.rebase(ui, repo, **{
                'continue' : True
            })
        except Exception:
            util.rename(repo.join('rebasestate'),
                        repo.join('unshelverebasestate'))
            raise

        shelvectx = repo['tip']
        if not shelvectx in state.pendingctx.children():
            # rebase was a no-op, so it produced no child commit
            shelvectx = state.pendingctx

        mergefiles(ui, repo, state.wctx, shelvectx)

        # drop all temporary commits, including the rebased shelve commit
        state.stripnodes.append(shelvectx.node())
        repair.strip(ui, repo, state.stripnodes, backup='none', topic='shelve')
        shelvedstate.clear(repo)
        unshelvecleanup(ui, repo, state.name, opts)
        ui.status(_("unshelve of '%s' complete\n") % state.name)
    finally:
        lockmod.release(lock, wlock)
454 454
@command('unshelve',
         [('a', 'abort', None,
           _('abort an incomplete unshelve operation')),
          ('c', 'continue', None,
           _('continue an incomplete unshelve operation')),
          ('', 'keep', None,
           _('keep shelve after unshelving'))],
         _('hg unshelve [SHELVED]'))
def unshelve(ui, repo, *shelved, **opts):
    """restore a shelved change to the working directory

    This command accepts an optional name of a shelved change to
    restore. If none is given, the most recent shelved change is used.

    If a shelved change is applied successfully, the bundle that
    contains the shelved changes is deleted afterwards.

    Since you can restore a shelved change on top of an arbitrary
    commit, it is possible that unshelving will result in a conflict
    between your changes and the commits you are unshelving onto. If
    this occurs, you must resolve the conflict, then use
    ``--continue`` to complete the unshelve operation. (The bundle
    will not be deleted until you successfully complete the unshelve.)

    (Alternatively, you can use ``--abort`` to abandon an unshelve
    that causes a conflict. This reverts the unshelved changes, and
    does not delete the bundle.)
    """
    abortf = opts['abort']
    continuef = opts['continue']
    if not abortf and not continuef:
        cmdutil.checkunfinished(repo)

    # --abort/--continue resume a previously interrupted unshelve and
    # are handled entirely from the saved state file
    if abortf or continuef:
        if abortf and continuef:
            raise util.Abort(_('cannot use both abort and continue'))
        if shelved:
            raise util.Abort(_('cannot combine abort/continue with '
                               'naming a shelved change'))

        try:
            state = shelvedstate.load(repo)
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            raise util.Abort(_('no unshelve operation underway'))

        if abortf:
            return unshelveabort(ui, repo, state, opts)
        elif continuef:
            return unshelvecontinue(ui, repo, state, opts)
    elif len(shelved) > 1:
        raise util.Abort(_('can only unshelve one change at a time'))
    elif not shelved:
        # default to the most recent shelve
        shelved = listshelves(repo)
        if not shelved:
            raise util.Abort(_('no shelved changes to apply!'))
        basename = util.split(shelved[0][1])[1]
        ui.status(_("unshelving change '%s'\n") % basename)
    else:
        basename = shelved[0]

    if not shelvedfile(repo, basename, 'files').exists():
        raise util.Abort(_("shelved change '%s' not found") % basename)

    oldquiet = ui.quiet
    wlock = lock = tr = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # use an uncommitted transaction so that, on success or failure,
        # aborting it strips the commits pulled in from the bundle
        tr = repo.transaction('unshelve', report=lambda x: None)
        oldtiprev = len(repo)

        wctx = repo['.']
        tmpwctx = wctx
        # The goal is to have a commit structure like so:
        # ...-> wctx -> tmpwctx -> shelvectx
        # where tmpwctx is an optional commit with the user's pending changes
        # and shelvectx is the unshelved changes. Then we merge it all down
        # to the original wctx.

        # Store pending changes in a commit
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            ui.status(_("temporarily committing pending changes "
                        "(restore with 'hg unshelve --abort')\n"))
            def commitfunc(ui, repo, message, match, opts):
                # temporarily disable mq's "patches applied" guard if present
                hasmq = util.safehasattr(repo, 'mq')
                if hasmq:
                    saved, repo.mq.checkapplied = repo.mq.checkapplied, False

                try:
                    return repo.commit(message, 'shelve@localhost',
                                       opts.get('date'), match)
                finally:
                    if hasmq:
                        repo.mq.checkapplied = saved

            tempopts = {}
            tempopts['message'] = "pending changes temporary commit"
            ui.quiet = True
            node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
            tmpwctx = repo[node]

        try:
            ui.quiet = True
            # pull the shelved commit back in from its bundle
            fp = shelvedfile(repo, basename, 'hg').opener()
            gen = changegroup.readbundle(fp, fp.name)
            changegroup.addchangegroup(repo, gen, 'unshelve',
                                       'bundle:' + fp.name)
            # keep everything we just added in the secret phase
            nodes = [ctx.node() for ctx in repo.set('%d:', oldtiprev)]
            phases.retractboundary(repo, phases.secret, nodes)
        finally:
            fp.close()

        ui.quiet = oldquiet

        shelvectx = repo['tip']

        # If the shelve is not immediately on top of the commit
        # we'll be merging with, rebase it to be on top.
        if tmpwctx.node() != shelvectx.parents()[0].node():
            ui.status(_('rebasing shelved changes\n'))
            try:
                rebase.rebase(ui, repo, **{
                    'rev' : [shelvectx.rev()],
                    'dest' : str(tmpwctx.rev()),
                    'keep' : True,
                })
            except error.InterventionRequired:
                # conflicts: persist state, keep the commits (tr.close),
                # stash the rebase state for --continue/--abort, and
                # surface the conflict to the user
                tr.close()

                stripnodes = [repo.changelog.node(rev)
                              for rev in xrange(oldtiprev, len(repo))]
                shelvedstate.save(repo, basename, wctx, tmpwctx, stripnodes)

                util.rename(repo.join('rebasestate'),
                            repo.join('unshelverebasestate'))
                raise error.InterventionRequired(
                    _("unresolved conflicts (see 'hg resolve', then "
                      "'hg unshelve --continue')"))

            # refresh ctx after rebase completes
            shelvectx = repo['tip']

            if not shelvectx in tmpwctx.children():
                # rebase was a no-op, so it produced no child commit
                shelvectx = tmpwctx

        mergefiles(ui, repo, wctx, shelvectx)
        shelvedstate.clear(repo)

        # The transaction aborting will strip all the commits for us,
        # but it doesn't update the inmemory structures, so addchangegroup
        # hooks still fire and try to operate on the missing commits.
        # Clean up manually to prevent this.
        repo.unfiltered().changelog.strip(oldtiprev, tr)

        unshelvecleanup(ui, repo, basename, opts)
    finally:
        ui.quiet = oldquiet
        if tr:
            tr.release()
        lockmod.release(lock, wlock)
619 620
@command('shelve',
         [('A', 'addremove', None,
           _('mark new/missing files as added/removed before shelving')),
          ('', 'cleanup', None,
           _('delete all shelved changes')),
          ('', 'date', '',
           _('shelve with the specified commit date'), _('DATE')),
          ('d', 'delete', None,
           _('delete the named shelved change(s)')),
          ('l', 'list', None,
           _('list current shelves')),
          ('m', 'message', '',
           _('use text as shelve message'), _('TEXT')),
          ('n', 'name', '',
           _('use the given name for the shelved commit'), _('NAME')),
          ('p', 'patch', None,
           _('show patch')),
          ('', 'stat', None,
           _('output diffstat-style summary of changes'))] + commands.walkopts,
         _('hg shelve [OPTION]... [FILE]...'))
def shelvecmd(ui, repo, *pats, **opts):
    '''save and set aside changes from the working directory

    Shelving takes files that "hg status" reports as not clean, saves
    the modifications to a bundle (a shelved change), and reverts the
    files so that their state in the working directory becomes clean.

    To restore these changes to the working directory, using "hg
    unshelve"; this will work even if you switch to a different
    commit.

    When no files are specified, "hg shelve" saves all not-clean
    files. If specific files or directories are named, only changes to
    those files are shelved.

    Each shelved change has a name that makes it easier to find later.
    The name of a shelved change defaults to being based on the active
    bookmark, or if there is no active bookmark, the current named
    branch. To specify a different name, use ``--name``.

    To see a list of existing shelved changes, use the ``--list``
    option. For each shelved change, this will print its name, age,
    and description; use ``--patch`` or ``--stat`` for more details.

    To delete specific shelved changes, use ``--delete``. To delete
    all shelved changes, use ``--cleanup``.
    '''
    cmdutil.checkunfinished(repo)

    # dispatch to the matching subcommand, rejecting incompatible flag
    # combinations along the way
    def checkopt(opt, incompatible):
        # returns True when `opt` was given; aborts if any flag listed in
        # the space-separated `incompatible` string was also given
        if opts[opt]:
            for i in incompatible.split():
                if opts[i]:
                    raise util.Abort(_("options '--%s' and '--%s' may not be "
                                       "used together") % (opt, i))
            return True
    if checkopt('cleanup', 'addremove delete list message name patch stat'):
        if pats:
            raise util.Abort(_("cannot specify names when using '--cleanup'"))
        return cleanupcmd(ui, repo)
    elif checkopt('delete', 'addremove cleanup list message name patch stat'):
        return deletecmd(ui, repo, pats)
    elif checkopt('list', 'addremove cleanup delete message name'):
        return listcmd(ui, repo, pats, opts)
    else:
        # default action is creating a shelve; --patch/--stat only make
        # sense together with --list
        for i in ('patch', 'stat'):
            if opts[i]:
                raise util.Abort(_("option '--%s' may not be "
                                   "used when shelving a change") % (i,))
        return createcmd(ui, repo, pats, opts)
690 691
def extsetup(ui):
    """Register the interrupted-unshelve state with cmdutil so other
    commands refuse to run while an unshelve is in progress."""
    cmdutil.unfinishedstates.append(
        [shelvedstate._filename, False, False,
         _('unshelve already in progress'),
         _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
@@ -1,556 +1,738 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 import weakref
8 9 from i18n import _
9 from node import nullrev, nullid, hex
10 from node import nullrev, nullid, hex, short
10 11 import mdiff, util, dagutil
11 12 import struct, os, bz2, zlib, tempfile
12 import discovery, error
13 import discovery, error, phases, branchmap
13 14
14 15 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
15 16
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    got = len(data)
    if got < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (got, n))
    return data
24 25
def getchunk(stream):
    """return the next chunk from stream as a string"""
    lengthdata = readexactly(stream, 4)
    length = struct.unpack(">l", lengthdata)[0]
    if length > 4:
        # the stored length counts its own 4-byte header
        return readexactly(stream, length - 4)
    if length:
        raise util.Abort(_("invalid chunk length %d") % length)
    return ""
34 35
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # on-the-wire length includes the 4-byte header itself
    total = length + 4
    return struct.pack(">l", total)
38 39
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a zero-length chunk terminates a chunk group on the wire
    return struct.pack(">l", 0)
42 43
class nocompress(object):
    """Identity 'compressor' used by uncompressed bundle types."""
    def compress(self, x):
        # pass data through untouched
        return x
    def flush(self):
        # nothing is buffered, so there is nothing left to emit
        return ""
48 49
# map of bundle type name -> (on-the-wire header, compressor factory)
bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    # bz2 output itself starts with 'BZ', completing the 'HG10BZ' header
    # (decompressor's BZ branch re-feeds those two bytes accordingly)
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
60 61
def writebundle(cg, filename, bundletype):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        # remember the path so the finally block can delete it on error
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(cg)
                if not chunk:
                    break
                empty = False
                fh.write(z.compress(chunkheader(len(chunk))))
                pos = 0
                # feed the compressor in 1MB slices to bound memory use
                while pos < len(chunk):
                    next = pos + 2**20
                    fh.write(z.compress(chunk[pos:next]))
                    pos = next
            fh.write(z.compress(closechunk()))
        fh.write(z.flush())
        # success: disarm the error-cleanup path
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            os.unlink(cleanup)
115 116
def decompressor(fh, alg):
    """Wrap stream `fh` so that reads yield decompressed data.

    `alg` is the two-letter algorithm tag from the bundle header:
    'UN' (none), 'GZ' (zlib) or 'BZ' (bz2); anything else aborts.
    """
    if alg == 'UN':
        # no compression: return the raw stream as-is
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the 'BZ' magic was consumed as part of the bundle header;
            # re-feed it so the decompressor sees a valid bz2 stream
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
133 134
class unbundle10(object):
    """Reader for a version-10 changegroup stream.

    Wraps a (possibly compressed) stream and exposes the chunked
    changegroup structure: per-group headers and delta chunks.
    """
    deltaheader = _BUNDLE10_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        # optional progress hook, invoked once per non-empty chunk
        self.callback = None
    def compressed(self):
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        """Read a 4-byte length prefix; return the payload length
        (0 for an end-of-group marker), aborting on invalid values."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        # the stored length includes the 4-byte prefix itself
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # v10 has no explicit delta base: it is the first parent for the
        # first chunk of a group, the previous node afterwards
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        """Read one delta chunk; return {} at end of group, else a dict
        with node/p1/p2/cs/deltabase/delta."""
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}
197 198
class headerlessfixup(object):
    """File-like wrapper that replays already-consumed header bytes.

    The bytes in `h` are served back first; once exhausted, reads fall
    through to the underlying stream `fh`.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        pending = self._h
        if pending:
            d, self._h = pending[:n], pending[n:]
            if len(d) < n:
                # buffered bytes ran out mid-read: top up from the stream
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)
209 210
def readbundle(fh, fname):
    """Sniff the 6-byte bundle header of `fh` and return an unbundle10
    reader; `fname` is only used in error messages."""
    header = readexactly(fh, 6)

    if not fname:
        fname = "stream"
    if not header.startswith('HG') and header.startswith('\0'):
        # headerless stream (e.g. raw ssh unbundle): put the peeked bytes
        # back and treat it as uncompressed HG10
        fh = headerlessfixup(fh, header)
        header = "HG10UN"

    # header layout: 'HG' + 2-char version + 2-char compression tag
    magic, version, alg = header[0:2], header[2:4], header[4:6]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version != '10':
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
    return unbundle10(fh, alg)
226 227
class bundle10(object):
    """Bundler producing the HG10 changegroup wire format."""
    # delta header layout; shared with the matching unbundler
    deltaheader = _BUNDLE10_DELTA_HEADER
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        # 'bundle.reorder' config: 'auto' -> None (let group() decide),
        # anything else is parsed as a boolean
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
    def close(self):
        """Return the empty chunk that terminates a group."""
        return closechunk()

    def fileheader(self, fname):
        """Return the header chunk announcing filelog *fname*."""
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev; it becomes the delta base of
        # the first chunk emitted below
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs, source):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)

        The stream order is fixed by the format: changelog group,
        manifest group, then one group per changed file.
        '''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fnodes[f].setdefault(n, clnode)
            return clnode

        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        for f in changedfiles:
            fnodes[f] = {}
        mfnodes = self.prune(mf, mfs, commonrevs, source)
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        # Resolve the filenode -> linkrev-node mapping for one filelog.
        # In the fastpath case it is computed here by scanning the
        # filelog; otherwise it was collected by lookupmf above.
        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                ln, llr = filerevlog.node, filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """yield the chunks for the filelog part of the changegroup"""
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    yield chunk

    def revchunk(self, revlog, rev, prev, linknode):
        """yield the chunks encoding revision *rev* as a delta against
        *prev* (or as a full text when prev is nullrev)"""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev

        prefix = ''
        if base == nullrev:
            # no base: ship the full revision with a trivial diff header
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
432 433
def _changegroupinfo(repo, nodes, source):
    """Report how many changesets are being bundled (verbose/debug output)."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for node in nodes:
            ui.debug("%s\n" % hex(node))
440 441
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Generate the changegroup for *outgoing* and wrap it in an unbundler.

    Runs the 'preoutgoing' hook, then returns an unbundle10 reading
    from the chunk stream produced by *bundler*.
    """
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
    return unbundle10(util.chunkbuffer(gengroup), 'UN')
457 458
def changegroupsubset(repo, roots, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    discbases = []
    for n in roots:
        # discovery bases are the non-null parents of the roots
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = bundle10(repo)
    return getsubset(repo, outgoing, bundler, source)
482 483
def getlocalbundle(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        # nothing to send
        return None
    return getsubset(repo, outgoing, bundle10(repo, bundlecaps), source)
492 493
def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    cl = repo.changelog
    if common:
        # drop any nodes the local repository does not actually know
        known = cl.hasnode
        common = [n for n in common if known(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    outgoing = discovery.outgoing(cl, common, heads)
    return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)
512 513
def changegroup(repo, basenodes, source):
    """Return a changegroup of everything from basenodes to the repo heads."""
    # to avoid a race we use changegroupsubset() (issue1320)
    heads = repo.heads()
    return changegroupsubset(repo, basenodes, heads, source)
516 517
def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    """Apply the filelog portion of a changegroup to *repo*.

    source is the unbundler to read chunks from, revmap maps a node to
    its changelog revision, trp is the transaction proxy used for the
    revlog writes, pr is a progress callback invoked once per file, and
    needfiles maps filename -> set of filenodes that must be received
    (populated under the 'server.validate' option in addchangegroup).

    Returns (revisions, files): the number of file revisions and the
    number of files that were added.  Aborts on an empty file group, a
    spurious entry, or - after processing - on expected file data that
    never arrived.
    """
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            # empty header chunk: no more filelogs in this changegroup
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        if not fl.addgroup(source, revmap, trp):
            raise util.Abort(_("received file revlog group is empty"))
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            # tick off every expected node we actually received
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    # anything left in needfiles must already exist locally, else the
    # incoming changesets reference file data we never got
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
558
def addchangegroup(repo, source, srctype, url, emptyok=False):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    # changelog addgroup callback: log the node and return its future
    # revision number (cl is bound later; closures resolve lazily)
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    repo.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()  # files touched by the incoming changesets

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = repo.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    try:
        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        # per-chunk progress callback handed to the unbundler
        class prog(object):
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None
            # NOTE: the self parameter is (confusingly) named 'repo';
            # repo.ui/repo.step/repo.count below are the class
            # attributes above, not the outer repository.
            def __call__(repo):
                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        # compute the head-count delta, not counting heads that close
        # a branch
        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                         % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            p = lambda: cl.writepending() and repo.root or ""
            repo.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype == 'push':
            # Old servers can not push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, phases.public, srccontent)
            else:
                phases.advanceboundary(repo, phases.draft, srccontent)
                phases.retractboundary(repo, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, phases.draft, added)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()

        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but coming call to
                # `destroyed` will repair it.
                # In other case we can safely update cache on disk.
                branchmap.updatecache(repo.filtered('served'))
            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", node=hex(cl.node(clstart)),
                          source=srctype, url=url)

                for n in added:
                    repo.hook("incoming", node=hex(n), source=srctype,
                              url=url)

                # hex(c[:6]) logs a 12-hex-digit prefix of each new head
                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))
            repo._afterlock(runhooks)

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
@@ -1,5930 +1,5931 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, bin, nullid, nullrev, short
9 9 from lock import release
10 10 from i18n import _
11 11 import os, re, difflib, time, tempfile, errno
12 12 import sys
13 13 import hg, scmutil, util, revlog, copies, error, bookmarks
14 14 import patch, help, encoding, templatekw, discovery
15 15 import archival, changegroup, cmdutil, hbisect
16 16 import sshserver, hgweb, commandserver
17 17 from hgweb import server as hgweb_server
18 18 import merge as mergemod
19 19 import minirst, revset, fileset
20 20 import dagparser, context, simplemerge, graphmod
21 21 import random
22 22 import setdiscovery, treediscovery, dagutil, pvec, localrepo
23 23 import phases, obsolete
24 24
# command name -> command entry registry, filled by the @command decorator
table = {}

# decorator that registers a function (with its options) into `table`
command = cmdutil.command(table)
28 28
# common command options
#
# Each entry is a tuple: (short flag, long flag, default value,
# help text[, value placeholder]).

# options accepted by every command
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or name of overlay bundle file'),
     _('REPO')),
    ('', 'cwd', '',
     _('change working directory'), _('DIR')),
    ('y', 'noninteractive', None,
     _('do not prompt, automatically pick the first choice for all prompts')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [],
     _('set/override config option (use \'section.name=value\')'),
     _('CONFIG')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', encoding.encoding, _('set the charset encoding'),
     _('ENCODE')),
    ('', 'encodingmode', encoding.encodingmode,
     _('set the charset encoding mode'), _('MODE')),
    ('', 'traceback', None, _('always print a traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
    ('', 'hidden', False, _('consider hidden changesets')),
]

dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]

# options for commands talking to a remote repository
remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

# include/exclude pattern options for commands walking the working copy
walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

# whitespace-handling options shared by diff and annotate
diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
]

diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]
145 145
146 146 # Commands start here, listed alphabetically
147 147
@command('^add',
    walkopts + subrepoopts + dryrunopts,
    _('[OPTION]... [FILE]...'))
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the
    repository.

    The files will be added to the repository at the next commit. To
    undo an add before that, see :hg:`forget`.

    If no names are given, add all files to the repository.

    .. container:: verbose

       An example showing how new (unknown) files are added
       automatically by :hg:`add`::

         $ ls
         foo.c
         $ hg status
         ? foo.c
         $ hg add
         adding foo.c
         $ hg status
         A foo.c

    Returns 0 if all files are successfully added.
    """

    matcher = scmutil.match(repo[None], pats, opts)
    bad = cmdutil.add(ui, repo, matcher, opts.get('dry_run'),
                      opts.get('subrepos'), prefix="", explicitonly=False)
    # non-zero exit status when any file was rejected
    return bad and 1 or 0
183 183
@command('addremove',
    similarityopts + walkopts + dryrunopts,
    _('[OPTION]... [FILE]...'))
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the
    repository.

    New files are ignored if they match any of the patterns in
    ``.hgignore``. As with add, these changes take effect at the next
    commit.

    Use the -s/--similarity option to detect renamed files. This
    option takes a percentage between 0 (disabled) and 100 (files must
    be identical) as its parameter. With a parameter greater than 0,
    this compares every removed file with every added file and records
    those similar enough as renames. Detecting renamed files this way
    can be expensive. After using this option, :hg:`status -C` can be
    used to check which files were identified as moved or renamed. If
    not specified, -s/--similarity defaults to 100 and only renames of
    identical files are detected.

    Returns 0 if all files are successfully added.
    """
    rawsim = opts.get('similarity') or 100
    try:
        sim = float(rawsim)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    # scmutil expects the similarity as a 0..1 ratio
    return scmutil.addremove(repo, pats, opts, similarity=sim / 100.0)
216 216
@command('^annotate|blame',
    [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
    ('', 'follow', None,
     _('follow copies/renames and list the filename (DEPRECATED)')),
    ('', 'no-follow', None, _("don't follow copies and renames")),
    ('a', 'text', None, _('treat all files as text')),
    ('u', 'user', None, _('list the author (long with -v)')),
    ('f', 'file', None, _('list the filename')),
    ('d', 'date', None, _('list the date (short with -q)')),
    ('n', 'number', None, _('list the revision number (default)')),
    ('c', 'changeset', None, _('list the changeset')),
    ('l', 'line-number', None, _('show line number at the first appearance'))
    ] + diffwsopts + walkopts,
    _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'))
def annotate(ui, repo, *pats, **opts):
    """show changeset information by line for each file

    List changes in files, showing the revision id responsible for
    each line

    This command is useful for discovering when a change was made and
    by whom.

    Without the -a/--text option, annotate will avoid processing files
    it detects as binary. With -a, annotate will annotate the file
    anyway, although the results will probably be neither useful
    nor desirable.

    Returns 0 on success.
    """
    if opts.get('follow'):
        # --follow is deprecated and now just an alias for -f/--file
        # to mimic the behavior of Mercurial before version 1.5
        opts['file'] = True

    datefunc = ui.quiet and util.shortdate or util.datestr
    getdate = util.cachefunc(lambda x: datefunc(x[0].date()))

    if not pats:
        raise util.Abort(_('at least one filename or pattern is required'))

    hexfn = ui.debugflag and hex or short

    # (option name, column separator, value extractor) for each
    # selectable output column; x is a (filectx, lineno) pair
    opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())),
             ('number', ' ', lambda x: str(x[0].rev())),
             ('changeset', ' ', lambda x: hexfn(x[0].node())),
             ('date', ' ', getdate),
             ('file', ' ', lambda x: x[0].path()),
             ('line_number', ':', lambda x: str(x[1])),
            ]

    # default to showing the revision number when no column was chosen
    if (not opts.get('user') and not opts.get('changeset')
        and not opts.get('date') and not opts.get('file')):
        opts['number'] = True

    linenumber = opts.get('line_number') is not None
    if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
        raise util.Abort(_('at least one of -n/-c is required for -l'))

    funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)]
    funcmap[0] = (funcmap[0][0], '') # no separator in front of first column

    def bad(x, y):
        raise util.Abort("%s: %s" % (x, y))

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, pats, opts)
    m.bad = bad
    follow = not opts.get('no_follow')
    diffopts = patch.diffopts(ui, opts, section='annotate')
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        if not opts.get('text') and util.binary(fctx.data()):
            ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
            continue

        lines = fctx.annotate(follow=follow, linenumber=linenumber,
                              diffopts=diffopts)
        pieces = []

        # render each selected column right-aligned to its widest value
        for f, sep in funcmap:
            l = [f(n) for n, dummy in lines]
            if l:
                sized = [(x, encoding.colwidth(x)) for x in l]
                ml = max([w for x, w in sized])
                pieces.append(["%s%s%s" % (sep, ' ' * (ml - w), x)
                               for x, w in sized])

        if pieces:
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % ("".join(p), l[1]))

        # keep the output newline-terminated even if the file is not
        if lines and not lines[-1][1].endswith('\n'):
            ui.write('\n')
311 311
@command('archive',
    [('', 'no-decode', None, _('do not pass files through decoders')),
    ('p', 'prefix', '', _('directory prefix for files in archive'),
     _('PREFIX')),
    ('r', 'rev', '', _('revision to distribute'), _('REV')),
    ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
    ] + subrepoopts + walkopts,
    _('[OPTION]... DEST'))
def archive(ui, repo, dest, **opts):
    '''create an unversioned archive of a repository revision

    By default, the revision used is the parent of the working
    directory; use -r/--rev to specify a different revision.

    The archive type is automatically detected based on file
    extension (or override using -t/--type).

    .. container:: verbose

      Examples:

      - create a zip file containing the 1.0 release::

          hg archive -r 1.0 project-1.0.zip

      - create a tarball excluding .hg files::

          hg archive project.tar.gz -X ".hg*"

    Valid types are:

    :``files``: a directory full of files (default)
    :``tar``:   tar archive, uncompressed
    :``tbz2``:  tar archive, compressed using bzip2
    :``tgz``:   tar archive, compressed using gzip
    :``uzip``:  zip archive, uncompressed
    :``zip``:   zip archive, compressed using deflate

    The exact name of the destination archive or directory is given
    using a format string; see :hg:`help export` for details.

    Each member added to an archive file has a directory prefix
    prepended. Use -p/--prefix to specify a format string for the
    prefix. The default is the basename of the archive, with suffixes
    removed.

    Returns 0 on success.
    '''

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    if not ctx:
        raise util.Abort(_('no working directory: please specify a revision'))
    node = ctx.node()
    # expand format-string placeholders (e.g. %h) in the destination
    dest = cmdutil.makefilename(repo, dest, node)
    if os.path.realpath(dest) == repo.root:
        raise util.Abort(_('repository root cannot be destination'))

    kind = opts.get('type') or archival.guesskind(dest) or 'files'
    prefix = opts.get('prefix')

    if dest == '-':
        # write the archive to stdout
        if kind == 'files':
            raise util.Abort(_('cannot archive plain files to stdout'))
        dest = cmdutil.makefileobj(repo, dest)
        if not prefix:
            prefix = os.path.basename(repo.root) + '-%h'

    prefix = cmdutil.makefilename(repo, prefix, node)
    matchfn = scmutil.match(ctx, [], opts)
    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
                     matchfn, prefix, subrepos=opts.get('subrepos'))
383 383
@command('backout',
    [('', 'merge', None, _('merge with old dirstate parent after backout')),
    ('', 'parent', '',
     _('parent to choose when backing out merge (DEPRECATED)'), _('REV')),
    ('r', 'rev', '', _('revision to backout'), _('REV')),
    ] + mergetoolopts + walkopts + commitopts + commitopts2,
    _('[OPTION]... [-r] REV'))
def backout(ui, repo, node=None, rev=None, **opts):
    '''reverse effect of earlier changeset

    Prepare a new changeset with the effect of REV undone in the
    current working directory.

    If REV is the parent of the working directory, then this new changeset
    is committed automatically. Otherwise, hg needs to merge the
    changes and the merged result is left uncommitted.

    .. note::

      backout cannot be used to fix either an unwanted or
      incorrect merge.

    .. container:: verbose

      By default, the pending changeset will have one parent,
      maintaining a linear history. With --merge, the pending
      changeset will instead have two parents: the old parent of the
      working directory and a new child of REV that simply undoes REV.

      Before version 1.7, the behavior without --merge was equivalent
      to specifying --merge followed by :hg:`update --clean .` to
      cancel the merge and leave the child of REV as a head to be
      merged separately.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if nothing to backout or there are unresolved
    files.
    '''
    # The revision may be given positionally (node) or via -r/--rev (rev);
    # accept exactly one of the two.
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not rev:
        rev = node

    if not rev:
        raise util.Abort(_("please specify a revision to backout"))

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    # Refuse to run with an interrupted multistep operation or local changes.
    cmdutil.checkunfinished(repo)
    cmdutil.bailifchanged(repo)
    node = scmutil.revsingle(repo, rev).node()

    op1, op2 = repo.dirstate.parents()
    # Backout only makes sense for an ancestor of the working directory.
    a = repo.changelog.ancestor(op1, node)
    if a != node:
        raise util.Abort(_('cannot backout change that is not an ancestor'))

    p1, p2 = repo.changelog.parents(node)
    if p1 == nullid:
        raise util.Abort(_('cannot backout a change with no parents'))
    if p2 != nullid:
        # Backing out a merge requires the (deprecated) --parent option to
        # pick which side of the merge to revert to.
        if not opts.get('parent'):
            raise util.Abort(_('cannot backout a merge changeset'))
        p = repo.lookup(opts['parent'])
        if p not in (p1, p2):
            raise util.Abort(_('%s is not a parent of %s') %
                             (short(p), short(node)))
        parent = p
    else:
        if opts.get('parent'):
            raise util.Abort(_('cannot use --parent on non-merge changeset'))
        parent = p1

    # the backout should appear on the same branch
    wlock = repo.wlock()
    try:
        branch = repo.dirstate.branch()
        bheads = repo.branchheads(branch)
        rctx = scmutil.revsingle(repo, hex(parent))
        if not opts.get('merge') and op1 != node:
            # Linear-history mode: merge the reversal into the working
            # directory without changing its recorded parents, and leave
            # the result uncommitted.
            try:
                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                             'backout')
                stats = mergemod.update(repo, parent, True, True, False,
                                        node, False)
                repo.setparents(op1, op2)
                hg._showstats(repo, stats)
                if stats[3]:
                    # stats[3] counts unresolved files from the merge.
                    repo.ui.status(_("use 'hg resolve' to retry unresolved "
                                     "file merges\n"))
                else:
                    msg = _("changeset %s backed out, "
                            "don't forget to commit.\n")
                    ui.status(msg % short(node))
                return stats[3] > 0
            finally:
                ui.setconfig('ui', 'forcemerge', '', '')
        else:
            # --merge mode (or REV is the working directory parent):
            # check out REV, then revert to its parent to build the
            # "undo" changeset.
            hg.clean(repo, node, show_stats=False)
            repo.dirstate.setbranch(branch)
            cmdutil.revert(ui, repo, rctx, repo.dirstate.parents())


        e = cmdutil.commiteditor
        if not opts['message'] and not opts['logfile']:
            # we don't translate commit messages
            opts['message'] = "Backed out changeset %s" % short(node)
            e = cmdutil.commitforceeditor

        def commitfunc(ui, repo, message, match, opts):
            # Commit the reversal changeset with the chosen editor.
            return repo.commit(message, opts.get('user'), opts.get('date'),
                               match, editor=e)
        newnode = cmdutil.commit(ui, repo, commitfunc, [], opts)
        if not newnode:
            ui.status(_("nothing changed\n"))
            return 1
        cmdutil.commitstatus(repo, newnode, branch, bheads)

        def nice(node):
            # "rev:shorthash" display form for status messages.
            return '%d:%s' % (repo.changelog.rev(node), short(node))
        ui.status(_('changeset %s backs out changeset %s\n') %
                  (nice(repo.changelog.tip()), nice(node)))
        if opts.get('merge') and op1 != node:
            # Second half of --merge: go back to the old parent and merge
            # in the freshly committed backout changeset.
            hg.clean(repo, op1, show_stats=False)
            ui.status(_('merging with changeset %s\n')
                      % nice(repo.changelog.tip()))
            try:
                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                             'backout')
                return hg.merge(repo, hex(repo.changelog.tip()))
            finally:
                ui.setconfig('ui', 'forcemerge', '', '')
    finally:
        wlock.release()
    return 0
523 523
@command('bisect',
    [('r', 'reset', False, _('reset bisect state')),
    ('g', 'good', False, _('mark changeset good')),
    ('b', 'bad', False, _('mark changeset bad')),
    ('s', 'skip', False, _('skip testing changeset')),
    ('e', 'extend', False, _('extend the bisect range')),
    ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
    ('U', 'noupdate', False, _('do not update to target'))],
    _("[-gbsr] [-U] [-c CMD] [REV]"))
def bisect(ui, repo, rev=None, extra=None, command=None,
               reset=None, good=None, bad=None, skip=None, extend=None,
               noupdate=None):
    """subdivision search of changesets

    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.

    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.

    If you supply a command, it will be used for automatic bisection.
    The environment variable HG_NODE will contain the ID of the
    changeset being tested. The exit status of the command will be
    used to mark revisions as good or bad: status 0 means good, 125
    means to skip the revision, 127 (command not found) will abort the
    bisection, and any other non-zero exit status means the revision
    is bad.

    .. container:: verbose

      Some examples:

      - start a bisection with known bad revision 34, and good revision 12::

          hg bisect --bad 34
          hg bisect --good 12

      - advance the current bisection by marking current revision as good or
        bad::

          hg bisect --good
          hg bisect --bad

      - mark the current revision, or a known revision, to be skipped (e.g. if
        that revision is not usable because of another issue)::

          hg bisect --skip
          hg bisect --skip 23

      - skip all revisions that do not touch directories ``foo`` or ``bar``::

          hg bisect --skip "!( file('path:foo') & file('path:bar') )"

      - forget the current bisection::

          hg bisect --reset

      - use 'make && make tests' to automatically find the first broken
        revision::

          hg bisect --reset
          hg bisect --bad 34
          hg bisect --good 12
          hg bisect --command "make && make tests"

      - see all changesets whose states are already known in the current
        bisection::

          hg log -r "bisect(pruned)"

      - see the changeset currently being bisected (especially useful
        if running with -U/--noupdate)::

          hg log -r "bisect(current)"

      - see all changesets that took part in the current bisection::

          hg log -r "bisect(range)"

      - you can even get a nice graph::

          hg log --graph -r "bisect(range)"

      See :hg:`help revsets` for more about the `bisect()` keyword.

    Returns 0 on success.
    """
    def extendbisectrange(nodes, good):
        # bisect is incomplete when it ends on a merge node and
        # one of the parent was not checked.
        parents = repo[nodes[0]].parents()
        if len(parents) > 1:
            # Pick the side list opposite to the verdict we just reached.
            side = good and state['bad'] or state['good']
            num = len(set(i.node() for i in parents) & set(side))
            if num == 1:
                # Exactly one parent is unchecked: the common ancestor is
                # the point to restart the bisection from.
                return parents[0].ancestor(parents[1])
        return None

    def print_result(nodes, good):
        # Report the outcome of the bisection: either the single culprit
        # revision, or the set of candidates when revisions were skipped.
        displayer = cmdutil.show_changeset(ui, repo, {})
        if len(nodes) == 1:
            # narrowed it down to a single revision
            if good:
                ui.write(_("The first good revision is:\n"))
            else:
                ui.write(_("The first bad revision is:\n"))
            displayer.show(repo[nodes[0]])
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                ui.write(_('Not all ancestors of this changeset have been'
                           ' checked.\nUse bisect --extend to continue the '
                           'bisection from\nthe common ancestor, %s.\n')
                         % extendnode)
        else:
            # multiple possible revisions
            if good:
                ui.write(_("Due to skipped revisions, the first "
                        "good revision could be any of:\n"))
            else:
                ui.write(_("Due to skipped revisions, the first "
                        "bad revision could be any of:\n"))
            for n in nodes:
                displayer.show(repo[n])
        displayer.close()

    def check_state(state, interactive=True):
        # A bisection needs at least one good and one bad revision; when
        # called interactively it is fine to proceed once either side is
        # being set by the current invocation.
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise util.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise util.Abort(_('cannot bisect (no known bad revisions)'))
        return True

    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + extend + bool(command) > 1:
        # At most one action flag may be given per invocation.
        raise util.Abort(_('incompatible arguments'))

    cmdutil.checkunfinished(repo)

    if reset:
        # Forget the current bisection by removing the state file.
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return

    state = hbisect.load_state(repo)

    if command:
        # Automatic mode: repeatedly run the command on the candidate
        # revision and translate its exit status into good/bad/skip.
        changesets = 1
        if noupdate:
            try:
                node = state['current'][0]
            except LookupError:
                raise util.Abort(_('current bisect revision is unknown - '
                                   'start a new bisect to fix'))
        else:
            node, p2 = repo.dirstate.parents()
            if p2 != nullid:
                raise util.Abort(_('current bisect revision is a merge'))
        try:
            while changesets:
                # update state
                state['current'] = [node]
                hbisect.save_state(repo, state)
                status = util.system(command,
                                     environ={'HG_NODE': hex(node)},
                                     out=ui.fout)
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise util.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise util.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                ctx = scmutil.revsingle(repo, rev, node)
                rev = None # clear for future iterations
                state[transition].append(ctx.node())
                ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, bgood = hbisect.bisect(repo.changelog, state)
                # update to next check
                node = nodes[0]
                if not noupdate:
                    cmdutil.bailifchanged(repo)
                    hg.clean(repo, node, show_stats=False)
        finally:
            # Always persist the last tested revision so the bisection can
            # be resumed even if the command loop aborted.
            state['current'] = [node]
            hbisect.save_state(repo, state)
        print_result(nodes, bgood)
        return

    # update state

    if rev:
        nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])]
    else:
        nodes = [repo.lookup('.')]

    if good or bad or skip:
        if good:
            state['good'] += nodes
        elif bad:
            state['bad'] += nodes
        elif skip:
            state['skip'] += nodes
        hbisect.save_state(repo, state)

    if not check_state(state):
        return

    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if extend:
        if not changesets:
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                ui.write(_("Extending search to changeset %d:%s\n")
                         % (extendnode.rev(), extendnode))
                state['current'] = [extendnode.node()]
                hbisect.save_state(repo, state)
                if noupdate:
                    return
                cmdutil.bailifchanged(repo)
                return hg.clean(repo, extendnode.node())
        raise util.Abort(_("nothing to extend"))

    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        state['current'] = [node]
        hbisect.save_state(repo, state)
        if not noupdate:
            cmdutil.bailifchanged(repo)
            return hg.clean(repo, node)
790 790
@command('bookmarks|bookmark',
    [('f', 'force', False, _('force')),
    ('r', 'rev', '', _('revision'), _('REV')),
    ('d', 'delete', False, _('delete a given bookmark')),
    ('m', 'rename', '', _('rename a given bookmark'), _('NAME')),
    ('i', 'inactive', False, _('mark a bookmark inactive'))],
    _('hg bookmarks [OPTIONS]... [NAME]...'))
def bookmark(ui, repo, *names, **opts):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when committing.
    Bookmarks are local. They can be renamed, copied and deleted. It is
    possible to use :hg:`merge NAME` to merge from a given bookmark, and
    :hg:`update NAME` to update to a given bookmark.

    You can use :hg:`bookmark NAME` to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.

    Bookmarks can be pushed and pulled between repositories (see :hg:`help
    push` and :hg:`help pull`). This requires both the local and remote
    repositories to support bookmarks. For versions prior to 1.8, this means
    the bookmarks extension must be enabled.

    If you set a bookmark called '@', new clones of the repository will
    have that revision checked out (and the bookmark made active) by
    default.

    With -i/--inactive, the new bookmark will not be made the active
    bookmark. If -r/--rev is given, the new bookmark will not be made
    active even if -i/--inactive is not given. If no NAME is given, the
    current active bookmark will be marked inactive.
    '''
    force = opts.get('force')
    rev = opts.get('rev')
    delete = opts.get('delete')
    rename = opts.get('rename')
    inactive = opts.get('inactive')

    def checkformat(mark):
        # Normalize a bookmark name and reject empty / reserved names.
        mark = mark.strip()
        if not mark:
            raise util.Abort(_("bookmark names cannot consist entirely of "
                               "whitespace"))
        scmutil.checknewlabel(repo, mark, 'bookmark')
        return mark

    def checkconflict(repo, mark, cur, force=False, target=None):
        # Refuse to clobber an existing bookmark unless the move is a
        # safe forward move, resolves a divergent bookmark, or --force
        # was given. Also keep bookmark names distinct from branch names.
        if mark in marks and not force:
            if target:
                if marks[mark] == target and target == cur:
                    # re-activating a bookmark
                    return
                anc = repo.changelog.ancestors([repo[target].rev()])
                bmctx = repo[marks[mark]]
                divs = [repo[b].node() for b in marks
                        if b.split('@', 1)[0] == mark.split('@', 1)[0]]

                # allow resolving a single divergent bookmark even if moving
                # the bookmark across branches when a revision is specified
                # that contains a divergent bookmark
                if bmctx.rev() not in anc and target in divs:
                    bookmarks.deletedivergent(repo, [target], mark)
                    return

                deletefrom = [b for b in divs
                              if repo[b].rev() in anc or b == target]
                bookmarks.deletedivergent(repo, deletefrom, mark)
                if bookmarks.validdest(repo, bmctx, repo[target]):
                    ui.status(_("moving bookmark '%s' forward from %s\n") %
                              (mark, short(bmctx.node())))
                    return
            raise util.Abort(_("bookmark '%s' already exists "
                               "(use -f to force)") % mark)
        if ((mark in repo.branchmap() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))

    # The action flags are mutually exclusive in various combinations.
    if delete and rename:
        raise util.Abort(_("--delete and --rename are incompatible"))
    if delete and rev:
        raise util.Abort(_("--rev is incompatible with --delete"))
    if rename and rev:
        raise util.Abort(_("--rev is incompatible with --rename"))
    if not names and (delete or rev):
        raise util.Abort(_("bookmark name required"))

    if delete or rename or names or inactive:
        # Any mutating action needs the working directory lock.
        wlock = repo.wlock()
        try:
            cur = repo.changectx('.').node()
            marks = repo._bookmarks
            if delete:
                for mark in names:
                    if mark not in marks:
                        raise util.Abort(_("bookmark '%s' does not exist") %
                                         mark)
                    if mark == repo._bookmarkcurrent:
                        bookmarks.unsetcurrent(repo)
                    del marks[mark]
                marks.write()

            elif rename:
                if not names:
                    raise util.Abort(_("new bookmark name required"))
                elif len(names) > 1:
                    raise util.Abort(_("only one new bookmark name allowed"))
                mark = checkformat(names[0])
                if rename not in marks:
                    raise util.Abort(_("bookmark '%s' does not exist") % rename)
                checkconflict(repo, mark, cur, force)
                marks[mark] = marks[rename]
                if repo._bookmarkcurrent == rename and not inactive:
                    # Keep the renamed bookmark active under its new name.
                    bookmarks.setcurrent(repo, mark)
                del marks[rename]
                marks.write()

            elif names:
                # Create or move one or more bookmarks.
                newact = None
                for mark in names:
                    mark = checkformat(mark)
                    if newact is None:
                        # The first name given becomes the candidate for
                        # the new active bookmark.
                        newact = mark
                    if inactive and mark == repo._bookmarkcurrent:
                        bookmarks.unsetcurrent(repo)
                        return
                    tgt = cur
                    if rev:
                        tgt = scmutil.revsingle(repo, rev).node()
                    checkconflict(repo, mark, cur, force, tgt)
                    marks[mark] = tgt
                if not inactive and cur == marks[newact] and not rev:
                    bookmarks.setcurrent(repo, newact)
                elif cur != tgt and newact == repo._bookmarkcurrent:
                    bookmarks.unsetcurrent(repo)
                marks.write()

            elif inactive:
                # Bare --inactive deactivates the current bookmark.
                if len(marks) == 0:
                    ui.status(_("no bookmarks set\n"))
                elif not repo._bookmarkcurrent:
                    ui.status(_("no active bookmark\n"))
                else:
                    bookmarks.unsetcurrent(repo)
        finally:
            wlock.release()
    else: # show bookmarks
        hexfn = ui.debugflag and hex or short
        marks = repo._bookmarks
        if len(marks) == 0:
            ui.status(_("no bookmarks set\n"))
        else:
            for bmark, n in sorted(marks.iteritems()):
                current = repo._bookmarkcurrent
                if bmark == current:
                    prefix, label = '*', 'bookmarks.current'
                else:
                    prefix, label = ' ', ''

                if ui.quiet:
                    ui.write("%s\n" % bmark, label=label)
                else:
                    ui.write(" %s %-25s %d:%s\n" % (
                        prefix, bmark, repo.changelog.rev(n), hexfn(n)),
                        label=label)
958 958
@command('branch',
    [('f', 'force', None,
     _('set branch name even if it shadows an existing branch')),
    ('C', 'clean', None, _('reset branch name to parent branch name'))],
    _('[-fC] [NAME]'))
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    .. note::

       Branch names are permanent and global. Use :hg:`bookmark` to create a
       light-weight bookmark instead. See :hg:`help glossary` for more
       information about named branches and bookmarks.

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch will not exist
    in the repository until the next commit). Standard practice
    recommends that primary development take place on the 'default'
    branch.

    Unless -f/--force is specified, branch will not let you set a
    branch name that already exists, even if it's inactive.

    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.

    Use the command :hg:`update` to switch to an existing branch. Use
    :hg:`commit --close-branch` to mark this branch as closed.

    Returns 0 on success.
    """
    if label:
        label = label.strip()
    clean = opts.get('clean')

    # No name and no --clean: just report the current branch.
    if not clean and not label:
        ui.write("%s\n" % repo.dirstate.branch())
        return

    wlock = repo.wlock()
    try:
        if clean:
            # Reset to the branch of the working directory's first parent.
            label = repo[None].p1().branch()
            repo.dirstate.setbranch(label)
            ui.status(_('reset working directory to branch %s\n') % label)
        elif label:
            # Without --force, refuse a name that shadows an existing
            # branch, unless we are already on a parent with that branch.
            shadows = (not opts.get('force') and label in repo.branchmap()
                       and label not in [p.branch() for p in repo.parents()])
            if shadows:
                raise util.Abort(_('a branch of the same name already'
                                   ' exists'),
                                 # i18n: "it" refers to an existing branch
                                 hint=_("use 'hg update' to switch to it"))
            scmutil.checknewlabel(repo, label, 'branch')
            repo.dirstate.setbranch(label)
            ui.status(_('marked working directory as branch %s\n') % label)
            ui.status(_('(branches are permanent and global, '
                        'did you want a bookmark?)\n'))
    finally:
        wlock.release()
1018 1018
@command('branches',
    [('a', 'active', False, _('show only branches that have unmerged heads')),
    ('c', 'closed', False, _('show normal and closed branches'))],
    _('[-ac]'))
def branches(ui, repo, active=False, closed=False):
    """list repository named branches

    List the repository's named branches, indicating which ones are
    inactive. If -c/--closed is specified, also list branches which have
    been marked closed (see :hg:`commit --close-branch`).

    If -a/--active is specified, only show active branches. A branch
    is considered active if it contains repository heads.

    Use the command :hg:`update` to switch to an existing branch.

    Returns 0.
    """

    hexfunc = hex if ui.debugflag else short

    # A branch is active when at least one of its heads is a repository head.
    allheads = set(repo.heads())
    entries = []
    for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
        isactive = not isclosed and bool(set(heads) & allheads)
        entries.append((tag, repo[tip], isactive, not isclosed))
    # Active branches first, then newest tip, then name.
    entries.sort(key=lambda e: (e[2], e[1].rev(), e[0], e[3]),
                 reverse=True)

    for tag, ctx, isactive, isopen in entries:
        if active and not isactive:
            continue
        if isactive:
            label, notice = 'branches.active', ''
        elif not isopen:
            # Closed branches are hidden unless --closed was given.
            if not closed:
                continue
            label, notice = 'branches.closed', _(' (closed)')
        else:
            label, notice = 'branches.inactive', _(' (inactive)')
        if tag == repo.dirstate.branch():
            label = 'branches.current'
        # Right-align "rev:hash" so the hash column lines up.
        rev = str(ctx.rev()).rjust(31 - encoding.colwidth(tag))
        rev = ui.label('%s:%s' % (rev, hexfunc(ctx.node())),
                       'log.changeset changeset.%s' % ctx.phasestr())
        labeledtag = ui.label(tag, label)
        if ui.quiet:
            ui.write("%s\n" % labeledtag)
        else:
            ui.write("%s %s%s\n" % (labeledtag, rev, notice))
1071 1071
@command('bundle',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [], _('a changeset intended to be added to the destination'),
     _('REV')),
    ('b', 'branch', [], _('a specific branch you would like to bundle'),
     _('BRANCH')),
    ('', 'base', [],
     _('a base changeset assumed to be available at the destination'),
     _('REV')),
    ('a', 'all', None, _('bundle all changesets in the repository')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
    ] + remoteopts,
    _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting changesets not
    known to be in another repository.

    If you omit the destination repository, then hg assumes the
    destination will have all the nodes you specify with --base
    parameters. To create a bundle containing all changesets, use
    -a/--all (or --base null).

    You can change compression method with the -t/--type option.
    The available compression methods are: none, bzip2, and
    gzip (by default, bundles are compressed using bzip2).

    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.

    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.

    Returns 0 on success, 1 if no changes found.
    """
    revs = None
    if 'rev' in opts:
        revs = scmutil.revrange(repo, opts['rev'])

    # Map the user-facing compression name to the on-disk bundle header;
    # btypes.get returns None for unknown names, rejected below.
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))

    if opts.get('all'):
        base = ['null']
    else:
        base = scmutil.revrange(repo, opts.get('base'))
    # TODO: get desired bundlecaps from command line.
    bundlecaps = None
    if base:
        # --base mode: no remote contact; assume the destination already
        # has the base revisions and bundle everything above them.
        if dest:
            raise util.Abort(_("--base is incompatible with specifying "
                               "a destination"))
        common = [repo.lookup(rev) for rev in base]
        heads = revs and map(repo.lookup, revs) or revs
        cg = changegroup.getbundle(repo, 'bundle', heads=heads, common=common,
                                   bundlecaps=bundlecaps)
        outgoing = None
    else:
        # Destination mode: contact the remote and discover which
        # changesets it is missing.
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.peer(repo, opts, dest)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
        heads = revs and map(repo.lookup, revs) or revs
        outgoing = discovery.findcommonoutgoing(repo, other,
                                                onlyheads=heads,
                                                force=opts.get('force'),
                                                portable=True)
        cg = changegroup.getlocalbundle(repo, 'bundle', outgoing, bundlecaps)
    if not cg:
        scmutil.nochangesfound(ui, repo, outgoing and outgoing.excluded)
        return 1

    changegroup.writebundle(cg, fname, bundletype)
1151 1151
@command('cat',
    [('o', 'output', '',
     _('print output to file with formatted name'), _('FORMAT')),
    ('r', 'rev', '', _('print the given revision'), _('REV')),
    ('', 'decode', None, _('apply any matching decode filter')),
    ] + walkopts,
    _('[OPTION]... FILE...'))
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files

    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    :``%s``: basename of file being printed
    :``%d``: dirname of file being printed, or '.' if in repository root
    :``%p``: root-relative path name of file being printed

    Returns 0 on success.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, (file1,) + pats, opts)
    err = 1

    def dump(path):
        # Write one file's content (optionally decoded) to the output
        # object built from the --output format string (or stdout).
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=path)
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(m.files()) == 1 and not m.anypats():
        path = m.files()[0]
        mfnode = ctx._changeset[0]
        if repo.manifest.find(mfnode, path)[0]:
            dump(path)
            return 0

    for abs in ctx.walk(m):
        dump(abs)
        err = 0
    return err
1202 1202
@command('^clone',
    [('U', 'noupdate', None,
     _('the clone will include an empty working copy (only a repository)')),
    ('u', 'updaterev', '', _('revision, tag or branch to check out'), _('REV')),
    ('r', 'rev', [], _('include the specified changeset'), _('REV')),
    ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
    ('', 'pull', None, _('use pull protocol to copy metadata')),
    ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')),
    ] + remoteopts,
    _('[OPTION]... SOURCE [DEST]'))
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    ``.hg/hgrc`` file, as the default to be used for future pulls.

    Only local paths and ``ssh://`` URLs are supported as
    destinations. For ``ssh://`` destinations, no working directory or
    ``.hg/hgrc`` will be created on the remote side.

    To pull only a subset of changesets, specify one or more revisions
    identifiers with -r/--rev or branches with -b/--branch. The
    resulting clone will contain only the specified changesets and
    their ancestors. These options (or 'clone src#rev dest') imply
    --pull, even for local source repositories. Note that specifying a
    tag will include the tagged changeset but not the changeset
    containing the tag.

    If the source repository has a bookmark called '@' set, that
    revision will be checked out in the new repository by default.

    To check out a particular version, use -u/--update, or
    -U/--noupdate to create a clone with no working directory.

    .. container:: verbose

      For efficiency, hardlinks are used for cloning whenever the
      source and destination are on the same filesystem (note this
      applies only to the repository data, not to the working
      directory). Some filesystems, such as AFS, implement hardlinking
      incorrectly, but do not report errors. In these cases, use the
      --pull option to avoid hardlinking.

      In some cases, you can clone repositories and the working
      directory using full hardlinks with ::

        $ cp -al REPO REPOCLONE

      This is the fastest way to clone, but it is not always safe. The
      operation is not atomic (making sure REPO is not modified during
      the operation is up to you) and you have to make sure your
      editor breaks hardlinks (Emacs and most Linux Kernel tools do
      so). Also, this is not compatible with certain extensions that
      place their metadata under the .hg directory, such as mq.

      Mercurial will update the working directory to the first applicable
      revision from this list:

      a) null if -U or the source repository has no changesets
      b) if -u . and the source repository is local, the first parent of
         the source repository's working directory
      c) the changeset specified with -u (if a branch name, this means the
         latest head of that branch)
      d) the changeset specified with -r
      e) the tipmost head specified with -b
      f) the tipmost head specified with the url#branch source syntax
      g) the revision marked with the '@' bookmark, if present
      h) the tipmost head of the default branch
      i) tip

      Examples:

      - clone a remote repository to a new directory named hg/::

          hg clone http://selenic.com/hg

      - create a lightweight local clone::

          hg clone project/ project-feature/

      - clone from an absolute path on an ssh server (note double-slash)::

          hg clone ssh://user@server//home/projects/alpha/

      - do a high-speed clone over a LAN while checking out a
        specified version::

          hg clone --uncompressed http://server/repo -u 1.5

      - create a repository without changesets after a particular revision::

          hg clone -r 04e544 experimental/ good/

      - clone (and track) a particular named branch::

          hg clone http://selenic.com/hg#stable

    See :hg:`help urls` for details on specifying URLs.

    Returns 0 on success.
    """
    noupdate = opts.get('noupdate')
    updaterev = opts.get('updaterev')
    # -U and -u are mutually exclusive: one forbids a checkout, the
    # other requests a specific one.
    if noupdate and updaterev:
        raise util.Abort(_("cannot specify both --noupdate and --updaterev"))

    peer = hg.clone(ui, opts, source, dest,
                    pull=opts.get('pull'),
                    stream=opts.get('uncompressed'),
                    rev=opts.get('rev'),
                    update=updaterev or not noupdate,
                    branch=opts.get('branch'))

    # hg.clone signals failure with None; `peer is None` then evaluates
    # True, which the command dispatcher turns into exit status 1.
    return peer is None
1320 1320
@command('^commit|ci',
    [('A', 'addremove', None,
     _('mark new/missing files as added/removed before committing')),
    ('', 'close-branch', None,
     _('mark a branch as closed, hiding it from the branch list')),
    ('', 'amend', None, _('amend the parent of the working dir')),
    ('s', 'secret', None, _('use the secret phase for committing')),
    ] + walkopts + commitopts + commitopts2 + subrepoopts,
    _('[OPTION]... [FILE]...'))
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository. Unlike a
    centralized SCM, this operation is a local operation. See
    :hg:`push` for a way to actively distribute your changes.

    If a list of files is omitted, all changes reported by :hg:`status`
    will be committed.

    If you are committing the result of a merge, do not provide any
    filenames or -I/-X filters.

    If no commit message is specified, Mercurial starts your
    configured editor where you can enter a message. In case your
    commit fails, you will find a backup of your message in
    ``.hg/last-message.txt``.

    The --amend flag can be used to amend the parent of the
    working directory with a new commit that contains the changes
    in the parent in addition to those currently reported by :hg:`status`,
    if there are any. The old commit is stored in a backup bundle in
    ``.hg/strip-backup`` (see :hg:`help bundle` and :hg:`help unbundle`
    on how to restore it).

    Message, user and date are taken from the amended commit unless
    specified. When a message isn't specified on the command line,
    the editor will open with the message of the amended commit.

    It is not possible to amend public changesets (see :hg:`help phases`)
    or changesets that have children.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if nothing changed.
    """
    # --amend and --subrepos cannot be combined; otherwise an explicit
    # --subrepos wins over any ui.commitsubrepos config value.
    if opts.get('subrepos'):
        if opts.get('amend'):
            raise util.Abort(_('cannot amend with --subrepos'))
        # Let --subrepos on the command line override config setting.
        ui.setconfig('ui', 'commitsubrepos', True, 'commit')

    # Save this for restoring it later
    # (commitfunc below temporarily switches phases.new-commit to
    # 'secret' for -s and must put the original value back).
    oldcommitphase = ui.config('phases', 'new-commit')

    cmdutil.checkunfinished(repo, commit=True)

    branch = repo[None].branch()
    bheads = repo.branchheads(branch)

    extra = {}
    if opts.get('close_branch'):
        # Mark the new changeset as closing its branch; only heads (or,
        # with --amend, a changeset whose parent is on this branch) may
        # be closed.
        extra['close'] = 1

        if not bheads:
            raise util.Abort(_('can only close branch heads'))
        elif opts.get('amend'):
            if repo.parents()[0].p1().branch() != branch and \
                    repo.parents()[0].p2().branch() != branch:
                raise util.Abort(_('can only close branch heads'))

    if opts.get('amend'):
        if ui.configbool('ui', 'commitsubrepos'):
            raise util.Abort(_('cannot amend with ui.commitsubrepos enabled'))

        old = repo['.']
        # Refuse to amend public changesets, merges in progress, and
        # (unless obsolete markers are enabled) changesets with children.
        if old.phase() == phases.public:
            raise util.Abort(_('cannot amend public changesets'))
        if len(repo[None].parents()) > 1:
            raise util.Abort(_('cannot amend while merging'))
        if (not obsolete._enabled) and old.children():
            raise util.Abort(_('cannot amend changeset with children'))

        e = cmdutil.commiteditor
        if opts.get('force_editor'):
            e = cmdutil.commitforceeditor

        # commitfunc is used only for temporary amend commit by cmdutil.amend
        def commitfunc(ui, repo, message, match, opts):
            editor = e
            # message contains text from -m or -l, if it's empty,
            # open the editor with the old message
            if not message:
                message = old.description()
                editor = cmdutil.commitforceeditor
            # User and date default to those of the commit being amended.
            return repo.commit(message,
                               opts.get('user') or old.user(),
                               opts.get('date') or old.date(),
                               match,
                               editor=editor,
                               extra=extra)

        # Remember bookmarks on the old node so they can be moved to the
        # amended node afterwards.
        current = repo._bookmarkcurrent
        marks = old.bookmarks()
        node = cmdutil.amend(ui, repo, commitfunc, old, extra, pats, opts)
        if node == old.node():
            ui.status(_("nothing changed\n"))
            return 1
        elif marks:
            ui.debug('moving bookmarks %r from %s to %s\n' %
                     (marks, old.hex(), hex(node)))
            newmarks = repo._bookmarks
            for bm in marks:
                newmarks[bm] = node
                if bm == current:
                    bookmarks.setcurrent(repo, bm)
            newmarks.write()
    else:
        e = cmdutil.commiteditor
        if opts.get('force_editor'):
            e = cmdutil.commitforceeditor

        def commitfunc(ui, repo, message, match, opts):
            try:
                if opts.get('secret'):
                    ui.setconfig('phases', 'new-commit', 'secret', 'commit')
                    # Propagate to subrepos
                    repo.baseui.setconfig('phases', 'new-commit', 'secret',
                                          'commit')

                return repo.commit(message, opts.get('user'), opts.get('date'),
                                   match, editor=e, extra=extra)
            finally:
                # Always restore the phase config saved above, even if the
                # commit itself raised.
                ui.setconfig('phases', 'new-commit', oldcommitphase, 'commit')
                repo.baseui.setconfig('phases', 'new-commit', oldcommitphase,
                                      'commit')


        node = cmdutil.commit(ui, repo, commitfunc, pats, opts)

        if not node:
            # No commit was created: report whether matched files were
            # simply missing (status tuple index 3 = deleted/missing).
            stat = repo.status(match=scmutil.match(repo[None], pats, opts))
            if stat[3]:
                ui.status(_("nothing changed (%d missing files, see "
                            "'hg status')\n") % len(stat[3]))
            else:
                ui.status(_("nothing changed\n"))
            return 1

    cmdutil.commitstatus(repo, node, branch, bheads, opts)
1470 1470
@command('config|showconfig|debugconfig',
    [('u', 'untrusted', None, _('show untrusted configuration options')),
     ('e', 'edit', None, _('edit user config')),
     ('l', 'local', None, _('edit repository config')),
     ('g', 'global', None, _('edit global config'))],
      _('[-u] [NAME]...'))
def config(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no arguments, print names and values of all config items.

    With one argument of the form section.name, print just the value
    of that config item.

    With multiple arguments, print names and values of all config
    items with matching section names.

    With --edit, start an editor on the user-level config file. With
    --global, edit the system-wide config file. With --local, edit the
    repository-level config file.

    With --debug, the source (filename and line number) is printed
    for each config item.

    See :hg:`help config` for more information about config files.

    Returns 0 on success.

    """

    # Edit mode: pick the rc file for the requested scope, create a
    # commented template if none exists yet, and hand it to the editor.
    if opts.get('edit') or opts.get('local') or opts.get('global'):
        if opts.get('local') and opts.get('global'):
            raise util.Abort(_("can't use --local and --global together"))

        if opts.get('local'):
            if not repo:
                raise util.Abort(_("can't use --local outside a repository"))
            paths = [repo.join('hgrc')]
        elif opts.get('global'):
            paths = scmutil.systemrcpath()
        else:
            paths = scmutil.userrcpath()

        # for/else: the else branch runs only when no candidate file
        # exists, in which case the first path is seeded with a template.
        for f in paths:
            if os.path.exists(f):
                break
        else:
            f = paths[0]
            fp = open(f, "w")
            fp.write(
                '# example config (see "hg help config" for more info)\n'
                '\n'
                '[ui]\n'
                '# name and email, e.g.\n'
                '# username = Jane Doe <jdoe@example.com>\n'
                'username =\n'
                '\n'
                '[extensions]\n'
                '# uncomment these lines to enable some popular extensions\n'
                '# (see "hg help extensions" for more info)\n'
                '# pager =\n'
                '# progress =\n'
                '# color =\n')
            fp.close()

        editor = ui.geteditor()
        util.system("%s \"%s\"" % (editor, f),
                    onerr=util.Abort, errprefix=_("edit failed"),
                    out=ui.fout)
        return

    # Display mode: list which files were read (debug only), then walk
    # every config item, filtering by the requested sections/items.
    for f in scmutil.rcpath():
        ui.debug('read config from: %s\n' % f)
    untrusted = bool(opts.get('untrusted'))
    if values:
        sections = [v for v in values if '.' not in v]
        items = [v for v in values if '.' in v]
        # At most one fully-qualified section.name item may be requested,
        # and not together with bare section names.
        if len(items) > 1 or items and sections:
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        value = str(value).replace('\n', '\\n')
        sectname = section + '.' + name
        if values:
            for v in values:
                if v == section:
                    # whole-section match: print "section.name=value"
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write('%s=%s\n' % (sectname, value))
                elif v == sectname:
                    # exact item match: print just the value
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write(value, '\n')
        else:
            ui.debug('%s: ' %
                     ui.configsource(section, name, untrusted))
            ui.write('%s=%s\n' % (sectname, value))
1567 1567
@command('copy|cp',
    [('A', 'after', None, _('record a copy that has already occurred')),
    ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [SOURCE]... DEST'))
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect with the next commit. To undo a copy
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    # Take the working-directory lock around the copy so the dirstate
    # is not modified concurrently; all the real work is in cmdutil.
    wdlock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        wdlock.release()
1594 1594
@command('debugancestor', [], _('[INDEX] REV1 REV2'))
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    # Three args: an explicit revlog index file plus two revisions.
    # Two args: use the current repository's changelog.
    nargs = len(args)
    if nargs == 3:
        index, rev1, rev2 = args
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif nargs == 2:
        if not repo:
            raise util.Abort(_("there is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise util.Abort(_('either two or three arguments required'))
    anc = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (rlog.rev(anc), hex(anc)))
1613 1613
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
           otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # Building only makes sense in a repository with no changesets yet.
    cl = repo.changelog
    if len(cl) > 0:
        raise util.Abort(_('repository is not empty'))

    # determine number of revs in DAG
    # (first parse pass; used only for the progress bar total and to
    # size the mergeable-file contents)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []

    # Second parse pass: actually create the commits inside one
    # transaction, under the repository lock.
    lock = tr = None
    try:
        lock = repo.lock()
        tr = repo.transaction("builddag")

        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        ui.progress(_('building'), id, unit=_('revisions'), total=total)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                # 'n' event: create one changeset with parents `ps`.
                ui.note(('node %s\n' % str(data)))
                id, ps = data

                files = []
                fctxs = {}

                p2 = None
                if mergeable_file:
                    # "mf" carries content designed to merge cleanly:
                    # three-way merge of both parents for merges,
                    # otherwise the previous revision's lines.
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # tag this revision's line so every rev changes "mf"
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(fn, mergedtext)

                if overwritten_file:
                    # "of" is fully rewritten by every revision
                    fn = "of"
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)

                if new_file:
                    # one brand-new file per revision; merges also carry
                    # over the second parent's nf* files
                    fn = "nf%i" % id
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                fctxs[fn] = p2[fn]

                def fctxfn(repo, cx, path):
                    return fctxs.get(path)

                # Map backref ids to the actual nodes created earlier;
                # ps[0] < 0 denotes the null parent.
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # 'l' event: record a local tag for node `id`
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # 'a' event: switch the named branch for subsequent nodes
                ui.note(('branch %s\n' % data))
                atbranch = data
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()

        if tags:
            repo.opener.write("localtags", "".join(tags))
    finally:
        ui.progress(_('building'), None)
        release(tr, lock)
1764 1764
@command('debugbundle', [('a', 'all', None, _('show all details'))], _('FILE'))
def debugbundle(ui, bundlepath, all=None, **opts):
    """lists the contents of a bundle"""
    f = hg.openpath(ui, bundlepath)
    try:
        gen = changegroup.readbundle(f, bundlepath)
        if all:
            # Verbose mode: dump every delta chunk of every part
            # (changelog, manifest, then each filelog).
            ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n"))

            # Drain and print the delta chunks of one bundle section.
            # `chain` threads the previous node through deltachunk, as
            # the changegroup format deltas against the prior entry.
            def showchunks(named):
                ui.write("\n%s\n" % named)
                chain = None
                while True:
                    chunkdata = gen.deltachunk(chain)
                    if not chunkdata:
                        break
                    node = chunkdata['node']
                    p1 = chunkdata['p1']
                    p2 = chunkdata['p2']
                    cs = chunkdata['cs']
                    deltabase = chunkdata['deltabase']
                    delta = chunkdata['delta']
                    ui.write("%s %s %s %s %s %s\n" %
                             (hex(node), hex(p1), hex(p2),
                              hex(cs), hex(deltabase), len(delta)))
                    chain = node

            # Sections must be consumed in bundle order: changelog,
            # manifest, then filelogs until an empty header.
            chunkdata = gen.changelogheader()
            showchunks("changelog")
            chunkdata = gen.manifestheader()
            showchunks("manifest")
            while True:
                chunkdata = gen.filelogheader()
                if not chunkdata:
                    break
                fname = chunkdata['filename']
                showchunks(fname)
        else:
            # Default mode: only list the changelog nodes.
            chunkdata = gen.changelogheader()
            chain = None
            while True:
                chunkdata = gen.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                ui.write("%s\n" % hex(node))
                chain = node
    finally:
        f.close()
1814 1814
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    # Compare every dirstate entry against the manifests of both
    # working-directory parents, then check manifest1 back against the
    # dirstate; abort if any inconsistency was reported.
    p1, p2 = repo.dirstate.parents()
    mf1 = repo[p1].manifest()
    mf2 = repo[p2].manifest()
    errors = 0
    for fname in repo.dirstate:
        state = repo.dirstate[fname]
        if state in "nr" and fname not in mf1:
            ui.warn(_("%s in state %s, but not in manifest1\n") %
                    (fname, state))
            errors += 1
        if state in "a" and fname in mf1:
            ui.warn(_("%s in state %s, but also in manifest1\n") %
                    (fname, state))
            errors += 1
        if state in "m" and fname not in mf1 and fname not in mf2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (fname, state))
            errors += 1
    for fname in mf1:
        state = repo.dirstate[fname]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") %
                    (fname, state))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
1842 1842
@command('debugcommands', [], _('[COMMAND]'))
def debugcommands(ui, cmd='', *args):
    """list all available commands and options"""
    # One line per command: canonical name (first alias, '^' marker
    # stripped) followed by its long option names.
    for name, entry in sorted(table.iteritems()):
        name = name.split('|')[0].strip('^')
        optnames = ', '.join(opt[1] for opt in entry[1])
        ui.write('%s: %s\n' % (name, optnames))
1850 1850
@command('debugcomplete',
    [('o', 'options', None, _('show the command options'))],
    _('[-o] CMD'))
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # -o: list the flags (global ones, plus the command's own when a
        # command is named), skipping deprecated entries.
        tables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            tables.append(entry[1])
        flags = []
        for opttable in tables:
            for opt in opttable:
                if "(DEPRECATED)" in opt[3]:
                    continue
                if opt[0]:
                    flags.append('-%s' % opt[0])
                flags.append('--%s' % opt[1])
        ui.write("%s\n" % "\n".join(flags))
        return

    # Otherwise complete command names matching the given prefix; in
    # verbose mode show every alias of each match.
    possible = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        possible = [' '.join(c[0]) for c in possible.values()]
    ui.write("%s\n" % "\n".join(sorted(possible)))
1877 1877
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'))
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    # Build an `events()` generator yielding dagparser events:
    # 'n' = node (rev, parents), 'l' = label, 'a' = branch annotation.
    if file_:
        # Explicit revlog index: label only the revs listed on the
        # command line.
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(set(p for p in rlog.parentrevs(r)
                                        if p != -1)))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # Changelog of the current repo; optionally annotate branch
        # switches (-b) and label tagged revisions (-t).
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # rev -> list of tag names pointing at it
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # changelog entry field 5 holds the extras dict
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(set(p for p in cl.parentrevs(r)
                                        if p != -1)))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise util.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
1938 1938
@command('debugdata',
    [('c', 'changelog', False, _('open changelog')),
     ('m', 'manifest', False, _('open manifest'))],
    _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    # With -c/-m the positional FILE argument is actually the revision,
    # and no filelog path is needed; otherwise REV is mandatory.
    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    rlog = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(rlog.revision(rlog.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
1954 1954
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'))
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # Parse DATE (optionally with the extended format list), print the
    # internal (timestamp, offset) pair and the standard rendering.
    if opts["extended"]:
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    ui.write(("internal: %s %s\n") % parsed)
    ui.write(("standard: %s\n") % util.datestr(parsed))
    if range:
        # With RANGE, report whether the parsed timestamp falls inside it.
        matcher = util.matchdate(range)
        ui.write(("match: %s\n") % matcher(parsed[0]))
1969 1969
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ] + remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
                                      opts.get('branch'))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(localheads, remoteheads, remote=remote):
        # Run one discovery round and report the common-head set.
        if opts.get('old'):
            if localheads:
                raise util.Abort('cannot use localheads with old style '
                                 'discovery')
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                # Prune the common set down to its DAG heads.
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    # NOTE(review): 'serverlog', 'remote_head' and 'local_head' are not
    # declared in this command's option table, so opts.get() returns None
    # here unless an extension injects them -- confirm before relying on
    # the serverlog replay branch below.
    serverlogs = opts.get('serverlog')
    if serverlogs:
        # Replay discovery rounds recorded in a server log file; each
        # line is semicolon-separated with the operation code in field 1.
        for filename in serverlogs:
            logfile = open(filename, 'r')
            try:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
            finally:
                logfile.close()

    else:
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)
2039 2039
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    # Resolve the target revision (working directory when --rev omitted).
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
    if ui.verbose:
        # In verbose mode, show the parsed tree before evaluating it.
        parsetree = fileset.parse(expr)[0]
        ui.note(parsetree, "\n")

    for filename in ctx.getfileset(expr):
        ui.write("%s\n" % filename)
2052 2052
@command('debugfsinfo', [], _('[PATH]'))
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem

    Probes the filesystem at PATH (default: the current directory) and
    reports support for executable bits, symlinks, hardlinks and
    case-sensitivity.
    """
    # The probe file is created in the current directory; make sure it is
    # removed even if one of the capability checks raises (previously an
    # exception here would leave a stray '.debugfsinfo' behind).
    util.writefile('.debugfsinfo', '')
    try:
        ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
        ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
        ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
        ui.write(('case-sensitive: %s\n') % (util.checkcase('.debugfsinfo')
                                             and 'yes' or 'no'))
    finally:
        os.unlink('.debugfsinfo')
2063 2063
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'))
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise util.Abort("getbundle() not supported by target repository")
    # Convert hex node ids to binary for the wire call.
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    # Map the user-facing compression name to the on-disk bundle header.
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))
    changegroup.writebundle(bundle, bundlepath, bundletype)
2093 2093
@command('debugignore', [], '')
def debugignore(ui, repo, *values, **opts):
    """display the combined ignore pattern"""
    # The combined pattern only exists once the ignore matcher has one;
    # a missing 'includepat' attribute means no patterns were configured.
    pattern = getattr(repo.dirstate._ignore, 'includepat', None)
    if pattern is None:
        raise util.Abort(_("no ignore patterns found"))
    ui.write("%s\n" % pattern)
2103 2103
@command('debugindex',
    [('c', 'changelog', False, _('open changelog')),
     ('m', 'manifest', False, _('open manifest')),
     ('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise util.Abort(_("unknown format %d") % format)

    # Under generaldelta the base column reports the delta parent;
    # otherwise it reports the chain base.
    generaldelta = r.version & revlog.REVLOGGENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = ' base'

    if format == 0:
        ui.write(" rev offset length " + basehdr + " linkrev"
                 " nodeid p1 p2\n")
    elif format == 1:
        ui.write(" rev flag offset length"
                 " size " + basehdr + " link p1 p2"
                 " nodeid\n")

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Fall back to null parents if the lookup fails.
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    short(node), short(pp[0]), short(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], short(node)))
2149 2149
@command('debugindexdot', [], _('FILE'))
def debugindexdot(ui, repo, file_):
    """dump an index DAG as a graphviz dot file"""
    # Prefer the repository's filelog; fall back to opening the revlog
    # straight from the filesystem when there is no usable filelog.
    rlog = None
    if repo:
        flog = repo.file(file_)
        if len(flog):
            rlog = flog
    if not rlog:
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
    ui.write(("digraph G {\n"))
    for rev in rlog:
        parents = rlog.parents(rlog.node(rev))
        ui.write("\t%d -> %d\n" % (rlog.rev(parents[0]), rev))
        if parents[1] != nullid:
            # Merge revision: emit the second parent edge too.
            ui.write("\t%d -> %d\n" % (rlog.rev(parents[1]), rev))
    ui.write("}\n")
2168 2168
@command('debuginstall', [], '')
def debuginstall(ui):
    '''test Mercurial installation

    Runs a series of sanity checks (encoding, Python, compiled modules,
    templates, editor, username) and reports any problems found.

    Returns 0 on success.
    '''

    def writetemp(contents):
        # Write contents to a fresh temp file and return its path.
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name

    # Count of detected problems; doubles as the exit code.
    problems = 0

    # encoding
    ui.status(_("checking encoding (%s)...\n") % encoding.encoding)
    try:
        encoding.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1

    # Python
    ui.status(_("checking Python executable (%s)\n") % sys.executable)
    ui.status(_("checking Python version (%s)\n")
              % ("%s.%s.%s" % sys.version_info[:3]))
    ui.status(_("checking Python lib (%s)...\n")
              % os.path.dirname(os.__file__))

    # compiled modules
    ui.status(_("checking installed modules (%s)...\n")
              % os.path.dirname(__file__))
    try:
        import bdiff, mpatch, base85, osutil
        dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1

    # templates
    import templater
    p = templater.templatepath()
    ui.status(_("checking templates (%s)...\n") % ' '.join(p))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            try:
                templater.templater(m)
            except Exception, inst:
                ui.write(" %s\n" % inst)
                p = None
        else:
            ui.write(_(" template 'default' not found\n"))
            p = None
    else:
        ui.write(_(" no template directories found\n"))
    if not p:
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1

    # editor
    ui.status(_("checking commit editor...\n"))
    editor = ui.geteditor()
    # The editor may be a command line with arguments, so also try its
    # first word when the full string is not found on PATH.
    cmdpath = util.findexe(editor) or util.findexe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your configuration"
                       " file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your configuration"
                       " file)\n"))
        problems += 1

    # check username
    ui.status(_("checking username...\n"))
    try:
        ui.username()
    except util.Abort, e:
        ui.write(" %s\n" % e)
        ui.write(_(" (specify a username in your configuration file)\n"))
        problems += 1

    if not problems:
        ui.status(_("no problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)

    return problems
2266 2266
@command('debugknown', [], _('REPO ID...'))
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise util.Abort("known() not supported by target repository")
    # Query all ids in one wire round-trip, then print one digit each.
    flags = peer.known([bin(s) for s in ids])
    ui.write("%s\n" % "".join(f and "1" or "0" for f in flags))
2279 2279
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''complete "labels" - tags, open branch names, bookmark names'''

    # Gather every completable name: tags, bookmarks, and open branches.
    labels = set(t[0] for t in repo.tagslist())
    labels.update(repo._bookmarks.keys())
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            labels.add(tag)
    # With no arguments, complete against the empty prefix (everything).
    prefixes = args or ['']
    completions = set()
    for prefix in prefixes:
        completions.update(l for l in labels if l.startswith(prefix))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')
2296 2296
@command('debugobsolete',
    [('', 'flags', 0, _('markers flag')),
    ] + commitopts2,
     _('[OBSOLETED [REPLACEMENT] [REPL... ]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""
    def parsenodeid(s):
        # Parse a full hex node id into binary; abort on anything else.
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise util.Abort('changeset references must be full hexadecimal '
                             'node identifiers')

    if precursor is not None:
        # Record a new marker inside a lock + transaction so a failure
        # leaves the obsstore untouched.
        metadata = {}
        if 'date' in opts:
            metadata['date'] = opts['date']
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                repo.obsstore.create(tr, parsenodeid(precursor), succs,
                                     opts['flags'], metadata)
                tr.close()
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # No precursor given: list the existing markers.
        for m in obsolete.allmarkers(repo):
            cmdutil.showmarker(ui, m)
2338 2338
@command('debugpathcomplete',
    [('f', 'full', None, _('complete an entire path')),
    ('n', 'normal', None, _('show only normal files')),
    ('a', 'added', None, _('show only added files')),
    ('r', 'removed', None, _('show only removed files'))],
    _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Return (files, dirs) completions for one spec; 'acceptable' is a
        # string of dirstate status codes the match must have.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(os.getcwd(), path))
        rootdir = repo.root + os.sep
        # Refuse specs that escape the repository root.
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # Dirstate paths always use '/'; convert platform separators.
        fixpaths = os.sep != '/'
        if fixpaths:
            spec = spec.replace(os.sep, '/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', os.sep)
                if fullpaths:
                    addfile(f)
                    continue
                # Otherwise truncate at the next path segment boundary.
                s = f.find(os.sep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Build the set of acceptable dirstate states from the options;
    # default to all of normal/modified/added/removed.
    acceptable = ''
    if opts['normal']:
        acceptable += 'nm'
    if opts['added']:
        acceptable += 'a'
    if opts['removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
2403 2403
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'))
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # Listing mode: dump every key/value pair in the namespace.
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (k.encode('string-escape'),
                                   v.encode('string-escape')))
        return
    # Update mode: compare-and-set a single key.
    key, old, new = keyinfo
    r = target.pushkey(namespace, key, old, new)
    ui.status(str(r) + '\n')
    return not r
2424 2424
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """compare two revisions using their pvec representations

    Prints both vectors, their depths, delta, hamming distance and the
    detected relation: '=' equal, '>'/'<' ancestor order, '|' unrelated.
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    else:
        # Defensive fallback: the comparisons above should be exhaustive,
        # but previously no branch matching left 'rel' unbound and the
        # final ui.write crashed with a NameError.
        rel = "?"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
2445 2445
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV'))],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    # Take the working-directory lock so no other process mutates the
    # dirstate while we rewrite it.
    wlock = repo.wlock()
    try:
        repo.dirstate.rebuild(ctx.node(), ctx.manifest())
    finally:
        wlock.release()
2467 2467
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, (file1,) + pats, opts)
    for abspath in ctx.walk(m):
        fctx = ctx[abspath]
        # renamed() yields (source path, source filenode) or None.
        renamed = fctx.filelog().renamed(fctx.filenode())
        rel = m.rel(abspath)
        if renamed:
            ui.write(_("%s renamed from %s:%s\n") % (rel, renamed[0],
                                                     hex(renamed[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)
2484 2484
@command('debugrevlog',
    [('c', 'changelog', False, _('open changelog')),
     ('m', 'manifest', False, _('open manifest')),
     ('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'))
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # --dump: emit one raw index row per revision and return early.
        numrevs = len(r)
        ui.write("# rev p1rev p2rev start end deltastart base p1 p2"
                 " rawsize totalsize compression heads\n")
        ts = 0
        heads = set()
        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Track the current set of DAG heads incrementally.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            ui.write("%d %d %d %d %d %d %d %d %d %d %d %d %d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, ts / r.end(rev), len(heads)))
        return 0

    # Decode the revlog version word: low 16 bits are the format number,
    # the rest are feature flags.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.REVLOGNGINLINEDATA:
        flags.append('inline')
    if v & revlog.REVLOGGENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    nummerges = 0
    numfull = 0
    numprev = 0
    nump1 = 0
    nump2 = 0
    numother = 0
    nump1prev = 0
    nump2prev = 0
    chainlengths = []

    # Each size accumulator is [min, max, total]; totals start as longs
    # to avoid overflow on very large revlogs.
    datasize = [None, 0, 0L]
    fullsize = [None, 0, 0L]
    deltasize = [None, 0, 0L]

    def addsize(size, l):
        # Fold one sample into a [min, max, total] accumulator.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # Full snapshot revision: chain starts over.
            chainlengths.append(0)
            numfull += 1
            addsize(size, fullsize)
        else:
            # Delta revision: classify it by what it deltas against.
            chainlengths.append(chainlengths[delta] + 1)
            addsize(size, deltasize)
            if delta == rev - 1:
                numprev += 1
                if delta == p1:
                    nump1prev += 1
                elif delta == p2:
                    nump2prev += 1
            elif delta == p1:
                nump1 += 1
            elif delta == p2:
                nump2 += 1
            elif delta != nullrev:
                numother += 1

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, deltasize):
        if size[0] is None:
            size[0] = 0

    # Derive averages in place (integer division under Python 2).
    numdeltas = numrevs - numfull
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    deltatotal = deltasize[2]
    if numrevs - numfull > 0:
        deltasize[2] /= numrevs - numfull
    totalsize = fulltotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    compratio = totalrawsize / totalsize

    # Format-string builders sized to the widest value they must print.
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # (value, percentage) pair for the '%d (%5.2f%%)' formats.
        return (value, 100 * float(value) / total)

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
2657 2657
@command('debugrevspec',
    [('', 'optimize', None, _('print parsed tree after optimizing'))],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use --verbose to print the parsed tree before and after aliases
    expansion.
    """
    if ui.verbose:
        # Show the raw parse tree, then the tree after alias expansion
        # (only when expansion actually changed something).
        tree = revset.parse(expr)[0]
        ui.note(revset.prettyformat(tree), "\n")
        newtree = revset.findaliases(ui, tree)
        if newtree != tree:
            ui.note(revset.prettyformat(newtree), "\n")
        if opts["optimize"]:
            weight, optimizedtree = revset.optimize(newtree, True)
            ui.note("* optimized:\n", revset.prettyformat(optimizedtree), "\n")
    # Evaluate the expression over the whole repository and print each rev.
    func = revset.match(ui, expr)
    for c in func(repo, revset.spanset(repo)):
        ui.write("%s\n" % c)
2679 2679
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.

    Returns 0 on success.
    """

    # Second parent defaults to the null revision when omitted.
    r1 = scmutil.revsingle(repo, rev1).node()
    r2 = scmutil.revsingle(repo, rev2, 'null').node()

    # Hold the working-directory lock while rewriting the parents.
    wlock = repo.wlock()
    try:
        repo.setparents(r1, r2)
    finally:
        wlock.release()
2698 2698
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, nodates=None, datesort=None):
    """show the contents of the current dirstate"""
    timestr = ""
    showdate = not nodates
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # Dirstate entries are (state, mode, size, mtime) tuples.
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if showdate:
            if ent[3] == -1:
                # mtime of -1 means "unset"; pad the marker to the width
                # of a real timestamp so columns stay aligned.
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
                                               time.localtime(0)))
                timestr = 'unset'
                timestr = (timestr[:locale_len] +
                           ' ' * (locale_len - len(timestr)))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                        time.localtime(ent[3]))
        # Symlinks carry the S_IFLNK bit (octal 020000) in the mode field.
        if ent[1] & 020000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    # Finally list recorded copy sources.
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
2730 2730
@command('debugsub',
    [('r', 'rev', '',
     _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    """dump the subrepository state recorded at a revision"""
    ctx = scmutil.revsingle(repo, rev, None)
    # substate maps subrepo path -> (source, revision, ...) tuples.
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
2741 2741
@command('debugsuccessorssets',
    [],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    # In debug mode print full 40-char hashes instead of short forms.
    ctx2str = str
    node2str = short
    if ui.debug():
        def ctx2str(ctx):
            return ctx.hex()
        node2str = hex
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n'% ctx2str(ctx))
        # One indented line per successors set; nodes separated by spaces.
        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
            if succsset:
                ui.write('    ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(' ')
                    ui.write(node2str(node))
            ui.write('\n')
2795 2795
@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'))
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    m = scmutil.match(repo[None], pats, opts)
    items = list(repo.walk(m))
    if not items:
        return
    # Honor ui.slash by normalizing path separators on display.
    if ui.configbool('ui', 'slash') and os.sep != '/':
        fixpath = util.normpath
    else:
        fixpath = lambda fn: fn
    # Column widths sized to the longest absolute and relative paths.
    fmt = 'f %%-%ds %%-%ds %%s' % (
        max(len(p) for p in items),
        max(len(m.rel(p)) for p in items))
    for p in items:
        line = fmt % (p, fixpath(m.rel(p)), m.exact(p) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
2812 2812
@command('debugwireargs',
    [('', 'three', '', 'three'),
    ('', 'four', '', 'four'),
    ('', 'five', '', 'five'),
    ] + remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'))
def debugwireargs(ui, repopath, *vals, **opts):
    """exercise wire-protocol argument passing against a peer"""
    peer = hg.peer(ui, opts, repopath)
    # Strip the generic remote options; only command-specific ones remain.
    for opt in remoteopts:
        del opts[opt[1]]
    # Forward only options that were actually supplied.
    args = dict((k, v) for k, v in opts.iteritems() if v)
    # run twice to check that we don't mess up the stream for the next command
    res1 = peer.debugwireargs(*vals, **args)
    res2 = peer.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
2833 2833
@command('^diff',
    [('r', 'rev', [], _('revision'), _('REV')),
     ('c', 'change', '', _('change made by revision'), _('REV'))
    ] + diffopts + diffopts2 + walkopts + subrepoopts,
    _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'))
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    .. note::

       diff may generate unexpected results for merges, as it will
       default to comparing against the working directory's first
       parent changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Alternatively you can specify -c/--change with a revision to see
    the changes in that changeset relative to its first parent.

    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read :hg:`help diffs`.

    .. container:: verbose

      Examples:

      - compare a file in the current working directory to its parent::

          hg diff foo.c

      - compare two historical versions of a directory, with rename info::

          hg diff --git -r 1.0:1.2 lib/

      - get change stats relative to the last change on some date::

          hg diff --stat -r "date('may 2')"

      - diff all newly-added files that contain a keyword::

          hg diff "set:added() and grep(GNU)"

      - compare a revision and its parents::

          hg diff -c 9353         # compare against first parent
          hg diff -r 9353^:9353   # same using revset syntax
          hg diff -r 9353^2:9353  # compare against the second parent

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    # --rev and --change are mutually exclusive ways to pick endpoints
    if revs and change:
        raise util.Abort(
            _('cannot specify --rev and --change at the same time'))
    if change:
        # -c REV: diff REV against its first parent
        node2 = scmutil.revsingle(repo, change, None).node()
        node1 = repo[node2].p1().node()
    else:
        node1, node2 = scmutil.revpair(repo, revs)

    if opts.get('reverse'):
        node1, node2 = node2, node1

    diffopts = patch.diffopts(ui, opts)
    m = scmutil.match(repo[node2], pats, opts)
    cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m,
                           stat=opts.get('stat'),
                           listsubrepos=opts.get('subrepos'))
2918 2918
@command('^export',
    [('o', 'output', '',
      _('print output to file with formatted name'), _('FORMAT')),
     ('', 'switch-parent', None, _('diff against the second parent')),
     ('r', 'rev', [], _('revisions to export'), _('REV')),
     ] + diffopts,
    _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'))
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.
    If no revision is given, the parent of the working directory is used.

    The information shown in the changeset header is: author, date,
    branch name (if non-default), changeset hash, parent(s) and commit
    comment.

    .. note::

       export may generate unexpected diff output for merge
       changesets, as it will compare the merge changeset against its
       first parent only.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    :``%%``: literal "%" character
    :``%H``: changeset hash (40 hexadecimal digits)
    :``%N``: number of patches being generated
    :``%R``: changeset revision number
    :``%b``: basename of the exporting repository
    :``%h``: short-form changeset hash (12 hexadecimal digits)
    :``%m``: first line of the commit message (only alphanumeric characters)
    :``%n``: zero-padded sequence number, starting at 1
    :``%r``: zero-padded changeset revision number

    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. With -a, export will generate a
    diff anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. See :hg:`help diffs` for more information.

    With the --switch-parent option, the diff will be against the
    second parent. It can be useful to review a merge.

    .. container:: verbose

      Examples:

      - use export and import to transplant a bugfix to the current
        branch::

          hg export -r 9353 | hg import -

      - export all the changesets between two revisions to a file with
        rename information::

          hg export --git -r 123:150 > changes.txt

      - split outgoing changes into a series of patches with
        descriptive names::

          hg export -r "outgoing()" -o "%n-%m.patch"

    Returns 0 on success.
    """
    # positional arguments and -r/--rev are interchangeable
    changesets += tuple(opts.get('rev', []))
    if not changesets:
        # default to the working directory parent
        changesets = ['.']
    revs = scmutil.revrange(repo, changesets)
    if not revs:
        raise util.Abort(_("export requires at least one changeset"))
    if len(revs) > 1:
        ui.note(_('exporting patches:\n'))
    else:
        ui.note(_('exporting patch:\n'))
    cmdutil.export(repo, revs,
                   template=opts.get('output'),
                   switch_parent=opts.get('switch_parent'),
                   opts=patch.diffopts(ui, opts))
2999 2999
@command('^forget', walkopts, _('[OPTION]... FILE...'))
def forget(ui, repo, *pats, **opts):
    """forget the specified files on the next commit

    Mark the specified files so they will no longer be tracked
    after the next commit.

    This only removes files from the current branch, not from the
    entire project history, and it does not delete them from the
    working directory.

    To undo a forget before the next commit, see :hg:`add`.

    .. container:: verbose

      Examples:

      - forget newly-added binary files::

          hg forget "set:added() and binary()"

      - forget files that would be excluded by .hgignore::

          hg forget "set:hgignore()"

    Returns 0 on success.
    """

    if not pats:
        raise util.Abort(_('no files specified'))

    matcher = scmutil.match(repo[None], pats, opts)
    # cmdutil.forget returns (rejected, forgotten); a non-empty rejected
    # list means at least one file could not be forgotten
    rejected = cmdutil.forget(ui, repo, matcher, prefix="",
                              explicitonly=False)[0]
    if rejected:
        return 1
    return 0
3034 3034
3035 3035 @command(
3036 3036 'graft',
3037 3037 [('r', 'rev', [], _('revisions to graft'), _('REV')),
3038 3038 ('c', 'continue', False, _('resume interrupted graft')),
3039 3039 ('e', 'edit', False, _('invoke editor on commit messages')),
3040 3040 ('', 'log', None, _('append graft info to log message')),
3041 3041 ('D', 'currentdate', False,
3042 3042 _('record the current date as commit date')),
3043 3043 ('U', 'currentuser', False,
3044 3044 _('record the current user as committer'), _('DATE'))]
3045 3045 + commitopts2 + mergetoolopts + dryrunopts,
3046 3046 _('[OPTION]... [-r] REV...'))
3047 3047 def graft(ui, repo, *revs, **opts):
3048 3048 '''copy changes from other branches onto the current branch
3049 3049
3050 3050 This command uses Mercurial's merge logic to copy individual
3051 3051 changes from other branches without merging branches in the
3052 3052 history graph. This is sometimes known as 'backporting' or
3053 3053 'cherry-picking'. By default, graft will copy user, date, and
3054 3054 description from the source changesets.
3055 3055
3056 3056 Changesets that are ancestors of the current revision, that have
3057 3057 already been grafted, or that are merges will be skipped.
3058 3058
3059 3059 If --log is specified, log messages will have a comment appended
3060 3060 of the form::
3061 3061
3062 3062 (grafted from CHANGESETHASH)
3063 3063
3064 3064 If a graft merge results in conflicts, the graft process is
3065 3065 interrupted so that the current merge can be manually resolved.
3066 3066 Once all conflicts are addressed, the graft process can be
3067 3067 continued with the -c/--continue option.
3068 3068
3069 3069 .. note::
3070 3070
3071 3071 The -c/--continue option does not reapply earlier options.
3072 3072
3073 3073 .. container:: verbose
3074 3074
3075 3075 Examples:
3076 3076
3077 3077 - copy a single change to the stable branch and edit its description::
3078 3078
3079 3079 hg update stable
3080 3080 hg graft --edit 9393
3081 3081
3082 3082 - graft a range of changesets with one exception, updating dates::
3083 3083
3084 3084 hg graft -D "2085::2093 and not 2091"
3085 3085
3086 3086 - continue a graft after resolving conflicts::
3087 3087
3088 3088 hg graft -c
3089 3089
3090 3090 - show the source of a grafted changeset::
3091 3091
3092 3092 hg log --debug -r .
3093 3093
3094 3094 Returns 0 on successful completion.
3095 3095 '''
3096 3096
3097 3097 revs = list(revs)
3098 3098 revs.extend(opts['rev'])
3099 3099
3100 3100 if not opts.get('user') and opts.get('currentuser'):
3101 3101 opts['user'] = ui.username()
3102 3102 if not opts.get('date') and opts.get('currentdate'):
3103 3103 opts['date'] = "%d %d" % util.makedate()
3104 3104
3105 3105 editor = None
3106 3106 if opts.get('edit'):
3107 3107 editor = cmdutil.commitforceeditor
3108 3108
3109 3109 cont = False
3110 3110 if opts['continue']:
3111 3111 cont = True
3112 3112 if revs:
3113 3113 raise util.Abort(_("can't specify --continue and revisions"))
3114 3114 # read in unfinished revisions
3115 3115 try:
3116 3116 nodes = repo.opener.read('graftstate').splitlines()
3117 3117 revs = [repo[node].rev() for node in nodes]
3118 3118 except IOError, inst:
3119 3119 if inst.errno != errno.ENOENT:
3120 3120 raise
3121 3121 raise util.Abort(_("no graft state found, can't continue"))
3122 3122 else:
3123 3123 cmdutil.checkunfinished(repo)
3124 3124 cmdutil.bailifchanged(repo)
3125 3125 if not revs:
3126 3126 raise util.Abort(_('no revisions specified'))
3127 3127 revs = scmutil.revrange(repo, revs)
3128 3128
3129 3129 # check for merges
3130 3130 for rev in repo.revs('%ld and merge()', revs):
3131 3131 ui.warn(_('skipping ungraftable merge revision %s\n') % rev)
3132 3132 revs.remove(rev)
3133 3133 if not revs:
3134 3134 return -1
3135 3135
3136 3136 # check for ancestors of dest branch
3137 3137 crev = repo['.'].rev()
3138 3138 ancestors = repo.changelog.ancestors([crev], inclusive=True)
3139 3139 # don't mutate while iterating, create a copy
3140 3140 for rev in list(revs):
3141 3141 if rev in ancestors:
3142 3142 ui.warn(_('skipping ancestor revision %s\n') % rev)
3143 3143 revs.remove(rev)
3144 3144 if not revs:
3145 3145 return -1
3146 3146
3147 3147 # analyze revs for earlier grafts
3148 3148 ids = {}
3149 3149 for ctx in repo.set("%ld", revs):
3150 3150 ids[ctx.hex()] = ctx.rev()
3151 3151 n = ctx.extra().get('source')
3152 3152 if n:
3153 3153 ids[n] = ctx.rev()
3154 3154
3155 3155 # check ancestors for earlier grafts
3156 3156 ui.debug('scanning for duplicate grafts\n')
3157 3157
3158 3158 for rev in repo.changelog.findmissingrevs(revs, [crev]):
3159 3159 ctx = repo[rev]
3160 3160 n = ctx.extra().get('source')
3161 3161 if n in ids:
3162 3162 r = repo[n].rev()
3163 3163 if r in revs:
3164 3164 ui.warn(_('skipping revision %s (already grafted to %s)\n')
3165 3165 % (r, rev))
3166 3166 revs.remove(r)
3167 3167 elif ids[n] in revs:
3168 3168 ui.warn(_('skipping already grafted revision %s '
3169 3169 '(%s also has origin %d)\n') % (ids[n], rev, r))
3170 3170 revs.remove(ids[n])
3171 3171 elif ctx.hex() in ids:
3172 3172 r = ids[ctx.hex()]
3173 3173 ui.warn(_('skipping already grafted revision %s '
3174 3174 '(was grafted from %d)\n') % (r, rev))
3175 3175 revs.remove(r)
3176 3176 if not revs:
3177 3177 return -1
3178 3178
3179 3179 wlock = repo.wlock()
3180 3180 try:
3181 3181 current = repo['.']
3182 3182 for pos, ctx in enumerate(repo.set("%ld", revs)):
3183 3183
3184 3184 ui.status(_('grafting revision %s\n') % ctx.rev())
3185 3185 if opts.get('dry_run'):
3186 3186 continue
3187 3187
3188 3188 source = ctx.extra().get('source')
3189 3189 if not source:
3190 3190 source = ctx.hex()
3191 3191 extra = {'source': source}
3192 3192 user = ctx.user()
3193 3193 if opts.get('user'):
3194 3194 user = opts['user']
3195 3195 date = ctx.date()
3196 3196 if opts.get('date'):
3197 3197 date = opts['date']
3198 3198 message = ctx.description()
3199 3199 if opts.get('log'):
3200 3200 message += '\n(grafted from %s)' % ctx.hex()
3201 3201
3202 3202 # we don't merge the first commit when continuing
3203 3203 if not cont:
3204 3204 # perform the graft merge with p1(rev) as 'ancestor'
3205 3205 try:
3206 3206 # ui.forcemerge is an internal variable, do not document
3207 3207 repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
3208 3208 'graft')
3209 3209 stats = mergemod.update(repo, ctx.node(), True, True, False,
3210 3210 ctx.p1().node())
3211 3211 finally:
3212 3212 repo.ui.setconfig('ui', 'forcemerge', '', 'graft')
3213 3213 # report any conflicts
3214 3214 if stats and stats[3] > 0:
3215 3215 # write out state for --continue
3216 3216 nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]]
3217 3217 repo.opener.write('graftstate', ''.join(nodelines))
3218 3218 raise util.Abort(
3219 3219 _("unresolved conflicts, can't continue"),
3220 3220 hint=_('use hg resolve and hg graft --continue'))
3221 3221 else:
3222 3222 cont = False
3223 3223
3224 3224 # drop the second merge parent
3225 3225 repo.setparents(current.node(), nullid)
3226 3226 repo.dirstate.write()
3227 3227 # fix up dirstate for copies and renames
3228 3228 cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())
3229 3229
3230 3230 # commit
3231 3231 node = repo.commit(text=message, user=user,
3232 3232 date=date, extra=extra, editor=editor)
3233 3233 if node is None:
3234 3234 ui.status(_('graft for revision %s is empty\n') % ctx.rev())
3235 3235 else:
3236 3236 current = repo[node]
3237 3237 finally:
3238 3238 wlock.release()
3239 3239
3240 3240 # remove state when we complete successfully
3241 3241 if not opts.get('dry_run'):
3242 3242 util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
3243 3243
3244 3244 return 0
3245 3245
@command('grep',
    [('0', 'print0', None, _('end fields with NUL')),
     ('', 'all', None, _('print all revisions that match')),
     ('a', 'text', None, _('treat all files as text')),
     ('f', 'follow', None,
      _('follow changeset history,'
        ' or file history across copies and renames')),
     ('i', 'ignore-case', None, _('ignore case when matching')),
     ('l', 'files-with-matches', None,
      _('print only filenames and revisions that match')),
     ('n', 'line-number', None, _('print matching line numbers')),
     ('r', 'rev', [],
      _('only search files changed within revision range'), _('REV')),
     ('u', 'user', None, _('list the author (long with -v)')),
     ('d', 'date', None, _('list the date (short with -q)')),
     ] + walkopts,
    _('[OPTION]... PATTERN [FILE]...'))
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which a
    match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.

    Returns 0 if a match is found, 1 otherwise.
    """
    # always match across line boundaries; -i adds case-insensitivity
    reflags = re.M
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = util.compilere(pattern, reflags)
    except re.error, inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return 1
    # -0/--print0 terminates every field and record with NUL
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'

    # cache filelog lookups: the same file is read for many revisions
    getfile = util.lrucachefunc(repo.file)

    def matchlines(body):
        # generate (linenum, colstart, colend, line) for every regexp
        # match in body
        begin = 0
        linenum = 0
        while begin < len(body):
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body) + 1
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]

    class linestate(object):
        # one matched line; hashing/equality compare line text so that
        # difflib can diff match states between a revision and its parent
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend

        def __hash__(self):
            return hash((self.linenum, self.line))

        def __eq__(self, other):
            return self.line == other.line

    # matches: rev -> filename -> [linestate]; copies: rev -> fn -> source
    matches = {}
    copies = {}
    def grepbody(fn, rev, body):
        # record all matching lines of fn@rev into matches
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)

    def difflinestates(a, b):
        # yield ('+'/'-', linestate) for match-state changes between the
        # parent's matches (a) and the revision's matches (b)
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])

    def display(fn, ctx, pstates, states):
        # print the matches for one file in one revision; returns True
        # if anything was printed
        rev = ctx.rev()
        datefunc = ui.quiet and util.shortdate or util.datestr
        found = False
        @util.cachefunc
        def binary():
            flog = getfile(fn)
            return util.binary(flog.read(ctx.filenode(fn)))

        if opts.get('all'):
            iter = difflinestates(pstates, states)
        else:
            iter = [('', l) for l in states]
        for change, l in iter:
            cols = [(fn, 'grep.filename'), (str(rev), 'grep.rev')]
            before, match, after = None, None, None

            if opts.get('line_number'):
                cols.append((str(l.linenum), 'grep.linenumber'))
            if opts.get('all'):
                cols.append((change, 'grep.change'))
            if opts.get('user'):
                cols.append((ui.shortuser(ctx.user()), 'grep.user'))
            if opts.get('date'):
                cols.append((datefunc(ctx.date()), 'grep.date'))
            if not opts.get('files_with_matches'):
                before = l.line[:l.colstart]
                match = l.line[l.colstart:l.colend]
                after = l.line[l.colend:]
            for col, label in cols[:-1]:
                ui.write(col, label=label)
                ui.write(sep, label='grep.sep')
            ui.write(cols[-1][0], label=cols[-1][1])
            if before is not None:
                ui.write(sep, label='grep.sep')
                if not opts.get('text') and binary():
                    ui.write(" Binary file matches")
                else:
                    ui.write(before)
                    ui.write(match, label='grep.match')
                    ui.write(after)
            ui.write(eol)
            found = True
            if before is None:
                # -l/--files-with-matches: one record per file is enough
                break
        return found

    # skip: files already reported (and their copy sources) when not --all
    skip = {}
    revfiles = {}
    matchfn = scmutil.match(repo[None], pats, opts)
    found = False
    follow = opts.get('follow')

    def prep(ctx, fns):
        # walkchangerevs callback: collect matches for the touched files
        # in ctx and in its first parent (for match-state diffing)
        rev = ctx.rev()
        pctx = ctx.p1()
        parent = pctx.rev()
        matches.setdefault(rev, {})
        matches.setdefault(parent, {})
        files = revfiles.setdefault(rev, [])
        for fn in fns:
            flog = getfile(fn)
            try:
                fnode = ctx.filenode(fn)
            except error.LookupError:
                continue

            copied = flog.renamed(fnode)
            copy = follow and copied and copied[0]
            if copy:
                copies.setdefault(rev, {})[fn] = copy
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            files.append(fn)

            if fn not in matches[rev]:
                grepbody(fn, rev, flog.read(fnode))

            # also grep the parent version (following the rename if any)
            pfn = copy or fn
            if pfn not in matches[parent]:
                try:
                    fnode = pctx.filenode(pfn)
                    grepbody(pfn, parent, flog.read(fnode))
                except error.LookupError:
                    pass

    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        rev = ctx.rev()
        parent = ctx.p1().rev()
        for fn in sorted(revfiles.get(rev, [])):
            states = matches[rev][fn]
            copy = copies.get(rev, {}).get(fn)
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            pstates = matches.get(parent, {}).get(copy or fn, [])
            if pstates or states:
                r = display(fn, ctx, pstates, states)
                found = found or r
                if r and not opts.get('all'):
                    # without --all stop after the first matching revision
                    skip[fn] = True
                    if copy:
                        skip[copy] = True
        # free per-revision state as soon as the revision is displayed
        del matches[rev]
        del revfiles[rev]

    return not found
3456 3456
@command('heads',
    [('r', 'rev', '',
      _('show only heads which are descendants of STARTREV'), _('STARTREV')),
     ('t', 'topo', False, _('show topological heads only')),
     ('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
     ('c', 'closed', False, _('show normal and closed branch heads')),
     ] + templateopts,
    _('[-ct] [-r STARTREV] [REV]...'))
def heads(ui, repo, *branchrevs, **opts):
    """show branch heads

    With no arguments, show all open branch heads in the repository.
    Branch heads are changesets that have no descendants on the
    same branch. They are where development generally takes place and
    are the usual targets for update and merge operations.

    If one or more REVs are given, only open branch heads on the
    branches associated with the specified changesets are shown. This
    means that you can use :hg:`heads .` to see the heads on the
    currently checked-out branch.

    If -c/--closed is specified, also show branch heads marked closed
    (see :hg:`commit --close-branch`).

    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.

    If -t/--topo is specified, named branch mechanics will be ignored and only
    topological heads (changesets with no children) will be shown.

    Returns 0 if matching heads are found, 1 if not.
    """

    start = None
    if 'rev' in opts:
        start = scmutil.revsingle(repo, opts['rev'], None).node()

    if opts.get('topo'):
        # topological heads: changesets with no children at all
        heads = [repo[n] for n in repo.heads(start)]
    else:
        # named-branch heads, gathered branch by branch
        nodes = []
        for branch in repo.branchmap():
            nodes.extend(repo.branchheads(branch, start, opts.get('closed')))
        heads = [repo[n] for n in nodes]

    if branchrevs:
        # restrict to the branches of the given revisions
        branches = set(repo[br].branch() for br in branchrevs)
        heads = [h for h in heads if h.branch() in branches]

    if opts.get('active') and branchrevs:
        dagheads = repo.heads(start)
        heads = [h for h in heads if h.node() in dagheads]

    if branchrevs:
        # warn about requested branches that contributed no heads
        haveheads = set(h.branch() for h in heads)
        if branches - haveheads:
            headless = ', '.join(b for b in branches - haveheads)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)

    if not heads:
        return 1

    displayer = cmdutil.show_changeset(ui, repo, opts)
    # newest first
    for ctx in sorted(heads, key=lambda c: -c.rev()):
        displayer.show(ctx)
    displayer.close()
3527 3527
@command('help',
    [('e', 'extension', None, _('show only help for extensions')),
     ('c', 'command', None, _('show only help for commands')),
     ('k', 'keyword', '', _('show topics matching keyword')),
     ],
    _('[-ec] [TOPIC]'))
def help_(ui, name=None, **opts):
    """show help for a given topic or a help overview

    With no arguments, print a list of commands with short help messages.

    Given a topic, extension, or command name, print help for that
    topic.

    Returns 0 if successful.
    """

    textwidth = min(ui.termwidth(), 80) - 2

    keep = []
    if ui.verbose:
        keep.append('verbose')
    text = help.help_(ui, name, **opts)

    # first pass: discover whether any verbose-only sections were pruned
    formatted, pruned = minirst.format(text, textwidth, keep=keep)
    if 'verbose' in pruned:
        keep.append('omitted')
    else:
        keep.append('notomitted')
    # second pass: render again with the matching omission note enabled
    formatted, pruned = minirst.format(text, textwidth, keep=keep)
    ui.write(formatted)
3557 3557
3558 3558
@command('identify|id',
    [('r', 'rev', '',
      _('identify the specified revision'), _('REV')),
     ('n', 'num', None, _('show local revision number')),
     ('i', 'id', None, _('show global revision id')),
     ('b', 'branch', None, _('show branch')),
     ('t', 'tags', None, _('show tags')),
     ('B', 'bookmarks', None, _('show bookmarks')),
     ] + remoteopts,
    _('[-nibtB] [-r REV] [SOURCE]'))
def identify(ui, repo, source=None, rev=None,
             num=None, id=None, branch=None, tags=None, bookmarks=None, **opts):
    """identify the working copy or specified revision

    Print a summary identifying the repository state at REV using one or
    two parent hash identifiers, followed by a "+" if the working
    directory has uncommitted changes, the branch name (if not default),
    a list of tags, and a list of bookmarks.

    When REV is not given, print a summary of the current state of the
    repository.

    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.

    .. container:: verbose

      Examples:

      - generate a build identifier for the working directory::

          hg id --id > build-id.dat

      - find the revision corresponding to a tag::

          hg id -n -r 1.3

      - check the most recent revision of a remote repository::

          hg id -r tip http://selenic.com/hg/

    Returns 0 if successful.
    """

    if not repo and not source:
        raise util.Abort(_("there is no Mercurial repository here "
                           "(.hg not found)"))

    hexfunc = ui.debugflag and hex or short
    # with no display flags, show the default summary (id + decorations)
    default = not (num or id or branch or tags or bookmarks)
    output = []
    revs = []

    if source:
        source, branches = hg.parseurl(ui.expandpath(source))
        peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
        # peer.local() is None for a truly remote repository
        repo = peer.local()
        revs, checkout = hg.addbranchrevs(repo, peer, branches, None)

    if not repo:
        # remote repository: only the global id and bookmarks can be queried
        if num or branch or tags:
            raise util.Abort(
                _("can't query remote revision number, branch, or tags"))
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"

        remoterev = peer.lookup(rev)
        if default or id:
            output = [hexfunc(remoterev)]

        def getbms():
            # bookmarks on the remote that point at remoterev, sorted
            bms = []

            if 'bookmarks' in peer.listkeys('namespaces'):
                hexremoterev = hex(remoterev)
                bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems()
                       if bmr == hexremoterev]

            return sorted(bms)

        if bookmarks:
            output.extend(getbms())
        elif default and not ui.quiet:
            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(getbms())
            if bm:
                output.append(bm)
    else:
        if not rev:
            # working directory: identify via its parent(s), '+' if dirty
            ctx = repo[None]
            parents = ctx.parents()
            changed = ""
            if default or id or num:
                if (util.any(repo.status())
                    or util.any(ctx.sub(s).dirty() for s in ctx.substate)):
                    changed = '+'
            if default or id:
                output = ["%s%s" %
                  ('+'.join([hexfunc(p.node()) for p in parents]), changed)]
            if num:
                output.append("%s%s" %
                  ('+'.join([str(p.rev()) for p in parents]), changed))
        else:
            ctx = scmutil.revsingle(repo, rev)
            if default or id:
                output = [hexfunc(ctx.node())]
            if num:
                output.append(str(ctx.rev()))

        if default and not ui.quiet:
            # decorate the default output with branch, tags and bookmarks
            b = ctx.branch()
            if b != 'default':
                output.append("(%s)" % b)

            # multiple tags for a single parent separated by '/'
            t = '/'.join(ctx.tags())
            if t:
                output.append(t)

            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(ctx.bookmarks())
            if bm:
                output.append(bm)
        else:
            if branch:
                output.append(ctx.branch())

            if tags:
                output.extend(ctx.tags())

            if bookmarks:
                output.extend(ctx.bookmarks())

    ui.write("%s\n" % ' '.join(output))
3695 3695
@command('import|patch',
    [('p', 'strip', 1,
      _('directory strip option for patch. This has the same '
        'meaning as the corresponding patch option'), _('NUM')),
    ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')),
    ('e', 'edit', False, _('invoke editor on commit messages')),
    ('f', 'force', None,
     _('skip check for outstanding uncommitted changes (DEPRECATED)')),
    ('', 'no-commit', None,
     _("don't commit, just update the working directory")),
    ('', 'bypass', None,
     _("apply patch without touching the working directory")),
    ('', 'exact', None,
     _('apply patch to the nodes from which it was generated')),
    ('', 'import-branch', None,
     _('use any branch information in patch (implied by --exact)'))] +
    commitopts + commitopts2 + similarityopts,
    _('[OPTION]... PATCH...'))
def import_(ui, repo, patch1=None, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually (unless
    --no-commit is specified).

    Because import first applies changes to the working directory,
    import will abort if there are outstanding changes.

    You can import a patch straight from a mail message. Even patches
    as attachments work (to use the body part, it must have type
    text/plain or text/x-patch). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to commit
    message.

    If the imported patch was generated by :hg:`export`, user and
    description from patch override values from message headers and
    body. Values given on command line with -m/--message and -u/--user
    override these.

    If --exact is specified, import will set the working directory to
    the parent of each patch before applying it, and will abort if the
    resulting changeset has a different ID than the one recorded in
    the patch. This may happen due to character set problems or other
    deficiencies in the text patch format.

    Use --bypass to apply and commit patches directly to the
    repository, not touching the working directory. Without --exact,
    patches will be applied on top of the working directory parent
    revision.

    With -s/--similarity, hg will attempt to discover renames and
    copies in the patch in the same way as :hg:`addremove`.

    To read a patch from standard input, use "-" as the patch name. If
    a URL is specified, the patch will be downloaded from it.
    See :hg:`help dates` for a list of formats valid for -d/--date.

    .. container:: verbose

      Examples:

      - import a traditional patch from a website and detect renames::

          hg import -s 80 http://example.com/bugfix.patch

      - import a changeset from an hgweb server::

          hg import http://www.selenic.com/hg/rev/5ca8c111e9aa

      - import all the patches in an Unix-style mbox::

          hg import incoming-patches.mbox

      - attempt to exactly restore an exported changeset (not always
        possible)::

          hg import --exact proposed-fix.patch

    Returns 0 on success.
    """

    if not patch1:
        raise util.Abort(_('need at least one patch to import'))

    # fold the first (positional) patch back into the varargs tuple so the
    # main loop can treat all patches uniformly
    patches = (patch1,) + patches

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    # 'update' is True when the patches go through the working directory
    # (i.e. --bypass was not given)
    update = not opts.get('bypass')
    if not update and opts.get('no_commit'):
        raise util.Abort(_('cannot use --no-commit with --bypass'))
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    if sim and not update:
        raise util.Abort(_('cannot use --similarity with --bypass'))

    # refuse to run while another multi-step operation (rebase, graft, ...)
    # is in progress, and bail out on local changes unless --force
    if update:
        cmdutil.checkunfinished(repo)
    if (opts.get('exact') or not opts.get('force')) and update:
        cmdutil.bailifchanged(repo)

    base = opts["base"]
    wlock = lock = tr = None
    msgs = []


    try:
        try:
            # wlock before lock: standard Mercurial lock ordering; the store
            # lock and transaction are only needed when we actually commit
            wlock = repo.wlock()
            if not opts.get('no_commit'):
                lock = repo.lock()
                tr = repo.transaction('import')
            parents = repo.parents()
            for patchurl in patches:
                if patchurl == '-':
                    ui.status(_('applying patch from stdin\n'))
                    patchfile = ui.fin
                    patchurl = 'stdin'      # for error message
                else:
                    patchurl = os.path.join(base, patchurl)
                    ui.status(_('applying %s\n') % patchurl)
                    patchfile = hg.openpath(ui, patchurl)

                haspatch = False
                # a single file/mbox may contain several patches; apply each
                for hunk in patch.split(patchfile):
                    (msg, node) = cmdutil.tryimportone(ui, repo, hunk, parents,
                                                       opts, msgs, hg.clean)
                    if msg:
                        haspatch = True
                        ui.note(msg + '\n')
                    # chain the next patch onto what we just created
                    if update or opts.get('exact'):
                        parents = repo.parents()
                    else:
                        parents = [repo[node]]

                if not haspatch:
                    raise util.Abort(_('%s: no diffs found') % patchurl)

            if tr:
                tr.close()
            if msgs:
                repo.savecommitmessage('\n* * *\n'.join(msgs))
        except: # re-raises
            # wlock.release() indirectly calls dirstate.write(): since
            # we're crashing, we do not want to change the working dir
            # parent after all, so make sure it writes nothing
            repo.dirstate.invalidate()
            raise
    finally:
        # tr.release() aborts the transaction if it was not closed above
        if tr:
            tr.release()
        release(lock, wlock)
3854 3854
@command('incoming|in',
    [('f', 'force', None,
      _('run even if remote repository is unrelated')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('', 'bundle', '',
     _('file to store the bundles into'), _('FILE')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmarks', False, _("compare bookmarks")),
    ('b', 'branch', [],
     _('a specific branch you would like to pull'), _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.

    For remote repository, using --bundle avoids downloading the
    changesets twice if the incoming is followed by a pull.

    See pull for valid source format details.

    .. container:: verbose

      Examples:

      - show incoming changes with patches and full description::

          hg incoming -vp

      - show incoming changes excluding merges, store a bundle::

          hg in -vpM --bundle incoming.hg
          hg pull incoming.hg

      - briefly list changes inside a bundle::

          hg in changes.hg -T "{desc|firstline}\\n"

    Returns 0 if there are incoming changes, 1 otherwise.
    """
    # --graph: render the incoming changesets as an ASCII DAG instead of
    # the flat changeset list; hg._incoming drives the 'display' callback
    if opts.get('graph'):
        cmdutil.checkunsupportedgraphflags([], opts)
        def display(other, chlist, displayer):
            revdag = cmdutil.graphrevs(other, chlist, opts)
            showparents = [ctx.node() for ctx in repo[None].parents()]
            cmdutil.displaygraph(ui, revdag, displayer, showparents,
                                 graphmod.asciiedges)

        hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
        return 0

    if opts.get('bundle') and opts.get('subrepos'):
        raise util.Abort(_('cannot combine --bundle and --subrepos'))

    # -B: compare bookmarks with the remote instead of listing changesets
    if opts.get('bookmarks'):
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        other = hg.peer(repo, opts, source)
        if 'bookmarks' not in other.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(source))
        return bookmarks.diff(ui, repo, other)

    # _subtoppath lets subrepos resolve their relative pull source while
    # hg.incoming recurses into them; always clean it up afterwards
    repo._subtoppath = ui.expandpath(source)
    try:
        return hg.incoming(ui, repo, source, opts)
    finally:
        del repo._subtoppath
3927 3927
3928 3928
@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'))
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.

    If no directory is given, the current directory is used.

    It is possible to specify an ``ssh://`` URL as the destination.
    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    # expand path aliases first; hg.peer with create=True makes the repo
    path = ui.expandpath(dest)
    hg.peer(ui, opts, path, create=True)
3944 3944
@command('locate',
    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
    ] + walkopts,
    _('[OPTION]... [PATTERN]...'))
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print files under Mercurial control in the working directory whose
    names match the given patterns.

    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".

    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.

    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.

    Returns 0 if a match is found, 1 otherwise.
    """
    # -0 terminates each name with NUL for xargs; otherwise one per line
    sep = '\0' if opts.get('print0') else '\n'
    rev = scmutil.revsingle(repo, opts.get('rev'), None).node()

    found = False
    ctx = repo[rev]
    m = scmutil.match(ctx, pats, opts, default='relglob')
    # silence "no such file" complaints for non-matching patterns
    m.bad = lambda x, y: False
    for abs in ctx.walk(m):
        # without an explicit -r, skip files not tracked in the dirstate
        if not rev and abs not in repo.dirstate:
            continue
        if opts.get('fullpath'):
            name = repo.wjoin(abs)
        else:
            name = (pats and m.rel(abs)) or abs
        ui.write(name, sep)
        found = True

    return 0 if found else 1
3987 3987
@command('^log|history',
    [('f', 'follow', None,
     _('follow changeset history, or file history across copies and renames')),
    ('', 'follow-first', None,
     _('only follow the first parent of merge changesets (DEPRECATED)')),
    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
    ('C', 'copies', None, _('show copied files')),
    ('k', 'keyword', [],
     _('do case-insensitive search for a given text'), _('TEXT')),
    ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
    ('', 'removed', None, _('include revisions where files were removed')),
    ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
    ('u', 'user', [], _('revisions committed by user'), _('USER')),
    ('', 'only-branch', [],
     _('show only changesets within the given named branch (DEPRECATED)'),
     _('BRANCH')),
    ('b', 'branch', [],
     _('show changesets within the given named branch'), _('BRANCH')),
    ('P', 'prune', [],
     _('do not display revision or any of its ancestors'), _('REV')),
    ] + logopts + walkopts,
    _('[OPTION]... [FILE]'))
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire
    project.

    If no revision range is specified, the default is ``tip:0`` unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.

    File history is shown without following rename or copy history of
    files. Use -f/--follow with a filename to follow history across
    renames and copies. --follow without a filename will only show
    ancestors or descendants of the starting revision.

    By default this command prints revision number and changeset id,
    tags, non-trivial parents, user, date and time, and a summary for
    each commit. When the -v/--verbose switch is used, the list of
    changed files and full commit message are shown.

    With --graph the revisions are shown as an ASCII art DAG with the most
    recent changeset at the top.
    'o' is a changeset, '@' is a working directory parent, 'x' is obsolete,
    and '+' represents a fork where the changeset from the lines below is a
    parent of the 'o' merge on the same same line.

    .. note::

       log -p/--patch may generate unexpected diff output for merge
       changesets, as it will only compare the merge changeset against
       its first parent. Also, only files different from BOTH parents
       will appear in files:.

    .. note::

       for performance reasons, log FILE may omit duplicate changes
       made on branches and will not show deletions. To see all
       changes including duplicates and deletions, use the --removed
       switch.

    .. container:: verbose

      Some examples:

      - changesets with full descriptions and file lists::

          hg log -v

      - changesets ancestral to the working directory::

          hg log -f

      - last 10 commits on the current branch::

          hg log -l 10 -b .

      - changesets showing all modifications of a file, including removals::

          hg log --removed file.c

      - all changesets that touch a directory, with diffs, excluding merges::

          hg log -Mp lib/

      - all revision numbers that match a keyword::

          hg log -k bug --template "{rev}\\n"

      - check if a given changeset is included is a tagged release::

          hg log -r "a21ccf and ancestor(1.9)"

      - find all changesets by some user in a date range::

          hg log -k alice -d "may 2008 to jul 2008"

      - summary of all changesets after the last tag::

          hg log -r "last(tagged())::" --template "{desc|firstline}\\n"

    See :hg:`help dates` for a list of formats valid for -d/--date.

    See :hg:`help revisions` and :hg:`help revsets` for more about
    specifying revisions.

    See :hg:`help templates` for more about pre-packaged styles and
    specifying custom templates.

    Returns 0 on success.
    """
    # --graph is handled entirely by the graphlog machinery
    if opts.get('graph'):
        return cmdutil.graphlog(ui, repo, *pats, **opts)

    matchfn = scmutil.match(repo[None], pats, opts)
    limit = cmdutil.loglimit(opts)
    count = 0

    # -C/--copies: build a rename-lookup function, bounded by the highest
    # requested revision when -r limits the range
    getrenamed, endrev = None, None
    if opts.get('copies'):
        if opts.get('rev'):
            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    # df becomes a date-match predicate when -d/--date is given
    df = False
    if opts.get("date"):
        df = util.matchdate(opts["date"])

    # merge deprecated --only-branch into --branch and normalize the names;
    # note: this rewrites opts['branch'] in place, which prep() reads below
    branches = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in branches]

    displayer = cmdutil.show_changeset(ui, repo, opts, True)
    def prep(ctx, fns):
        # per-changeset filter/preparation callback invoked by
        # walkchangerevs; returning early means "do not display this rev"
        rev = ctx.rev()
        parents = [p for p in repo.changelog.parentrevs(rev)
                   if p != nullrev]
        if opts.get('no_merges') and len(parents) == 2:
            return
        if opts.get('only_merges') and len(parents) != 2:
            return
        if opts.get('branch') and ctx.branch() not in opts['branch']:
            return
        if df and not df(ctx.date()[0]):
            return

        # -u/--user and -k/--keyword are case-insensitive substring matches;
        # the for/else idiom rejects the rev when no candidate matched
        lower = encoding.lower
        if opts.get('user'):
            luser = lower(ctx.user())
            for k in [lower(x) for x in opts['user']]:
                if (k in luser):
                    break
            else:
                return
        if opts.get('keyword'):
            luser = lower(ctx.user())
            ldesc = lower(ctx.description())
            lfiles = lower(" ".join(ctx.files()))
            for k in [lower(x) for x in opts['keyword']]:
                if (k in luser or k in ldesc or k in lfiles):
                    break
            else:
                return

        # collect (destination, source) rename pairs for this rev, skipping
        # the null revision (rev 0 is falsy here and is deliberately skipped)
        copies = None
        if getrenamed is not None and rev:
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, rev)
                if rename:
                    copies.append((fn, rename[0]))

        # restrict -p/--stat diffs to the files of interest
        revmatchfn = None
        if opts.get('patch') or opts.get('stat'):
            if opts.get('follow') or opts.get('follow_first'):
                # note: this might be wrong when following through merges
                revmatchfn = scmutil.match(repo[None], fns, default='path')
            else:
                revmatchfn = matchfn

        displayer.show(ctx, copies=copies, matchfn=revmatchfn)

    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        # flush() returns whether the rev was actually displayed; only
        # displayed revs count towards -l/--limit
        if displayer.flush(ctx.rev()):
            count += 1
            if count == limit:
                break
    displayer.close()
4176 4176
@command('manifest',
    [('r', 'rev', '', _('revision to display'), _('REV')),
     ('', 'all', False, _("list files from all revisions"))],
    _('[-r REV]'))
def manifest(ui, repo, node=None, rev=None, **opts):
    """output the current or given revision of the project manifest

    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if no revision is checked out.

    With -v, print file permissions, symlink and executable bits.
    With --debug, print file revision hashes.

    If option --all is specified, the list of all files from all revisions
    is printed. This includes deleted and renamed files.

    Returns 0 on success.
    """

    fm = ui.formatter('manifest', opts)

    if opts.get('all'):
        if rev or node:
            raise util.Abort(_("can't specify a revision with --all"))

        # --all: enumerate every filelog in the store rather than walking a
        # single manifest; names come from "data/<path>.i" revlog files
        res = []
        prefix = "data/"
        suffix = ".i"
        plen = len(prefix)
        slen = len(suffix)
        lock = repo.lock()
        try:
            for fn, b, size in repo.store.datafiles():
                # strip the store prefix/suffix to recover the tracked path
                if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix:
                    res.append(fn[plen:-slen])
        finally:
            lock.release()
        for f in res:
            fm.startitem()
            fm.write("path", '%s\n', f)
        fm.end()
        return

    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    # the positional argument and -r/--rev are interchangeable
    if not node:
        node = rev

    # flag -> display character / permission string for -v output
    char = {'l': '@', 'x': '*', '': ''}
    mode = {'l': '644', 'x': '755', '': '644'}
    ctx = scmutil.revsingle(repo, node)
    mf = ctx.manifest()
    for f in ctx:
        fm.startitem()
        fl = ctx[f].flags()
        # hash only with --debug, mode/type only with -v
        fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f]))
        fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl])
        fm.write('path', '%s\n', f)
    fm.end()
4238 4238
@command('^merge',
    [('f', 'force', None,
      _('force a merge including outstanding changes (DEPRECATED)')),
    ('r', 'rev', '', _('revision to merge'), _('REV')),
    ('P', 'preview', None,
     _('review revisions to merge (no merge is performed)'))
     ] + mergetoolopts,
    _('[-P] [-f] [[-r] REV]'))
def merge(ui, repo, node=None, **opts):
    """merge working directory with another revision

    The current working directory is updated with all changes made in
    the requested revision since the last common predecessor revision.

    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates to the repository are allowed. The next commit will have
    two parents.

    ``--tool`` can be used to specify the merge tool used for file
    merges. It overrides the HGMERGE environment variable and your
    configuration files. See :hg:`help merge-tools` for options.

    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision with which to merge with must be provided.

    :hg:`resolve` must be used to resolve unresolved files.

    To undo an uncommitted merge, use :hg:`update --clean .` which
    will check out a clean copy of the original merge parent, losing
    all changes.

    Returns 0 on success, 1 if there are unresolved files.
    """

    # the merge target may be given positionally or via -r, but not both
    if opts.get('rev') and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = opts.get('rev')

    if node:
        node = scmutil.revsingle(repo, node).node()

    # no explicit target, but a bookmark is active: merge with the other
    # head carrying the same bookmark, if there is exactly one
    if not node and repo._bookmarkcurrent:
        bmheads = repo.bookmarkheads(repo._bookmarkcurrent)
        curhead = repo[repo._bookmarkcurrent].node()
        if len(bmheads) == 2:
            if curhead == bmheads[0]:
                node = bmheads[1]
            else:
                node = bmheads[0]
        elif len(bmheads) > 2:
            raise util.Abort(_("multiple matching bookmarks to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))
        elif len(bmheads) <= 1:
            raise util.Abort(_("no matching bookmark to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))

    # no explicit target and no active bookmark: pick the other head of the
    # current named branch, considering only heads without bookmarks (nbhs)
    if not node and not repo._bookmarkcurrent:
        branch = repo[None].branch()
        bheads = repo.branchheads(branch)
        nbhs = [bh for bh in bheads if not repo[bh].bookmarks()]

        if len(nbhs) > 2:
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev")
                             % (branch, len(bheads)),
                             hint=_("run 'hg heads .' to see heads"))

        parent = repo.dirstate.p1()
        if len(nbhs) <= 1:
            # fewer than two candidate heads: explain precisely why an
            # automatic merge target cannot be chosen
            if len(bheads) > 1:
                raise util.Abort(_("heads are bookmarked - "
                                   "please merge with an explicit rev"),
                                 hint=_("run 'hg heads' to see all heads"))
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev")
                                 % branch,
                                 hint=_("run 'hg heads' to see all heads"))
            msg, hint = _('nothing to merge'), None
            if parent != repo.lookup(branch):
                hint = _("use 'hg update' instead")
            raise util.Abort(msg, hint=hint)

        if parent not in bheads:
            raise util.Abort(_('working directory not at a head revision'),
                             hint=_("use 'hg update' or merge with an "
                                    "explicit revision"))
        # merge with whichever candidate head is not the working dir parent
        if parent == nbhs[0]:
            node = nbhs[-1]
        else:
            node = nbhs[0]

    if opts.get('preview'):
        # find nodes that are ancestors of p2 but not of p1
        p1 = repo.lookup('.')
        p2 = repo.lookup(node)
        nodes = repo.changelog.findmissing(common=[p1], heads=[p2])

        displayer = cmdutil.show_changeset(ui, repo, opts)
        for node in nodes:
            displayer.show(repo[node])
        displayer.close()
        return 0

    try:
        # ui.forcemerge is an internal variable, do not document
        repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'merge')
        return hg.merge(repo, node, force=opts.get('force'))
    finally:
        # always clear the override, even if the merge raised
        ui.setconfig('ui', 'forcemerge', '', 'merge')
4355 4355
@command('outgoing|out',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'), _('REV')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('B', 'bookmarks', False, _('compare bookmarks')),
    ('b', 'branch', [], _('a specific branch you would like to push'),
     _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in the destination

    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.

    See pull for details of valid destination formats.

    Returns 0 if there are outgoing changes, 1 otherwise.
    """
    # --graph: render the outgoing changesets as an ASCII DAG
    if opts.get('graph'):
        cmdutil.checkunsupportedgraphflags([], opts)
        o = hg._outgoing(ui, repo, dest, opts)
        if o is None:
            return

        revdag = cmdutil.graphrevs(repo, o, opts)
        displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
        showparents = [ctx.node() for ctx in repo[None].parents()]
        cmdutil.displaygraph(ui, revdag, displayer, showparents,
                             graphmod.asciiedges)
        return 0

    # -B: compare bookmarks with the remote instead of listing changesets;
    # note the argument order (ui, other, repo) is the mirror of incoming
    if opts.get('bookmarks'):
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.peer(repo, opts, dest)
        if 'bookmarks' not in other.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(dest))
        return bookmarks.diff(ui, other, repo)

    # _subtoppath lets subrepos resolve their relative push destination
    # while hg.outgoing recurses into them; always clean it up afterwards
    repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default')
    try:
        return hg.outgoing(ui, repo, dest, opts)
    finally:
        del repo._subtoppath
4405 4405
@command('parents',
    [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
    ] + templateopts,
    _('[-r REV] [FILE]'))
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision

    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, the revision in which the file was
    last changed (before the working directory revision or the
    argument to --rev if given) is printed.

    Returns 0 on success.
    """

    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    if file_:
        # with a file argument, report the revision(s) that last touched
        # the file as seen from each parent of ctx
        m = scmutil.match(ctx, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise util.Abort(_('can only specify an explicit filename'))
        file_ = m.files()[0]
        filenodes = []
        for pctx in ctx.parents():
            if not pctx:
                continue
            try:
                filenodes.append(pctx.filenode(file_))
            except error.LookupError:
                # file not tracked in this parent; skip it
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        nodes = [repo.filectx(file_, fileid=fn).node() for fn in filenodes]
    else:
        nodes = [pctx.node() for pctx in ctx.parents()]

    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in nodes:
        # the null revision is not a real parent; never display it
        if n != nullid:
            displayer.show(repo[n])
    displayer.close()
4451 4451
@command('paths', [], _('[NAME]'))
def paths(ui, repo, search=None):
    """show aliases for remote repositories

    Show definition of symbolic path name NAME. If no name is given,
    show definition of all available names.

    Option -q/--quiet suppresses all output when searching for NAME
    and shows only the path names when listing all definitions.

    Path names are defined in the [paths] section of your
    configuration file and in ``/etc/mercurial/hgrc``. If run inside a
    repository, ``.hg/hgrc`` is used, too.

    The path names ``default`` and ``default-push`` have a special
    meaning. When performing a push or pull operation, they are used
    as fallbacks if no location is specified on the command-line.
    When ``default-push`` is set, it will be used for push and
    ``default`` will be used for pull; otherwise ``default`` is used
    as the fallback for both. When cloning a repository, the clone
    source is written as ``default`` in ``.hg/hgrc``. Note that
    ``default`` and ``default-push`` apply to all inbound (e.g.
    :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` and
    :hg:`bundle`) operations.

    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    pathitems = ui.configitems("paths")
    if search:
        # look up a single alias; passwords in URLs are never echoed
        for name, path in pathitems:
            if name != search:
                continue
            ui.status("%s\n" % util.hidepassword(path))
            return
        if not ui.quiet:
            ui.warn(_("not found!\n"))
        return 1
    # no argument: list every configured alias
    for name, path in pathitems:
        if ui.quiet:
            ui.write("%s\n" % name)
        else:
            ui.write("%s = %s\n" % (name, util.hidepassword(path)))
4495 4495
@command('phase',
    [('p', 'public', False, _('set changeset phase to public')),
     ('d', 'draft', False, _('set changeset phase to draft')),
     ('s', 'secret', False, _('set changeset phase to secret')),
     ('f', 'force', False, _('allow to move boundary backward')),
     ('r', 'rev', [], _('target revision'), _('REV')),
    ],
    _('[-p|-d|-s] [-f] [-r] REV...'))
def phase(ui, repo, *revs, **opts):
    """set or show the current phase name

    With no argument, show the phase name of specified revisions.

    With one of -p/--public, -d/--draft or -s/--secret, change the
    phase value of the specified revisions.

    Unless -f/--force is specified, :hg:`phase` won't move changeset from a
    lower phase to an higher phase. Phases are ordered as follows::

        public < draft < secret

    Returns 0 on success, 1 if no phases were changed or some could not
    be changed.
    """
    # search for a unique phase argument
    # phases.phasenames orders public < draft < secret, so the index
    # doubles as the numeric phase value
    targetphase = None
    for idx, name in enumerate(phases.phasenames):
        if opts[name]:
            if targetphase is not None:
                raise util.Abort(_('only one phase can be specified'))
            targetphase = idx

    # look for specified revision
    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        raise util.Abort(_('no revisions specified'))

    revs = scmutil.revrange(repo, revs)

    lock = None
    ret = 0
    if targetphase is None:
        # display
        for r in revs:
            ctx = repo[r]
            ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr()))
    else:
        lock = repo.lock()
        try:
            # set phase
            if not revs:
                raise util.Abort(_('empty revision set'))
            nodes = [repo[r].node() for r in revs]
            # snapshot the per-rev phase data before moving the boundary
            # so we can report how many changesets actually changed
            olddata = repo._phasecache.getphaserevs(repo)[:]
            phases.advanceboundary(repo, targetphase, nodes)
            if opts['force']:
                # --force additionally allows moving back to a lower phase
                phases.retractboundary(repo, targetphase, nodes)
        finally:
            lock.release()
        # moving revision from public to draft may hide them
        # We have to check result on an unfiltered repository
        unfi = repo.unfiltered()
        newdata = repo._phasecache.getphaserevs(unfi)
        changes = sum(o != newdata[i] for i, o in enumerate(olddata))
        cl = unfi.changelog
        # revisions still below the requested phase could not be moved
        # (advanceboundary refuses to lower a phase without --force)
        rejected = [n for n in nodes
                    if newdata[cl.rev(n)] < targetphase]
        if rejected:
            ui.warn(_('cannot move %i changesets to a higher '
                      'phase, use --force\n') % len(rejected))
            ret = 1
        if changes:
            msg = _('phase changed for %i changesets\n') % changes
            if ret:
                # partial success: make the message visible alongside the
                # warning above
                ui.status(msg)
            else:
                ui.note(msg)
        else:
            ui.warn(_('no phases changed\n'))
            ret = 1
    return ret
4578 4578
def postincoming(ui, repo, modheads, optupdate, checkout):
    """Common post-processing after changesets were added (pull/unbundle).

    modheads is the number of heads modified by the incoming changesets,
    optupdate the value of the -u/--update flag, and checkout the revision
    requested for checkout (may be None).  Either updates the working
    directory or prints a hint about what to run next.
    """
    if modheads == 0:
        # nothing came in; nothing to do or suggest
        return
    if optupdate:
        checkout, movemarkfrom = bookmarks.calculateupdate(ui, repo, checkout)
        try:
            ret = hg.update(repo, checkout)
        except util.Abort, inst:
            # update refused (e.g. crosses branches); warn but return 0 so
            # the pull itself is still reported as successful
            ui.warn(_("not updating: %s\n") % str(inst))
            if inst.hint:
                ui.warn(_("(%s)\n") % inst.hint)
            return 0
        if not ret and not checkout:
            # clean update with no explicit target: let the current
            # bookmark follow the update
            if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                ui.status(_("updating bookmark %s\n") % repo._bookmarkcurrent)
        return ret
    if modheads > 1:
        currentbranchheads = len(repo.branchheads())
        if currentbranchheads == modheads:
            ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
        elif currentbranchheads > 1:
            ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to "
                        "merge)\n"))
        else:
            ui.status(_("(run 'hg heads' to see heads)\n"))
    else:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
4606 4606
@command('^pull',
    [('u', 'update', None,
     _('update to new branch head if changesets were pulled')),
    ('f', 'force', None, _('run even when remote repository is unrelated')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
    ('b', 'branch', [], _('a specific branch you would like to pull'),
     _('BRANCH')),
    ] + remoteopts,
    _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to a local repository (the current one unless
    -R is specified). By default, this does not update the copy of the
    project in the working directory.

    Use :hg:`incoming` if you want to see what would have been added
    by a pull at the time you issued this command. If you then decide
    to add those changes to the repository, you should use :hg:`pull
    -r X` where ``X`` is the last changeset listed by :hg:`incoming`.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    Returns 0 on success, 1 if an update had unresolved files.
    """
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.peer(repo, opts, source)
    try:
        ui.status(_('pulling from %s\n') % util.hidepassword(source))
        revs, checkout = hg.addbranchrevs(repo, other, branches,
                                          opts.get('rev'))

        # fetched once so -B validation and the later import agree on
        # what the remote had
        remotebookmarks = other.listkeys('bookmarks')

        if opts.get('bookmark'):
            if not revs:
                revs = []
            for b in opts['bookmark']:
                if b not in remotebookmarks:
                    raise util.Abort(_('remote bookmark %s not found!') % b)
                # -B implies pulling the bookmark's target changeset
                revs.append(remotebookmarks[b])

        if revs:
            try:
                # resolve symbolic revisions on the remote side
                revs = [other.lookup(rev) for rev in revs]
            except error.CapabilityError:
                err = _("other repository doesn't support revision lookup, "
                        "so a rev cannot be specified.")
                raise util.Abort(err)

        modheads = repo.pull(other, heads=revs, force=opts.get('force'))
        bookmarks.updatefromremote(ui, repo, remotebookmarks, source)
        if checkout:
            checkout = str(repo.changelog.rev(other.lookup(checkout)))
        # subrepos pulled during a post-pull update inherit this source
        repo._subtoppath = source
        try:
            ret = postincoming(ui, repo, modheads, opts.get('update'), checkout)

        finally:
            del repo._subtoppath

        # update specified bookmarks
        if opts.get('bookmark'):
            marks = repo._bookmarks
            for b in opts['bookmark']:
                # explicit pull overrides local bookmark if any
                ui.status(_("importing bookmark %s\n") % b)
                marks[b] = repo[remotebookmarks[b]].node()
            marks.write()
    finally:
        other.close()
    return ret
4684 4684
@command('^push',
    [('f', 'force', None, _('force push')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'),
     _('REV')),
    ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
    ('b', 'branch', [],
     _('a specific branch you would like to push'), _('BRANCH')),
    ('', 'new-branch', False, _('allow pushing a new branch')),
    ] + remoteopts,
    _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination

    Push changesets from the local repository to the specified
    destination.

    This operation is symmetrical to pull: it is identical to a pull
    in the destination repository from the current one.

    By default, push will not allow creation of new heads at the
    destination, since multiple heads would make it unclear which head
    to use. In this situation, it is recommended to pull and merge
    before pushing.

    Use --new-branch if you want to allow push to create a new named
    branch that is not present at the destination. This allows you to
    only create a new branch without forcing other changes.

    .. note::

       Extra care should be taken with the -f/--force option,
       which will push all new heads on all branches, an action which will
       almost always cause confusion for collaborators.

    If -r/--rev is used, the specified revision and all its ancestors
    will be pushed to the remote repository.

    If -B/--bookmark is used, the specified bookmarked revision, its
    ancestors, and the bookmark will be pushed to the remote
    repository.

    Please see :hg:`help urls` for important details about ``ssh://``
    URLs. If DESTINATION is omitted, a default path will be used.

    Returns 0 if push was successful, 1 if nothing to push.
    """

    if opts.get('bookmark'):
        ui.setconfig('bookmarks', 'pushing', opts['bookmark'], 'push')
        for b in opts['bookmark']:
            # translate -B options to -r so changesets get pushed
            if b in repo._bookmarks:
                opts.setdefault('rev', []).append(b)
            else:
                # if we try to push a deleted bookmark, translate it to null
                # this lets simultaneous -r, -b options continue working
                opts.setdefault('rev', []).append("null")

    # default-push wins over default when both are configured
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    ui.status(_('pushing to %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    try:
        other = hg.peer(repo, opts, dest)
    except error.RepoError:
        if dest == "default-push":
            raise util.Abort(_("default repository not configured!"),
                             hint=_('see the "path" section in "hg help config"'))
        else:
            raise

    if revs:
        revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)]

    # subrepo pushes triggered below inherit this destination path
    repo._subtoppath = dest
    try:
        # push subrepos depth-first for coherent ordering
        c = repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False
    finally:
        del repo._subtoppath
    result = repo.push(other, opts.get('force'), revs=revs,
                       newbranch=opts.get('new_branch'))

    # invert so the command exit code follows the documented convention
    # (0 = pushed something, 1 = nothing to push)
    result = not result

    if opts.get('bookmark'):
        bresult = bookmarks.pushtoremote(ui, repo, other, opts['bookmark'])
        if bresult == 2:
            return 2
        if not result and bresult:
            result = 2

    return result
4783 4783
@command('recover', [])
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.

    Returns 0 if successful, 1 if nothing to recover or verify fails.
    """
    # when no interrupted transaction was found there is nothing to
    # verify either; otherwise check the recovered store for integrity
    if not repo.recover():
        return 1
    return hg.verify(repo)
4799 4799
@command('^remove|rm',
    [('A', 'after', None, _('record delete for missing files')),
    ('f', 'force', None,
     _('remove (and delete) file even if added or modified')),
    ] + walkopts,
    _('[OPTION]... FILE...'))
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the current branch.

    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see :hg:`revert`. To undo added
    files, see :hg:`forget`.

    .. container:: verbose

      -A/--after can be used to remove only files that have already
      been deleted, -f/--force can be used to force deletion, and -Af
      can be used to remove files from the next revision without
      deleting them from the working directory.

      The following table details the behavior of remove for different
      file states (columns) and option combinations (rows). The file
      states are Added [A], Clean [C], Modified [M] and Missing [!]
      (as reported by :hg:`status`). The actions are Warn, Remove
      (from branch) and Delete (from disk):

      ========= == == == ==
      opt/state A  C  M  !
      ========= == == == ==
      none      W  RD W  R
      -f        R  RD RD R
      -A        W  W  W  R
      -Af       R  R  R  R
      ========= == == == ==

      Note that remove never deletes files in Added [A] state from the
      working directory, not even if option --force is specified.

    Returns 0 on success, 1 if any warnings encountered.
    """

    ret = 0
    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))

    m = scmutil.match(repo[None], pats, opts)
    s = repo.status(match=m, clean=True)
    # status tuple indices: 0=modified, 1=added, 3=deleted (missing from
    # disk but tracked), 6=clean
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    # warn about failure to delete explicit files/dirs
    wctx = repo[None]
    for f in m.files():
        # skip anything that is tracked or is a directory of tracked files
        if f in repo.dirstate or f in wctx.dirs():
            continue
        if os.path.exists(m.rel(f)):
            if os.path.isdir(m.rel(f)):
                ui.warn(_('not removing %s: no tracked files\n') % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

    # select which files to forget, per the option/state table in the
    # docstring above
    if force:
        list = modified + deleted + clean + added
    elif after:
        # -A only records deletes for files already gone from disk
        list = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        list = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(list):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()

    return ret
4899 4899
@command('rename|move|mv',
    [('A', 'after', None, _('record a rename that has already occurred')),
    ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... SOURCE... DEST'))
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect at the next commit. To undo a rename
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    # rename is just copy in rename mode; hold the working-directory
    # lock for the duration of the operation
    wlock = repo.wlock(False)
    try:
        ret = cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        wlock.release()
    return ret
4926 4926
@command('resolve',
    [('a', 'all', None, _('select all unresolved files')),
    ('l', 'list', None, _('list state of files needing merge')),
    ('m', 'mark', None, _('mark files as resolved')),
    ('u', 'unmark', None, _('mark files as unresolved')),
    ('n', 'no-status', None, _('hide status prefix'))]
    + mergetoolopts + walkopts,
    _('[OPTION]... [FILE]...'))
def resolve(ui, repo, *pats, **opts):
    """redo merges or set/view the merge status of files

    Merges with unresolved conflicts are often the result of
    non-interactive merging using the ``internal:merge`` configuration
    setting, or a command-line merge tool like ``diff3``. The resolve
    command is used to manage the files involved in a merge, after
    :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
    working directory must have two parents). See :hg:`help
    merge-tools` for information on configuring merge tools.

    The resolve command can be used in the following ways:

    - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
      files, discarding any previous merge attempts. Re-merging is not
      performed for files already marked as resolved. Use ``--all/-a``
      to select all unresolved files. ``--tool`` can be used to specify
      the merge tool used for the given files. It overrides the HGMERGE
      environment variable and your configuration files. Previous file
      contents are saved with a ``.orig`` suffix.

    - :hg:`resolve -m [FILE]`: mark a file as having been resolved
      (e.g. after having manually fixed-up the files). The default is
      to mark all unresolved files.

    - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
      default is to mark all resolved files.

    - :hg:`resolve -l`: list files which had or still have conflicts.
      In the printed list, ``U`` = unresolved and ``R`` = resolved.

    Note that Mercurial will not let you commit files with unresolved
    merge conflicts. You must use :hg:`resolve -m ...` before you can
    commit after a conflicting merge.

    Returns 0 on success, 1 if any files fail a resolve attempt.
    """

    # decode the mutually-exclusive mode flags
    all, mark, unmark, show, nostatus = \
        [opts.get(o) for o in 'all mark unmark list no_status'.split()]

    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and all:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))

    ms = mergemod.mergestate(repo)
    m = scmutil.match(repo[None], pats, opts)
    ret = 0

    # iterate over every file recorded in the merge state, acting only on
    # those matched by the given patterns (or all, with no patterns)
    for f in ms:
        if m(f):
            if show:
                if nostatus:
                    ui.write("%s\n" % f)
                else:
                    # ms[f] is 'u' (unresolved) or 'r' (resolved)
                    ui.write("%s %s\n" % (ms[f].upper(), f),
                             label='resolve.' +
                             {'u': 'unresolved', 'r': 'resolved'}[ms[f]])
            elif mark:
                ms.mark(f, "r")
            elif unmark:
                ms.mark(f, "u")
            else:
                wctx = repo[None]

                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                util.copyfile(a, a + ".resolve")

                try:
                    # resolve file
                    # temporarily force the requested --tool via config
                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                 'resolve')
                    # a truthy return from ms.resolve means the merge
                    # attempt failed for this file
                    if ms.resolve(f, wctx):
                        ret = 1
                finally:
                    ui.setconfig('ui', 'forcemerge', '', 'resolve')
                # persist merge state after each file so progress survives
                # an interruption
                ms.commit()

                # replace filemerge's .orig file with our resolve file
                util.rename(a + ".resolve", a + ".orig")

    ms.commit()
    return ret
5023 5023
@command('revert',
    [('a', 'all', None, _('revert all changes when no arguments given')),
     ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
     ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
     ('C', 'no-backup', None, _('do not save backup copies of files')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [-r REV] [NAME]...'))
def revert(ui, repo, *pats, **opts):
    """restore files to their checkout state

    .. note::

       To check out earlier revisions, you should use :hg:`update REV`.
       To cancel an uncommitted merge (and lose your changes),
       use :hg:`update --clean .`.

    With no revision specified, revert the specified files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify a
    revision.

    Using the -r/--rev or -d/--date options, revert the given files or
    directories to their states as of a specific revision. Because
    revert does not change the working directory parents, this will
    cause these files to appear modified. This can be helpful to "back
    out" some or all of an earlier change. See :hg:`backout` for a
    related method.

    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    """

    if opts.get("date"):
        if opts.get("rev"):
            raise util.Abort(_("you can't specify a revision and a date"))
        # translate --date into an equivalent --rev
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])

    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        # revert after merge is a trap for new users (issue2915)
        raise util.Abort(_('uncommitted merge with no revision specified'),
                         hint=_('use "hg update" or see "hg help revert"'))

    ctx = scmutil.revsingle(repo, opts.get('rev'))

    if not pats and not opts.get('all'):
        # refuse to run with neither file patterns nor --all, choosing a
        # hint that matches the working-directory situation
        msg = _("no files or directories specified")
        if p2 != nullid:
            hint = _("uncommitted merge, use --all to discard all changes,"
                     " or 'hg update -C .' to abort the merge")
            raise util.Abort(msg, hint=hint)
        dirty = util.any(repo.status())
        node = ctx.node()
        if node != parent:
            if dirty:
                hint = _("uncommitted changes, use --all to discard all"
                         " changes, or 'hg update %s' to update") % ctx.rev()
            else:
                hint = _("use --all to revert all files,"
                         " or 'hg update %s' to update") % ctx.rev()
        elif dirty:
            hint = _("uncommitted changes, use --all to discard all changes")
        else:
            hint = _("use --all to revert all files")
        raise util.Abort(msg, hint=hint)

    return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats, **opts)
5097 5097
@command('rollback', dryrunopts +
         [('f', 'force', False, _('ignore safety measures'))])
def rollback(ui, repo, **opts):
    """roll back the last transaction (DANGEROUS) (DEPRECATED)

    Please use :hg:`commit --amend` instead of rollback to correct
    mistakes in the last commit.

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time. This command does not alter
    the working directory.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository.

    .. container:: verbose

      For example, the following commands are transactional, and their
      effects can be rolled back:

      - commit
      - import
      - pull
      - push (with this repository as the destination)
      - unbundle

      To avoid permanent data loss, rollback will refuse to rollback a
      commit transaction if it isn't checked out. Use --force to
      override this protection.

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.

    Returns 0 on success, 1 if no rollback data is available.
    """
    # thin wrapper: the actual transaction undo lives in the repository
    dryrun = opts.get('dry_run')
    force = opts.get('force')
    return repo.rollback(dryrun=dryrun, force=force)
5142 5142
@command('root', [])
def root(ui, repo):
    """print the root (top) of the current working directory

    Print the root directory of the current repository.

    Returns 0 on success.
    """
    # one line of output: the repository root, newline-terminated
    ui.write("%s\n" % repo.root)
5152 5152
@command('^serve',
    [('A', 'accesslog', '', _('name of access log file to write to'),
     _('FILE')),
    ('d', 'daemon', None, _('run server in background')),
    ('', 'daemon-pipefds', '', _('used internally by daemon mode'), _('NUM')),
    ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
    # use string type, then we can check if something was passed
    ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
    ('a', 'address', '', _('address to listen on (default: all interfaces)'),
     _('ADDR')),
    ('', 'prefix', '', _('prefix path to serve from (default: server root)'),
     _('PREFIX')),
    ('n', 'name', '',
     _('name to show in web pages (default: working directory)'), _('NAME')),
    ('', 'web-conf', '',
     _('name of the hgweb config file (see "hg help hgweb")'), _('FILE')),
    ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
     _('FILE')),
    ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
    ('', 'stdio', None, _('for remote clients')),
    ('', 'cmdserver', '', _('for remote clients'), _('MODE')),
    ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
    ('', 'style', '', _('template style to use'), _('STYLE')),
    ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
    ('', 'certificate', '', _('SSL certificate file'), _('FILE'))],
    _('[OPTION]...'))
def serve(ui, repo, **opts):
    """start stand-alone webserver

    Start a local HTTP repository browser and pull server. You can use
    this for ad-hoc sharing and browsing of repositories. It is
    recommended to use a real web server to serve a repository for
    longer periods of time.

    Please note that the server does not implement access control.
    This means that, by default, anybody can read from the server and
    nobody can write to it by default. Set the ``web.allow_push``
    option to ``*`` to allow everybody to push to the server. You
    should use a real web server if you need to authenticate users.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A/--accesslog and -E/--errorlog options to log to
    files.

    To have the server choose a free port number to listen on, specify
    a port number of 0; in this case, the server will print the port
    number it uses.

    Returns 0 on success.
    """

    if opts["stdio"] and opts["cmdserver"]:
        raise util.Abort(_("cannot use --stdio with --cmdserver"))

    def checkrepo():
        # stdio mode needs a repository; -R may not have found one
        if repo is None:
            raise error.RepoError(_("there is no Mercurial repository here"
                                    " (.hg not found)"))

    if opts["stdio"]:
        # --stdio: serve the ssh wire protocol over stdin/stdout
        checkrepo()
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()

    if opts["cmdserver"]:
        # --cmdserver: serve the command-server protocol instead of HTTP
        s = commandserver.server(ui, repo, opts["cmdserver"])
        return s.serve()

    # this way we can check if something was given in the command-line
    if opts.get('port'):
        opts['port'] = util.getport(opts.get('port'))

    baseui = repo and repo.baseui or ui
    # mirror the relevant command-line options into the [web] config
    # section, where hgweb reads its settings from
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog certificate encoding")
    for o in optlist.split():
        val = opts.get(o, '')
        if val in (None, ''): # should check against default options instead
            continue
        baseui.setconfig("web", o, val, 'serve')
        if repo and repo.ui != baseui:
            repo.ui.setconfig("web", o, val, 'serve')

    # hgweb accepts either a config file path or a repository object
    o = opts.get('web_conf') or opts.get('webdir_conf')
    if not o:
        if not repo:
            raise error.RepoError(_("there is no Mercurial repository"
                                    " here (.hg not found)"))
        o = repo

    app = hgweb.hgweb(o, baseui=baseui)
    service = httpservice(ui, app, opts)
    cmdutil.service(opts, initfn=service.init, runfn=service.run)
5246 5246
5247 5247 class httpservice(object):
5248 5248 def __init__(self, ui, app, opts):
5249 5249 self.ui = ui
5250 5250 self.app = app
5251 5251 self.opts = opts
5252 5252
5253 5253 def init(self):
5254 5254 util.setsignalhandler()
5255 5255 self.httpd = hgweb_server.create_server(self.ui, self.app)
5256 5256
5257 5257 if self.opts['port'] and not self.ui.verbose:
5258 5258 return
5259 5259
5260 5260 if self.httpd.prefix:
5261 5261 prefix = self.httpd.prefix.strip('/') + '/'
5262 5262 else:
5263 5263 prefix = ''
5264 5264
5265 5265 port = ':%d' % self.httpd.port
5266 5266 if port == ':80':
5267 5267 port = ''
5268 5268
5269 5269 bindaddr = self.httpd.addr
5270 5270 if bindaddr == '0.0.0.0':
5271 5271 bindaddr = '*'
5272 5272 elif ':' in bindaddr: # IPv6
5273 5273 bindaddr = '[%s]' % bindaddr
5274 5274
5275 5275 fqaddr = self.httpd.fqaddr
5276 5276 if ':' in fqaddr:
5277 5277 fqaddr = '[%s]' % fqaddr
5278 5278 if self.opts['port']:
5279 5279 write = self.ui.status
5280 5280 else:
5281 5281 write = self.ui.write
5282 5282 write(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
5283 5283 (fqaddr, port, prefix, bindaddr, self.httpd.port))
5284 5284
5285 5285 def run(self):
5286 5286 self.httpd.serve_forever()
5287 5287
5288 5288
5289 5289 @command('^status|st',
5290 5290 [('A', 'all', None, _('show status of all files')),
5291 5291 ('m', 'modified', None, _('show only modified files')),
5292 5292 ('a', 'added', None, _('show only added files')),
5293 5293 ('r', 'removed', None, _('show only removed files')),
5294 5294 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
5295 5295 ('c', 'clean', None, _('show only files without changes')),
5296 5296 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
5297 5297 ('i', 'ignored', None, _('show only ignored files')),
5298 5298 ('n', 'no-status', None, _('hide status prefix')),
5299 5299 ('C', 'copies', None, _('show source of copied files')),
5300 5300 ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
5301 5301 ('', 'rev', [], _('show difference from revision'), _('REV')),
5302 5302 ('', 'change', '', _('list the changed files of a revision'), _('REV')),
5303 5303 ] + walkopts + subrepoopts,
5304 5304 _('[OPTION]... [FILE]...'))
5305 5305 def status(ui, repo, *pats, **opts):
5306 5306 """show changed files in the working directory
5307 5307
5308 5308 Show status of files in the repository. If names are given, only
5309 5309 files that match are shown. Files that are clean or ignored or
5310 5310 the source of a copy/move operation, are not listed unless
5311 5311 -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
5312 5312 Unless options described with "show only ..." are given, the
5313 5313 options -mardu are used.
5314 5314
5315 5315 Option -q/--quiet hides untracked (unknown and ignored) files
5316 5316 unless explicitly requested with -u/--unknown or -i/--ignored.
5317 5317
5318 5318 .. note::
5319 5319
5320 5320 status may appear to disagree with diff if permissions have
5321 5321 changed or a merge has occurred. The standard diff format does
5322 5322 not report permission changes and diff only reports changes
5323 5323 relative to one merge parent.
5324 5324
5325 5325 If one revision is given, it is used as the base revision.
5326 5326 If two revisions are given, the differences between them are
5327 5327 shown. The --change option can also be used as a shortcut to list
5328 5328 the changed files of a revision from its first parent.
5329 5329
5330 5330 The codes used to show the status of files are::
5331 5331
5332 5332 M = modified
5333 5333 A = added
5334 5334 R = removed
5335 5335 C = clean
5336 5336 ! = missing (deleted by non-hg command, but still tracked)
5337 5337 ? = not tracked
5338 5338 I = ignored
5339 5339 = origin of the previous file (with --copies)
5340 5340
5341 5341 .. container:: verbose
5342 5342
5343 5343 Examples:
5344 5344
5345 5345 - show changes in the working directory relative to a
5346 5346 changeset::
5347 5347
5348 5348 hg status --rev 9353
5349 5349
5350 5350 - show all changes including copies in an existing changeset::
5351 5351
5352 5352 hg status --copies --change 9353
5353 5353
5354 5354 - get a NUL separated list of added files, suitable for xargs::
5355 5355
5356 5356 hg status -an0
5357 5357
5358 5358 Returns 0 on success.
5359 5359 """
5360 5360
5361 5361 revs = opts.get('rev')
5362 5362 change = opts.get('change')
5363 5363
5364 5364 if revs and change:
5365 5365 msg = _('cannot specify --rev and --change at the same time')
5366 5366 raise util.Abort(msg)
5367 5367 elif change:
5368 5368 node2 = scmutil.revsingle(repo, change, None).node()
5369 5369 node1 = repo[node2].p1().node()
5370 5370 else:
5371 5371 node1, node2 = scmutil.revpair(repo, revs)
5372 5372
5373 5373 cwd = (pats and repo.getcwd()) or ''
5374 5374 end = opts.get('print0') and '\0' or '\n'
5375 5375 copy = {}
5376 5376 states = 'modified added removed deleted unknown ignored clean'.split()
5377 5377 show = [k for k in states if opts.get(k)]
5378 5378 if opts.get('all'):
5379 5379 show += ui.quiet and (states[:4] + ['clean']) or states
5380 5380 if not show:
5381 5381 show = ui.quiet and states[:4] or states[:5]
5382 5382
5383 5383 stat = repo.status(node1, node2, scmutil.match(repo[node2], pats, opts),
5384 5384 'ignored' in show, 'clean' in show, 'unknown' in show,
5385 5385 opts.get('subrepos'))
5386 5386 changestates = zip(states, 'MAR!?IC', stat)
5387 5387
5388 5388 if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
5389 5389 copy = copies.pathcopies(repo[node1], repo[node2])
5390 5390
5391 5391 fm = ui.formatter('status', opts)
5392 5392 fmt = '%s' + end
5393 5393 showchar = not opts.get('no_status')
5394 5394
5395 5395 for state, char, files in changestates:
5396 5396 if state in show:
5397 5397 label = 'status.' + state
5398 5398 for f in files:
5399 5399 fm.startitem()
5400 5400 fm.condwrite(showchar, 'status', '%s ', char, label=label)
5401 5401 fm.write('path', fmt, repo.pathto(f, cwd), label=label)
5402 5402 if f in copy:
5403 5403 fm.write("copy", ' %s' + end, repo.pathto(copy[f], cwd),
5404 5404 label='status.copied')
5405 5405 fm.end()
5406 5406
5407 5407 @command('^summary|sum',
5408 5408 [('', 'remote', None, _('check for push and pull'))], '[--remote]')
5409 5409 def summary(ui, repo, **opts):
5410 5410 """summarize working directory state
5411 5411
5412 5412 This generates a brief summary of the working directory state,
5413 5413 including parents, branch, commit status, and available updates.
5414 5414
5415 5415 With the --remote option, this will check the default paths for
5416 5416 incoming and outgoing changes. This can be time-consuming.
5417 5417
5418 5418 Returns 0 on success.
5419 5419 """
5420 5420
5421 5421 ctx = repo[None]
5422 5422 parents = ctx.parents()
5423 5423 pnode = parents[0].node()
5424 5424 marks = []
5425 5425
5426 5426 for p in parents:
5427 5427 # label with log.changeset (instead of log.parent) since this
5428 5428 # shows a working directory parent *changeset*:
5429 5429 # i18n: column positioning for "hg summary"
5430 5430 ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
5431 5431 label='log.changeset changeset.%s' % p.phasestr())
5432 5432 ui.write(' '.join(p.tags()), label='log.tag')
5433 5433 if p.bookmarks():
5434 5434 marks.extend(p.bookmarks())
5435 5435 if p.rev() == -1:
5436 5436 if not len(repo):
5437 5437 ui.write(_(' (empty repository)'))
5438 5438 else:
5439 5439 ui.write(_(' (no revision checked out)'))
5440 5440 ui.write('\n')
5441 5441 if p.description():
5442 5442 ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
5443 5443 label='log.summary')
5444 5444
5445 5445 branch = ctx.branch()
5446 5446 bheads = repo.branchheads(branch)
5447 5447 # i18n: column positioning for "hg summary"
5448 5448 m = _('branch: %s\n') % branch
5449 5449 if branch != 'default':
5450 5450 ui.write(m, label='log.branch')
5451 5451 else:
5452 5452 ui.status(m, label='log.branch')
5453 5453
5454 5454 if marks:
5455 5455 current = repo._bookmarkcurrent
5456 5456 # i18n: column positioning for "hg summary"
5457 5457 ui.write(_('bookmarks:'), label='log.bookmark')
5458 5458 if current is not None:
5459 5459 if current in marks:
5460 5460 ui.write(' *' + current, label='bookmarks.current')
5461 5461 marks.remove(current)
5462 5462 else:
5463 5463 ui.write(' [%s]' % current, label='bookmarks.current')
5464 5464 for m in marks:
5465 5465 ui.write(' ' + m, label='log.bookmark')
5466 5466 ui.write('\n', label='log.bookmark')
5467 5467
5468 5468 st = list(repo.status(unknown=True))[:6]
5469 5469
5470 5470 c = repo.dirstate.copies()
5471 5471 copied, renamed = [], []
5472 5472 for d, s in c.iteritems():
5473 5473 if s in st[2]:
5474 5474 st[2].remove(s)
5475 5475 renamed.append(d)
5476 5476 else:
5477 5477 copied.append(d)
5478 5478 if d in st[1]:
5479 5479 st[1].remove(d)
5480 5480 st.insert(3, renamed)
5481 5481 st.insert(4, copied)
5482 5482
5483 5483 ms = mergemod.mergestate(repo)
5484 5484 st.append([f for f in ms if ms[f] == 'u'])
5485 5485
5486 5486 subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
5487 5487 st.append(subs)
5488 5488
5489 5489 labels = [ui.label(_('%d modified'), 'status.modified'),
5490 5490 ui.label(_('%d added'), 'status.added'),
5491 5491 ui.label(_('%d removed'), 'status.removed'),
5492 5492 ui.label(_('%d renamed'), 'status.copied'),
5493 5493 ui.label(_('%d copied'), 'status.copied'),
5494 5494 ui.label(_('%d deleted'), 'status.deleted'),
5495 5495 ui.label(_('%d unknown'), 'status.unknown'),
5496 5496 ui.label(_('%d ignored'), 'status.ignored'),
5497 5497 ui.label(_('%d unresolved'), 'resolve.unresolved'),
5498 5498 ui.label(_('%d subrepos'), 'status.modified')]
5499 5499 t = []
5500 5500 for s, l in zip(st, labels):
5501 5501 if s:
5502 5502 t.append(l % len(s))
5503 5503
5504 5504 t = ', '.join(t)
5505 5505 cleanworkdir = False
5506 5506
5507 5507 if repo.vfs.exists('updatestate'):
5508 5508 t += _(' (interrupted update)')
5509 5509 elif len(parents) > 1:
5510 5510 t += _(' (merge)')
5511 5511 elif branch != parents[0].branch():
5512 5512 t += _(' (new branch)')
5513 5513 elif (parents[0].closesbranch() and
5514 5514 pnode in repo.branchheads(branch, closed=True)):
5515 5515 t += _(' (head closed)')
5516 5516 elif not (st[0] or st[1] or st[2] or st[3] or st[4] or st[9]):
5517 5517 t += _(' (clean)')
5518 5518 cleanworkdir = True
5519 5519 elif pnode not in bheads:
5520 5520 t += _(' (new branch head)')
5521 5521
5522 5522 if cleanworkdir:
5523 5523 # i18n: column positioning for "hg summary"
5524 5524 ui.status(_('commit: %s\n') % t.strip())
5525 5525 else:
5526 5526 # i18n: column positioning for "hg summary"
5527 5527 ui.write(_('commit: %s\n') % t.strip())
5528 5528
5529 5529 # all ancestors of branch heads - all ancestors of parent = new csets
5530 5530 new = len(repo.changelog.findmissing([ctx.node() for ctx in parents],
5531 5531 bheads))
5532 5532
5533 5533 if new == 0:
5534 5534 # i18n: column positioning for "hg summary"
5535 5535 ui.status(_('update: (current)\n'))
5536 5536 elif pnode not in bheads:
5537 5537 # i18n: column positioning for "hg summary"
5538 5538 ui.write(_('update: %d new changesets (update)\n') % new)
5539 5539 else:
5540 5540 # i18n: column positioning for "hg summary"
5541 5541 ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
5542 5542 (new, len(bheads)))
5543 5543
5544 5544 cmdutil.summaryhooks(ui, repo)
5545 5545
5546 5546 if opts.get('remote'):
5547 5547 t = []
5548 5548 source, branches = hg.parseurl(ui.expandpath('default'))
5549 5549 sbranch = branches[0]
5550 5550 other = hg.peer(repo, {}, source)
5551 5551 revs, checkout = hg.addbranchrevs(repo, other, branches, None)
5552 5552 if revs:
5553 5553 revs = [other.lookup(rev) for rev in revs]
5554 5554 ui.debug('comparing with %s\n' % util.hidepassword(source))
5555 5555 repo.ui.pushbuffer()
5556 5556 commoninc = discovery.findcommonincoming(repo, other, heads=revs)
5557 5557 _common, incoming, _rheads = commoninc
5558 5558 repo.ui.popbuffer()
5559 5559 if incoming:
5560 5560 t.append(_('1 or more incoming'))
5561 5561
5562 5562 dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
5563 5563 dbranch = branches[0]
5564 5564 revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
5565 5565 if source != dest:
5566 5566 other = hg.peer(repo, {}, dest)
5567 5567 ui.debug('comparing with %s\n' % util.hidepassword(dest))
5568 5568 if (source != dest or (sbranch is not None and sbranch != dbranch)):
5569 5569 commoninc = None
5570 5570 if revs:
5571 5571 revs = [repo.lookup(rev) for rev in revs]
5572 5572 repo.ui.pushbuffer()
5573 5573 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs,
5574 5574 commoninc=commoninc)
5575 5575 repo.ui.popbuffer()
5576 5576 o = outgoing.missing
5577 5577 if o:
5578 5578 t.append(_('%d outgoing') % len(o))
5579 5579 if 'bookmarks' in other.listkeys('namespaces'):
5580 5580 lmarks = repo.listkeys('bookmarks')
5581 5581 rmarks = other.listkeys('bookmarks')
5582 5582 diff = set(rmarks) - set(lmarks)
5583 5583 if len(diff) > 0:
5584 5584 t.append(_('%d incoming bookmarks') % len(diff))
5585 5585 diff = set(lmarks) - set(rmarks)
5586 5586 if len(diff) > 0:
5587 5587 t.append(_('%d outgoing bookmarks') % len(diff))
5588 5588
5589 5589 if t:
5590 5590 # i18n: column positioning for "hg summary"
5591 5591 ui.write(_('remote: %s\n') % (', '.join(t)))
5592 5592 else:
5593 5593 # i18n: column positioning for "hg summary"
5594 5594 ui.status(_('remote: (synced)\n'))
5595 5595
5596 5596 @command('tag',
5597 5597 [('f', 'force', None, _('force tag')),
5598 5598 ('l', 'local', None, _('make the tag local')),
5599 5599 ('r', 'rev', '', _('revision to tag'), _('REV')),
5600 5600 ('', 'remove', None, _('remove a tag')),
5601 5601 # -l/--local is already there, commitopts cannot be used
5602 5602 ('e', 'edit', None, _('edit commit message')),
5603 5603 ('m', 'message', '', _('use <text> as commit message'), _('TEXT')),
5604 5604 ] + commitopts2,
5605 5605 _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
5606 5606 def tag(ui, repo, name1, *names, **opts):
5607 5607 """add one or more tags for the current or given revision
5608 5608
5609 5609 Name a particular revision using <name>.
5610 5610
5611 5611 Tags are used to name particular revisions of the repository and are
5612 5612 very useful to compare different revisions, to go back to significant
5613 5613 earlier versions or to mark branch points as releases, etc. Changing
5614 5614 an existing tag is normally disallowed; use -f/--force to override.
5615 5615
5616 5616 If no revision is given, the parent of the working directory is
5617 5617 used.
5618 5618
5619 5619 To facilitate version control, distribution, and merging of tags,
5620 5620 they are stored as a file named ".hgtags" which is managed similarly
5621 5621 to other project files and can be hand-edited if necessary. This
5622 5622 also means that tagging creates a new commit. The file
5623 5623 ".hg/localtags" is used for local tags (not shared among
5624 5624 repositories).
5625 5625
5626 5626 Tag commits are usually made at the head of a branch. If the parent
5627 5627 of the working directory is not a branch head, :hg:`tag` aborts; use
5628 5628 -f/--force to force the tag commit to be based on a non-head
5629 5629 changeset.
5630 5630
5631 5631 See :hg:`help dates` for a list of formats valid for -d/--date.
5632 5632
5633 5633 Since tag names have priority over branch names during revision
5634 5634 lookup, using an existing branch name as a tag name is discouraged.
5635 5635
5636 5636 Returns 0 on success.
5637 5637 """
5638 5638 wlock = lock = None
5639 5639 try:
5640 5640 wlock = repo.wlock()
5641 5641 lock = repo.lock()
5642 5642 rev_ = "."
5643 5643 names = [t.strip() for t in (name1,) + names]
5644 5644 if len(names) != len(set(names)):
5645 5645 raise util.Abort(_('tag names must be unique'))
5646 5646 for n in names:
5647 5647 scmutil.checknewlabel(repo, n, 'tag')
5648 5648 if not n:
5649 5649 raise util.Abort(_('tag names cannot consist entirely of '
5650 5650 'whitespace'))
5651 5651 if opts.get('rev') and opts.get('remove'):
5652 5652 raise util.Abort(_("--rev and --remove are incompatible"))
5653 5653 if opts.get('rev'):
5654 5654 rev_ = opts['rev']
5655 5655 message = opts.get('message')
5656 5656 if opts.get('remove'):
5657 5657 expectedtype = opts.get('local') and 'local' or 'global'
5658 5658 for n in names:
5659 5659 if not repo.tagtype(n):
5660 5660 raise util.Abort(_("tag '%s' does not exist") % n)
5661 5661 if repo.tagtype(n) != expectedtype:
5662 5662 if expectedtype == 'global':
5663 5663 raise util.Abort(_("tag '%s' is not a global tag") % n)
5664 5664 else:
5665 5665 raise util.Abort(_("tag '%s' is not a local tag") % n)
5666 5666 rev_ = nullid
5667 5667 if not message:
5668 5668 # we don't translate commit messages
5669 5669 message = 'Removed tag %s' % ', '.join(names)
5670 5670 elif not opts.get('force'):
5671 5671 for n in names:
5672 5672 if n in repo.tags():
5673 5673 raise util.Abort(_("tag '%s' already exists "
5674 5674 "(use -f to force)") % n)
5675 5675 if not opts.get('local'):
5676 5676 p1, p2 = repo.dirstate.parents()
5677 5677 if p2 != nullid:
5678 5678 raise util.Abort(_('uncommitted merge'))
5679 5679 bheads = repo.branchheads()
5680 5680 if not opts.get('force') and bheads and p1 not in bheads:
5681 5681 raise util.Abort(_('not at a branch head (use -f to force)'))
5682 5682 r = scmutil.revsingle(repo, rev_).node()
5683 5683
5684 5684 if not message:
5685 5685 # we don't translate commit messages
5686 5686 message = ('Added tag %s for changeset %s' %
5687 5687 (', '.join(names), short(r)))
5688 5688
5689 5689 date = opts.get('date')
5690 5690 if date:
5691 5691 date = util.parsedate(date)
5692 5692
5693 5693 if opts.get('edit'):
5694 5694 message = ui.edit(message, ui.username())
5695 5695 repo.savecommitmessage(message)
5696 5696
5697 5697 # don't allow tagging the null rev
5698 5698 if (not opts.get('remove') and
5699 5699 scmutil.revsingle(repo, rev_).rev() == nullrev):
5700 5700 raise util.Abort(_("cannot tag null revision"))
5701 5701
5702 5702 repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
5703 5703 finally:
5704 5704 release(lock, wlock)
5705 5705
5706 5706 @command('tags', [], '')
5707 5707 def tags(ui, repo, **opts):
5708 5708 """list repository tags
5709 5709
5710 5710 This lists both regular and local tags. When the -v/--verbose
5711 5711 switch is used, a third column "local" is printed for local tags.
5712 5712
5713 5713 Returns 0 on success.
5714 5714 """
5715 5715
5716 5716 fm = ui.formatter('tags', opts)
5717 5717 hexfunc = ui.debugflag and hex or short
5718 5718 tagtype = ""
5719 5719
5720 5720 for t, n in reversed(repo.tagslist()):
5721 5721 hn = hexfunc(n)
5722 5722 label = 'tags.normal'
5723 5723 tagtype = ''
5724 5724 if repo.tagtype(t) == 'local':
5725 5725 label = 'tags.local'
5726 5726 tagtype = 'local'
5727 5727
5728 5728 fm.startitem()
5729 5729 fm.write('tag', '%s', t, label=label)
5730 5730 fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s'
5731 5731 fm.condwrite(not ui.quiet, 'rev id', fmt,
5732 5732 repo.changelog.rev(n), hn, label=label)
5733 5733 fm.condwrite(ui.verbose and tagtype, 'type', ' %s',
5734 5734 tagtype, label=label)
5735 5735 fm.plain('\n')
5736 5736 fm.end()
5737 5737
5738 5738 @command('tip',
5739 5739 [('p', 'patch', None, _('show patch')),
5740 5740 ('g', 'git', None, _('use git extended diff format')),
5741 5741 ] + templateopts,
5742 5742 _('[-p] [-g]'))
5743 5743 def tip(ui, repo, **opts):
5744 5744 """show the tip revision (DEPRECATED)
5745 5745
5746 5746 The tip revision (usually just called the tip) is the changeset
5747 5747 most recently added to the repository (and therefore the most
5748 5748 recently changed head).
5749 5749
5750 5750 If you have just made a commit, that commit will be the tip. If
5751 5751 you have just pulled changes from another repository, the tip of
5752 5752 that repository becomes the current tip. The "tip" tag is special
5753 5753 and cannot be renamed or assigned to a different changeset.
5754 5754
5755 5755 This command is deprecated, please use :hg:`heads` instead.
5756 5756
5757 5757 Returns 0 on success.
5758 5758 """
5759 5759 displayer = cmdutil.show_changeset(ui, repo, opts)
5760 5760 displayer.show(repo['tip'])
5761 5761 displayer.close()
5762 5762
5763 5763 @command('unbundle',
5764 5764 [('u', 'update', None,
5765 5765 _('update to new branch head if changesets were unbundled'))],
5766 5766 _('[-u] FILE...'))
5767 5767 def unbundle(ui, repo, fname1, *fnames, **opts):
5768 5768 """apply one or more changegroup files
5769 5769
5770 5770 Apply one or more compressed changegroup files generated by the
5771 5771 bundle command.
5772 5772
5773 5773 Returns 0 on success, 1 if an update has unresolved files.
5774 5774 """
5775 5775 fnames = (fname1,) + fnames
5776 5776
5777 5777 lock = repo.lock()
5778 5778 wc = repo['.']
5779 5779 try:
5780 5780 for fname in fnames:
5781 5781 f = hg.openpath(ui, fname)
5782 5782 gen = changegroup.readbundle(f, fname)
5783 modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
5783 modheads = changegroup.addchangegroup(repo, gen, 'unbundle',
5784 'bundle:' + fname)
5784 5785 finally:
5785 5786 lock.release()
5786 5787 bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch())
5787 5788 return postincoming(ui, repo, modheads, opts.get('update'), None)
5788 5789
5789 5790 @command('^update|up|checkout|co',
5790 5791 [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
5791 5792 ('c', 'check', None,
5792 5793 _('update across branches if no uncommitted changes')),
5793 5794 ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
5794 5795 ('r', 'rev', '', _('revision'), _('REV'))],
5795 5796 _('[-c] [-C] [-d DATE] [[-r] REV]'))
5796 5797 def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
5797 5798 """update working directory (or switch revisions)
5798 5799
5799 5800 Update the repository's working directory to the specified
5800 5801 changeset. If no changeset is specified, update to the tip of the
5801 5802 current named branch and move the current bookmark (see :hg:`help
5802 5803 bookmarks`).
5803 5804
5804 5805 Update sets the working directory's parent revision to the specified
5805 5806 changeset (see :hg:`help parents`).
5806 5807
5807 5808 If the changeset is not a descendant or ancestor of the working
5808 5809 directory's parent, the update is aborted. With the -c/--check
5809 5810 option, the working directory is checked for uncommitted changes; if
5810 5811 none are found, the working directory is updated to the specified
5811 5812 changeset.
5812 5813
5813 5814 .. container:: verbose
5814 5815
5815 5816 The following rules apply when the working directory contains
5816 5817 uncommitted changes:
5817 5818
5818 5819 1. If neither -c/--check nor -C/--clean is specified, and if
5819 5820 the requested changeset is an ancestor or descendant of
5820 5821 the working directory's parent, the uncommitted changes
5821 5822 are merged into the requested changeset and the merged
5822 5823 result is left uncommitted. If the requested changeset is
5823 5824 not an ancestor or descendant (that is, it is on another
5824 5825 branch), the update is aborted and the uncommitted changes
5825 5826 are preserved.
5826 5827
5827 5828 2. With the -c/--check option, the update is aborted and the
5828 5829 uncommitted changes are preserved.
5829 5830
5830 5831 3. With the -C/--clean option, uncommitted changes are discarded and
5831 5832 the working directory is updated to the requested changeset.
5832 5833
5833 5834 To cancel an uncommitted merge (and lose your changes), use
5834 5835 :hg:`update --clean .`.
5835 5836
5836 5837 Use null as the changeset to remove the working directory (like
5837 5838 :hg:`clone -U`).
5838 5839
5839 5840 If you want to revert just one file to an older revision, use
5840 5841 :hg:`revert [-r REV] NAME`.
5841 5842
5842 5843 See :hg:`help dates` for a list of formats valid for -d/--date.
5843 5844
5844 5845 Returns 0 on success, 1 if there are unresolved files.
5845 5846 """
5846 5847 if rev and node:
5847 5848 raise util.Abort(_("please specify just one revision"))
5848 5849
5849 5850 if rev is None or rev == '':
5850 5851 rev = node
5851 5852
5852 5853 cmdutil.clearunfinished(repo)
5853 5854
5854 5855 # with no argument, we also move the current bookmark, if any
5855 5856 rev, movemarkfrom = bookmarks.calculateupdate(ui, repo, rev)
5856 5857
5857 5858 # if we defined a bookmark, we have to remember the original bookmark name
5858 5859 brev = rev
5859 5860 rev = scmutil.revsingle(repo, rev, rev).rev()
5860 5861
5861 5862 if check and clean:
5862 5863 raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))
5863 5864
5864 5865 if date:
5865 5866 if rev is not None:
5866 5867 raise util.Abort(_("you can't specify a revision and a date"))
5867 5868 rev = cmdutil.finddate(ui, repo, date)
5868 5869
5869 5870 if check:
5870 5871 c = repo[None]
5871 5872 if c.dirty(merge=False, branch=False, missing=True):
5872 5873 raise util.Abort(_("uncommitted changes"))
5873 5874 if rev is None:
5874 5875 rev = repo[repo[None].branch()].rev()
5875 5876 mergemod._checkunknown(repo, repo[None], repo[rev])
5876 5877
5877 5878 if clean:
5878 5879 ret = hg.clean(repo, rev)
5879 5880 else:
5880 5881 ret = hg.update(repo, rev)
5881 5882
5882 5883 if not ret and movemarkfrom:
5883 5884 if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
5884 5885 ui.status(_("updating bookmark %s\n") % repo._bookmarkcurrent)
5885 5886 elif brev in repo._bookmarks:
5886 5887 bookmarks.setcurrent(repo, brev)
5887 5888 elif brev:
5888 5889 bookmarks.unsetcurrent(repo)
5889 5890
5890 5891 return ret
5891 5892
5892 5893 @command('verify', [])
5893 5894 def verify(ui, repo):
5894 5895 """verify the integrity of the repository
5895 5896
5896 5897 Verify the integrity of the current repository.
5897 5898
5898 5899 This will perform an extensive check of the repository's
5899 5900 integrity, validating the hashes and checksums of each entry in
5900 5901 the changelog, manifest, and tracked files, as well as the
5901 5902 integrity of their crosslinks and indices.
5902 5903
5903 5904 Please see http://mercurial.selenic.com/wiki/RepositoryCorruption
5904 5905 for more information about recovery from corruption of the
5905 5906 repository.
5906 5907
5907 5908 Returns 0 on success, 1 if errors are encountered.
5908 5909 """
5909 5910 return hg.verify(repo)
5910 5911
5911 5912 @command('version', [])
5912 5913 def version_(ui):
5913 5914 """output version and copyright information"""
5914 5915 ui.write(_("Mercurial Distributed SCM (version %s)\n")
5915 5916 % util.version())
5916 5917 ui.status(_(
5917 5918 "(see http://mercurial.selenic.com for more information)\n"
5918 5919 "\nCopyright (C) 2005-2014 Matt Mackall and others\n"
5919 5920 "This is free software; see the source for copying conditions. "
5920 5921 "There is NO\nwarranty; "
5921 5922 "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
5922 5923 ))
5923 5924
5924 5925 norepo = ("clone init version help debugcommands debugcomplete"
5925 5926 " debugdate debuginstall debugfsinfo debugpushkey debugwireargs"
5926 5927 " debugknown debuggetbundle debugbundle")
5927 5928 optionalrepo = ("identify paths serve config showconfig debugancestor debugdag"
5928 5929 " debugdata debugindex debugindexdot debugrevlog")
5929 5930 inferrepo = ("add addremove annotate cat commit diff grep forget log parents"
5930 5931 " remove resolve status debugwalk")
@@ -1,554 +1,554 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno
11 11 import util, scmutil, changegroup, base85
12 12 import discovery, phases, obsolete, bookmarks
13 13
14 14
15 15 class pushoperation(object):
16 16 """A object that represent a single push operation
17 17
18 18 It purpose is to carry push related state and very common operation.
19 19
20 20 A new should be created at the begining of each push and discarded
21 21 afterward.
22 22 """
23 23
24 24 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
25 25 # repo we push from
26 26 self.repo = repo
27 27 self.ui = repo.ui
28 28 # repo we push to
29 29 self.remote = remote
30 30 # force option provided
31 31 self.force = force
32 32 # revs to be pushed (None is "all")
33 33 self.revs = revs
34 34 # allow push of new branch
35 35 self.newbranch = newbranch
36 36 # did a local lock get acquired?
37 37 self.locallocked = None
38 38 # Integer version of the push result
39 39 # - None means nothing to push
40 40 # - 0 means HTTP error
41 41 # - 1 means we pushed and remote head count is unchanged *or*
42 42 # we have outgoing changesets but refused to push
43 43 # - other values as described by addchangegroup()
44 44 self.ret = None
45 45 # discovery.outgoing object (contains common and outgoing data)
46 46 self.outgoing = None
47 47 # all remote heads before the push
48 48 self.remoteheads = None
49 49 # testable as a boolean indicating if any nodes are missing locally.
50 50 self.incoming = None
51 51 # set of all heads common after changeset bundle push
52 52 self.commonheads = None
53 53
54 54 def push(repo, remote, force=False, revs=None, newbranch=False):
55 55 '''Push outgoing changesets (limited by revs) from a local
56 56 repository to remote. Return an integer:
57 57 - None means nothing to push
58 58 - 0 means HTTP error
59 59 - 1 means we pushed and remote head count is unchanged *or*
60 60 we have outgoing changesets but refused to push
61 61 - other values as described by addchangegroup()
62 62 '''
63 63 pushop = pushoperation(repo, remote, force, revs, newbranch)
64 64 if pushop.remote.local():
65 65 missing = (set(pushop.repo.requirements)
66 66 - pushop.remote.local().supported)
67 67 if missing:
68 68 msg = _("required features are not"
69 69 " supported in the destination:"
70 70 " %s") % (', '.join(sorted(missing)))
71 71 raise util.Abort(msg)
72 72
73 73 # there are two ways to push to remote repo:
74 74 #
75 75 # addchangegroup assumes local user can lock remote
76 76 # repo (local filesystem, old ssh servers).
77 77 #
78 78 # unbundle assumes local user cannot lock remote repo (new ssh
79 79 # servers, http servers).
80 80
81 81 if not pushop.remote.canpush():
82 82 raise util.Abort(_("destination does not support push"))
83 83 # get local lock as we might write phase data
84 84 locallock = None
85 85 try:
86 86 locallock = pushop.repo.lock()
87 87 pushop.locallocked = True
88 88 except IOError, err:
89 89 pushop.locallocked = False
90 90 if err.errno != errno.EACCES:
91 91 raise
92 92 # source repo cannot be locked.
93 93 # We do not abort the push, but just disable the local phase
94 94 # synchronisation.
95 95 msg = 'cannot lock source repository: %s\n' % err
96 96 pushop.ui.debug(msg)
97 97 try:
98 98 pushop.repo.checkpush(pushop)
99 99 lock = None
100 100 unbundle = pushop.remote.capable('unbundle')
101 101 if not unbundle:
102 102 lock = pushop.remote.lock()
103 103 try:
104 104 _pushdiscovery(pushop)
105 105 if _pushcheckoutgoing(pushop):
106 106 _pushchangeset(pushop)
107 107 _pushcomputecommonheads(pushop)
108 108 _pushsyncphase(pushop)
109 109 _pushobsolete(pushop)
110 110 finally:
111 111 if lock is not None:
112 112 lock.release()
113 113 finally:
114 114 if locallock is not None:
115 115 locallock.release()
116 116
117 117 _pushbookmark(pushop)
118 118 return pushop.ret
119 119
120 120 def _pushdiscovery(pushop):
121 121 # discovery
122 122 unfi = pushop.repo.unfiltered()
123 123 fci = discovery.findcommonincoming
124 124 commoninc = fci(unfi, pushop.remote, force=pushop.force)
125 125 common, inc, remoteheads = commoninc
126 126 fco = discovery.findcommonoutgoing
127 127 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
128 128 commoninc=commoninc, force=pushop.force)
129 129 pushop.outgoing = outgoing
130 130 pushop.remoteheads = remoteheads
131 131 pushop.incoming = inc
132 132
133 133 def _pushcheckoutgoing(pushop):
134 134 outgoing = pushop.outgoing
135 135 unfi = pushop.repo.unfiltered()
136 136 if not outgoing.missing:
137 137 # nothing to push
138 138 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
139 139 return False
140 140 # something to push
141 141 if not pushop.force:
142 142 # if repo.obsstore == False --> no obsolete
143 143 # then, save the iteration
144 144 if unfi.obsstore:
145 145 # these messages are here for 80 char limit reasons
146 146 mso = _("push includes obsolete changeset: %s!")
147 147 mst = "push includes %s changeset: %s!"
148 148 # plain versions for i18n tool to detect them
149 149 _("push includes unstable changeset: %s!")
150 150 _("push includes bumped changeset: %s!")
151 151 _("push includes divergent changeset: %s!")
152 152 # If we are to push if there is at least one
153 153 # obsolete or unstable changeset in missing, at
154 154 # least one of the missinghead will be obsolete or
155 155 # unstable. So checking heads only is ok
156 156 for node in outgoing.missingheads:
157 157 ctx = unfi[node]
158 158 if ctx.obsolete():
159 159 raise util.Abort(mso % ctx)
160 160 elif ctx.troubled():
161 161 raise util.Abort(_(mst)
162 162 % (ctx.troubles()[0],
163 163 ctx))
164 164 newbm = pushop.ui.configlist('bookmarks', 'pushing')
165 165 discovery.checkheads(unfi, pushop.remote, outgoing,
166 166 pushop.remoteheads,
167 167 pushop.newbranch,
168 168 bool(pushop.incoming),
169 169 newbm)
170 170 return True
171 171
172 172 def _pushchangeset(pushop):
173 173 """Make the actual push of changeset bundle to remote repo"""
174 174 outgoing = pushop.outgoing
175 175 unbundle = pushop.remote.capable('unbundle')
176 176 # TODO: get bundlecaps from remote
177 177 bundlecaps = None
178 178 # create a changegroup from local
179 179 if pushop.revs is None and not (outgoing.excluded
180 180 or pushop.repo.changelog.filteredrevs):
181 181 # push everything,
182 182 # use the fast path, no race possible on push
183 183 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
184 184 cg = changegroup.getsubset(pushop.repo,
185 185 outgoing,
186 186 bundler,
187 187 'push',
188 188 fastpath=True)
189 189 else:
190 190 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
191 191 bundlecaps)
192 192
193 193 # apply changegroup to remote
194 194 if unbundle:
195 195 # local repo finds heads on server, finds out what
196 196 # revs it must push. once revs transferred, if server
197 197 # finds it has different heads (someone else won
198 198 # commit/push race), server aborts.
199 199 if pushop.force:
200 200 remoteheads = ['force']
201 201 else:
202 202 remoteheads = pushop.remoteheads
203 203 # ssh: return remote's addchangegroup()
204 204 # http: return remote's addchangegroup() or 0 for error
205 205 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
206 206 'push')
207 207 else:
208 208 # we return an integer indicating remote head count
209 209 # change
210 210 pushop.ret = pushop.remote.addchangegroup(cg, 'push',
211 211 pushop.repo.url())
212 212
213 213 def _pushcomputecommonheads(pushop):
214 214 unfi = pushop.repo.unfiltered()
215 215 if pushop.ret:
216 216 # push succeed, synchronize target of the push
217 217 cheads = pushop.outgoing.missingheads
218 218 elif pushop.revs is None:
219 219 # All out push fails. synchronize all common
220 220 cheads = pushop.outgoing.commonheads
221 221 else:
222 222 # I want cheads = heads(::missingheads and ::commonheads)
223 223 # (missingheads is revs with secret changeset filtered out)
224 224 #
225 225 # This can be expressed as:
226 226 # cheads = ( (missingheads and ::commonheads)
227 227 # + (commonheads and ::missingheads))"
228 228 # )
229 229 #
230 230 # while trying to push we already computed the following:
231 231 # common = (::commonheads)
232 232 # missing = ((commonheads::missingheads) - commonheads)
233 233 #
234 234 # We can pick:
235 235 # * missingheads part of common (::commonheads)
236 236 common = set(pushop.outgoing.common)
237 237 nm = pushop.repo.changelog.nodemap
238 238 cheads = [node for node in pushop.revs if nm[node] in common]
239 239 # and
240 240 # * commonheads parents on missing
241 241 revset = unfi.set('%ln and parents(roots(%ln))',
242 242 pushop.outgoing.commonheads,
243 243 pushop.outgoing.missing)
244 244 cheads.extend(c.node() for c in revset)
245 245 pushop.commonheads = cheads
246 246
247 247 def _pushsyncphase(pushop):
248 248 """synchronise phase information locally and remotely"""
249 249 unfi = pushop.repo.unfiltered()
250 250 cheads = pushop.commonheads
251 251 if pushop.ret:
252 252 # push succeed, synchronize target of the push
253 253 cheads = pushop.outgoing.missingheads
254 254 elif pushop.revs is None:
255 255 # All out push fails. synchronize all common
256 256 cheads = pushop.outgoing.commonheads
257 257 else:
258 258 # I want cheads = heads(::missingheads and ::commonheads)
259 259 # (missingheads is revs with secret changeset filtered out)
260 260 #
261 261 # This can be expressed as:
262 262 # cheads = ( (missingheads and ::commonheads)
263 263 # + (commonheads and ::missingheads))"
264 264 # )
265 265 #
266 266 # while trying to push we already computed the following:
267 267 # common = (::commonheads)
268 268 # missing = ((commonheads::missingheads) - commonheads)
269 269 #
270 270 # We can pick:
271 271 # * missingheads part of common (::commonheads)
272 272 common = set(pushop.outgoing.common)
273 273 nm = pushop.repo.changelog.nodemap
274 274 cheads = [node for node in pushop.revs if nm[node] in common]
275 275 # and
276 276 # * commonheads parents on missing
277 277 revset = unfi.set('%ln and parents(roots(%ln))',
278 278 pushop.outgoing.commonheads,
279 279 pushop.outgoing.missing)
280 280 cheads.extend(c.node() for c in revset)
281 281 pushop.commonheads = cheads
282 282 # even when we don't push, exchanging phase data is useful
283 283 remotephases = pushop.remote.listkeys('phases')
284 284 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
285 285 and remotephases # server supports phases
286 286 and pushop.ret is None # nothing was pushed
287 287 and remotephases.get('publishing', False)):
288 288 # When:
289 289 # - this is a subrepo push
290 290 # - and remote support phase
291 291 # - and no changeset was pushed
292 292 # - and remote is publishing
293 293 # We may be in issue 3871 case!
294 294 # We drop the possible phase synchronisation done by
295 295 # courtesy to publish changesets possibly locally draft
296 296 # on the remote.
297 297 remotephases = {'publishing': 'True'}
298 298 if not remotephases: # old server or public only repo
299 299 _localphasemove(pushop, cheads)
300 300 # don't push any phase data as there is nothing to push
301 301 else:
302 302 ana = phases.analyzeremotephases(pushop.repo, cheads,
303 303 remotephases)
304 304 pheads, droots = ana
305 305 ### Apply remote phase on local
306 306 if remotephases.get('publishing', False):
307 307 _localphasemove(pushop, cheads)
308 308 else: # publish = False
309 309 _localphasemove(pushop, pheads)
310 310 _localphasemove(pushop, cheads, phases.draft)
311 311 ### Apply local phase on remote
312 312
313 313 # Get the list of all revs draft on remote by public here.
314 314 # XXX Beware that revset break if droots is not strictly
315 315 # XXX root we may want to ensure it is but it is costly
316 316 outdated = unfi.set('heads((%ln::%ln) and public())',
317 317 droots, cheads)
318 318 for newremotehead in outdated:
319 319 r = pushop.remote.pushkey('phases',
320 320 newremotehead.hex(),
321 321 str(phases.draft),
322 322 str(phases.public))
323 323 if not r:
324 324 pushop.ui.warn(_('updating %s to public failed!\n')
325 325 % newremotehead)
326 326
327 327 def _localphasemove(pushop, nodes, phase=phases.public):
328 328 """move <nodes> to <phase> in the local source repo"""
329 329 if pushop.locallocked:
330 330 phases.advanceboundary(pushop.repo, phase, nodes)
331 331 else:
332 332 # repo is not locked, do not change any phases!
333 333 # Informs the user that phases should have been moved when
334 334 # applicable.
335 335 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
336 336 phasestr = phases.phasenames[phase]
337 337 if actualmoves:
338 338 pushop.ui.status(_('cannot lock source repo, skipping '
339 339 'local %s phase update\n') % phasestr)
340 340
341 341 def _pushobsolete(pushop):
342 342 """utility function to push obsolete markers to a remote"""
343 343 pushop.ui.debug('try to push obsolete markers to remote\n')
344 344 repo = pushop.repo
345 345 remote = pushop.remote
346 346 if (obsolete._enabled and repo.obsstore and
347 347 'obsolete' in remote.listkeys('namespaces')):
348 348 rslts = []
349 349 remotedata = repo.listkeys('obsolete')
350 350 for key in sorted(remotedata, reverse=True):
351 351 # reverse sort to ensure we end with dump0
352 352 data = remotedata[key]
353 353 rslts.append(remote.pushkey('obsolete', key, '', data))
354 354 if [r for r in rslts if not r]:
355 355 msg = _('failed to push some obsolete markers!\n')
356 356 repo.ui.warn(msg)
357 357
358 358 def _pushbookmark(pushop):
359 359 """Update bookmark position on remote"""
360 360 ui = pushop.ui
361 361 repo = pushop.repo.unfiltered()
362 362 remote = pushop.remote
363 363 ui.debug("checking for updated bookmarks\n")
364 364 revnums = map(repo.changelog.rev, pushop.revs or [])
365 365 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
366 366 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
367 367 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
368 368 srchex=hex)
369 369
370 370 for b, scid, dcid in advsrc:
371 371 if ancestors and repo[scid].rev() not in ancestors:
372 372 continue
373 373 if remote.pushkey('bookmarks', b, dcid, scid):
374 374 ui.status(_("updating bookmark %s\n") % b)
375 375 else:
376 376 ui.warn(_('updating bookmark %s failed!\n') % b)
377 377
378 378 class pulloperation(object):
379 379 """An object that represents a single pull operation
380 380
381 381 Its purpose is to carry pull related state and very common operations.
382 382
383 383 A new one should be created at the beginning of each pull and discarded
384 384 afterward.
385 385 """
386 386
387 387 def __init__(self, repo, remote, heads=None, force=False):
388 388 # repo we pull into
389 389 self.repo = repo
390 390 # repo we pull from
391 391 self.remote = remote
392 392 # revision we try to pull (None is "all")
393 393 self.heads = heads
394 394 # do we force pull?
395 395 self.force = force
396 396 # the name of the pull transaction
397 397 self._trname = 'pull\n' + util.hidepassword(remote.url())
398 398 # hold the transaction once created
399 399 self._tr = None
400 400 # set of common changeset between local and remote before pull
401 401 self.common = None
402 402 # set of pulled head
403 403 self.rheads = None
404 404 # list of missing changesets to fetch remotely
405 405 self.fetch = None
406 406 # result of changegroup pulling (used as return code by pull)
407 407 self.cgresult = None
408 408 # list of step remaining todo (related to future bundle2 usage)
409 409 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
410 410
411 411 @util.propertycache
412 412 def pulledsubset(self):
413 413 """heads of the set of changeset target by the pull"""
414 414 # compute target subset
415 415 if self.heads is None:
416 416 # We pulled every thing possible
417 417 # sync on everything common
418 418 c = set(self.common)
419 419 ret = list(self.common)
420 420 for n in self.rheads:
421 421 if n not in c:
422 422 ret.append(n)
423 423 return ret
424 424 else:
425 425 # We pulled a specific subset
426 426 # sync on this subset
427 427 return self.heads
428 428
429 429 def gettransaction(self):
430 430 """get appropriate pull transaction, creating it if needed"""
431 431 if self._tr is None:
432 432 self._tr = self.repo.transaction(self._trname)
433 433 return self._tr
434 434
435 435 def closetransaction(self):
436 436 """close transaction if created"""
437 437 if self._tr is not None:
438 438 self._tr.close()
439 439
440 440 def releasetransaction(self):
441 441 """release transaction if created"""
442 442 if self._tr is not None:
443 443 self._tr.release()
444 444
445 445 def pull(repo, remote, heads=None, force=False):
446 446 pullop = pulloperation(repo, remote, heads, force)
447 447 if pullop.remote.local():
448 448 missing = set(pullop.remote.requirements) - pullop.repo.supported
449 449 if missing:
450 450 msg = _("required features are not"
451 451 " supported in the destination:"
452 452 " %s") % (', '.join(sorted(missing)))
453 453 raise util.Abort(msg)
454 454
455 455 lock = pullop.repo.lock()
456 456 try:
457 457 _pulldiscovery(pullop)
458 458 if 'changegroup' in pullop.todosteps:
459 459 _pullchangeset(pullop)
460 460 if 'phases' in pullop.todosteps:
461 461 _pullphase(pullop)
462 462 if 'obsmarkers' in pullop.todosteps:
463 463 _pullobsolete(pullop)
464 464 pullop.closetransaction()
465 465 finally:
466 466 pullop.releasetransaction()
467 467 lock.release()
468 468
469 469 return pullop.cgresult
470 470
471 471 def _pulldiscovery(pullop):
472 472 """discovery phase for the pull
473 473
474 474 Current handle changeset discovery only, will change handle all discovery
475 475 at some point."""
476 476 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
477 477 pullop.remote,
478 478 heads=pullop.heads,
479 479 force=pullop.force)
480 480 pullop.common, pullop.fetch, pullop.rheads = tmp
481 481
482 482 def _pullchangeset(pullop):
483 483 """pull changeset from unbundle into the local repo"""
484 484 # We delay the open of the transaction as late as possible so we
485 485 # don't open transaction for nothing or you break future useful
486 486 # rollback call
487 487 pullop.todosteps.remove('changegroup')
488 488 if not pullop.fetch:
489 489 pullop.repo.ui.status(_("no changes found\n"))
490 490 pullop.cgresult = 0
491 491 return
492 492 pullop.gettransaction()
493 493 if pullop.heads is None and list(pullop.common) == [nullid]:
494 494 pullop.repo.ui.status(_("requesting all changes\n"))
495 495 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
496 496 # issue1320, avoid a race if remote changed after discovery
497 497 pullop.heads = pullop.rheads
498 498
499 499 if pullop.remote.capable('getbundle'):
500 500 # TODO: get bundlecaps from remote
501 501 cg = pullop.remote.getbundle('pull', common=pullop.common,
502 502 heads=pullop.heads or pullop.rheads)
503 503 elif pullop.heads is None:
504 504 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
505 505 elif not pullop.remote.capable('changegroupsubset'):
506 506 raise util.Abort(_("partial pull cannot be done because "
507 507 "other repository doesn't support "
508 508 "changegroupsubset."))
509 509 else:
510 510 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
511 pullop.cgresult = pullop.repo.addchangegroup(cg, 'pull',
511 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
512 512 pullop.remote.url())
513 513
514 514 def _pullphase(pullop):
515 515 # Get remote phases data from remote
516 516 pullop.todosteps.remove('phases')
517 517 remotephases = pullop.remote.listkeys('phases')
518 518 publishing = bool(remotephases.get('publishing', False))
519 519 if remotephases and not publishing:
520 520 # remote is new and unpublishing
521 521 pheads, _dr = phases.analyzeremotephases(pullop.repo,
522 522 pullop.pulledsubset,
523 523 remotephases)
524 524 phases.advanceboundary(pullop.repo, phases.public, pheads)
525 525 phases.advanceboundary(pullop.repo, phases.draft,
526 526 pullop.pulledsubset)
527 527 else:
528 528 # Remote is old or publishing all common changesets
529 529 # should be seen as public
530 530 phases.advanceboundary(pullop.repo, phases.public,
531 531 pullop.pulledsubset)
532 532
533 533 def _pullobsolete(pullop):
534 534 """utility function to pull obsolete markers from a remote
535 535
536 536 The `gettransaction` is a function that returns the pull transaction, creating
537 537 one if necessary. We return the transaction to inform the calling code that
538 538 a new transaction has been created (when applicable).
539 539
540 540 Exists mostly to allow overriding for experimentation purpose"""
541 541 pullop.todosteps.remove('obsmarkers')
542 542 tr = None
543 543 if obsolete._enabled:
544 544 pullop.repo.ui.debug('fetching remote obsolete markers\n')
545 545 remoteobs = pullop.remote.listkeys('obsolete')
546 546 if 'dump0' in remoteobs:
547 547 tr = pullop.gettransaction()
548 548 for key in sorted(remoteobs, reverse=True):
549 549 if key.startswith('dump'):
550 550 data = base85.b85decode(remoteobs[key])
551 551 pullop.repo.obsstore.mergemarkers(tr, data)
552 552 pullop.repo.invalidatevolatilesets()
553 553 return tr
554 554
@@ -1,2055 +1,1869 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock as lockmod
12 12 import transaction, store, encoding, exchange
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 import branchmap, pathutil
20 20 propertycache = util.propertycache
21 21 filecache = scmutil.filecache
22 22
23 23 class repofilecache(filecache):
24 24 """All filecache usage on repo are done for logic that should be unfiltered
25 25 """
26 26
27 27 def __get__(self, repo, type=None):
28 28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 29 def __set__(self, repo, value):
30 30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 31 def __delete__(self, repo):
32 32 return super(repofilecache, self).__delete__(repo.unfiltered())
33 33
34 34 class storecache(repofilecache):
35 35 """filecache for files in the store"""
36 36 def join(self, obj, fname):
37 37 return obj.sjoin(fname)
38 38
39 39 class unfilteredpropertycache(propertycache):
40 40 """propertycache that apply to unfiltered repo only"""
41 41
42 42 def __get__(self, repo, type=None):
43 43 unfi = repo.unfiltered()
44 44 if unfi is repo:
45 45 return super(unfilteredpropertycache, self).__get__(unfi)
46 46 return getattr(unfi, self.name)
47 47
48 48 class filteredpropertycache(propertycache):
49 49 """propertycache that must take filtering in account"""
50 50
51 51 def cachevalue(self, obj, value):
52 52 object.__setattr__(obj, self.name, value)
53 53
54 54
55 55 def hasunfilteredcache(repo, name):
56 56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 57 return name in vars(repo.unfiltered())
58 58
59 59 def unfilteredmethod(orig):
60 60 """decorate method that always need to be run on unfiltered version"""
61 61 def wrapper(repo, *args, **kwargs):
62 62 return orig(repo.unfiltered(), *args, **kwargs)
63 63 return wrapper
64 64
65 65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
66 66 legacycaps = moderncaps.union(set(['changegroupsubset']))
67 67
68 68 class localpeer(peer.peerrepository):
69 69 '''peer for a local repo; reflects only the most recent API'''
70 70
71 71 def __init__(self, repo, caps=moderncaps):
72 72 peer.peerrepository.__init__(self)
73 73 self._repo = repo.filtered('served')
74 74 self.ui = repo.ui
75 75 self._caps = repo._restrictcapabilities(caps)
76 76 self.requirements = repo.requirements
77 77 self.supportedformats = repo.supportedformats
78 78
79 79 def close(self):
80 80 self._repo.close()
81 81
82 82 def _capabilities(self):
83 83 return self._caps
84 84
85 85 def local(self):
86 86 return self._repo
87 87
88 88 def canpush(self):
89 89 return True
90 90
91 91 def url(self):
92 92 return self._repo.url()
93 93
94 94 def lookup(self, key):
95 95 return self._repo.lookup(key)
96 96
97 97 def branchmap(self):
98 98 return self._repo.branchmap()
99 99
100 100 def heads(self):
101 101 return self._repo.heads()
102 102
103 103 def known(self, nodes):
104 104 return self._repo.known(nodes)
105 105
106 106 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
107 107 return changegroup.getbundle(self._repo, source, heads=heads,
108 108 common=common, bundlecaps=None)
109 109
110 110 # TODO We might want to move the next two calls into legacypeer and add
111 111 # unbundle instead.
112 112
113 113 def lock(self):
114 114 return self._repo.lock()
115 115
116 116 def addchangegroup(self, cg, source, url):
117 return self._repo.addchangegroup(cg, source, url)
117 return changegroup.addchangegroup(self._repo, cg, source, url)
118 118
119 119 def pushkey(self, namespace, key, old, new):
120 120 return self._repo.pushkey(namespace, key, old, new)
121 121
122 122 def listkeys(self, namespace):
123 123 return self._repo.listkeys(namespace)
124 124
125 125 def debugwireargs(self, one, two, three=None, four=None, five=None):
126 126 '''used to test argument passing over the wire'''
127 127 return "%s %s %s %s %s" % (one, two, three, four, five)
128 128
129 129 class locallegacypeer(localpeer):
130 130 '''peer extension which implements legacy methods too; used for tests with
131 131 restricted capabilities'''
132 132
133 133 def __init__(self, repo):
134 134 localpeer.__init__(self, repo, caps=legacycaps)
135 135
136 136 def branches(self, nodes):
137 137 return self._repo.branches(nodes)
138 138
139 139 def between(self, pairs):
140 140 return self._repo.between(pairs)
141 141
142 142 def changegroup(self, basenodes, source):
143 143 return changegroup.changegroup(self._repo, basenodes, source)
144 144
145 145 def changegroupsubset(self, bases, heads, source):
146 146 return changegroup.changegroupsubset(self._repo, bases, heads, source)
147 147
148 148 class localrepository(object):
149 149
150 150 supportedformats = set(('revlogv1', 'generaldelta'))
151 151 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
152 152 'dotencode'))
153 153 openerreqs = set(('revlogv1', 'generaldelta'))
154 154 requirements = ['revlogv1']
155 155 filtername = None
156 156
157 157 # a list of (ui, featureset) functions.
158 158 # only functions defined in module of enabled extensions are invoked
159 159 featuresetupfuncs = set()
160 160
161 161 def _baserequirements(self, create):
162 162 return self.requirements[:]
163 163
164 164 def __init__(self, baseui, path=None, create=False):
165 165 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
166 166 self.wopener = self.wvfs
167 167 self.root = self.wvfs.base
168 168 self.path = self.wvfs.join(".hg")
169 169 self.origroot = path
170 170 self.auditor = pathutil.pathauditor(self.root, self._checknested)
171 171 self.vfs = scmutil.vfs(self.path)
172 172 self.opener = self.vfs
173 173 self.baseui = baseui
174 174 self.ui = baseui.copy()
175 175 self.ui.copy = baseui.copy # prevent copying repo configuration
176 176 # A list of callback to shape the phase if no data were found.
177 177 # Callback are in the form: func(repo, roots) --> processed root.
178 178 # This list it to be filled by extension during repo setup
179 179 self._phasedefaults = []
180 180 try:
181 181 self.ui.readconfig(self.join("hgrc"), self.root)
182 182 extensions.loadall(self.ui)
183 183 except IOError:
184 184 pass
185 185
186 186 if self.featuresetupfuncs:
187 187 self.supported = set(self._basesupported) # use private copy
188 188 extmods = set(m.__name__ for n, m
189 189 in extensions.extensions(self.ui))
190 190 for setupfunc in self.featuresetupfuncs:
191 191 if setupfunc.__module__ in extmods:
192 192 setupfunc(self.ui, self.supported)
193 193 else:
194 194 self.supported = self._basesupported
195 195
196 196 if not self.vfs.isdir():
197 197 if create:
198 198 if not self.wvfs.exists():
199 199 self.wvfs.makedirs()
200 200 self.vfs.makedir(notindexed=True)
201 201 requirements = self._baserequirements(create)
202 202 if self.ui.configbool('format', 'usestore', True):
203 203 self.vfs.mkdir("store")
204 204 requirements.append("store")
205 205 if self.ui.configbool('format', 'usefncache', True):
206 206 requirements.append("fncache")
207 207 if self.ui.configbool('format', 'dotencode', True):
208 208 requirements.append('dotencode')
209 209 # create an invalid changelog
210 210 self.vfs.append(
211 211 "00changelog.i",
212 212 '\0\0\0\2' # represents revlogv2
213 213 ' dummy changelog to prevent using the old repo layout'
214 214 )
215 215 if self.ui.configbool('format', 'generaldelta', False):
216 216 requirements.append("generaldelta")
217 217 requirements = set(requirements)
218 218 else:
219 219 raise error.RepoError(_("repository %s not found") % path)
220 220 elif create:
221 221 raise error.RepoError(_("repository %s already exists") % path)
222 222 else:
223 223 try:
224 224 requirements = scmutil.readrequires(self.vfs, self.supported)
225 225 except IOError, inst:
226 226 if inst.errno != errno.ENOENT:
227 227 raise
228 228 requirements = set()
229 229
230 230 self.sharedpath = self.path
231 231 try:
232 232 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
233 233 realpath=True)
234 234 s = vfs.base
235 235 if not vfs.exists():
236 236 raise error.RepoError(
237 237 _('.hg/sharedpath points to nonexistent directory %s') % s)
238 238 self.sharedpath = s
239 239 except IOError, inst:
240 240 if inst.errno != errno.ENOENT:
241 241 raise
242 242
243 243 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
244 244 self.spath = self.store.path
245 245 self.svfs = self.store.vfs
246 246 self.sopener = self.svfs
247 247 self.sjoin = self.store.join
248 248 self.vfs.createmode = self.store.createmode
249 249 self._applyrequirements(requirements)
250 250 if create:
251 251 self._writerequirements()
252 252
253 253
254 254 self._branchcaches = {}
255 255 self.filterpats = {}
256 256 self._datafilters = {}
257 257 self._transref = self._lockref = self._wlockref = None
258 258
259 259 # A cache for various files under .hg/ that tracks file changes,
260 260 # (used by the filecache decorator)
261 261 #
262 262 # Maps a property name to its util.filecacheentry
263 263 self._filecache = {}
264 264
265 265 # hold sets of revision to be filtered
266 266 # should be cleared when something might have changed the filter value:
267 267 # - new changesets,
268 268 # - phase change,
269 269 # - new obsolescence marker,
270 270 # - working directory parent change,
271 271 # - bookmark changes
272 272 self.filteredrevcache = {}
273 273
274 274 def close(self):
275 275 pass
276 276
277 277 def _restrictcapabilities(self, caps):
278 278 return caps
279 279
280 280 def _applyrequirements(self, requirements):
281 281 self.requirements = requirements
282 282 self.sopener.options = dict((r, 1) for r in requirements
283 283 if r in self.openerreqs)
284 284 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
285 285 if chunkcachesize is not None:
286 286 self.sopener.options['chunkcachesize'] = chunkcachesize
287 287
288 288 def _writerequirements(self):
289 289 reqfile = self.opener("requires", "w")
290 290 for r in sorted(self.requirements):
291 291 reqfile.write("%s\n" % r)
292 292 reqfile.close()
293 293
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        *path* must be an absolute path below self.root.  Walks up from
        the deepest component looking for a subrepo prefix recorded in
        the working context's substate; recurses into matching subrepos.
        Returns True only for a path that is itself a known subrepo (or
        legally nested inside one)."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # the path is exactly a registered subrepo
                    return True
                else:
                    # inside a subrepo: delegate the remainder of the
                    # path to that subrepo's own nesting check
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
331 331
    def peer(self):
        """Return a peer interface for this repository.

        A fresh localpeer is created on every call; caching it here
        would create a reference cycle."""
        return localpeer(self) # not cached to avoid reference cycle
334 334
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo.  The base class is
        already unfiltered, so this simply returns self."""
        return self
340 340
    def filtered(self, name):
        """Return a filtered version of a repository

        The result is an instance of a dynamically created class that
        mixes repoview.repoview into this repository's own class, so
        subclass behavior is preserved under filtering."""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
348 348
    @repofilecache('bookmarks')
    def _bookmarks(self):
        """Bookmark store, cached on the .hg/bookmarks file."""
        return bookmarks.bmstore(self)
352 352
    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        """Name of the active bookmark, cached on .hg/bookmarks.current."""
        return bookmarks.readcurrent(self)
356 356
357 357 def bookmarkheads(self, bookmark):
358 358 name = bookmark.split('@', 1)[0]
359 359 heads = []
360 360 for mark, n in self._bookmarks.iteritems():
361 361 if mark.split('@', 1)[0] == name:
362 362 heads.append(n)
363 363 return heads
364 364
    @storecache('phaseroots')
    def _phasecache(self):
        """Phase data for the repository, cached on the store's
        phaseroots file."""
        return phases.phasecache(self, self._phasedefaults)
368 368
    @storecache('obsstore')
    def obsstore(self):
        """Obsolescence marker store, cached on the store's obsstore file.

        Warns when markers exist but the obsolete feature is disabled."""
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store
377 377
    @storecache('00changelog.i')
    def changelog(self):
        """The changelog revlog, cached on 00changelog.i."""
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # HG_PENDING names a repo with uncommitted (pending)
            # changelog data; if it is ours, read that data too —
            # presumably set while hooks run inside a transaction
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
386 386
    @storecache('00manifest.i')
    def manifest(self):
        """The manifest revlog, cached on 00manifest.i."""
        return manifest.manifest(self.sopener)
390 390
    @repofilecache('dirstate')
    def dirstate(self):
        """The dirstate, with working-parent validation.

        An unknown working directory parent is replaced by nullid, with
        a one-time warning, instead of failing."""
        # mutable cell so the nested function can set the flag
        # (Python 2: no 'nonlocal')
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
406 406
407 407 def __getitem__(self, changeid):
408 408 if changeid is None:
409 409 return context.workingctx(self)
410 410 return context.changectx(self, changeid)
411 411
412 412 def __contains__(self, changeid):
413 413 try:
414 414 return bool(self.lookup(changeid))
415 415 except error.RepoLookupError:
416 416 return False
417 417
    def __nonzero__(self):
        """A repository object is always truthy, even when empty."""
        return True
420 420
    def __len__(self):
        """Number of revisions in the repository (changelog length)."""
        return len(self.changelog)
423 423
    def __iter__(self):
        """Iterate over the repository's revision numbers."""
        return iter(self.changelog)
426 426
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset

        Extra positional arguments are interpolated into *expr* via
        revset.formatspec before matching.'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))
432 432
433 433 def set(self, expr, *args):
434 434 '''
435 435 Yield a context for each matching revision, after doing arg
436 436 replacement via revset.formatspec
437 437 '''
438 438 for r in self.revs(expr, *args):
439 439 yield self[r]
440 440
441 441 def url(self):
442 442 return 'file:' + self.root
443 443
    def hook(self, name, throw=False, **args):
        """Invoke the hooks registered under *name*; extra keyword
        arguments are passed along (delegates to hook.hook)."""
        return hook.hook(self.ui, self, name, throw, **args)
446 446
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Record tag(s) *names* for changeset *node*.

        Local tags are appended to .hg/localtags; global tags are
        appended to .hgtags and committed.  Returns the node of the new
        tagging changeset for global tags, None for local ones.

        NOTE(review): the mutable default 'extra={}' is shared between
        calls; safe only because it is never mutated here, merely passed
        through to commit().
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            # 'pretag' hooks may veto the operation (throw=True)
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append at EOF, making sure the existing content ends with
            # a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # re-record the tag's previous target first so the
                    # on-file history of the tag is kept
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
514 514
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse a global tag while .hgtags itself is dirty in any of
            # the first five status categories (presumably modified/added/
            # removed/deleted/unknown — TODO confirm ordering)
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
544 544
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # lazily filled by nodetags() and tagslist() respectively
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
567 567
568 568 def tags(self):
569 569 '''return a mapping of tag to node'''
570 570 t = {}
571 571 if self.changelog.filteredrevs:
572 572 tags, tt = self._findtags()
573 573 else:
574 574 tags = self._tagscache.tags
575 575 for k, v in tags.iteritems():
576 576 try:
577 577 # ignore tags to unknown nodes
578 578 self.changelog.rev(v)
579 579 t[k] = v
580 580 except (error.LookupError, ValueError):
581 581 pass
582 582 return t
583 583
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # the 'tip' pseudo-tag always points at the changelog tip
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
616 616
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
627 627
628 628 def tagslist(self):
629 629 '''return a list of tags ordered by revision'''
630 630 if not self._tagscache.tagslist:
631 631 l = []
632 632 for t, n in self.tags().iteritems():
633 633 r = self.changelog.rev(n)
634 634 l.append((r, t, n))
635 635 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
636 636
637 637 return self._tagscache.tagslist
638 638
639 639 def nodetags(self, node):
640 640 '''return the tags associated with a node'''
641 641 if not self._tagscache.nodetagscache:
642 642 nodetagscache = {}
643 643 for t, n in self._tagscache.tags.iteritems():
644 644 nodetagscache.setdefault(n, []).append(t)
645 645 for tags in nodetagscache.itervalues():
646 646 tags.sort()
647 647 self._tagscache.nodetagscache = nodetagscache
648 648 return self._tagscache.nodetagscache.get(node, [])
649 649
650 650 def nodebookmarks(self, node):
651 651 marks = []
652 652 for bookmark, n in self._bookmarks.iteritems():
653 653 if n == node:
654 654 marks.append(bookmark)
655 655 return sorted(marks)
656 656
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        # branch caches are kept separately per filter level
        return self._branchcaches[self.filtername]
662 662
    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            # translate the low-level KeyError into a repo-level error
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
669 669
    def lookup(self, key):
        """Resolve *key* to a changeset node via the changectx machinery."""
        return self[key].node()
672 672
673 673 def lookupbranch(self, key, remote=None):
674 674 repo = remote or self
675 675 if key in repo.branchmap():
676 676 return key
677 677
678 678 repo = (remote and remote.local()) and remote or self
679 679 return repo[key].branch()
680 680
681 681 def known(self, nodes):
682 682 nm = self.changelog.nodemap
683 683 pc = self._phasecache
684 684 result = []
685 685 for n in nodes:
686 686 r = nm.get(n)
687 687 resp = not (r is None or pc.phase(self, r) >= phases.secret)
688 688 result.append(resp)
689 689 return result
690 690
    def local(self):
        """Return self, indicating a directly accessible local repository
        (non-local repository types override this; see cancopy)."""
        return self
693 693
    def cancopy(self):
        """Return True when the repository's files may be copied as-is."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        # non-publishing repos can always be copied
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
702 702
    def join(self, f):
        """Return *f* joined to the repository metadata path (self.path)."""
        return os.path.join(self.path, f)
705 705
    def wjoin(self, f):
        """Return *f* joined to the working directory root (self.root)."""
        return os.path.join(self.root, f)
708 708
    def file(self, f):
        """Return the filelog for tracked path *f* (a leading '/' is
        stripped; paths are repo-root-relative)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
713 713
    def changectx(self, changeid):
        """Return the changectx for *changeid* (alias of self[changeid])."""
        return self[changeid]
716 716
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid

        With the default of None, returns the parents of the working
        directory.'''
        return self[changeid].parents()
720 720
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents to (p1, p2) and fix up copy records.

        Copy information is only kept for entries whose copy source is
        present in the first parent's context."""
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # no merge: drop copy records that reference files unknown
            # to the (single) parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
735 735
    def filectx(self, path, changeid=None, fileid=None):
        """Return a filectx for *path*.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
740 740
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
743 743
    def pathto(self, f, cwd=None):
        """Return the path of repo file *f* relative to *cwd* (delegates
        to the dirstate)."""
        return self.dirstate.pathto(f, cwd)
746 746
    def wfile(self, f, mode='r'):
        """Open working-directory file *f* with the given *mode*."""
        return self.wopener(f, mode)
749 749
    def _link(self, f):
        """Return True when working-directory path *f* is a symlink."""
        return self.wvfs.islink(f)
752 752
    def _loadfilter(self, filter):
        """Load, cache and return the patterns of config section *filter*
        ('encode' or 'decode') as (matcher, filterfn, params) triples."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # registered data filter: the rest of the command
                        # string becomes its parameters
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # no registered filter: run cmd as a shell command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                # (bind oldfn now to avoid late-binding surprises)
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
776 776
777 777 def _filter(self, filterpats, filename, data):
778 778 for mf, fn, cmd in filterpats:
779 779 if mf(filename):
780 780 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
781 781 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
782 782 break
783 783
784 784 return data
785 785
    @unfilteredpropertycache
    def _encodefilterpats(self):
        """Cached (matcher, fn, params) triples for [encode] filters."""
        return self._loadfilter('encode')
789 789
    @unfilteredpropertycache
    def _decodefilterpats(self):
        """Cached (matcher, fn, params) triples for [decode] filters."""
        return self._loadfilter('decode')
793 793
    def adddatafilter(self, name, filter):
        """Register filter function *filter* under *name*; matched by
        command-prefix in _loadfilter."""
        self._datafilters[name] = filter
796 796
797 797 def wread(self, filename):
798 798 if self._link(filename):
799 799 data = self.wvfs.readlink(filename)
800 800 else:
801 801 data = self.wopener.read(filename)
802 802 return self._filter(self._encodefilterpats, filename, data)
803 803
    def wwrite(self, filename, data, flags):
        """Write *data* to working-directory file *filename*, applying
        'decode' filters and honoring the 'l' (symlink) and 'x' (exec)
        flags."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
812 812
    def wwritedata(self, filename, data):
        """Return *data* run through the 'decode' filters for *filename*,
        without touching the filesystem."""
        return self._filter(self._decodefilterpats, filename, data)
815 815
    def transaction(self, desc, report=None):
        """Open (or nest into) a transaction on the store.

        *desc* labels the transaction in the journal; *report* is an
        optional message callback (defaults to ui.warn)."""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # a transaction is already live: join it
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        def onclose():
            # 'tr' is rebound below to the real transaction object
            # before this callback can ever run
            self.store.write(tr)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        # only a weak reference is kept (mirrors _lockref handling)
        self._transref = weakref.ref(tr)
        return tr
839 839
    def _journalfiles(self):
        """Return (vfs, name) pairs for every transaction journal file."""
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
847 847
    def undofiles(self):
        """Return the full paths of the undo.* counterparts of the
        journal files."""
        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
850 850
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, description, bookmarks and
        phaseroots into journal.* files so an aborted transaction can
        restore them (see recover/rollback)."""
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
862 862
    def recover(self):
        """Recover from an interrupted transaction.

        Rolls back the journal under the store lock.  Returns True when
        an interrupted transaction was found, False otherwise."""
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
877 877
    def rollback(self, dryrun=False, force=False):
        """Roll back the last transaction, if undo data exists.

        Takes both the wlock and the store lock.  Returns 0 on success
        (see _rollback), 1 when no rollback information is available."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
890 890
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Implement rollback by restoring the undo.* snapshot files.

        Returns 0 on success.  Unless *force* is set, aborts when the
        rolled-back transaction is a commit and the working directory
        is not checked out at tip."""
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # did the rollback remove a working directory parent?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # restore the snapshotted dirstate and branch as well
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
956 956
    def invalidatecaches(self):
        """Drop in-memory caches: tags, branch heads and volatile sets."""

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
965 965
    def invalidatevolatilesets(self):
        """Drop caches derived from volatile state (filtered revision
        sets and obsolescence caches)."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
969 969
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you
        want to explicitly read the dirstate again (i.e. restoring it to
        a previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # clear the dirstate's own filecache entries first
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
986 986
    def invalidate(self):
        """Drop all filecache-backed properties except the dirstate so
        they are reloaded from disk on next access."""
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()
1000 1000
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        # see invalidate() and invalidatedirstate() for the two halves
        self.invalidate()
        self.invalidatedirstate()
1007 1007
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire *lockname* in *vfs* and return the lock object.

        A non-blocking acquire is tried first; when *wait* is true, a
        blocking retry follows using the ui-configured timeout (default
        600 seconds).  *releasefn* runs on release; *acquirefn* runs
        right after the lock is obtained."""
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1024 1024
1025 1025 def _afterlock(self, callback):
1026 1026 """add a callback to the current repository lock.
1027 1027
1028 1028 The callback will be executed on lock release."""
1029 1029 l = self._lockref and self._lockref()
1030 1030 if l:
1031 1031 l.postrelease.append(callback)
1032 1032 else:
1033 1033 callback()
1034 1034
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-entrant: bump the count on the already-held lock
            l.lock()
            return l

        def unlock():
            # on release: flush phase data and refresh filecache stats
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1056 1056
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant: bump the count on the already-held lock
            l.lock()
            return l

        def unlock():
            # on release: persist the dirstate and refresh its cache entry
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1075 1075
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit
        manifest1, manifest2: manifests of the commit's parents
        linkrev: linkrev for the new filelog revision (passed through to
            filelog.add)
        tr: the running transaction
        changelist: list of changed files; appended to in place

        Returns the filenode of the (possibly reused) file revision.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            # file is new on this side: use the other parent, if any
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1157 1157
1158 1158 @unfilteredmethod
1159 1159 def commit(self, text="", user=None, date=None, match=None, force=False,
1160 1160 editor=False, extra={}):
1161 1161 """Add a new revision to current repository.
1162 1162
1163 1163 Revision information is gathered from the working directory,
1164 1164 match can be used to filter the committed files. If editor is
1165 1165 supplied, it is called to get a commit message.
1166 1166 """
1167 1167
1168 1168 def fail(f, msg):
1169 1169 raise util.Abort('%s: %s' % (f, msg))
1170 1170
1171 1171 if not match:
1172 1172 match = matchmod.always(self.root, '')
1173 1173
1174 1174 if not force:
1175 1175 vdirs = []
1176 1176 match.explicitdir = vdirs.append
1177 1177 match.bad = fail
1178 1178
1179 1179 wlock = self.wlock()
1180 1180 try:
1181 1181 wctx = self[None]
1182 1182 merge = len(wctx.parents()) > 1
1183 1183
1184 1184 if (not force and merge and match and
1185 1185 (match.files() or match.anypats())):
1186 1186 raise util.Abort(_('cannot partially commit a merge '
1187 1187 '(do not specify files or patterns)'))
1188 1188
1189 1189 changes = self.status(match=match, clean=force)
1190 1190 if force:
1191 1191 changes[0].extend(changes[6]) # mq may commit unchanged files
1192 1192
1193 1193 # check subrepos
1194 1194 subs = []
1195 1195 commitsubs = set()
1196 1196 newstate = wctx.substate.copy()
1197 1197 # only manage subrepos and .hgsubstate if .hgsub is present
1198 1198 if '.hgsub' in wctx:
1199 1199 # we'll decide whether to track this ourselves, thanks
1200 1200 for c in changes[:3]:
1201 1201 if '.hgsubstate' in c:
1202 1202 c.remove('.hgsubstate')
1203 1203
1204 1204 # compare current state to last committed state
1205 1205 # build new substate based on last committed state
1206 1206 oldstate = wctx.p1().substate
1207 1207 for s in sorted(newstate.keys()):
1208 1208 if not match(s):
1209 1209 # ignore working copy, use old state if present
1210 1210 if s in oldstate:
1211 1211 newstate[s] = oldstate[s]
1212 1212 continue
1213 1213 if not force:
1214 1214 raise util.Abort(
1215 1215 _("commit with new subrepo %s excluded") % s)
1216 1216 if wctx.sub(s).dirty(True):
1217 1217 if not self.ui.configbool('ui', 'commitsubrepos'):
1218 1218 raise util.Abort(
1219 1219 _("uncommitted changes in subrepo %s") % s,
1220 1220 hint=_("use --subrepos for recursive commit"))
1221 1221 subs.append(s)
1222 1222 commitsubs.add(s)
1223 1223 else:
1224 1224 bs = wctx.sub(s).basestate()
1225 1225 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1226 1226 if oldstate.get(s, (None, None, None))[1] != bs:
1227 1227 subs.append(s)
1228 1228
1229 1229 # check for removed subrepos
1230 1230 for p in wctx.parents():
1231 1231 r = [s for s in p.substate if s not in newstate]
1232 1232 subs += [s for s in r if match(s)]
1233 1233 if subs:
1234 1234 if (not match('.hgsub') and
1235 1235 '.hgsub' in (wctx.modified() + wctx.added())):
1236 1236 raise util.Abort(
1237 1237 _("can't commit subrepos without .hgsub"))
1238 1238 changes[0].insert(0, '.hgsubstate')
1239 1239
1240 1240 elif '.hgsub' in changes[2]:
1241 1241 # clean up .hgsubstate when .hgsub is removed
1242 1242 if ('.hgsubstate' in wctx and
1243 1243 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1244 1244 changes[2].insert(0, '.hgsubstate')
1245 1245
1246 1246 # make sure all explicit patterns are matched
1247 1247 if not force and match.files():
1248 1248 matched = set(changes[0] + changes[1] + changes[2])
1249 1249
1250 1250 for f in match.files():
1251 1251 f = self.dirstate.normalize(f)
1252 1252 if f == '.' or f in matched or f in wctx.substate:
1253 1253 continue
1254 1254 if f in changes[3]: # missing
1255 1255 fail(f, _('file not found!'))
1256 1256 if f in vdirs: # visited directory
1257 1257 d = f + '/'
1258 1258 for mf in matched:
1259 1259 if mf.startswith(d):
1260 1260 break
1261 1261 else:
1262 1262 fail(f, _("no match under directory!"))
1263 1263 elif f not in self.dirstate:
1264 1264 fail(f, _("file not tracked!"))
1265 1265
1266 1266 cctx = context.workingctx(self, text, user, date, extra, changes)
1267 1267
1268 1268 if (not force and not extra.get("close") and not merge
1269 1269 and not cctx.files()
1270 1270 and wctx.branch() == wctx.p1().branch()):
1271 1271 return None
1272 1272
1273 1273 if merge and cctx.deleted():
1274 1274 raise util.Abort(_("cannot commit merge with missing files"))
1275 1275
1276 1276 ms = mergemod.mergestate(self)
1277 1277 for f in changes[0]:
1278 1278 if f in ms and ms[f] == 'u':
1279 1279 raise util.Abort(_("unresolved merge conflicts "
1280 1280 "(see hg help resolve)"))
1281 1281
1282 1282 if editor:
1283 1283 cctx._text = editor(self, cctx, subs)
1284 1284 edited = (text != cctx._text)
1285 1285
1286 1286 # Save commit message in case this transaction gets rolled back
1287 1287 # (e.g. by a pretxncommit hook). Leave the content alone on
1288 1288 # the assumption that the user will use the same editor again.
1289 1289 msgfn = self.savecommitmessage(cctx._text)
1290 1290
1291 1291 # commit subs and write new state
1292 1292 if subs:
1293 1293 for s in sorted(commitsubs):
1294 1294 sub = wctx.sub(s)
1295 1295 self.ui.status(_('committing subrepository %s\n') %
1296 1296 subrepo.subrelpath(sub))
1297 1297 sr = sub.commit(cctx._text, user, date)
1298 1298 newstate[s] = (newstate[s][0], sr)
1299 1299 subrepo.writestate(self, newstate)
1300 1300
1301 1301 p1, p2 = self.dirstate.parents()
1302 1302 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1303 1303 try:
1304 1304 self.hook("precommit", throw=True, parent1=hookp1,
1305 1305 parent2=hookp2)
1306 1306 ret = self.commitctx(cctx, True)
1307 1307 except: # re-raises
1308 1308 if edited:
1309 1309 self.ui.write(
1310 1310 _('note: commit message saved in %s\n') % msgfn)
1311 1311 raise
1312 1312
1313 1313 # update bookmarks, dirstate and mergestate
1314 1314 bookmarks.update(self, [p1, p2], ret)
1315 1315 cctx.markcommitted(ret)
1316 1316 ms.reset()
1317 1317 finally:
1318 1318 wlock.release()
1319 1319
1320 1320 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1321 1321 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1322 1322 self._afterlock(commithook)
1323 1323 return ret
1324 1324
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the new changeset. When ``error`` is False, an
        IOError with errno ENOENT while committing a file marks the file as
        removed instead of aborting; any other error always aborts.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # use a weak proxy so the transaction is not kept alive by the
            # revlogs that hold a reference to it
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                # new file revisions will link back to the changeset being added
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # missing file: treat it as removed
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changed: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # let hooks see the pending (not yet finalized) changelog data
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1406 1406
1407 1407 @unfilteredmethod
1408 1408 def destroying(self):
1409 1409 '''Inform the repository that nodes are about to be destroyed.
1410 1410 Intended for use by strip and rollback, so there's a common
1411 1411 place for anything that has to be done before destroying history.
1412 1412
1413 1413 This is mostly useful for saving state that is in memory and waiting
1414 1414 to be flushed when the current lock is released. Because a call to
1415 1415 destroyed is imminent, the repo will be invalidated causing those
1416 1416 changes to stay in memory (waiting for the next unlock), or vanish
1417 1417 completely.
1418 1418 '''
1419 1419 # When using the same lock to commit and strip, the phasecache is left
1420 1420 # dirty after committing. Then when we strip, the repo is invalidated,
1421 1421 # causing those changes to disappear.
1422 1422 if '_phasecache' in vars(self):
1423 1423 self._phasecache.write()
1424 1424
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1458 1458
1459 1459 def walk(self, match, node=None):
1460 1460 '''
1461 1461 walk recursively through the directory tree or a given
1462 1462 changeset, finding all files matched by the match
1463 1463 function
1464 1464 '''
1465 1465 return self[node].walk(match)
1466 1466
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The unknown/ignored/clean lists are only populated when the
        corresponding keyword argument is True.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to the files selected by 'match'
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        # best effort only: skip the fixup if locked
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # recompute modified/added/clean by walking the two manifests
            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            # whatever is left in mf1 exists only on the node1 side
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # fold each subrepository's status into the result lists,
            # prefixing file names with the subrepo path
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1613 1613
1614 1614 def heads(self, start=None):
1615 1615 heads = self.changelog.heads(start)
1616 1616 # sort the output in rev descending order
1617 1617 return sorted(heads, key=self.changelog.rev, reverse=True)
1618 1618
1619 1619 def branchheads(self, branch=None, start=None, closed=False):
1620 1620 '''return a (possibly filtered) list of heads for the given branch
1621 1621
1622 1622 Heads are returned in topological order, from newest to oldest.
1623 1623 If branch is None, use the dirstate branch.
1624 1624 If start is not None, return only heads reachable from start.
1625 1625 If closed is True, return heads that are marked as closed as well.
1626 1626 '''
1627 1627 if branch is None:
1628 1628 branch = self[None].branch()
1629 1629 branches = self.branchmap()
1630 1630 if branch not in branches:
1631 1631 return []
1632 1632 # the cache returns heads ordered lowest to highest
1633 1633 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1634 1634 if start is not None:
1635 1635 # filter out the heads that cannot be reached from startrev
1636 1636 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1637 1637 bheads = [h for h in bheads if h in fbheads]
1638 1638 return bheads
1639 1639
1640 1640 def branches(self, nodes):
1641 1641 if not nodes:
1642 1642 nodes = [self.changelog.tip()]
1643 1643 b = []
1644 1644 for n in nodes:
1645 1645 t = n
1646 1646 while True:
1647 1647 p = self.changelog.parents(n)
1648 1648 if p[1] != nullid or p[0] == nullid:
1649 1649 b.append((t, n, p[0], p[1]))
1650 1650 break
1651 1651 n = p[0]
1652 1652 return b
1653 1653
1654 1654 def between(self, pairs):
1655 1655 r = []
1656 1656
1657 1657 for top, bottom in pairs:
1658 1658 n, l, i = top, [], 0
1659 1659 f = 1
1660 1660
1661 1661 while n != bottom and n != nullid:
1662 1662 p = self.changelog.parents(n)[0]
1663 1663 if i == f:
1664 1664 l.append(n)
1665 1665 f = f * 2
1666 1666 n = p
1667 1667 i += 1
1668 1668
1669 1669 r.append(l)
1670 1670
1671 1671 return r
1672 1672
1673 1673 def pull(self, remote, heads=None, force=False):
1674 1674 return exchange.pull (self, remote, heads, force)
1675 1675
1676 1676 def checkpush(self, pushop):
1677 1677 """Extensions can override this function if additional checks have
1678 1678 to be performed before pushing, or call it if they override push
1679 1679 command.
1680 1680 """
1681 1681 pass
1682 1682
1683 1683 def push(self, remote, force=False, revs=None, newbranch=False):
1684 1684 return exchange.push(self, remote, force, revs, newbranch)
1685 1685
    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # debug-trace each incoming changeset; returns the index the
            # next added revision will occupy
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            # node -> changelog revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            # progress reporter shared across the changesets/manifests/files
            # phases; callers of source advance it one chunk at a time
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            # count the distinct files touched by the incoming changesets
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            newrevs, newfiles = changegroup.addchangegroupfiles(self,
                                                                source,
                                                                revmap,
                                                                trp,
                                                                pr,
                                                                needfiles)
            revisions += newrevs
            files += newfiles

            # compute the head-count delta, discounting closed branch heads
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                # expose pending (unfinalized) changelog data to the hook
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but coming call to
                    # `destroyed` will repair it.
                    # In other case we can safely update cache on disk.
                    branchmap.updatecache(self.filtered('served'))
                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(self):
                        return

                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)

                    newheads = [h for h in self.heads() if h not in oldheads]
                    self.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
1870
1871
    def stream_in(self, remote, requirements):
        """Perform a streaming clone: copy raw store files from ``remote``
        into this repository.

        ``requirements`` is the set of format requirements advertised by
        the remote; it is merged with our non-format requirements and
        written out. Returns len(self.heads()) + 1 (always non-zero).
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            # first line of the stream is an integer status code
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            # next line: "<file count> <total byte count>"
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    # per-file header line: "<name>\0<size>"
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                # avoid division by zero in the transfer-rate report below
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                # seed the local branch cache from the remote branchmap
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1974 1788
1975 1789 def clone(self, remote, heads=[], stream=False):
1976 1790 '''clone remote repository.
1977 1791
1978 1792 keyword arguments:
1979 1793 heads: list of revs to clone (forces use of pull)
1980 1794 stream: use streaming clone if possible'''
1981 1795
1982 1796 # now, all clients that can request uncompressed clones can
1983 1797 # read repo formats supported by all servers that can serve
1984 1798 # them.
1985 1799
1986 1800 # if revlog format changes, client will have to check version
1987 1801 # and format flags on "stream" capability, and use
1988 1802 # uncompressed only if compatible.
1989 1803
1990 1804 if not stream:
1991 1805 # if the server explicitly prefers to stream (for fast LANs)
1992 1806 stream = remote.capable('stream-preferred')
1993 1807
1994 1808 if stream and not heads:
1995 1809 # 'stream' means remote revlog format is revlogv1 only
1996 1810 if remote.capable('stream'):
1997 1811 return self.stream_in(remote, set(('revlogv1',)))
1998 1812 # otherwise, 'streamreqs' contains the remote revlog format
1999 1813 streamreqs = remote.capable('streamreqs')
2000 1814 if streamreqs:
2001 1815 streamreqs = set(streamreqs.split(','))
2002 1816 # if we support it, stream in and adjust our requirements
2003 1817 if not streamreqs - self.supportedformats:
2004 1818 return self.stream_in(remote, streamreqs)
2005 1819 return self.pull(remote, heads)
2006 1820
2007 1821 def pushkey(self, namespace, key, old, new):
2008 1822 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2009 1823 old=old, new=new)
2010 1824 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2011 1825 ret = pushkey.push(self, namespace, key, old, new)
2012 1826 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2013 1827 ret=ret)
2014 1828 return ret
2015 1829
2016 1830 def listkeys(self, namespace):
2017 1831 self.hook('prelistkeys', throw=True, namespace=namespace)
2018 1832 self.ui.debug('listing keys for "%s"\n' % namespace)
2019 1833 values = pushkey.list(self, namespace)
2020 1834 self.hook('listkeys', namespace=namespace, values=values)
2021 1835 return values
2022 1836
2023 1837 def debugwireargs(self, one, two, three=None, four=None, five=None):
2024 1838 '''used to test argument passing over the wire'''
2025 1839 return "%s %s %s %s %s" % (one, two, three, four, five)
2026 1840
2027 1841 def savecommitmessage(self, text):
2028 1842 fp = self.opener('last-message.txt', 'wb')
2029 1843 try:
2030 1844 fp.write(text)
2031 1845 finally:
2032 1846 fp.close()
2033 1847 return self.pathto(fp.name[len(self.root) + 1:])
2034 1848
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (vfs, src, dest) entry in
    *files*, silently skipping entries whose source does not exist."""
    pending = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
2045 1859
def undoname(fn):
    """Map a journal file path to the corresponding undo file path
    (replaces the leading 'journal' in the basename with 'undo')."""
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    undofile = basename.replace('journal', 'undo', 1)
    return os.path.join(directory, undofile)
2050 1864
def instance(ui, path, create):
    """Repository factory: open (or create) the local repository at
    ``path``."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2053 1867
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,178 +1,179 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from mercurial import changegroup
10 10 from mercurial.node import short
11 11 from mercurial.i18n import _
12 12 import os
13 13 import errno
14 14
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup

    The bundle is written under .hg/strip-backup as
    "<shortnode>-<suffix>.hg"; bzip2-compressed (HG10BZ) when *compress*
    is true, raw (HG10UN) otherwise.  Returns the bundle file name.
    """
    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip')
    backupdir = repo.join("strip-backup")
    if not os.path.isdir(backupdir):
        os.mkdir(backupdir)
    name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
    if compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return changegroup.writebundle(cg, name, bundletype)
27 27
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    touched = set()
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
36 36
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()
    def collectone(revlog):
        # getstrippoint reports the revs of this revlog that truncation
        # at striprev would orphan; record their linked changesets
        _, brokenset = revlog.getstrippoint(striprev)
        broken.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifest)
    for fname in files:
        collectone(repo.file(fname))

    return broken
49 49
def strip(ui, repo, nodelist, backup="all", topic='backup'):
    """Remove the revisions in *nodelist* (and their descendants) from repo.

    backup: "all" writes a full backup bundle; "strip" keeps the partial
    bundle of saved revisions; anything else removes the temp bundle.
    Revisions that are not descendants of the stripped set are bundled up
    and re-added after the revlogs are truncated.
    """
    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)
    # NOTE(review): `node` below relies on the Python 2 list-comprehension
    # scope leak above: it is the last entry of nodelist after the loop.

    keeppartialbundle = backup == 'strip'

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget[0]].node()
    else:
        newbmtarget = '.'

    # collect bookmarks that point into the stripped set; they are
    # repointed at newbmtarget once the strip succeeds
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    if backup == "all":
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
        repo.ui.log("backupbundle", "saved backup bundle to %s\n", backupfile)
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=keeppartialbundle)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            # truncate each touched revlog file back to its pre-strip size
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.sopener(file, 'a').truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)
            tr.close()
        except: # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            # restore the revisions that were bundled away above
            ui.note(_("adding branch\n"))
            f = open(chgrpfile, "rb")
            gen = changegroup.readbundle(f, chgrpfile)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            changegroup.addchangegroup(repo, gen, 'strip',
                                       'bundle:' + chgrpfile, True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
            if not keeppartialbundle:
                os.unlink(chgrpfile)

        # remove undo files
        for undofile in repo.undofiles():
            try:
                os.unlink(undofile)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        bm.write()
    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % backupfile)
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % chgrpfile)
        raise

    repo.destroyed()
@@ -1,152 +1,152 b''
1 1 # sshserver.py - ssh protocol server support for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import util, hook, wireproto, changegroup
10 10 import os, sys
11 11
class sshserver(wireproto.abstractserverproto):
    """Server side of the ssh wire protocol, speaking over stdin/stdout."""

    def __init__(self, ui, repo):
        self.ui = ui
        self.repo = repo
        self.lock = None
        self.fin = ui.fin
        self.fout = ui.fout

        # route hook and ui output to stderr so stdout stays a clean
        # protocol channel
        hook.redirect(True)
        ui.fout = repo.ui.fout = ui.ferr

        # Prevent insertion/deletion of CRs
        util.setbinary(self.fin)
        util.setbinary(self.fout)

    def getargs(self, args):
        """Read the named arguments from the wire.

        Each argument arrives as a "<name> <length>\\n<value>" record; the
        special name '*' introduces a dict of <length> extra arguments.
        Returns the values in the order given by *args*.
        """
        data = {}
        keys = args.split()
        for n in xrange(len(keys)):
            argline = self.fin.readline()[:-1]
            arg, l = argline.split()
            if arg not in keys:
                raise util.Abort("unexpected parameter %r" % arg)
            if arg == '*':
                star = {}
                for k in xrange(int(l)):
                    argline = self.fin.readline()[:-1]
                    arg, l = argline.split()
                    val = self.fin.read(int(l))
                    star[arg] = val
                data['*'] = star
            else:
                val = self.fin.read(int(l))
                data[arg] = val
        return [data[k] for k in keys]

    def getarg(self, name):
        # convenience wrapper for a single argument
        return self.getargs(name)[0]

    def getfile(self, fpout):
        # stream "<size>\n<chunk>" records into fpout until a 0-size record
        self.sendresponse('')
        count = int(self.fin.readline())
        while count:
            fpout.write(self.fin.read(count))
            count = int(self.fin.readline())

    def redirect(self):
        # output was already redirected to stderr in __init__
        pass

    def groupchunks(self, changegroup):
        # yield the changegroup stream in 4k chunks (no compression on ssh)
        while True:
            d = changegroup.read(4096)
            if not d:
                break
            yield d

    def sendresponse(self, v):
        # length-prefixed reply
        self.fout.write("%d\n" % len(v))
        self.fout.write(v)
        self.fout.flush()

    def sendstream(self, source):
        write = self.fout.write
        for chunk in source.gen:
            write(chunk)
        self.fout.flush()

    def sendpushresponse(self, rsp):
        self.sendresponse('')
        self.sendresponse(str(rsp.res))

    def sendpusherror(self, rsp):
        self.sendresponse(rsp.res)

    def sendooberror(self, rsp):
        # out-of-band errors go to stderr; stdout gets an empty line
        self.ui.ferr.write('%s\n-\n' % rsp.message)
        self.ui.ferr.flush()
        self.fout.write('\n')
        self.fout.flush()

    def serve_forever(self):
        try:
            while self.serve_one():
                pass
        finally:
            if self.lock is not None:
                self.lock.release()
        sys.exit(0)

    # maps a wireproto result class to the method that serializes it;
    # looked up in serve_one via rsp.__class__ and called unbound
    handlers = {
        str: sendresponse,
        wireproto.streamres: sendstream,
        wireproto.pushres: sendpushresponse,
        wireproto.pusherr: sendpusherror,
        wireproto.ooberror: sendooberror,
    }

    def serve_one(self):
        # one request/response cycle; returns False on EOF (empty command)
        cmd = self.fin.readline()[:-1]
        if cmd and cmd in wireproto.commands:
            rsp = wireproto.dispatch(self.repo, self, cmd)
            self.handlers[rsp.__class__](self, rsp)
        elif cmd:
            # legacy commands implemented as do_* methods below
            impl = getattr(self, 'do_' + cmd, None)
            if impl:
                r = impl()
                if r is not None:
                    self.sendresponse(r)
            else: self.sendresponse("")
        return cmd != ''

    def do_lock(self):
        '''DEPRECATED - allowing remote client to lock repo is not safe'''

        self.lock = self.repo.lock()
        return ""

    def do_unlock(self):
        '''DEPRECATED'''

        if self.lock:
            self.lock.release()
        self.lock = None
        return ""

    def do_addchangegroup(self):
        '''DEPRECATED'''

        if not self.lock:
            self.sendresponse("not locked")
            return

        self.sendresponse("")
        cg = changegroup.unbundle10(self.fin, "UN")
        r = changegroup.addchangegroup(self.repo, cg, 'serve', self._client())
        self.lock.release()
        return str(r)

    def _client(self):
        client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
        return 'remote:ssh:' + client
@@ -1,798 +1,799 b''
1 1 # wireproto.py - generic wire protocol support functions
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import urllib, tempfile, os, sys
9 9 from i18n import _
10 10 from node import bin, hex
11 11 import changegroup as changegroupmod
12 12 import peer, error, encoding, util, store
13 13
14 14
class abstractserverproto(object):
    """abstract class that summarizes the protocol API

    Used as reference and documentation.
    """

    def getargs(self, args):
        """return the value for arguments in <args>

        returns a list of values (same order as <args>)"""
        raise NotImplementedError()

    def getfile(self, fp):
        """write the whole content of a file into a file like object

        The file is in the form::

            (<chunk-size>\n<chunk>)+0\n

        chunk size is the ascii version of the int.
        """
        raise NotImplementedError()

    def redirect(self):
        """may setup interception for stdout and stderr

        See also the `restore` method."""
        raise NotImplementedError()

    # If the `redirect` function does install interception, the `restore`
    # function MUST be defined. If interception is not used, this function
    # MUST NOT be defined.
    #
    # left commented here on purpose
    #
    #def restore(self):
    #    """reinstall previous stdout and stderr and return intercepted stdout
    #    """
    #    raise NotImplementedError()

    def groupchunks(self, cg):
        """return 4096 chunks from a changegroup object

        Some protocols may have compressed the contents."""
        raise NotImplementedError()
60 60
61 61 # abstract batching support
62 62
class future(object):
    '''placeholder for a value to be set later'''
    def set(self, value):
        # a future may only be resolved once
        if util.safehasattr(self, 'value'):
            raise error.RepoError("future is already set")
        self.value = value
69 69
class batcher(object):
    '''base class for batches of commands submittable in a single request

    All methods invoked on instances of this class are simply queued and
    return a future for the result. Once you call submit(), all the queued
    calls are performed and the results set in their respective futures.
    '''
    def __init__(self):
        # queued calls as (name, args, opts, result-future) tuples
        self.calls = []
    def __getattr__(self, name):
        # any attribute access produces a queueing proxy for that method
        def call(*args, **opts):
            resref = future()
            self.calls.append((name, args, opts, resref,))
            return resref
        return call
    def submit(self):
        # subclasses perform the queued calls here
        pass
87 87
class localbatch(batcher):
    '''performs the queued calls directly'''
    def __init__(self, local):
        batcher.__init__(self)
        self.local = local
    def submit(self):
        # no wire involved: invoke each queued call immediately, in order
        for name, args, opts, resref in self.calls:
            method = getattr(self.local, name)
            resref.set(method(*args, **opts))
96 96
class remotebatch(batcher):
    '''batches the queued calls; uses as few roundtrips as possible'''
    def __init__(self, remote):
        '''remote must support _submitbatch(encbatch) and
        _submitone(op, encargs)'''
        batcher.__init__(self)
        self.remote = remote
    def submit(self):
        req, rsp = [], []
        for name, args, opts, resref in self.calls:
            mtd = getattr(self.remote, name)
            batchablefn = getattr(mtd, 'batchable', None)
            if batchablefn is not None:
                # run the encoding phase of the @batchable coroutine now;
                # the decoding phase happens in _submitreq
                batchable = batchablefn(mtd.im_self, *args, **opts)
                encargsorres, encresref = batchable.next()
                if encresref:
                    req.append((name, encargsorres,))
                    rsp.append((batchable, encresref, resref,))
                else:
                    # locally computable result, no wire call needed
                    resref.set(encargsorres)
            else:
                # non-batchable call: flush what we have queued so far to
                # preserve call ordering, then call directly
                if req:
                    self._submitreq(req, rsp)
                    req, rsp = [], []
                resref.set(mtd(*args, **opts))
        if req:
            self._submitreq(req, rsp)
    def _submitreq(self, req, rsp):
        # send one batch request, then feed each encoded result back into
        # its coroutine's decoding phase
        encresults = self.remote._submitbatch(req)
        for encres, r in zip(encresults, rsp):
            batchable, encresref, resref = r
            encresref.set(encres)
            resref.set(batchable.next())
130 130
def batchable(f):
    '''annotation for batchable methods

    Such methods must implement a coroutine as follows:

    @batchable
    def sample(self, one, two=None):
        # Handle locally computable results first:
        if not one:
            yield "a local result", None
        # Build list of encoded arguments suitable for your wire protocol:
        encargs = [('one', encode(one),), ('two', encode(two),)]
        # Create future for injection of encoded result:
        encresref = future()
        # Return encoded arguments and future:
        yield encargs, encresref
        # Assuming the future to be filled with the result from the batched
        # request now. Decode it:
        yield decode(encresref.value)

    The decorator returns a function which wraps this coroutine as a plain
    method, but adds the original method as an attribute called "batchable",
    which is used by remotebatch to split the call into separate encoding and
    decoding phases.
    '''
    def plain(*args, **opts):
        # non-batched invocation: run encode phase, do the single wire
        # call ourselves, then run the decode phase
        batchable = f(*args, **opts)
        encargsorres, encresref = batchable.next()
        if not encresref:
            return encargsorres # a local result in this case
        self = args[0]
        encresref.set(self._submitone(f.func_name, encargsorres))
        return batchable.next()
    setattr(plain, 'batchable', f)
    return plain
166 166
167 167 # list of nodes encoding / decoding
168 168
def decodelist(l, sep=' '):
    """Decode a sep-separated string of hex nodes into binary nodes."""
    if not l:
        return []
    return map(bin, l.split(sep))
173 173
def encodelist(l, sep=' '):
    """Encode a list of binary nodes as a sep-joined string of hex nodes."""
    return sep.join(hex(n) for n in l)
176 176
177 177 # batched call argument encoding
178 178
def escapearg(plain):
    """Escape ':', ',', ';' and '=' so values survive batch encoding."""
    # ':' must be escaped first since it introduces every escape sequence
    escaped = plain
    for ch, rep in ((':', '::'), (',', ':,'), (';', ':;'), ('=', ':=')):
        escaped = escaped.replace(ch, rep)
    return escaped
185 185
def unescapearg(escaped):
    """Inverse of escapearg: restore ':', ',', ';' and '='."""
    # '::' must be unescaped last so it cannot recreate escape sequences
    plain = escaped
    for rep, ch in ((':=', '='), (':;', ';'), (':,', ','), ('::', ':')):
        plain = plain.replace(rep, ch)
    return plain
192 192
193 193 # client side
194 194
class wirepeer(peer.peerrepository):
    """Client side of the wire protocol.

    Concrete transports (ssh, http) subclass this and implement the
    _call* primitives at the bottom.
    """

    def batch(self):
        return remotebatch(self)
    def _submitbatch(self, req):
        # encode (op, argsdict) pairs into the 'batch' command payload
        cmds = []
        for op, argsdict in req:
            args = ','.join('%s=%s' % p for p in argsdict.iteritems())
            cmds.append('%s %s' % (op, args))
        rsp = self._call("batch", cmds=';'.join(cmds))
        return rsp.split(';')
    def _submitone(self, op, args):
        return self._call(op, **args)

    @batchable
    def lookup(self, key):
        self.requirecap('lookup', _('look up remote revision'))
        f = future()
        yield {'key': encoding.fromlocal(key)}, f
        d = f.value
        # reply is "<success> <data>\n"; data is a hex node on success,
        # an error message otherwise
        success, data = d[:-1].split(" ", 1)
        if int(success):
            yield bin(data)
        self._abort(error.RepoError(data))

    @batchable
    def heads(self):
        f = future()
        yield {}, f
        d = f.value
        try:
            yield decodelist(d[:-1])
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def known(self, nodes):
        f = future()
        yield {'nodes': encodelist(nodes)}, f
        d = f.value
        try:
            # reply is one '0'/'1' character per queried node
            yield [bool(int(f)) for f in d]
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def branchmap(self):
        f = future()
        yield {}, f
        d = f.value
        try:
            # one "<quoted-branch> <node> <node>..." line per branch
            branchmap = {}
            for branchpart in d.splitlines():
                branchname, branchheads = branchpart.split(' ', 1)
                branchname = encoding.tolocal(urllib.unquote(branchname))
                branchheads = decodelist(branchheads)
                branchmap[branchname] = branchheads
            yield branchmap
        except TypeError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    def branches(self, nodes):
        n = encodelist(nodes)
        d = self._call("branches", nodes=n)
        try:
            br = [tuple(decodelist(b)) for b in d.splitlines()]
            return br
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    def between(self, pairs):
        batch = 8 # avoid giant requests
        r = []
        for i in xrange(0, len(pairs), batch):
            n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
            d = self._call("between", pairs=n)
            try:
                r.extend(l and decodelist(l) or [] for l in d.splitlines())
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"), d))
        return r

    @batchable
    def pushkey(self, namespace, key, old, new):
        if not self.capable('pushkey'):
            yield False, None
        f = future()
        self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
        yield {'namespace': encoding.fromlocal(namespace),
               'key': encoding.fromlocal(key),
               'old': encoding.fromlocal(old),
               'new': encoding.fromlocal(new)}, f
        d = f.value
        # first line is the result flag, the rest is remote output
        d, output = d.split('\n', 1)
        try:
            d = bool(int(d))
        except ValueError:
            raise error.ResponseError(
                _('push failed (unexpected response):'), d)
        for l in output.splitlines(True):
            self.ui.status(_('remote: '), l)
        yield d

    @batchable
    def listkeys(self, namespace):
        if not self.capable('pushkey'):
            yield {}, None
        f = future()
        self.ui.debug('preparing listkeys for "%s"\n' % namespace)
        yield {'namespace': encoding.fromlocal(namespace)}, f
        d = f.value
        # one "key\tvalue" line per entry
        r = {}
        for l in d.splitlines():
            k, v = l.split('\t')
            r[encoding.tolocal(k)] = encoding.tolocal(v)
        yield r

    def stream_out(self):
        return self._callstream('stream_out')

    def changegroup(self, nodes, kind):
        n = encodelist(nodes)
        f = self._callcompressable("changegroup", roots=n)
        return changegroupmod.unbundle10(f, 'UN')

    def changegroupsubset(self, bases, heads, kind):
        self.requirecap('changegroupsubset', _('look up remote changes'))
        bases = encodelist(bases)
        heads = encodelist(heads)
        f = self._callcompressable("changegroupsubset",
                                   bases=bases, heads=heads)
        return changegroupmod.unbundle10(f, 'UN')

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        self.requirecap('getbundle', _('look up remote changes'))
        opts = {}
        if heads is not None:
            opts['heads'] = encodelist(heads)
        if common is not None:
            opts['common'] = encodelist(common)
        if bundlecaps is not None:
            opts['bundlecaps'] = ','.join(bundlecaps)
        f = self._callcompressable("getbundle", **opts)
        return changegroupmod.unbundle10(f, 'UN')

    def unbundle(self, cg, heads, source):
        '''Send cg (a readable file-like object representing the
        changegroup to push, typically a chunkbuffer object) to the
        remote server as a bundle. Return an integer indicating the
        result of the push (see changegroup.addchangegroup()).'''

        if heads != ['force'] and self.capable('unbundlehash'):
            # send a hash of the heads instead of the full list
            heads = encodelist(['hashed',
                                util.sha1(''.join(sorted(heads))).digest()])
        else:
            heads = encodelist(heads)

        ret, output = self._callpush("unbundle", cg, heads=heads)
        if ret == "":
            raise error.ResponseError(
                _('push failed:'), output)
        try:
            ret = int(ret)
        except ValueError:
            raise error.ResponseError(
                _('push failed (unexpected response):'), ret)

        for l in output.splitlines(True):
            self.ui.status(_('remote: '), l)
        return ret

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        # don't pass optional arguments left at their default value
        opts = {}
        if three is not None:
            opts['three'] = three
        if four is not None:
            opts['four'] = four
        return self._call('debugwireargs', one=one, two=two, **opts)

    def _call(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a simple string.

        returns the server reply as a string."""
        raise NotImplementedError()

    def _callstream(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream.

        returns the server reply as a file like object."""
        raise NotImplementedError()

    def _callcompressable(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream.

        The stream may have been compressed in some implementations. This
        function takes care of the decompression. This is the only difference
        with _callstream.

        returns the server reply as a file like object.
        """
        raise NotImplementedError()

    def _callpush(self, cmd, fp, **args):
        """execute a <cmd> on server

        The command is expected to be related to a push. Push has a special
        return method.

        returns the server reply as a (ret, output) tuple. ret is either
        empty (error) or a stringified int.
        """
        raise NotImplementedError()

    def _abort(self, exception):
        """clearly abort the wire protocol connection and raise the exception
        """
        raise NotImplementedError()
419 419
420 420 # server side
421 421
422 422 # wire protocol command can either return a string or one of these classes.
class streamres(object):
    """wireproto reply: binary stream

    The call was successful and the result is a stream.
    Iterate on the `self.gen` attribute to retrieve chunks.
    """
    def __init__(self, gen):
        # generator/iterator of raw response chunks
        self.gen = gen
431 431
class pushres(object):
    """wireproto reply: success with simple integer return

    The call was successful and returned an integer contained in `self.res`.
    """
    def __init__(self, res):
        # integer push result
        self.res = res
439 439
class pusherr(object):
    """wireproto reply: failure

    The call failed. The `self.res` attribute contains the error message.
    """
    def __init__(self, res):
        # error message string
        self.res = res
447 447
class ooberror(object):
    """wireproto reply: failure of a batch of operation

    Something failed during a batch call. The error message is stored in
    `self.message`.
    """
    def __init__(self, message):
        # out-of-band error text
        self.message = message
456 456
def dispatch(repo, proto, command):
    """Look up *command* and run it against the 'served' repo view."""
    repo = repo.filtered("served")
    func, spec = commands[command]
    return func(repo, proto, *proto.getargs(spec))
462 462
def options(cmd, keys, others):
    """Pop the entries listed in *keys* out of the dict *others*.

    Anything left over in *others* is reported on stderr as an
    unexpected argument of *cmd*.  Returns the popped entries.
    """
    opts = {}
    for key in keys:
        if key in others:
            opts[key] = others.pop(key)
    if others:
        sys.stderr.write("abort: %s got unexpected arguments %s\n"
                         % (cmd, ",".join(others)))
    return opts
473 473
474 474 # list of commands
475 475 commands = {}
476 476
def wireprotocommand(name, args=''):
    """decorator for wireprotocol command

    Registers the decorated function in the module-level `commands` table
    under *name*; *args* is the space-separated argument spec used by
    dispatch/getargs ('*' collects extra arguments).
    """
    def register(func):
        commands[name] = (func, args)
        return func
    return register
483 483
@wireprotocommand('batch', 'cmds *')
def batch(repo, proto, cmds, others):
    """Run a ';'-separated list of encoded sub-commands in one request.

    Each entry is "op name1=val1,name2=val2"; values are unescaped with
    unescapearg.  Results are escaped and ';'-joined, except an ooberror,
    which aborts the whole batch.
    """
    repo = repo.filtered("served")
    res = []
    for pair in cmds.split(';'):
        op, args = pair.split(' ', 1)
        vals = {}
        for a in args.split(','):
            if a:
                n, v = a.split('=')
                vals[n] = unescapearg(v)
        func, spec = commands[op]
        if spec:
            # rebuild the positional argument list from the spec; a '*'
            # slot receives every value not named by the spec
            keys = spec.split()
            data = {}
            for k in keys:
                if k == '*':
                    star = {}
                    for key in vals.keys():
                        if key not in keys:
                            star[key] = vals[key]
                    data['*'] = star
                else:
                    data[k] = vals[k]
            result = func(repo, proto, *[data[k] for k in keys])
        else:
            result = func(repo, proto)
        if isinstance(result, ooberror):
            # out-of-band error: stop and report immediately
            return result
        res.append(escapearg(result))
    return ';'.join(res)
515 515
@wireprotocommand('between', 'pairs')
def between(repo, proto, pairs):
    # pairs arrive as space-separated "top-bottom" hex node pairs
    pairs = [decodelist(p, '-') for p in pairs.split(" ")]
    lines = [encodelist(b) + "\n" for b in repo.between(pairs)]
    return "".join(lines)
523 523
@wireprotocommand('branchmap')
def branchmap(repo, proto):
    # one "<quoted-branch> <hex-node>..." line per branch
    entries = []
    for branch, nodes in repo.branchmap().iteritems():
        quoted = urllib.quote(encoding.fromlocal(branch))
        entries.append('%s %s' % (quoted, encodelist(nodes)))
    return '\n'.join(entries)
533 533
@wireprotocommand('branches', 'nodes')
def branches(repo, proto, nodes):
    decoded = decodelist(nodes)
    lines = [encodelist(b) + "\n" for b in repo.branches(decoded)]
    return "".join(lines)
541 541
542 542
543 543 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
544 544 'known', 'getbundle', 'unbundlehash', 'batch']
545 545
def _capabilities(repo, proto):
    """return a list of capabilities for a repo

    This function exists to allow extensions to easily wrap capabilities
    computation

    - returns a list: easy to alter
    - changes done here will be propagated to both `capabilities` and `hello`
      commands without any other action needed.
    """
    # copy to prevent modification of the global list
    caps = list(wireprotocaps)
    if _allowstream(repo.ui):
        if repo.ui.configbool('server', 'preferuncompressed', False):
            caps.append('stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        # if our local revlogs are just revlogv1, add 'stream' cap
        if not requiredformats - set(('revlogv1',)):
            caps.append('stream')
        # otherwise, add 'streamreqs' detailing our local revlog format
        else:
            caps.append('streamreqs=%s' % ','.join(requiredformats))
    caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
    caps.append('httpheader=1024')
    return caps
571 571
# If you are writing an extension and consider wrapping this function, wrap
# `_capabilities` instead.
@wireprotocommand('capabilities')
def capabilities(repo, proto):
    # space-separated capability tokens
    return ' '.join(_capabilities(repo, proto))
577 577
@wireprotocommand('changegroup', 'roots')
def changegroup(repo, proto, roots):
    # stream a changegroup of everything rooted at the given nodes
    cg = changegroupmod.changegroup(repo, decodelist(roots), 'serve')
    return streamres(proto.groupchunks(cg))
583 583
@wireprotocommand('changegroupsubset', 'bases heads')
def changegroupsubset(repo, proto, bases, heads):
    # stream a changegroup limited to the bases..heads subset
    cg = changegroupmod.changegroupsubset(repo, decodelist(bases),
                                          decodelist(heads), 'serve')
    return streamres(proto.groupchunks(cg))
590 590
@wireprotocommand('debugwireargs', 'one two *')
def debugwireargs(repo, proto, one, two, others):
    # only accept optional args from the known set
    opts = options('debugwireargs', ['three', 'four'], others)
    return repo.debugwireargs(one, two, **opts)
596 596
@wireprotocommand('getbundle', '*')
def getbundle(repo, proto, others):
    opts = options('getbundle', ['heads', 'common', 'bundlecaps'], others)
    # decode each wire-encoded option into its in-memory form
    converters = {'heads': decodelist,
                  'common': decodelist,
                  'bundlecaps': lambda v: set(v.split(','))}
    for key, value in opts.items():
        opts[key] = converters[key](value)
    cg = changegroupmod.getbundle(repo, 'serve', **opts)
    return streamres(proto.groupchunks(cg))
607 607
@wireprotocommand('heads')
def heads(repo, proto):
    # newline-terminated list of hex head nodes
    return encodelist(repo.heads()) + "\n"
612 612
@wireprotocommand('hello')
def hello(repo, proto):
    '''the hello command returns a set of lines describing various
    interesting things about the server, in an RFC822-like format.
    Currently the only one defined is "capabilities", which
    consists of a line in the form:

    capabilities: space separated list of tokens
    '''
    caps = capabilities(repo, proto)
    return "capabilities: %s\n" % (caps)
623 623
@wireprotocommand('listkeys', 'namespace')
def listkeys(repo, proto, namespace):
    # one tab-separated "key\tvalue" line per entry
    entries = repo.listkeys(encoding.tolocal(namespace)).items()
    return '\n'.join('%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v))
                     for k, v in entries)
630 630
631 631 @wireprotocommand('lookup', 'key')
632 632 def lookup(repo, proto, key):
633 633 try:
634 634 k = encoding.tolocal(key)
635 635 c = repo[k]
636 636 r = c.hex()
637 637 success = 1
638 638 except Exception, inst:
639 639 r = str(inst)
640 640 success = 0
641 641 return "%s %s\n" % (success, r)
642 642
@wireprotocommand('known', 'nodes *')
def known(repo, proto, nodes, others):
    """Answer one '1'/'0' character per queried node, in query order."""
    flags = []
    for present in repo.known(decodelist(nodes)):
        if present:
            flags.append("1")
        else:
            flags.append("0")
    return ''.join(flags)
646 646
@wireprotocommand('pushkey', 'namespace key old new')
def pushkey(repo, proto, namespace, key, old, new):
    """Update a pushkey and reply with the integer result.

    When the protocol object can capture output (has a 'restore' method),
    server-side output produced during the update is appended to the
    reply after the result line.
    """
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and new.encode('string-escape') != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    ns = encoding.tolocal(namespace)
    k = encoding.tolocal(key)
    o = encoding.tolocal(old)

    if util.safehasattr(proto, 'restore'):

        proto.redirect()

        try:
            result = repo.pushkey(ns, k, o, new) or False
        except util.Abort:
            result = False

        output = proto.restore()

        return '%s\n%s' % (int(result), output)

    result = repo.pushkey(ns, k, o, new)
    return '%s\n' % int(result)
678 678
def _allowstream(ui):
    """True when the server config permits uncompressed (streaming) clones."""
    return ui.configbool('server', 'uncompressed', True, untrusted=True)
681 681
def _walkstreamfiles(repo):
    # kept as its own function so extensions can override it
    return repo.store.walk()
685 685
@wireprotocommand('stream_out')
def stream(repo, proto):
    '''If the server supports streaming clone, it advertises the "stream"
    capability with a value representing the version and flags of the repo
    it is serving. Client checks to see if it understands the format.

    The format is simple: the server writes out a line with the amount
    of files, then the total amount of bytes to be transferred (separated
    by a space). Then, for each file, the server first writes the filename
    and filesize (separated by the null character), then the file contents.
    '''

    if not _allowstream(repo.ui):
        return '1\n'

    # scan the store while holding the lock so the snapshot is consistent
    entries = []
    totalbytes = 0
    try:
        lock = repo.lock()
        try:
            repo.ui.debug('scanning\n')
            for fname, ename, size in _walkstreamfiles(repo):
                if size:
                    entries.append((fname, size))
                    totalbytes += size
        finally:
            lock.release()
    except error.LockError:
        return '2\n' # error: 2

    def streamer(repo, entries, total):
        '''stream out all metadata files in repository.'''
        yield '0\n' # success
        # NOTE: uses the closed-over totalbytes, not the 'total' parameter
        repo.ui.debug('%d files, %d bytes to transfer\n' %
                      (len(entries), totalbytes))
        yield '%d %d\n' % (len(entries), totalbytes)

        sopener = repo.sopener
        oldaudit = sopener.mustaudit
        debugflag = repo.ui.debugflag
        # paths come straight from the store walk; skip per-open auditing
        sopener.mustaudit = False

        try:
            for fname, size in entries:
                if debugflag:
                    repo.ui.debug('sending %s (%d bytes)\n' % (fname, size))
                # partially encode name over the wire for backwards compat
                yield '%s\0%d\n' % (store.encodedir(fname), size)
                if size <= 65536:
                    # small file: read in one shot, then yield
                    fp = sopener(fname)
                    try:
                        data = fp.read(size)
                    finally:
                        fp.close()
                    yield data
                else:
                    # large file: stream it out in chunks
                    for chunk in util.filechunkiter(sopener(fname),
                                                    limit=size):
                        yield chunk
        # replace with "finally:" when support for python 2.4 has been dropped
        except Exception:
            sopener.mustaudit = oldaudit
            raise
        sopener.mustaudit = oldaudit

    return streamres(streamer(repo, entries, totalbytes))
752 752
753 753 @wireprotocommand('unbundle', 'heads')
754 754 def unbundle(repo, proto, heads):
755 755 their_heads = decodelist(heads)
756 756
757 757 def check_heads():
758 758 heads = repo.heads()
759 759 heads_hash = util.sha1(''.join(sorted(heads))).digest()
760 760 return (their_heads == ['force'] or their_heads == heads or
761 761 their_heads == ['hashed', heads_hash])
762 762
763 763 proto.redirect()
764 764
765 765 # fail early if possible
766 766 if not check_heads():
767 767 return pusherr('repository changed while preparing changes - '
768 768 'please try again')
769 769
770 770 # write bundle data to temporary file because it can be big
771 771 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
772 772 fp = os.fdopen(fd, 'wb+')
773 773 r = 0
774 774 try:
775 775 proto.getfile(fp)
776 776 lock = repo.lock()
777 777 try:
778 778 if not check_heads():
779 779 # someone else committed/pushed/unbundled while we
780 780 # were transferring data
781 781 return pusherr('repository changed while uploading changes - '
782 782 'please try again')
783 783
784 784 # push can proceed
785 785 fp.seek(0)
786 786 gen = changegroupmod.readbundle(fp, None)
787 787
788 788 try:
789 r = repo.addchangegroup(gen, 'serve', proto._client())
789 r = changegroupmod.addchangegroup(repo, gen, 'serve',
790 proto._client())
790 791 except util.Abort, inst:
791 792 sys.stderr.write("abort: %s\n" % inst)
792 793 finally:
793 794 lock.release()
794 795 return pushres(r)
795 796
796 797 finally:
797 798 fp.close()
798 799 os.unlink(tempname)
General Comments 0
You need to be logged in to leave comments. Login now