##// END OF EJS Templates
revert: apply normallookup on reverted file if size isn't changed (issue4583)...
FUJIWARA Katsunori -
r24843:21b33f04 stable
parent child Browse files
Show More
@@ -1,3243 +1,3247 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 14 import changelog
15 15 import bookmarks
16 16 import encoding
17 17 import crecord as crecordmod
18 18 import lock as lockmod
19 19
def parsealiases(cmd):
    """Return the list of alias names encoded in a command table key.

    Keys look like "^log|history": the leading '^' only marks the command
    for short help, and '|' separates the alias names.
    """
    names = cmd.lstrip("^")
    return names.split("|")
22 22
def setupwrapcolorwrite(ui):
    """Wrap ui.write so that diff output can be labeled/colorized.

    Returns the original ui.write so the caller can restore it when done.
    """
    oldwrite = ui.write

    def labelwrite(orig, *args, **kw):
        # run everything written through patch.difflabel so each chunk
        # gets the appropriate diff label appended
        label = kw.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    def wrapper(*args, **kwargs):
        return labelwrite(oldwrite, *args, **kwargs)

    setattr(ui, 'write', wrapper)
    return oldwrite
35 35
def filterchunks(ui, originalhunks, usecurses, testfile):
    """Let the user pick hunks: curses UI when enabled, plain prompts otherwise.

    'testfile' selects the scripted test chunk selector for the curses path.
    """
    if not usecurses:
        return patch.filterpatch(ui, originalhunks)

    if testfile:
        recordfn = crecordmod.testdecorator(testfile,
                                            crecordmod.testchunkselector)
    else:
        recordfn = crecordmod.chunkselector

    return crecordmod.filterpatch(ui, originalhunks, recordfn)
48 48
def recordfilter(ui, originalhunks):
    """Interactively filter hunks, with diff output labeled for color.

    ui.write is temporarily wrapped for labeling and always restored,
    even when hunk selection raises.
    """
    wantcurses = ui.configbool('experimental', 'crecord', False)
    cursestestfile = ui.config('experimental', 'crecordtest', None)
    oldwrite = setupwrapcolorwrite(ui)
    try:
        chunks = filterchunks(ui, originalhunks, wantcurses, cursestestfile)
    finally:
        ui.write = oldwrite
    return chunks
58 58
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them via 'commitfunc'.

    'cmdsuggest' names the command suggested when the ui is not
    interactive; 'backupall' backs up every changed file instead of only
    the selected ones; 'filterfn' is the hunk selection function
    (e.g. recordfilter) applied to the parsed patch chunks.
    """
    import merge as mergemod

    if not ui.interactive():
        raise util.Abort(_('running non-interactively, use %s instead') %
                         cmdsuggest)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        merge = len(repo[None].parents()) > 1
        if merge:
            raise util.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        status = repo.status(match=match)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, so we have intending-to apply subset of it
        try:
            chunks = filterfn(ui, originalchunks)
        except patch.PatchError, err:
            raise util.Abort(_('error parsing patch: %s') % err)

        # collect the files touched by the selected chunks; headers
        # without files() just get skipped
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            # only modified files need a backup; added/removed files are
            # restored by the merge below
            tobackup = [f for f in newfiles if f in modified]

        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError, err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname)
                shutil.copystat(repo.wjoin(f), tmpname)
                backups[f] = tmpname

            fp = cStringIO.StringIO()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                choices = lambda key: key in backups
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, choices)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError, err:
                    raise util.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))
                    util.copyfile(tmpname, repo.wjoin(realname))
                    # Our calls to copystat() here and above are a
                    # hack to trick any editors that have f open that
                    # we haven't modified them.
                    #
                    # Also note that this racy as an editor could
                    # notice the file's mtime before we've finished
                    # writing it.
                    shutil.copystat(tmpname, repo.wjoin(realname))
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    return commit(ui, repo, recordfunc, pats, opts)
199 199
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}
    allcmds = []

    if cmd in table:
        # an exact match short-circuits: the "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        else:
            found = None
            if not strict:
                # accept any unambiguous prefix of an alias
                for alias in aliases:
                    if alias.startswith(cmd):
                        found = alias
                        break
        if found is None:
            continue
        if aliases[0].startswith("debug") or found.startswith("debug"):
            debugchoice[found] = (aliases, table[entry])
        else:
            choice[found] = (aliases, table[entry])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
237 237
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact (possibly abbreviated) match wins outright
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice.keys()))

    if choice:
        return choice.values()[0]

    raise error.UnknownCommand(cmd, allcmds)
254 254
def findrepo(p):
    """Walk upward from directory 'p' looking for a '.hg' directory.

    Returns the repository root, or None once the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            return None
        p = parent

    return p
262 262
def bailifchanged(repo, merge=True):
    """Abort if the working directory has uncommitted changes.

    With merge=True (the default) an uncommitted merge also aborts.
    Subrepositories are checked recursively.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    # any of modified/added/removed/deleted means dirty
    if any(repo.status()[:4]):
        raise util.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged()
272 272
def logmessage(ui, opts):
    """Get the log message according to the -m and -l options.

    Returns the --message text, or the content of --logfile ('-' reads
    stdin), or None when neither was given. Giving both aborts.
    """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if logfile and not message:
        try:
            if logfile == '-':
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))
    return message
291 291
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        merging = ctxorbool
    else:
        # a changectx: more than one parent means a merge
        merging = 1 < len(ctxorbool.parents())

    suffix = ".merge" if merging else ".normal"
    return baseformname + suffix
308 308
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcededitor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forcededitor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
339 339
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        # no limit requested (absent, empty or 0)
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise util.Abort(_('limit must be positive'))
    return limit
353 353
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in the output filename pattern 'pat'.

    Supported escapes (each only active when its source value is given):
      %%  literal '%'              %b  basename of the repo root
      %H  full node hex            %h  short node hex
      %R  changeset rev number     %r  rev, zero-padded to 'revwidth'
      %m  str(desc) with non-word characters replaced by '_'
      %N  total number of patches  %n  seqno (zero-padded when total known)
      %s  basename of 'pathname'   %d  dirname of 'pathname' (or '.')
      %p  'pathname' as given

    Raises util.Abort for an unknown escape, including a bare trailing
    '%' (which previously crashed with an uncaught IndexError).
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }
    try:
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: str(repo.changelog.rev(node)),
                'h': lambda: short(node),
                'm': lambda: re.sub(r'[^\w]', '_', str(desc)),
                'r': lambda: str(repo.changelog.rev(node)).zfill(revwidth
                                                                 or 0),
            })
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        i = 0
        patlen = len(pat)
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                if i == patlen:
                    # bare trailing '%': report it as an invalid (empty)
                    # format spec instead of dying with IndexError
                    raise KeyError('')
                c = expander[pat[i]]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
399 399
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return a file object for output selected by the pattern 'pat'.

    An empty pattern or '-' means the ui's stdout (for writable modes)
    or stdin; 'pat' may also already be a file-like object, in which
    case it is returned as-is. Otherwise the pattern is expanded via
    makefilename and opened. 'modemap' lets repeated writes to the same
    expanded name switch from 'wb' to 'ab' after the first open.
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = repo.ui.fout if writable else repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            return os.fdopen(os.dup(fp.fileno()), mode)
        # this fp can't be duped properly, so hand back a shim whose
        # close() is a no-op, letting callers close it safely
        class wrappedfileobj(object):
            noop = lambda x: None
            def __init__(self, f):
                self.f = f
            def __getattr__(self, attr):
                if attr == 'close':
                    return self.noop
                return getattr(self.f, attr)
        return wrappedfileobj(fp)

    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
437 437
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    Option validation: --changelog and --manifest are mutually exclusive,
    take no filename, and need a repository. Without a repository, a bare
    revlog is opened directly from the '.i' file next to 'file_'.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest '
                    'without a repository')
    if msg:
        raise util.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if r:
        return r

    # fall back to opening the revlog file directly
    if not file_:
        raise error.CommandError(cmd, _('invalid arguments'))
    if not os.path.isfile(file_):
        raise util.Abort(_("revlog '%s' not found") % file_)
    return revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                         file_[:-2] + ".i")
472 472
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files matching 'pats'.

    The last element of 'pats' is the destination; the rest are sources.
    Returns True when at least one copy failed, False otherwise.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about unmanaged or removed files matched exactly
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform a single copy/rename; returns True on failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist under
                    # dest, to pick the more plausible strip length
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
700 700
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    Depending on opts: writes a pid file; daemonizes by re-running the
    command detached ('daemon'); or, in the detached child
    ('daemon_pipefds'), finishes startup by removing the lock file and
    redirecting stdio to 'logfile' or the null device. Finally invokes
    runfn(), if given.
    '''

    def writepid(pid):
        # record the pid in opts['pid_file'] (append when appendpid is set)
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                # startup is complete once the child removed the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(os.getpid())

    if opts['daemon_pipefds']:
        # we are the detached child: signal readiness and detach stdio
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
780 780
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, rejects flag)
    tuple.
    """
    tmpname, message, user, date, branch, nodeid, p1, p2 = \
        patch.extract(ui, hunk)

    # --bypass: apply via an in-memory context instead of the working dir
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # patch.extract found nothing applicable
        return (None, None, False)
    msg = _('applied to working directory')

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise util.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            repo.dirstate.beginparentchange()
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError, e:
                # with --partial, keep going and record the rejects
                if not partial:
                    raise util.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if opts.get('no_commit'):
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                n = repo.commit(message, opts.get('user') or user,
                                opts.get('date') or date, match=m,
                                editor=editor, force=partial)
            repo.dirstate.endparentchange()
        else:
            if opts.get('exact') or opts.get('import_branch'):
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError, e:
                    raise util.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            opts.get('user') or user,
                                            opts.get('date') or date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and opts.get('no_commit'):
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise util.Abort(_('patch is damaged or loses information'))
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
930 930
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    One patch per revision in 'revs' is written to 'fp' when given,
    otherwise to files named by expanding 'template' via makefileobj
    ('-' or an empty template writes to the ui's stdout).
    'switch_parent' diffs against the second parent instead of the
    first; 'opts' are the diff options passed to patch.diffui.
    '''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])
    filemode = {}

    def single(rev, seqno, fp):
        # write one revision's patch (header + description + diff) to fp
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] # commit always has a first line
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            if fp != template:
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
991 991
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    Writes labeled output either through ui.write (fp is None) or raw to
    fp.  With stat=True a diffstat is produced instead of a full diff;
    root restricts the diff to paths under that (repo-relative) directory;
    listsubrepos additionally recurses into subrepositories.
    '''
    if fp is None:
        write = ui.write
    else:
        # raw sink: labels are accepted but dropped when writing to a file
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        # warn (but still proceed) about patterns outside the relative root
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat does not need context lines; dropping them is cheaper
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1049 1049
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        # when buffered, output is captured per-rev in self.hunk/self.header
        # and emitted later via flush() (used by graph log and friends)
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        self.header = {}      # rev -> buffered header text
        self.hunk = {}        # rev -> buffered changeset text
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        """Emit any buffered output for rev; return 1 if something was
        written, 0 otherwise.  A header identical to the previously
        written one is suppressed."""
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # emit the footer (if a subclass/templater set one) exactly once
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """Public entry point: render ctx, buffering when requested."""
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        if rev is None:
            # working-directory context: display as "<p1 rev>:<p1 node>+"
            pctx = ctx.p1()
            revnode = (pctx.rev(), hexfunc(pctx.node()) + '+')
        else:
            revnode = (rev, hexfunc(changenode))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for name, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if name == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
                          label='log.phase')
        for pctx in self._meaningful_parentrevs(ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # (modified, added, removed) relative to the first parent
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        """Append a diffstat and/or diff for node when --stat/--patch
        options were given (self.diffopts) and a match function exists."""
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= scmutil.intrev(self.repo, ctx.rev()) - 1:
            return []
        return parents
1235 1235
class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    # Emits a JSON array of changeset objects by hand (no json module),
    # writing ",\n {" separators between entries and closing the array
    # in close().  Output shape varies with quiet/verbose/debug levels.

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        self._first = True  # tracks whether the opening "[" is still pending

    def close(self):
        # terminate the JSON array; "[]" when no changeset was shown
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            # working-directory context: no numeric rev / node hash
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write('\n  "rev": %s' % jrev)
            self.ui.write(',\n  "node": %s' % jnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n  "rev": %s' % jrev)
        self.ui.write(',\n  "node": %s' % jnode)
        self.ui.write(',\n  "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n  "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n  "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n  "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n  "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n  "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n  "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n  "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write(',\n  "manifest": %s' % jmanifestnode)

            self.ui.write(',\n  "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # status relative to the first parent: (modified, added, removed)
            files = ctx.p1().status(ctx)
            self.ui.write(',\n  "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n  "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n  "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n  "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n  "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # capture diffstat output so it can be embedded as a string
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n  "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n  "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1333 1333
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    # Renders changesets through the templater.  tmpl, when given, becomes
    # the 'changeset' template; otherwise templates come from mapfile.

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # full node hashes in debug mode, short (12-char) otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        if tmpl:
            self.t.cache['changeset'] = tmpl

        self.cache = {}

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()),
                        ('node', p.hex()),
                        ('phase', p.phasestr())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode
        # later entries win: mode-specific templates (verbose/quiet/debug)
        # override the plain ones when both are defined in the map file

        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                # footer is rendered once; close() in the base class emits it
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                      **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1424 1424
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.

    Returns a (tmpl, mapfile) pair, at most one of which is non-None:
    a literal template string, or a path to a style map file.  Resolution
    order: explicit tmpl/style arguments, then ui.logtemplate, then
    ui.style; tmpl may also name a stock style, a [templates] config key,
    or a file path.
    """

    # ui settings
    if not tmpl and not style: # template are stronger than style
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            # accept both quoted and unquoted template strings in hgrc
            try:
                tmpl = templater.parsestring(tmpl)
            except SyntaxError:
                tmpl = templater.parsestring(tmpl, quoted=False)
            return tmpl, None
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        mapfile = style
        # bare style name: look for a stock map-cmdline.<name> file
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname
        return None, mapfile

    if not tmpl:
        return None, None

    # looks like a literal template?
    if '{' in tmpl:
        return tmpl, None

    # perhaps a stock style?
    if not os.path.split(tmpl)[0]:
        mapname = (templater.templatepath('map-cmdline.' + tmpl)
                   or templater.templatepath(tmpl))
        if mapname and os.path.isfile(mapname):
            return None, mapname

    # perhaps it's a reference to [templates]
    t = ui.config('templates', tmpl)
    if t:
        try:
            tmpl = templater.parsestring(t)
        except SyntaxError:
            tmpl = templater.parsestring(t, quoted=False)
        return tmpl, None

    if tmpl == 'list':
        ui.write(_("available styles: %s\n") % templater.stylelist())
        raise util.Abort(_("specify a template"))

    # perhaps it's a path to a map or a template
    if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
        # is it a mapfile for a style?
        if os.path.basename(tmpl).startswith("map-"):
            return None, os.path.realpath(tmpl)
        tmpl = open(tmpl).read()
        return tmpl, None

    # constant string?
    return tmpl, None
1488 1488
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    matchfn = None
    # --patch/--stat need a match function so showpatch() can produce diffs
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)

    # 'json' bypasses the templater entirely and uses the JSON printer
    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))

    if not tmpl and not mapfile:
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    try:
        t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                buffered)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])
    return t
1519 1519
def showmarker(ui, marker):
    """Print one obsolescence marker in a human-readable, single-line form.

    Intended for use by debug commands."""
    out = ui.write
    out(hex(marker.precnode()))
    for successor in marker.succnodes():
        out(' ')
        out(hex(successor))
    out(' %X ' % marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        out('{%s} ' % ', '.join(hex(p) for p in parentnodes))
    out('(%s) ' % util.datestr(marker.date()))
    metaitems = [t for t in sorted(marker.metadata().items())
                 if t[0] != 'date']
    out('{%s}' % ', '.join('%r: %r' % t for t in metaitems))
    out('\n')
1537 1537
def finddate(ui, repo, date):
    """Return (as a string) the tipmost changeset rev matching a date spec.

    Aborts when no revision matches the given date."""

    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    matched = {}

    def collect(ctx, fns):
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    for ctx in walkchangerevs(repo, matcher, {'rev': None}, collect):
        rev = ctx.rev()
        if rev not in matched:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, util.datestr(matched[rev])))
        return str(rev)

    raise util.Abort(_("revision matching date not found"))
1558 1558
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield an endless sequence of window sizes, doubling each time until
    sizelimit is reached, after which the size is repeated forever."""
    while windowsize < sizelimit:
        yield windowsize
        windowsize *= 2
    while True:
        yield windowsize
1564 1564
class FileWalkError(Exception):
    """Raised by walkfilerevs when the file history cannot be walked using
    filelogs alone, signalling the caller to fall back to the slow path."""
    pass
1567 1567
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    Side effect: fncache (rev -> [filename]) is populated for every
    wanted rev so callers can avoid re-reading changesets.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None); copy sources discovered
        # during the walk are appended to 'copies' and picked up here
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
            # XXX insert 1327 fix here
            if flparentlinkrevs:
                ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1664 1664
class _followfilter(object):
    """Stateful filter deciding whether a revision belongs to the
    ancestor/descendant chain of the first revision passed to match().

    match() must be called with monotonically diverging revs: after the
    first call fixes startrev, higher revs are tested as descendants and
    lower revs as ancestors, with 'roots' tracking the frontier."""

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        # onlyfirst: follow only first parents (--follow-first)
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                # drop nullrev placeholders for missing parents
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            # first call: anchor the walk on this revision
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                # consume this root and advance the frontier to its parents
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1702 1702
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # patterns or --removed force reading every changeset (slow path)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs

    if not slowpath and match.files():
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # check once per rev, then cache the verdict
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): relies on wanted supporting subtraction with
                # a plain list; when wanted is a plain set (walkfilerevs fast
                # path) set - list would raise TypeError -- confirm which
                # types actually reach this branch with --prune
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                try:
                    rev = it.next()
                    if want(rev):
                        nrevs.append(rev)
                except (StopIteration):
                    stopiteration = True
                    break
            # prepare in forward order, then yield in original window order
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1841 1841
def _makefollowlogfilematcher(repo, files, followfirst):
    """Return a callable mapping a revision number to a match object.

    Used when showing --patch/--stat together with --follow FILE: each
    revision's diff is limited to the names FILE had in its ancestry.
    The linkrev -> names table is rebuilt by replaying the traversal the
    --follow revset performs and relating linkrevs to file names, which
    is not strictly "correct" but good enough in practice.
    """
    linkrevtonames = {}
    populated = [False]  # mutable cell so the closure below can flip it
    wdirparent = repo['.']

    def _populate():
        for name in files:
            fctx = wdirparent[name]
            # record the starting file context itself ...
            linkrevtonames.setdefault(fctx.linkrev(), set()).add(fctx.path())
            # ... and every ancestor file context, keyed by linkrev
            for anc in fctx.ancestors(followfirst=followfirst):
                linkrevtonames.setdefault(anc.linkrev(), set()).add(anc.path())

    def filematcher(rev):
        # Populate lazily, only on the first call.
        if not populated[0]:
            populated[0] = True
            _populate()
        return scmutil.matchfiles(repo, linkrevtonames.get(rev, []))

    return filematcher
1868 1868
1869 1869 def _makenofollowlogfilematcher(repo, pats, opts):
1870 1870 '''hook for extensions to override the filematcher for non-follow cases'''
1871 1871 return None
1872 1872
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Map of log option -> (revset template, join operator).  The join
    # operator combines the templates of a list-valued option; None means
    # the option is single-valued.  Keys starting with '_' are synthetic
    # options set further down in this function.
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    opts = dict(opts)  # copy: synthetic '_xxx' keys are added below
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behaviour depends on revs...
    it = iter(revs)
    startrev = it.next()
    try:
        # ascending rev order means we follow descendants, not ancestors
        followdescendants = startrev < it.next()
    except (StopIteration):
        followdescendants = False

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    # synthetic option names, indexed by followfirst (and, for the
    # no-pattern case, by followdescendants)
    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # Translate every active option into its revset fragment and AND
    # them all together.
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2026 2026
def _logrevs(repo, opts):
    """Return the set of revisions 'log' should consider.

    The default --rev value depends on --follow, while --follow behaviour
    depends on the revisions resolved from --rev, hence this helper.
    """
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        # Explicitly requested revisions win.
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # Unborn working-directory parent: nothing to follow.
            return revset.baseset()
        return repo.revs('reverse(:.)')
    # Neither --rev nor --follow: the whole repository, newest first.
    allrevs = revset.spanset(repo)
    allrevs.reverse()
    return allrevs
2041 2041
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        # nothing to show at all: short-circuit with an empty set
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # truncate to the first 'limit' revisions (--limit)
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2078 2078
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    userspecified = bool(opts.get('rev'))
    if expr:
        if not userspecified:
            # Revset matchers often run faster over changelog-ordered
            # revisions, because most filters deal with the changelog.
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # A revset match can reorder revisions: "A or B" typically yields
        # the revision matching A before the one matching B.  Restore the
        # newest-first ordering afterwards (unless the user fixed it).
        revs = matcher(repo, revs)
        if not userspecified:
            revs.sort(reverse=True)
    if limit is not None:
        # keep only the first 'limit' revisions (--limit)
        picked = []
        for rev in revs:
            if len(picked) >= limit:
                break
            picked.append(rev)
        revs = revset.baseset(picked)

    return revs, expr, filematcher
2117 2117
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    """Render the DAG produced by graphmod as ascii art via 'displayer'.

    showparents lists the nodes to mark with '@' (working-directory
    parents); getrenamed, if given, supplies rename information per file
    and revision; filematcher, if given, limits the per-revision diff.
    """
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        # Pick the graph glyph: '@' wdir parent, 'x' obsolete,
        # '_' closes-branch, 'o' otherwise.
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        # NOTE: ctx.rev() of 0 is falsy, so rename lookup is skipped for
        # the root revision (which cannot have copies anyway).
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # Render into the displayer's buffer, then pull the lines back out
        # so they can be decorated with graph edges.
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2148 2148
def graphlog(ui, repo, *pats, **opts):
    """Show a revision graph; parameters mirror those of the log command."""
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # Rename detection must not look past the newest requested rev.
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        else:
            endrev = None
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    displayer = show_changeset(ui, repo, opts, buffered=True)
    parentnodes = [pctx.node() for pctx in repo[None].parents()]
    displaygraph(ui, revdag, displayer, parentnodes,
                 graphmod.asciiedges, getrenamed, filematcher)
2164 2164
def checkunsupportedgraphflags(pats, opts):
    """Abort when an option incompatible with -G/--graph is enabled."""
    for name in ["newest_first"]:
        if opts.get(name):
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % name.replace("_", "-"))
2170 2170
def graphrevs(repo, nodes, opts):
    """Return a DAG iterator over *nodes*, newest first, honoring --limit.

    Note that *nodes* is reversed in place before use.
    """
    limit = loglimit(opts)
    nodes.reverse()
    if limit is None:
        wanted = nodes
    else:
        wanted = nodes[:limit]
    return graphmod.nodes(repo, wanted)
2177 2177
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule matched files for addition, recursing into subrepos.

    When explicitonly is true, only exact (non-pattern) matches are
    considered.  Returns the list of file names that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # Intercept the matcher's bad-file callback so rejected names are
    # collected here while still being reported through the original.
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # auditor that warns/aborts on case-insensitive name collisions
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in wctx.walk(match):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    # Recurse into subrepositories; with --subrepos, their non-exact
    # matches are added too (explicitonly=False).
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2214 2214
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking matched files, recursing into subrepositories.

    Returns a pair (bad, forgot): names that could not be forgotten and
    names that were successfully forgotten.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # Intercept the matcher's bad-file callback to collect rejected names.
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    # status tuple indices: 0 modified, 1 added, 3 deleted, 6 clean
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # Warn about explicitly named files that are not tracked.
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2262 2262
def files(ui, ctx, m, fm, fmt, subrepos):
    """Write the names of ctx's files matched by m through formatter fm.

    Returns 0 when at least one file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    dirstate = ctx.repo().dirstate

    for path in ctx.matches(m):
        # In the working directory (rev is None) skip files marked
        # for removal.
        if rev is None and dirstate[path] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fctx = ctx[path]
            fm.write('size flags', '% 10d % 1s ', fctx.size(), fctx.flags())
        fm.data(abspath=path)
        fm.write('path', fmt, m.rel(path))
        ret = 0

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2291 2291
def remove(ui, repo, m, prefix, after, force, subrepos):
    """Schedule matched files for removal, recursing into subrepos.

    'after' records deletions already made in the working directory;
    'force' removes even modified/added files.  Returns 0 on success,
    1 when any file could not be removed.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    for subpath in sorted(wctx.substate):
        def matchessubrepo(matcher, subpath):
            # True if the matcher names the subrepo itself or a path in it.
            if matcher.exact(subpath):
                return True
            for f in matcher.files():
                if f.startswith(subpath):
                    return True
            return False

        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    for f in m.files():
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath):
                    return True
            return False

        isdir = f in deleteddirs or f in wctx.dirs()
        if f in repo.dirstate or isdir or f == '.' or insubrepo():
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

    # Decide which files actually get removed, warning about the rest.
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        list = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(list):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Delete from disk (unless --after) and drop from dirstate, under
    # the working-directory lock.
    wlock = repo.wlock()
    try:
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()

    return ret
2376 2376
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write the contents of matched files from ctx to the output target.

    Returns 0 when at least one file was written, 1 otherwise.
    """
    err = 1

    def write(path):
        # Open the output target (file template or stdout) and dump the
        # (optionally decoded) file data into it.
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    bad = matcher.bad

    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        bad(path, msg)

    matcher.bad = badfn

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    # restore the original bad-file callback before recursing
    matcher.bad = bad

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2429 2429
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # Handle addremove carefully: this function can be invoked from a
    # command that does not support the option at all.
    if opts.get('addremove'):
        if scmutil.addremove(repo, matcher, "", opts) != 0:
            raise util.Abort(
                _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2446 2446
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset 'old' to fold in current working-directory changes.

    A temporary intermediate commit is made for the dirty working
    directory (if any); the replacement changeset is then committed on
    top of old's parents, and the superseded changesets are obsoleted
    or stripped.  Returns the node of the amended changeset (old's own
    node when nothing changed).
    """
    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('amend')
        try:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            currentbookmark = repo._bookmarkcurrent
            try:
                repo._bookmarkcurrent = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarkcurrent = currentbookmark
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # BUGFIX: 'old.p2' is a bound method and is always truthy;
                # call it so second-parent copies are only merged in when
                # 'old' actually is a merge (null p2 context is falsy).
                if old.p2():
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    # serve file content from the intermediate commit
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            # Commit the replacement under the phase of the original
            # changeset (or 'secret' when requested).
            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        marks[bm] = newid
                    marks.write()
            #commit the whole amend process
            createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
            if createmarkers and newid != old.node():
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
            tr.close()
        finally:
            tr.release()
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        if newid is None:
            # the amend did not complete; make sure we do not keep a
            # stale dirstate around
            repo.dirstate.invalidate()
        lockmod.release(lock, wlock)
    return newid
2641 2641
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, invoking an editor only when it is empty."""
    description = ctx.description()
    if description:
        return description
    return commitforceeditor(repo, ctx, subs, editform=editform)
2646 2646
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform=''):
    """Open the commit message editor for ctx and return the edited text.

    The initial text comes from the most specific matching
    [committemplate] entry (keyed by editform components), falling back
    to the default layout.  'HG:' lines are stripped from the result;
    finishdesc, if given, post-processes the text.  Raises util.Abort
    when the resulting message is empty.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # Walk from the most specific 'committemplate' key
    # ('changeset.<editform...>') down to plain 'changeset'.
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root; restore the previous working
    # directory even if the editor raises (e.g. the user aborts the edit)
    olddir = os.getcwd()
    os.chdir(repo.root)
    try:
        text = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                            editform=editform)
        text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    finally:
        os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
2676 2676
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit-editor text for ctx from a '[committemplate]' entry.

    tmpl is the template text (or style name) selected by the caller;
    extramsg is the hint line shown to the user in the editor.
    Returns the rendered text as a string.
    """
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    try:
        t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])

    # expose every other [committemplate] key as a template snippet so
    # templates can reference each other
    for k, v in repo.ui.configitems('committemplate'):
        if k != 'changeset':
            t.t.cache[k] = v

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    # capture the template output instead of writing it to the terminal
    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2696 2696
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) text shown in the commit editor."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(_("HG: Enter commit message."
                   " Lines beginning with 'HG:' are removed."))
    lines.append("HG: %s" % extramsg)
    lines.append("HG: --")
    lines.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        lines.append(_("HG: branch merge"))
    if ctx.branch():
        lines.append(_("HG: branch '%s'") % ctx.branch())
    if bookmarks.iscurrent(repo):
        lines.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
    for s in subs:
        lines.append(_("HG: subrepo %s") % s)
    for f in added:
        lines.append(_("HG: added %s") % f)
    for f in modified:
        lines.append(_("HG: changed %s") % f)
    for f in removed:
        lines.append(_("HG: removed %s") % f)
    if not (added or modified or removed):
        lines.append(_("HG: no files changed"))
    lines.append("")

    return "\n".join(lines)
2724 2724
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print status messages after a commit has been created.

    Emits 'created new head' when the new changeset adds a head to the
    branch, 'reopening closed branch head' when committing on top of a
    closed head, and the committed-changeset line in debug/verbose mode.

    bheads is the list of branch head nodes before the commit; opts is
    the command option dict (only 'amend' and 'close_branch' are read).
    """
    # NOTE: was a mutable default argument (opts={}); never mutated here,
    # but use the None-sentinel idiom to avoid the shared-default trap.
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N y additional topo root
        #
        # B N y additional branch root
        # C N y additional topo head
        # H N n usual case
        #
        # B B y weird additional branch root
        # C B y branch merge
        # H B n merge with named branch
        #
        # C C y additional head from merge
        # C H n merge with a head
        #
        # H H n head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2770 2770
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore files matched by pats/opts to their state in revision ctx.

    parents is the (parent, p2) node pair of the working directory.  The
    function classifies every matched file by comparing the working
    directory, the working directory parent and the target revision,
    builds per-action file lists, and hands them to _performrevert().
    Must only report/act consistently with --dry-run and --no-backup.
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        # reverting to the second parent of a merge: treat it as "parent"
        parent = p2
    if node == parent:
        pmf = mf
    else:
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    wlock = repo.wlock()
    try:
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            # silence match errors during the dirstate walk; badfn below
            # reports them once, after both walks
            m.bad = lambda x, y: False
            for abs in repo.walk(m):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            m.bad = badfn
            for abs in ctx.walk(m):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted changesets
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may needs backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            if pmf is None:
                # only need parent manifest in the merge case,
                # so do not read by default
                pmf = repo[parent].manifest()
            mergeadd = dsmodified - set(pmf)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                   }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backup = 2 # unconditionally do backup
        check = 1 # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backup = check = discard

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], backup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since targe, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup and (backup <= dobackup
                                     or wctx[abs].cmp(ctx[abs])):
                        bakname = "%s.orig" % rel
                        ui.note(_('saving current version of %s as %s\n') %
                                (rel, bakname))
                        if not opts.get('dry_run'):
                            if interactive:
                                # interactive mode may still need the original
                                # file content for hunk selection, so copy
                                util.copyfile(target, bakname)
                            else:
                                util.rename(target, bakname)
                if ui.verbose or not exact:
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise util.Abort("subrepository '%s' does not exist in %s!"
                                     % (sub, short(ctx.node())))
    finally:
        wlock.release()
3052 3052
3053 3053 def _revertprefetch(repo, ctx, *files):
3054 3054 """Let extension changing the storage layer prefetch content"""
3055 3055 pass
3056 3056
def _performrevert(repo, parents, ctx, actions, interactive=False):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = parents
    node = ctx.node()
    def checkout(f):
        # write target revision's content into the working copy; returns
        # the number of bytes written (used below for issue4583)
        fc = ctx[f]
        return repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        util.unlinkpath(repo.wjoin(f))
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        # file already gone from disk: only update the dirstate
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, {})
        diff = patch.diff(repo, None, ctx.node(), m)
        originalchunks = patch.parsepatch(diff)
        try:
            chunks = recordfilter(repo.ui, originalchunks)
        except patch.PatchError, err:
            raise util.Abort(_('error parsing patch: %s') % err)

        # Apply changes
        fp = cStringIO.StringIO()
        for c in chunks:
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError, err:
                raise util.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            wsize = checkout(f)
            if normal:
                normal(f)
            elif wsize == repo.dirstate._map[f][2]:
                # changes may be overlooked without normallookup,
                # if size isn't changed at reverting
                # (dirstate entry index 2 is the recorded size; same-size
                # rewrites would otherwise be reported clean - issue4583)
                repo.dirstate.normallookup(f)

    for f in actions['add'][0]:
        checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # carry rename metadata over for files restored from the target revision
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3137 3141
def command(table):
    """Returns a function object to be used as a decorator for making commands.

    This function receives a command table as its argument. The table should
    be a dict.

    The returned function can be used as a decorator for adding commands
    to that command table. This function accepts multiple arguments to define
    a command.

    The first argument is the command name.

    The options argument is an iterable of tuples defining command arguments.
    See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.

    The synopsis argument defines a short, one line summary of how to use the
    command. This shows up in the help output.

    The norepo argument defines whether the command does not require a
    local repository. Most commands operate against a repository, thus the
    default is False.

    The optionalrepo argument defines whether the command optionally requires
    a local repository.

    The inferrepo argument defines whether to try to find a repository from the
    command line arguments. If True, arguments will be examined for potential
    repository locations. See ``findrepo()``. If a repository is found, it
    will be used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            # table entry is (func, options[, synopsis])
            entry = (func, list(options))
            if synopsis:
                entry = entry + (synopsis,)
            table[name] = entry

            if norepo or optionalrepo or inferrepo:
                # Avoid import cycle.
                import commands
                aliases = ' %s' % ' '.join(parsealiases(name))
                if norepo:
                    commands.norepo += aliases
                if optionalrepo:
                    commands.optionalrepo += aliases
                if inferrepo:
                    commands.inferrepo += aliases

            return func
        return decorator

    return cmd
3193 3197
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
# consumed by checkunfinished()/clearunfinished() below
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3222 3226
def checkunfinished(repo, commit=False):
    '''Abort if an unfinished multistep operation (like graft) is found.

    It's probably good to check this right before bailifchanged().
    With commit=True, state files whose operation allows committing are
    ignored.
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            continue
        if repo.vfs.exists(statefile):
            raise util.Abort(msg, hint=hint)
3233 3237
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.
    '''
    # first pass: refuse to proceed when any unclearable state exists,
    # before removing anything
    for entry in unfinishedstates:
        statefile, clearable, allowcommit, msg, hint = entry
        if not clearable and repo.vfs.exists(statefile):
            raise util.Abort(msg, hint=hint)
    # second pass: drop the clearable state files
    for entry in unfinishedstates:
        statefile, clearable = entry[0], entry[1]
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.join(statefile))
@@ -1,1967 +1,1972 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """filecache that always targets the unfiltered repo

    All filecache usage on repo are done for logic that should be unfiltered.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve fname through the repo's store path, not the .hg root
        return obj.sjoin(fname)
40 40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfiltered = repo.unfiltered()
        if unfiltered is not repo:
            # accessed through a filtered view: delegate to the attribute
            # on the unfiltered repo (computing and caching it there)
            return getattr(unfiltered, self.name)
        return super(unfilteredpropertycache, self).__get__(unfiltered)
49 49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # bypass any __setattr__ override so the cached value is stored
        # directly in the instance dict
        object.__setattr__(obj, self.name, value)
55 55
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60 60
def unfilteredmethod(orig):
    """decorator forcing a method to always run on the unfiltered repo"""
    def wrapper(repo, *args, **kwargs):
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
66 66
# capabilities advertised by a modern local peer (most recent API)
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
# legacy peers additionally support the old changegroupsubset call
legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # serve the 'served' filtered view, like a remote server would
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        # returns a bundle2 unbundler object when HG20 is requested, a
        # changegroup stream otherwise
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception, exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    # legacy wire-protocol calls, delegated straight to the local repo
    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
196 196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 197 'dotencode'))
198 198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
199 199 requirements = ['revlogv1']
200 200 filtername = None
201 201
202 202 # a list of (ui, featureset) functions.
203 203 # only functions defined in module of enabled extensions are invoked
204 204 featuresetupfuncs = set()
205 205
206 206 def _baserequirements(self, create):
207 207 return self.requirements[:]
208 208
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at *path*.

        Sets up working-dir and .hg vfs objects, loads per-repo config and
        extensions, resolves requirements, and wires up the store (possibly
        shared via .hg/sharedpath).
        """
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # per-repo config and extensions; a missing hgrc is fine
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            # only run feature-setup hooks owned by enabled extensions
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    requirements.append("manifestv2")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: treat as an empty requirement set
                requirements = set()

        self.sharedpath = self.path
        try:
            # follow .hg/sharedpath when this repo shares its store
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
324 324
    def close(self):
        # Flush write-buffered caches before the repo object is discarded.
        self._writecaches()

    def _writecaches(self):
        # The rev-branch cache is only written if it was ever instantiated.
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        """Adjust the advertised peer capabilities; adds the bundle2
        capability blob when advertising is enabled."""
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps
338 338
    def _applyrequirements(self, requirements):
        """Record *requirements* and derive revlog opener options.

        Per-repo tuning knobs from the config (chunk cache size, max delta
        chain length, manifest cache size, tree manifests) are forwarded to
        the store vfs so revlogs pick them up when opened.
        """
        self.requirements = requirements
        self.svfs.options = dict((r, 1) for r in requirements
                                 if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
        if usetreemanifest is not None:
            self.svfs.options['usetreemanifest'] = usetreemanifest
355 355
356 356 def _writerequirements(self):
357 357 reqfile = self.vfs("requires", "w")
358 358 for r in sorted(self.requirements):
359 359 reqfile.write("%s\n" % r)
360 360 reqfile.close()
361 361
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        *path* is an absolute filesystem path; only paths under self.root
        that correspond to a subrepository recorded in the working context's
        substate (or nested below one) are accepted.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # this is a direct subrepository of this repo
                    return True
                else:
                    # nested deeper: ask the subrepo to validate the rest
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
399 399
    def peer(self):
        # Wrap this repo in the in-process peer interface.
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
416 416
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # In-memory bookmark store, invalidated when .hg/bookmarks changes.
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # Name of the active bookmark (tracked via .hg/bookmarks.current).
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        """Return the nodes of all variants of *bookmark*.

        'name' and 'name@suffix' count as variants of the same bookmark.
        """
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        # Phase data, cached against the store's phaseroots file.
        return phases.phasecache(self, self._phasedefaults)
436 436
    @storecache('obsstore')
    def obsstore(self):
        """The obsolescence-marker store (read-only unless marker creation
        is enabled for this repo)."""
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist but the feature is off: warn, don't fail
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
453 453
    @storecache('00changelog.i')
    def changelog(self):
        # The changelog; honor HG_PENDING so hooks spawned by an open
        # transaction can see not-yet-committed (pending) changesets.
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        # The manifest revlog, cached against 00manifest.i.
        return manifest.manifest(self.svfs)

    @repofilecache('dirstate')
    def dirstate(self):
        # Working-directory state. Unknown parent nodes are mapped to
        # nullid with a one-time warning instead of aborting.
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
482 482
    def __getitem__(self, changeid):
        # None -> working context; slice -> list of changectxs (filtered
        # revisions skipped); anything else -> a single changectx.
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        # membership means "this changeid can be resolved"
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        # number of revisions in the changelog
        return len(self.changelog)

    def __iter__(self):
        # iterate over revision numbers
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
521 521
    def url(self):
        # canonical URL of this (local) repository
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
533 533
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        """Internal helper for tag(): record *names* pointing at *node*.

        Local tags go to .hg/localtags; global tags are appended to
        .hgtags and committed. Fires 'pretag' before and 'tag' after each
        name. Returns the tagging changeset's node (None for local tags).
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines at EOF, ensuring the previous content ends
            # with a newline
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # re-record the old value so the new one supersedes it
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
607 607
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit on top of an already-dirty .hgtags
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
637 637
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by nodetags()/tagslist()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
660 660
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # bypass the cache: a filtered repo may expose a different
            # tag set than the unfiltered one the cache was built for
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
676 676
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
709 709
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            # build (rev, tag, node) triples so sorting orders by revision
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # invert the tag->node map once and cache it
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
741 741
    def nodebookmarks(self, node):
        # Return the sorted list of bookmark names pointing at *node*.
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        # Lazily create the rev->branch cache against the unfiltered repo.
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
760 760
    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                # fall through and implicitly return None
                pass

    def lookup(self, key):
        # resolve any changeid (rev number, node, tag, bookmark...) to a node
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        # Prefer interpreting *key* as a branch name on the remote (or
        # self); otherwise fall back to the branch of the revision that
        # *key* resolves to.
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
787 787
788 788 def known(self, nodes):
789 789 nm = self.changelog.nodemap
790 790 pc = self._phasecache
791 791 result = []
792 792 for n in nodes:
793 793 r = nm.get(n)
794 794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
795 795 result.append(resp)
796 796 return result
797 797
    def local(self):
        # distinguishes a local repo object from a remote peer
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        # path of a file inside .hg/
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        # path of a file inside the working directory
        return self.wvfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        # filelog for a tracked file; a leading '/' is tolerated
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        # alias for __getitem__
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
833 833
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, adjusting copy records.

        Wrapped in begin/endparentchange so the dirstate can detect an
        interrupted parent change.
        """
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # drop copy records whose source and dest are both unrelated
            # to the (single) remaining parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
850 850
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # current working directory, relative to the repo root
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # pretty-print repo path *f* relative to cwd
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open a file from the working directory
        return self.wvfs(f, mode)

    def _link(self, f):
        # is the working-directory file a symlink?
        return self.wvfs.islink(f)
867 867
    def _loadfilter(self, filter):
        """Load and cache the (matcher, fn, params) list for a config
        filter section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # an in-process filter registered via adddatafilter
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping data through an external command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
891 891
    def _filter(self, filterpats, filename, data):
        # apply the first filter whose pattern matches *filename*
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        # register a named in-process filter usable from [encode]/[decode]
        self._datafilters[name] = filter
911 911
    def wread(self, filename):
        # Read a working-directory file, applying encode filters; a
        # symlink yields its target string.
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # 'l' flag: materialize as a symlink to *data*
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                # 'x' flag: mark the file executable
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        # Apply decode filters without touching the filesystem.
        return self._filter(self._decodefilterpats, filename, data)
930 935
931 936 def currenttransaction(self):
932 937 """return the current transaction or None if non exists"""
933 938 if self._transref:
934 939 tr = self._transref()
935 940 else:
936 941 tr = None
937 942
938 943 if tr and tr.running():
939 944 return tr
940 945 return None
941 946
942 947 def transaction(self, desc, report=None):
943 948 if (self.ui.configbool('devel', 'all')
944 949 or self.ui.configbool('devel', 'check-locks')):
945 950 l = self._lockref and self._lockref()
946 951 if l is None or not l.held:
947 952 scmutil.develwarn(self.ui, 'transaction with no lock')
948 953 tr = self.currenttransaction()
949 954 if tr is not None:
950 955 return tr.nest()
951 956
952 957 # abort here if the journal already exists
953 958 if self.svfs.exists("journal"):
954 959 raise error.RepoError(
955 960 _("abandoned transaction found"),
956 961 hint=_("run 'hg recover' to clean up transaction"))
957 962
958 963 self.hook('pretxnopen', throw=True, txnname=desc)
959 964
960 965 self._writejournal(desc)
961 966 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
962 967 if report:
963 968 rp = report
964 969 else:
965 970 rp = self.ui.warn
966 971 vfsmap = {'plain': self.vfs} # root of .hg/
967 972 # we must avoid cyclic reference between repo and transaction.
968 973 reporef = weakref.ref(self)
969 974 def validate(tr):
970 975 """will run pre-closing hooks"""
971 976 pending = lambda: tr.writepending() and self.root or ""
972 977 reporef().hook('pretxnclose', throw=True, pending=pending,
973 978 xnname=desc, **tr.hookargs)
974 979
975 980 tr = transaction.transaction(rp, self.sopener, vfsmap,
976 981 "journal",
977 982 "undo",
978 983 aftertrans(renames),
979 984 self.store.createmode,
980 985 validator=validate)
981 986
982 987 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
983 988 tr.hookargs['TXNID'] = trid
984 989 # note: writing the fncache only during finalize mean that the file is
985 990 # outdated when running hooks. As fncache is used for streaming clone,
986 991 # this is not expected to break anything that happen during the hooks.
987 992 tr.addfinalize('flush-fncache', self.store.write)
988 993 def txnclosehook(tr2):
989 994 """To be run if transaction is successful, will schedule a hook run
990 995 """
991 996 def hook():
992 997 reporef().hook('txnclose', throw=False, txnname=desc,
993 998 **tr2.hookargs)
994 999 reporef()._afterlock(hook)
995 1000 tr.addfinalize('txnclose-hook', txnclosehook)
996 1001 def txnaborthook(tr2):
997 1002 """To be run if transaction is aborted
998 1003 """
999 1004 reporef().hook('txnabort', throw=False, txnname=desc,
1000 1005 **tr2.hookargs)
1001 1006 tr.addabort('txnabort-hook', txnaborthook)
1002 1007 self._transref = weakref.ref(tr)
1003 1008 return tr
1004 1009
    def _journalfiles(self):
        # (vfs, name) pairs for every file backed up during a transaction
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        # same backup files, under their post-transaction 'undo' names
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        # Snapshot dirstate, branch, description, bookmarks and phase
        # roots so an aborted transaction can be rolled back cleanly.
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1027 1032
    def recover(self):
        """Roll back an interrupted transaction under the store lock.

        Returns True if a journal was found and rolled back, False
        otherwise.
        """
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
1044 1049
    def rollback(self, dryrun=False, force=False):
        # Undo the last transaction. Takes both wlock and lock since the
        # dirstate may be rewritten. Returns 0 on success, 1 if there is
        # nothing to roll back.
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
1057 1062
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Do the work of rollback(): replay the 'undo' journal.

        Refuses (without *force*) to roll back a commit when the working
        directory is not checked out at tip, since that could lose data.
        """
        ui = self.ui
        try:
            # undo.desc records the pre-transaction length and description
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # a working-directory parent was stripped: restore the saved
            # dirstate and named branch as well
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1127 1132
1128 1133 def invalidatecaches(self):
1129 1134
1130 1135 if '_tagscache' in vars(self):
1131 1136 # can't use delattr on proxy
1132 1137 del self.__dict__['_tagscache']
1133 1138
1134 1139 self.unfiltered()._branchcaches.clear()
1135 1140 self.invalidatevolatilesets()
1136 1141
    def invalidatevolatilesets(self):
        # Drop caches that depend on repo filtering / obsolescence markers;
        # cheap enough to be called eagerly whenever those may have changed.
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1140 1145
1141 1146 def invalidatedirstate(self):
1142 1147 '''Invalidates the dirstate, causing the next call to dirstate
1143 1148 to check if it was modified since the last time it was read,
1144 1149 rereading it if it has.
1145 1150
1146 1151 This is different to dirstate.invalidate() that it doesn't always
1147 1152 rereads the dirstate. Use dirstate.invalidate() if you want to
1148 1153 explicitly read the dirstate again (i.e. restoring it to a previous
1149 1154 known good state).'''
1150 1155 if hasunfilteredcache(self, 'dirstate'):
1151 1156 for k in self.dirstate._filecache:
1152 1157 try:
1153 1158 delattr(self.dirstate, k)
1154 1159 except AttributeError:
1155 1160 pass
1156 1161 delattr(self.unfiltered(), 'dirstate')
1157 1162
1158 1163 def invalidate(self):
1159 1164 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1160 1165 for k in self._filecache:
1161 1166 # dirstate is invalidated separately in invalidatedirstate()
1162 1167 if k == 'dirstate':
1163 1168 continue
1164 1169
1165 1170 try:
1166 1171 delattr(unfiltered, k)
1167 1172 except AttributeError:
1168 1173 pass
1169 1174 self.invalidatecaches()
1170 1175 self.store.invalidatecaches()
1171 1176
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1178 1183
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire ``lockname`` in ``vfs`` and return the lock object.

        First tries a non-blocking acquisition; if the lock is held and
        ``wait`` is true, retries with the configured timeout while
        telling the user who holds the lock. ``releasefn`` runs on
        release, ``acquirefn`` (if given) runs after acquisition.
        Raises error.LockHeld when ``wait`` is false and the lock is busy.
        """
        try:
            # timeout=0: fail immediately instead of blocking
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1195 1200
1196 1201 def _afterlock(self, callback):
1197 1202 """add a callback to be run when the repository is fully unlocked
1198 1203
1199 1204 The callback will be executed when the outermost lock is released
1200 1205 (with wlock being higher level than 'lock')."""
1201 1206 for ref in (self._wlockref, self._lockref):
1202 1207 l = ref and ref()
1203 1208 if l and l.held:
1204 1209 l.postrelease.append(callback)
1205 1210 break
1206 1211 else: # no lock have been found.
1207 1212 callback()
1208 1213
1209 1214 def lock(self, wait=True):
1210 1215 '''Lock the repository store (.hg/store) and return a weak reference
1211 1216 to the lock. Use this before modifying the store (e.g. committing or
1212 1217 stripping). If you are opening a transaction, get a lock as well.)
1213 1218
1214 1219 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1215 1220 'wlock' first to avoid a dead-lock hazard.'''
1216 1221 l = self._lockref and self._lockref()
1217 1222 if l is not None and l.held:
1218 1223 l.lock()
1219 1224 return l
1220 1225
1221 1226 def unlock():
1222 1227 for k, ce in self._filecache.items():
1223 1228 if k == 'dirstate' or k not in self.__dict__:
1224 1229 continue
1225 1230 ce.refresh()
1226 1231
1227 1232 l = self._lock(self.svfs, "lock", wait, unlock,
1228 1233 self.invalidate, _('repository %s') % self.origroot)
1229 1234 self._lockref = weakref.ref(l)
1230 1235 return l
1231 1236
1232 1237 def wlock(self, wait=True):
1233 1238 '''Lock the non-store parts of the repository (everything under
1234 1239 .hg except .hg/store) and return a weak reference to the lock.
1235 1240
1236 1241 Use this before modifying files in .hg.
1237 1242
1238 1243 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1239 1244 'wlock' first to avoid a dead-lock hazard.'''
1240 1245 l = self._wlockref and self._wlockref()
1241 1246 if l is not None and l.held:
1242 1247 l.lock()
1243 1248 return l
1244 1249
1245 1250 # We do not need to check for non-waiting lock aquisition. Such
1246 1251 # acquisition would not cause dead-lock as they would just fail.
1247 1252 if wait and (self.ui.configbool('devel', 'all')
1248 1253 or self.ui.configbool('devel', 'check-locks')):
1249 1254 l = self._lockref and self._lockref()
1250 1255 if l is not None and l.held:
1251 1256 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1252 1257
1253 1258 def unlock():
1254 1259 if self.dirstate.pendingparentchange():
1255 1260 self.dirstate.invalidate()
1256 1261 else:
1257 1262 self.dirstate.write()
1258 1263
1259 1264 self._filecache['dirstate'].refresh()
1260 1265
1261 1266 l = self._lock(self.vfs, "wlock", wait, unlock,
1262 1267 self.invalidatedirstate, _('working directory of %s') %
1263 1268 self.origroot)
1264 1269 self._wlockref = weakref.ref(l)
1265 1270 return l
1266 1271
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node to record in the manifest for this file.
        Appends the file name to ``changelist`` in place when its content
        or flags actually changed.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the context already points at a stored filelog revision; if it
            # matches a parent we can reuse it without writing anything
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1352 1357
1353 1358 @unfilteredmethod
1354 1359 def commit(self, text="", user=None, date=None, match=None, force=False,
1355 1360 editor=False, extra={}):
1356 1361 """Add a new revision to current repository.
1357 1362
1358 1363 Revision information is gathered from the working directory,
1359 1364 match can be used to filter the committed files. If editor is
1360 1365 supplied, it is called to get a commit message.
1361 1366 """
1362 1367
1363 1368 def fail(f, msg):
1364 1369 raise util.Abort('%s: %s' % (f, msg))
1365 1370
1366 1371 if not match:
1367 1372 match = matchmod.always(self.root, '')
1368 1373
1369 1374 if not force:
1370 1375 vdirs = []
1371 1376 match.explicitdir = vdirs.append
1372 1377 match.bad = fail
1373 1378
1374 1379 wlock = self.wlock()
1375 1380 try:
1376 1381 wctx = self[None]
1377 1382 merge = len(wctx.parents()) > 1
1378 1383
1379 1384 if not force and merge and not match.always():
1380 1385 raise util.Abort(_('cannot partially commit a merge '
1381 1386 '(do not specify files or patterns)'))
1382 1387
1383 1388 status = self.status(match=match, clean=force)
1384 1389 if force:
1385 1390 status.modified.extend(status.clean) # mq may commit clean files
1386 1391
1387 1392 # check subrepos
1388 1393 subs = []
1389 1394 commitsubs = set()
1390 1395 newstate = wctx.substate.copy()
1391 1396 # only manage subrepos and .hgsubstate if .hgsub is present
1392 1397 if '.hgsub' in wctx:
1393 1398 # we'll decide whether to track this ourselves, thanks
1394 1399 for c in status.modified, status.added, status.removed:
1395 1400 if '.hgsubstate' in c:
1396 1401 c.remove('.hgsubstate')
1397 1402
1398 1403 # compare current state to last committed state
1399 1404 # build new substate based on last committed state
1400 1405 oldstate = wctx.p1().substate
1401 1406 for s in sorted(newstate.keys()):
1402 1407 if not match(s):
1403 1408 # ignore working copy, use old state if present
1404 1409 if s in oldstate:
1405 1410 newstate[s] = oldstate[s]
1406 1411 continue
1407 1412 if not force:
1408 1413 raise util.Abort(
1409 1414 _("commit with new subrepo %s excluded") % s)
1410 1415 dirtyreason = wctx.sub(s).dirtyreason(True)
1411 1416 if dirtyreason:
1412 1417 if not self.ui.configbool('ui', 'commitsubrepos'):
1413 1418 raise util.Abort(dirtyreason,
1414 1419 hint=_("use --subrepos for recursive commit"))
1415 1420 subs.append(s)
1416 1421 commitsubs.add(s)
1417 1422 else:
1418 1423 bs = wctx.sub(s).basestate()
1419 1424 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1420 1425 if oldstate.get(s, (None, None, None))[1] != bs:
1421 1426 subs.append(s)
1422 1427
1423 1428 # check for removed subrepos
1424 1429 for p in wctx.parents():
1425 1430 r = [s for s in p.substate if s not in newstate]
1426 1431 subs += [s for s in r if match(s)]
1427 1432 if subs:
1428 1433 if (not match('.hgsub') and
1429 1434 '.hgsub' in (wctx.modified() + wctx.added())):
1430 1435 raise util.Abort(
1431 1436 _("can't commit subrepos without .hgsub"))
1432 1437 status.modified.insert(0, '.hgsubstate')
1433 1438
1434 1439 elif '.hgsub' in status.removed:
1435 1440 # clean up .hgsubstate when .hgsub is removed
1436 1441 if ('.hgsubstate' in wctx and
1437 1442 '.hgsubstate' not in (status.modified + status.added +
1438 1443 status.removed)):
1439 1444 status.removed.insert(0, '.hgsubstate')
1440 1445
1441 1446 # make sure all explicit patterns are matched
1442 1447 if not force and match.files():
1443 1448 matched = set(status.modified + status.added + status.removed)
1444 1449
1445 1450 for f in match.files():
1446 1451 f = self.dirstate.normalize(f)
1447 1452 if f == '.' or f in matched or f in wctx.substate:
1448 1453 continue
1449 1454 if f in status.deleted:
1450 1455 fail(f, _('file not found!'))
1451 1456 if f in vdirs: # visited directory
1452 1457 d = f + '/'
1453 1458 for mf in matched:
1454 1459 if mf.startswith(d):
1455 1460 break
1456 1461 else:
1457 1462 fail(f, _("no match under directory!"))
1458 1463 elif f not in self.dirstate:
1459 1464 fail(f, _("file not tracked!"))
1460 1465
1461 1466 cctx = context.workingcommitctx(self, status,
1462 1467 text, user, date, extra)
1463 1468
1464 1469 if (not force and not extra.get("close") and not merge
1465 1470 and not cctx.files()
1466 1471 and wctx.branch() == wctx.p1().branch()):
1467 1472 return None
1468 1473
1469 1474 if merge and cctx.deleted():
1470 1475 raise util.Abort(_("cannot commit merge with missing files"))
1471 1476
1472 1477 ms = mergemod.mergestate(self)
1473 1478 for f in status.modified:
1474 1479 if f in ms and ms[f] == 'u':
1475 1480 raise util.Abort(_('unresolved merge conflicts '
1476 1481 '(see "hg help resolve")'))
1477 1482
1478 1483 if editor:
1479 1484 cctx._text = editor(self, cctx, subs)
1480 1485 edited = (text != cctx._text)
1481 1486
1482 1487 # Save commit message in case this transaction gets rolled back
1483 1488 # (e.g. by a pretxncommit hook). Leave the content alone on
1484 1489 # the assumption that the user will use the same editor again.
1485 1490 msgfn = self.savecommitmessage(cctx._text)
1486 1491
1487 1492 # commit subs and write new state
1488 1493 if subs:
1489 1494 for s in sorted(commitsubs):
1490 1495 sub = wctx.sub(s)
1491 1496 self.ui.status(_('committing subrepository %s\n') %
1492 1497 subrepo.subrelpath(sub))
1493 1498 sr = sub.commit(cctx._text, user, date)
1494 1499 newstate[s] = (newstate[s][0], sr)
1495 1500 subrepo.writestate(self, newstate)
1496 1501
1497 1502 p1, p2 = self.dirstate.parents()
1498 1503 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1499 1504 try:
1500 1505 self.hook("precommit", throw=True, parent1=hookp1,
1501 1506 parent2=hookp2)
1502 1507 ret = self.commitctx(cctx, True)
1503 1508 except: # re-raises
1504 1509 if edited:
1505 1510 self.ui.write(
1506 1511 _('note: commit message saved in %s\n') % msgfn)
1507 1512 raise
1508 1513
1509 1514 # update bookmarks, dirstate and mergestate
1510 1515 bookmarks.update(self, [p1, p2], ret)
1511 1516 cctx.markcommitted(ret)
1512 1517 ms.reset()
1513 1518 finally:
1514 1519 wlock.release()
1515 1520
1516 1521 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1517 1522 # hack for command that use a temporary commit (eg: histedit)
1518 1523 # temporary commit got stripped before hook release
1519 1524 if node in self:
1520 1525 self.hook("commit", node=node, parent1=parent1,
1521 1526 parent2=parent2)
1522 1527 self._afterlock(commithook)
1523 1528 return ret
1524 1529
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the new changelog node. When ``error`` is true, IOErrors
        while committing individual files are fatal even for missing
        files.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        # a missing file (ENOENT) is tolerated unless the
                        # caller asked for strict error handling
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1611 1616
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            # only flush if the cache was actually loaded/modified
            self._phasecache.write()
1629 1634
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        Counterpart of destroying(), which runs before the destruction.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1663 1668
1664 1669 def walk(self, match, node=None):
1665 1670 '''
1666 1671 walk recursively through the directory tree or a given
1667 1672 changeset, finding all files matched by the match
1668 1673 function
1669 1674 '''
1670 1675 return self[node].walk(match)
1671 1676
1672 1677 def status(self, node1='.', node2=None, match=None,
1673 1678 ignored=False, clean=False, unknown=False,
1674 1679 listsubrepos=False):
1675 1680 '''a convenience method that calls node1.status(node2)'''
1676 1681 return self[node1].status(node2, match, ignored, clean, unknown,
1677 1682 listsubrepos)
1678 1683
1679 1684 def heads(self, start=None):
1680 1685 heads = self.changelog.heads(start)
1681 1686 # sort the output in rev descending order
1682 1687 return sorted(heads, key=self.changelog.rev, reverse=True)
1683 1688
1684 1689 def branchheads(self, branch=None, start=None, closed=False):
1685 1690 '''return a (possibly filtered) list of heads for the given branch
1686 1691
1687 1692 Heads are returned in topological order, from newest to oldest.
1688 1693 If branch is None, use the dirstate branch.
1689 1694 If start is not None, return only heads reachable from start.
1690 1695 If closed is True, return heads that are marked as closed as well.
1691 1696 '''
1692 1697 if branch is None:
1693 1698 branch = self[None].branch()
1694 1699 branches = self.branchmap()
1695 1700 if branch not in branches:
1696 1701 return []
1697 1702 # the cache returns heads ordered lowest to highest
1698 1703 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1699 1704 if start is not None:
1700 1705 # filter out the heads that cannot be reached from startrev
1701 1706 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1702 1707 bheads = [h for h in bheads if h in fbheads]
1703 1708 return bheads
1704 1709
1705 1710 def branches(self, nodes):
1706 1711 if not nodes:
1707 1712 nodes = [self.changelog.tip()]
1708 1713 b = []
1709 1714 for n in nodes:
1710 1715 t = n
1711 1716 while True:
1712 1717 p = self.changelog.parents(n)
1713 1718 if p[1] != nullid or p[0] == nullid:
1714 1719 b.append((t, n, p[0], p[1]))
1715 1720 break
1716 1721 n = p[0]
1717 1722 return b
1718 1723
1719 1724 def between(self, pairs):
1720 1725 r = []
1721 1726
1722 1727 for top, bottom in pairs:
1723 1728 n, l, i = top, [], 0
1724 1729 f = 1
1725 1730
1726 1731 while n != bottom and n != nullid:
1727 1732 p = self.changelog.parents(n)[0]
1728 1733 if i == f:
1729 1734 l.append(n)
1730 1735 f = f * 2
1731 1736 n = p
1732 1737 i += 1
1733 1738
1734 1739 r.append(l)
1735 1740
1736 1741 return r
1737 1742
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # intentionally a no-op in core; extensions hook in here
        pass
1744 1749
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        # property cache: the hooks container is created once per
        # (unfiltered) repo instance and shared by all callers
        return util.hooks()
1751 1756
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from ``remote``.

        Parses the stream_out wire protocol response (status code, then a
        file count/byte total header, then one name/size header plus raw
        data per file), writes the files straight into the local store,
        updates the requirements file, and seeds the branch cache from the
        remote branchmap when available. Returns len(heads) + 1.
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                # guard against a zero/negative clock delta in the rate below
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1861 1866
1862 1867 def clone(self, remote, heads=[], stream=None):
1863 1868 '''clone remote repository.
1864 1869
1865 1870 keyword arguments:
1866 1871 heads: list of revs to clone (forces use of pull)
1867 1872 stream: use streaming clone if possible'''
1868 1873
1869 1874 # now, all clients that can request uncompressed clones can
1870 1875 # read repo formats supported by all servers that can serve
1871 1876 # them.
1872 1877
1873 1878 # if revlog format changes, client will have to check version
1874 1879 # and format flags on "stream" capability, and use
1875 1880 # uncompressed only if compatible.
1876 1881
1877 1882 if stream is None:
1878 1883 # if the server explicitly prefers to stream (for fast LANs)
1879 1884 stream = remote.capable('stream-preferred')
1880 1885
1881 1886 if stream and not heads:
1882 1887 # 'stream' means remote revlog format is revlogv1 only
1883 1888 if remote.capable('stream'):
1884 1889 self.stream_in(remote, set(('revlogv1',)))
1885 1890 else:
1886 1891 # otherwise, 'streamreqs' contains the remote revlog format
1887 1892 streamreqs = remote.capable('streamreqs')
1888 1893 if streamreqs:
1889 1894 streamreqs = set(streamreqs.split(','))
1890 1895 # if we support it, stream in and adjust our requirements
1891 1896 if not streamreqs - self.supportedformats:
1892 1897 self.stream_in(remote, streamreqs)
1893 1898
1894 1899 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1895 1900 try:
1896 1901 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1897 1902 ret = exchange.pull(self, remote, heads).cgresult
1898 1903 finally:
1899 1904 self.ui.restoreconfig(quiet)
1900 1905 return ret
1901 1906
    def pushkey(self, namespace, key, old, new):
        """Update a pushkey namespace entry (bookmarks, phases, ...).

        Fires the 'prepushkey' hook first; a HookAbort cancels the update
        and returns False. Otherwise performs the push and schedules the
        'pushkey' hook to run once the repository is fully unlocked.
        Returns the pushkey backend's result.
        """
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                # expose pending changes to the hook via a callback
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
1927 1932
1928 1933 def listkeys(self, namespace):
1929 1934 self.hook('prelistkeys', throw=True, namespace=namespace)
1930 1935 self.ui.debug('listing keys for "%s"\n' % namespace)
1931 1936 values = pushkey.list(self, namespace)
1932 1937 self.hook('listkeys', namespace=namespace, values=values)
1933 1938 return values
1934 1939
1935 1940 def debugwireargs(self, one, two, three=None, four=None, five=None):
1936 1941 '''used to test argument passing over the wire'''
1937 1942 return "%s %s %s %s %s" % (one, two, three, four, five)
1938 1943
1939 1944 def savecommitmessage(self, text):
1940 1945 fp = self.vfs('last-message.txt', 'wb')
1941 1946 try:
1942 1947 fp.write(text)
1943 1948 finally:
1944 1949 fp.close()
1945 1950 return self.pathto(fp.name[len(self.root) + 1:])
1946 1951
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (vfs, src, dest) in *files*.

    The triples are copied eagerly so the returned closure holds no
    reference back to the caller's list.
    """
    pending = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return renameall
1957 1962
def undoname(fn):
    """Map a journal file path to the matching undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the leading 'journal' is rewritten; suffixes are preserved
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1962 1967
def instance(ui, path, create):
    """Instantiate a localrepository for *path* (URL-style paths accepted)."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
1965 1970
def islocal(path):
    # this module always backs a directly-accessible local repository
    return True
@@ -1,985 +1,1003 b''
1 1 test merge-tools configuration - mostly exercising filemerge.py
2 2
3 3 $ unset HGMERGE # make sure HGMERGE doesn't interfere with the test
4 4 $ hg init
5 5
6 6 revision 0
7 7
8 8 $ echo "revision 0" > f
9 9 $ echo "space" >> f
10 10 $ hg commit -Am "revision 0"
11 11 adding f
12 12
13 13 revision 1
14 14
15 15 $ echo "revision 1" > f
16 16 $ echo "space" >> f
17 17 $ hg commit -Am "revision 1"
18 18 $ hg update 0 > /dev/null
19 19
20 20 revision 2
21 21
22 22 $ echo "revision 2" > f
23 23 $ echo "space" >> f
24 24 $ hg commit -Am "revision 2"
25 25 created new head
26 26 $ hg update 0 > /dev/null
27 27
28 28 revision 3 - simple to merge
29 29
30 30 $ echo "revision 3" >> f
31 31 $ hg commit -Am "revision 3"
32 32 created new head
33 33
34 34 revision 4 - hard to merge
35 35
36 36 $ hg update 0 > /dev/null
37 37 $ echo "revision 4" > f
38 38 $ hg commit -Am "revision 4"
39 39 created new head
40 40
41 41 $ echo "[merge-tools]" > .hg/hgrc
42 42
43 43 $ beforemerge() {
44 44 > cat .hg/hgrc
45 45 > echo "# hg update -C 1"
46 46 > hg update -C 1 > /dev/null
47 47 > }
48 48 $ aftermerge() {
49 49 > echo "# cat f"
50 50 > cat f
51 51 > echo "# hg stat"
52 52 > hg stat
53 53 > rm -f f.orig
54 54 > }
55 55
56 56 Tool selection
57 57
58 58 default is internal merge:
59 59
60 60 $ beforemerge
61 61 [merge-tools]
62 62 # hg update -C 1
63 63
64 64 hg merge -r 2
65 65 override $PATH to ensure hgmerge not visible; use $PYTHON in case we're
66 66 running from a devel copy, not a temp installation
67 67
68 68 $ PATH="$BINDIR" $PYTHON "$BINDIR"/hg merge -r 2
69 69 merging f
70 70 warning: conflicts during merge.
71 71 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
72 72 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
73 73 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
74 74 [1]
75 75 $ aftermerge
76 76 # cat f
77 77 <<<<<<< local: ef83787e2614 - test: revision 1
78 78 revision 1
79 79 =======
80 80 revision 2
81 81 >>>>>>> other: 0185f4e0cf02 - test: revision 2
82 82 space
83 83 # hg stat
84 84 M f
85 85 ? f.orig
86 86
87 87 simplest hgrc using false for merge:
88 88
89 89 $ echo "false.whatever=" >> .hg/hgrc
90 90 $ beforemerge
91 91 [merge-tools]
92 92 false.whatever=
93 93 # hg update -C 1
94 94 $ hg merge -r 2
95 95 merging f
96 96 merging f failed!
97 97 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
98 98 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
99 99 [1]
100 100 $ aftermerge
101 101 # cat f
102 102 revision 1
103 103 space
104 104 # hg stat
105 105 M f
106 106 ? f.orig
107 107
108 108 #if unix-permissions
109 109
110 110 unexecutable file in $PATH shouldn't be found:
111 111
112 112 $ echo "echo fail" > false
113 113 $ hg up -qC 1
114 114 $ PATH="`pwd`:$BINDIR" $PYTHON "$BINDIR"/hg merge -r 2
115 115 merging f
116 116 warning: conflicts during merge.
117 117 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
118 118 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
119 119 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
120 120 [1]
121 121 $ rm false
122 122
123 123 #endif
124 124
125 125 executable directory in $PATH shouldn't be found:
126 126
127 127 $ mkdir false
128 128 $ hg up -qC 1
129 129 $ PATH="`pwd`:$BINDIR" $PYTHON "$BINDIR"/hg merge -r 2
130 130 merging f
131 131 warning: conflicts during merge.
132 132 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
133 133 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
134 134 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
135 135 [1]
136 136 $ rmdir false
137 137
138 138 true with higher .priority gets precedence:
139 139
140 140 $ echo "true.priority=1" >> .hg/hgrc
141 141 $ beforemerge
142 142 [merge-tools]
143 143 false.whatever=
144 144 true.priority=1
145 145 # hg update -C 1
146 146 $ hg merge -r 2
147 147 merging f
148 148 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
149 149 (branch merge, don't forget to commit)
150 150 $ aftermerge
151 151 # cat f
152 152 revision 1
153 153 space
154 154 # hg stat
155 155 M f
156 156
157 157 unless lowered on command line:
158 158
159 159 $ beforemerge
160 160 [merge-tools]
161 161 false.whatever=
162 162 true.priority=1
163 163 # hg update -C 1
164 164 $ hg merge -r 2 --config merge-tools.true.priority=-7
165 165 merging f
166 166 merging f failed!
167 167 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
168 168 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
169 169 [1]
170 170 $ aftermerge
171 171 # cat f
172 172 revision 1
173 173 space
174 174 # hg stat
175 175 M f
176 176 ? f.orig
177 177
178 178 or false set higher on command line:
179 179
180 180 $ beforemerge
181 181 [merge-tools]
182 182 false.whatever=
183 183 true.priority=1
184 184 # hg update -C 1
185 185 $ hg merge -r 2 --config merge-tools.false.priority=117
186 186 merging f
187 187 merging f failed!
188 188 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
189 189 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
190 190 [1]
191 191 $ aftermerge
192 192 # cat f
193 193 revision 1
194 194 space
195 195 # hg stat
196 196 M f
197 197 ? f.orig
198 198
199 199 or true.executable not found in PATH:
200 200
201 201 $ beforemerge
202 202 [merge-tools]
203 203 false.whatever=
204 204 true.priority=1
205 205 # hg update -C 1
206 206 $ hg merge -r 2 --config merge-tools.true.executable=nonexistentmergetool
207 207 merging f
208 208 merging f failed!
209 209 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
210 210 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
211 211 [1]
212 212 $ aftermerge
213 213 # cat f
214 214 revision 1
215 215 space
216 216 # hg stat
217 217 M f
218 218 ? f.orig
219 219
220 220 or true.executable with bogus path:
221 221
222 222 $ beforemerge
223 223 [merge-tools]
224 224 false.whatever=
225 225 true.priority=1
226 226 # hg update -C 1
227 227 $ hg merge -r 2 --config merge-tools.true.executable=/nonexistent/mergetool
228 228 merging f
229 229 merging f failed!
230 230 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
231 231 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
232 232 [1]
233 233 $ aftermerge
234 234 # cat f
235 235 revision 1
236 236 space
237 237 # hg stat
238 238 M f
239 239 ? f.orig
240 240
241 241 but true.executable set to cat found in PATH works:
242 242
243 243 $ echo "true.executable=cat" >> .hg/hgrc
244 244 $ beforemerge
245 245 [merge-tools]
246 246 false.whatever=
247 247 true.priority=1
248 248 true.executable=cat
249 249 # hg update -C 1
250 250 $ hg merge -r 2
251 251 merging f
252 252 revision 1
253 253 space
254 254 revision 0
255 255 space
256 256 revision 2
257 257 space
258 258 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
259 259 (branch merge, don't forget to commit)
260 260 $ aftermerge
261 261 # cat f
262 262 revision 1
263 263 space
264 264 # hg stat
265 265 M f
266 266
267 267 and true.executable set to cat with path works:
268 268
269 269 $ beforemerge
270 270 [merge-tools]
271 271 false.whatever=
272 272 true.priority=1
273 273 true.executable=cat
274 274 # hg update -C 1
275 275 $ hg merge -r 2 --config merge-tools.true.executable=cat
276 276 merging f
277 277 revision 1
278 278 space
279 279 revision 0
280 280 space
281 281 revision 2
282 282 space
283 283 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
284 284 (branch merge, don't forget to commit)
285 285 $ aftermerge
286 286 # cat f
287 287 revision 1
288 288 space
289 289 # hg stat
290 290 M f
291 291
292 292 #if unix-permissions
293 293
294 294 environment variables in true.executable are handled:
295 295
296 296 $ echo 'echo "custom merge tool"' > .hg/merge.sh
297 297 $ beforemerge
298 298 [merge-tools]
299 299 false.whatever=
300 300 true.priority=1
301 301 true.executable=cat
302 302 # hg update -C 1
303 303 $ hg --config merge-tools.true.executable='sh' \
304 304 > --config merge-tools.true.args=.hg/merge.sh \
305 305 > merge -r 2
306 306 merging f
307 307 custom merge tool
308 308 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
309 309 (branch merge, don't forget to commit)
310 310 $ aftermerge
311 311 # cat f
312 312 revision 1
313 313 space
314 314 # hg stat
315 315 M f
316 316
317 317 #endif
318 318
319 319 Tool selection and merge-patterns
320 320
321 321 merge-patterns specifies new tool false:
322 322
323 323 $ beforemerge
324 324 [merge-tools]
325 325 false.whatever=
326 326 true.priority=1
327 327 true.executable=cat
328 328 # hg update -C 1
329 329 $ hg merge -r 2 --config merge-patterns.f=false
330 330 merging f
331 331 merging f failed!
332 332 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
333 333 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
334 334 [1]
335 335 $ aftermerge
336 336 # cat f
337 337 revision 1
338 338 space
339 339 # hg stat
340 340 M f
341 341 ? f.orig
342 342
343 343 merge-patterns specifies executable not found in PATH and gets warning:
344 344
345 345 $ beforemerge
346 346 [merge-tools]
347 347 false.whatever=
348 348 true.priority=1
349 349 true.executable=cat
350 350 # hg update -C 1
351 351 $ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=nonexistentmergetool
352 352 couldn't find merge tool true specified for f
353 353 merging f
354 354 merging f failed!
355 355 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
356 356 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
357 357 [1]
358 358 $ aftermerge
359 359 # cat f
360 360 revision 1
361 361 space
362 362 # hg stat
363 363 M f
364 364 ? f.orig
365 365
366 366 merge-patterns specifies executable with bogus path and gets warning:
367 367
368 368 $ beforemerge
369 369 [merge-tools]
370 370 false.whatever=
371 371 true.priority=1
372 372 true.executable=cat
373 373 # hg update -C 1
374 374 $ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=/nonexistent/mergetool
375 375 couldn't find merge tool true specified for f
376 376 merging f
377 377 merging f failed!
378 378 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
379 379 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
380 380 [1]
381 381 $ aftermerge
382 382 # cat f
383 383 revision 1
384 384 space
385 385 # hg stat
386 386 M f
387 387 ? f.orig
388 388
389 389 ui.merge overrules priority
390 390
391 391 ui.merge specifies false:
392 392
393 393 $ beforemerge
394 394 [merge-tools]
395 395 false.whatever=
396 396 true.priority=1
397 397 true.executable=cat
398 398 # hg update -C 1
399 399 $ hg merge -r 2 --config ui.merge=false
400 400 merging f
401 401 merging f failed!
402 402 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
403 403 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
404 404 [1]
405 405 $ aftermerge
406 406 # cat f
407 407 revision 1
408 408 space
409 409 # hg stat
410 410 M f
411 411 ? f.orig
412 412
413 413 ui.merge specifies internal:fail:
414 414
415 415 $ beforemerge
416 416 [merge-tools]
417 417 false.whatever=
418 418 true.priority=1
419 419 true.executable=cat
420 420 # hg update -C 1
421 421 $ hg merge -r 2 --config ui.merge=internal:fail
422 422 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
423 423 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
424 424 [1]
425 425 $ aftermerge
426 426 # cat f
427 427 revision 1
428 428 space
429 429 # hg stat
430 430 M f
431 431
432 432 ui.merge specifies :local (without internal prefix):
433 433
434 434 $ beforemerge
435 435 [merge-tools]
436 436 false.whatever=
437 437 true.priority=1
438 438 true.executable=cat
439 439 # hg update -C 1
440 440 $ hg merge -r 2 --config ui.merge=:local
441 441 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
442 442 (branch merge, don't forget to commit)
443 443 $ aftermerge
444 444 # cat f
445 445 revision 1
446 446 space
447 447 # hg stat
448 448 M f
449 449
450 450 ui.merge specifies internal:other:
451 451
452 452 $ beforemerge
453 453 [merge-tools]
454 454 false.whatever=
455 455 true.priority=1
456 456 true.executable=cat
457 457 # hg update -C 1
458 458 $ hg merge -r 2 --config ui.merge=internal:other
459 459 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
460 460 (branch merge, don't forget to commit)
461 461 $ aftermerge
462 462 # cat f
463 463 revision 2
464 464 space
465 465 # hg stat
466 466 M f
467 467
468 468 ui.merge specifies internal:prompt:
469 469
470 470 $ beforemerge
471 471 [merge-tools]
472 472 false.whatever=
473 473 true.priority=1
474 474 true.executable=cat
475 475 # hg update -C 1
476 476 $ hg merge -r 2 --config ui.merge=internal:prompt
477 477 no tool found to merge f
478 478 keep (l)ocal or take (o)ther? l
479 479 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
480 480 (branch merge, don't forget to commit)
481 481 $ aftermerge
482 482 # cat f
483 483 revision 1
484 484 space
485 485 # hg stat
486 486 M f
487 487
488 488 ui.merge specifies internal:dump:
489 489
490 490 $ beforemerge
491 491 [merge-tools]
492 492 false.whatever=
493 493 true.priority=1
494 494 true.executable=cat
495 495 # hg update -C 1
496 496 $ hg merge -r 2 --config ui.merge=internal:dump
497 497 merging f
498 498 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
499 499 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
500 500 [1]
501 501 $ aftermerge
502 502 # cat f
503 503 revision 1
504 504 space
505 505 # hg stat
506 506 M f
507 507 ? f.base
508 508 ? f.local
509 509 ? f.orig
510 510 ? f.other
511 511
512 512 f.base:
513 513
514 514 $ cat f.base
515 515 revision 0
516 516 space
517 517
518 518 f.local:
519 519
520 520 $ cat f.local
521 521 revision 1
522 522 space
523 523
524 524 f.other:
525 525
526 526 $ cat f.other
527 527 revision 2
528 528 space
529 529 $ rm f.base f.local f.other
530 530
531 531 ui.merge specifies internal:other but is overruled by pattern for false:
532 532
533 533 $ beforemerge
534 534 [merge-tools]
535 535 false.whatever=
536 536 true.priority=1
537 537 true.executable=cat
538 538 # hg update -C 1
539 539 $ hg merge -r 2 --config ui.merge=internal:other --config merge-patterns.f=false
540 540 merging f
541 541 merging f failed!
542 542 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
543 543 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
544 544 [1]
545 545 $ aftermerge
546 546 # cat f
547 547 revision 1
548 548 space
549 549 # hg stat
550 550 M f
551 551 ? f.orig
552 552
553 553 Premerge
554 554
555 555 ui.merge specifies internal:other but is overruled by --tool=false
556 556
557 557 $ beforemerge
558 558 [merge-tools]
559 559 false.whatever=
560 560 true.priority=1
561 561 true.executable=cat
562 562 # hg update -C 1
563 563 $ hg merge -r 2 --config ui.merge=internal:other --tool=false
564 564 merging f
565 565 merging f failed!
566 566 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
567 567 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
568 568 [1]
569 569 $ aftermerge
570 570 # cat f
571 571 revision 1
572 572 space
573 573 # hg stat
574 574 M f
575 575 ? f.orig
576 576
577 577 HGMERGE specifies internal:other but is overruled by --tool=false
578 578
579 579 $ HGMERGE=internal:other ; export HGMERGE
580 580 $ beforemerge
581 581 [merge-tools]
582 582 false.whatever=
583 583 true.priority=1
584 584 true.executable=cat
585 585 # hg update -C 1
586 586 $ hg merge -r 2 --tool=false
587 587 merging f
588 588 merging f failed!
589 589 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
590 590 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
591 591 [1]
592 592 $ aftermerge
593 593 # cat f
594 594 revision 1
595 595 space
596 596 # hg stat
597 597 M f
598 598 ? f.orig
599 599
600 600 $ unset HGMERGE # make sure HGMERGE doesn't interfere with remaining tests
601 601
602 602 update is a merge ...
603 603
604 604 $ beforemerge
605 605 [merge-tools]
606 606 false.whatever=
607 607 true.priority=1
608 608 true.executable=cat
609 609 # hg update -C 1
610 610 $ hg update -q 0
611 $ f -s f
612 f: size=17
613 $ touch -t 200001010000 f
614 $ hg status f
611 615 $ hg revert -q -r 1 .
616 $ f -s f
617 f: size=17
618 $ touch -t 200001010000 f
619 $ hg status f
620 M f
612 621 $ hg update -r 2
613 622 merging f
614 623 revision 1
615 624 space
616 625 revision 0
617 626 space
618 627 revision 2
619 628 space
620 629 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
621 630 $ aftermerge
622 631 # cat f
623 632 revision 1
624 633 space
625 634 # hg stat
626 635 M f
627 636
628 637 update should also have --tool
629 638
630 639 $ beforemerge
631 640 [merge-tools]
632 641 false.whatever=
633 642 true.priority=1
634 643 true.executable=cat
635 644 # hg update -C 1
636 645 $ hg update -q 0
646 $ f -s f
647 f: size=17
648 $ touch -t 200001010000 f
649 $ hg status f
637 650 $ hg revert -q -r 1 .
651 $ f -s f
652 f: size=17
653 $ touch -t 200001010000 f
654 $ hg status f
655 M f
638 656 $ hg update -r 2 --tool false
639 657 merging f
640 658 merging f failed!
641 659 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
642 660 use 'hg resolve' to retry unresolved file merges
643 661 [1]
644 662 $ aftermerge
645 663 # cat f
646 664 revision 1
647 665 space
648 666 # hg stat
649 667 M f
650 668 ? f.orig
651 669
652 670 Default is silent simplemerge:
653 671
654 672 $ beforemerge
655 673 [merge-tools]
656 674 false.whatever=
657 675 true.priority=1
658 676 true.executable=cat
659 677 # hg update -C 1
660 678 $ hg merge -r 3
661 679 merging f
662 680 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
663 681 (branch merge, don't forget to commit)
664 682 $ aftermerge
665 683 # cat f
666 684 revision 1
667 685 space
668 686 revision 3
669 687 # hg stat
670 688 M f
671 689
672 690 .premerge=True is same:
673 691
674 692 $ beforemerge
675 693 [merge-tools]
676 694 false.whatever=
677 695 true.priority=1
678 696 true.executable=cat
679 697 # hg update -C 1
680 698 $ hg merge -r 3 --config merge-tools.true.premerge=True
681 699 merging f
682 700 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
683 701 (branch merge, don't forget to commit)
684 702 $ aftermerge
685 703 # cat f
686 704 revision 1
687 705 space
688 706 revision 3
689 707 # hg stat
690 708 M f
691 709
692 710 .premerge=False executes merge-tool:
693 711
694 712 $ beforemerge
695 713 [merge-tools]
696 714 false.whatever=
697 715 true.priority=1
698 716 true.executable=cat
699 717 # hg update -C 1
700 718 $ hg merge -r 3 --config merge-tools.true.premerge=False
701 719 merging f
702 720 revision 1
703 721 space
704 722 revision 0
705 723 space
706 724 revision 0
707 725 space
708 726 revision 3
709 727 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
710 728 (branch merge, don't forget to commit)
711 729 $ aftermerge
712 730 # cat f
713 731 revision 1
714 732 space
715 733 # hg stat
716 734 M f
717 735
718 736 premerge=keep keeps conflict markers in:
719 737
720 738 $ beforemerge
721 739 [merge-tools]
722 740 false.whatever=
723 741 true.priority=1
724 742 true.executable=cat
725 743 # hg update -C 1
726 744 $ hg merge -r 4 --config merge-tools.true.premerge=keep
727 745 merging f
728 746 <<<<<<< local: ef83787e2614 - test: revision 1
729 747 revision 1
730 748 space
731 749 =======
732 750 revision 4
733 751 >>>>>>> other: 81448d39c9a0 - test: revision 4
734 752 revision 0
735 753 space
736 754 revision 4
737 755 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
738 756 (branch merge, don't forget to commit)
739 757 $ aftermerge
740 758 # cat f
741 759 <<<<<<< local: ef83787e2614 - test: revision 1
742 760 revision 1
743 761 space
744 762 =======
745 763 revision 4
746 764 >>>>>>> other: 81448d39c9a0 - test: revision 4
747 765 # hg stat
748 766 M f
749 767
750 768 premerge=keep-merge3 keeps conflict markers with base content:
751 769
752 770 $ beforemerge
753 771 [merge-tools]
754 772 false.whatever=
755 773 true.priority=1
756 774 true.executable=cat
757 775 # hg update -C 1
758 776 $ hg merge -r 4 --config merge-tools.true.premerge=keep-merge3
759 777 merging f
760 778 <<<<<<< local: ef83787e2614 - test: revision 1
761 779 revision 1
762 780 space
763 781 ||||||| base
764 782 revision 0
765 783 space
766 784 =======
767 785 revision 4
768 786 >>>>>>> other: 81448d39c9a0 - test: revision 4
769 787 revision 0
770 788 space
771 789 revision 4
772 790 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
773 791 (branch merge, don't forget to commit)
774 792 $ aftermerge
775 793 # cat f
776 794 <<<<<<< local: ef83787e2614 - test: revision 1
777 795 revision 1
778 796 space
779 797 ||||||| base
780 798 revision 0
781 799 space
782 800 =======
783 801 revision 4
784 802 >>>>>>> other: 81448d39c9a0 - test: revision 4
785 803 # hg stat
786 804 M f
787 805
788 806
789 807 Tool execution
790 808
791 809 set tools.args explicit to include $base $local $other $output:
792 810
793 811 $ beforemerge
794 812 [merge-tools]
795 813 false.whatever=
796 814 true.priority=1
797 815 true.executable=cat
798 816 # hg update -C 1
799 817 $ hg merge -r 2 --config merge-tools.true.executable=head --config merge-tools.true.args='$base $local $other $output' \
800 818 > | sed 's,==> .* <==,==> ... <==,g'
801 819 merging f
802 820 ==> ... <==
803 821 revision 0
804 822 space
805 823
806 824 ==> ... <==
807 825 revision 1
808 826 space
809 827
810 828 ==> ... <==
811 829 revision 2
812 830 space
813 831
814 832 ==> ... <==
815 833 revision 1
816 834 space
817 835 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
818 836 (branch merge, don't forget to commit)
819 837 $ aftermerge
820 838 # cat f
821 839 revision 1
822 840 space
823 841 # hg stat
824 842 M f
825 843
826 844 Merge with "echo mergeresult > $local":
827 845
828 846 $ beforemerge
829 847 [merge-tools]
830 848 false.whatever=
831 849 true.priority=1
832 850 true.executable=cat
833 851 # hg update -C 1
834 852 $ hg merge -r 2 --config merge-tools.true.executable=echo --config merge-tools.true.args='mergeresult > $local'
835 853 merging f
836 854 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
837 855 (branch merge, don't forget to commit)
838 856 $ aftermerge
839 857 # cat f
840 858 mergeresult
841 859 # hg stat
842 860 M f
843 861
844 862 - and $local is the file f:
845 863
846 864 $ beforemerge
847 865 [merge-tools]
848 866 false.whatever=
849 867 true.priority=1
850 868 true.executable=cat
851 869 # hg update -C 1
852 870 $ hg merge -r 2 --config merge-tools.true.executable=echo --config merge-tools.true.args='mergeresult > f'
853 871 merging f
854 872 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
855 873 (branch merge, don't forget to commit)
856 874 $ aftermerge
857 875 # cat f
858 876 mergeresult
859 877 # hg stat
860 878 M f
861 879
862 880 Merge with "echo mergeresult > $output" - the variable is a bit magic:
863 881
864 882 $ beforemerge
865 883 [merge-tools]
866 884 false.whatever=
867 885 true.priority=1
868 886 true.executable=cat
869 887 # hg update -C 1
870 888 $ hg merge -r 2 --config merge-tools.true.executable=echo --config merge-tools.true.args='mergeresult > $output'
871 889 merging f
872 890 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
873 891 (branch merge, don't forget to commit)
874 892 $ aftermerge
875 893 # cat f
876 894 mergeresult
877 895 # hg stat
878 896 M f
879 897
880 898 Merge using tool with a path that must be quoted:
881 899
882 900 $ beforemerge
883 901 [merge-tools]
884 902 false.whatever=
885 903 true.priority=1
886 904 true.executable=cat
887 905 # hg update -C 1
888 906 $ cat <<EOF > 'my merge tool'
889 907 > cat "\$1" "\$2" "\$3" > "\$4"
890 908 > EOF
891 909 $ hg --config merge-tools.true.executable='sh' \
892 910 > --config merge-tools.true.args='"./my merge tool" $base $local $other $output' \
893 911 > merge -r 2
894 912 merging f
895 913 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
896 914 (branch merge, don't forget to commit)
897 915 $ rm -f 'my merge tool'
898 916 $ aftermerge
899 917 # cat f
900 918 revision 0
901 919 space
902 920 revision 1
903 921 space
904 922 revision 2
905 923 space
906 924 # hg stat
907 925 M f
908 926
909 927 Issue3581: Merging a filename that needs to be quoted
910 928 (This test doesn't work on Windows filesystems even on Linux, so check
911 929 for Unix-like permission)
912 930
913 931 #if unix-permissions
914 932 $ beforemerge
915 933 [merge-tools]
916 934 false.whatever=
917 935 true.priority=1
918 936 true.executable=cat
919 937 # hg update -C 1
920 938 $ echo "revision 5" > '"; exit 1; echo "'
921 939 $ hg commit -Am "revision 5"
922 940 adding "; exit 1; echo "
923 941 warning: filename contains '"', which is reserved on Windows: '"; exit 1; echo "'
924 942 $ hg update -C 1 > /dev/null
925 943 $ echo "revision 6" > '"; exit 1; echo "'
926 944 $ hg commit -Am "revision 6"
927 945 adding "; exit 1; echo "
928 946 warning: filename contains '"', which is reserved on Windows: '"; exit 1; echo "'
929 947 created new head
930 948 $ hg merge --config merge-tools.true.executable="true" -r 5
931 949 merging "; exit 1; echo "
932 950 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
933 951 (branch merge, don't forget to commit)
934 952 $ hg update -C 1 > /dev/null
935 953 #endif
936 954
937 955 Merge post-processing
938 956
939 957 cat is a bad merge-tool and doesn't change:
940 958
941 959 $ beforemerge
942 960 [merge-tools]
943 961 false.whatever=
944 962 true.priority=1
945 963 true.executable=cat
946 964 # hg update -C 1
947 965 $ hg merge -y -r 2 --config merge-tools.true.checkchanged=1
948 966 merging f
949 967 revision 1
950 968 space
951 969 revision 0
952 970 space
953 971 revision 2
954 972 space
955 973 output file f appears unchanged
956 974 was merge successful (yn)? n
957 975 merging f failed!
958 976 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
959 977 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
960 978 [1]
961 979 $ aftermerge
962 980 # cat f
963 981 revision 1
964 982 space
965 983 # hg stat
966 984 M f
967 985 ? f.orig
968 986
969 987 #if symlink
970 988
971 989 internal merge cannot handle symlinks and shouldn't try:
972 990
973 991 $ hg update -q -C 1
974 992 $ rm f
975 993 $ ln -s symlink f
976 994 $ hg commit -qm 'f is symlink'
977 995 $ hg merge -r 2 --tool internal:merge
978 996 merging f
979 997 warning: internal :merge cannot merge symlinks for f
980 998 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
981 999 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
982 1000 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
983 1001 [1]
984 1002
985 1003 #endif
General Comments 0
You need to be logged in to leave comments. Login now