##// END OF EJS Templates
dirstate: make backup methods public...
Mateusz Kwapich -
r29137:d115cbf5 default
parent child Browse files
Show More
@@ -1,3556 +1,3556
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import sys
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 )
24 24
25 25 from . import (
26 26 bookmarks,
27 27 changelog,
28 28 copies,
29 29 crecord as crecordmod,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 graphmod,
34 34 lock as lockmod,
35 35 match as matchmod,
36 36 obsolete,
37 37 patch,
38 38 pathutil,
39 39 phases,
40 40 repair,
41 41 revlog,
42 42 revset,
43 43 scmutil,
44 44 templatekw,
45 45 templater,
46 46 util,
47 47 )
48 48 stringio = util.stringio
49 49
def ishunk(x):
    """Return True when x is a hunk object (curses or plain record flavor)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
53 53
def newandmodified(chunks, originalchunks):
    """Return the set of filenames whose selected hunks create new files
    that were not part of originalchunks (i.e. files both newly added and
    then modified during the record session)."""
    touched = set()
    for c in chunks:
        if (ishunk(c) and c.header.isnewfile()
            and c not in originalchunks):
            touched.add(c.header.filename())
    return touched
61 61
def parsealiases(cmd):
    """Split a command-table key into its list of aliases.

    The leading '^' (marker for frequently-used commands) is discarded
    before splitting on '|'.
    """
    stripped = cmd.lstrip("^")
    return stripped.split("|")
64 64
def setupwrapcolorwrite(ui):
    """Replace ui.write with a wrapper that labels/colorizes diff output.

    Returns the original write method so the caller can restore it when
    done (see recordfilter).
    """
    oldwrite = ui.write

    def wrapwrite(orig, *args, **kw):
        label = kw.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    def wrapped(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)

    setattr(ui, 'write', wrapped)
    return oldwrite
77 77
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Dispatch hunk filtering to the curses chunk selector or the
    plain-text prompt-based one."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)
    # curses path: in test mode, drive the selector from a script file
    if testfile:
        chunkselector = crecordmod.testdecorator(testfile,
                                                 crecordmod.testchunkselector)
    else:
        chunkselector = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, chunkselector, operation)
90 90
def recordfilter(ui, originalhunks, operation=None):
    """Interactively ask the user which of originalhunks to keep.

    Returns (newchunks, newopts).  *operation* names the action being
    performed (reverting, committing, shelving, ...) for ui purposes and
    must already be a translated string.
    """
    testfile = ui.config('experimental', 'crecordtest', None)
    usecurses = crecordmod.checkcurses(ui)
    oldwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, usecurses, testfile,
                            operation)
    finally:
        # always restore the unwrapped write method
        ui.write = oldwrite
107 107
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them through commitfunc.

    cmdsuggest: command name to suggest when the ui is non-interactive.
    backupall: back up every changed file instead of only the contended
    ones (used by callers that rewrite history afterwards).
    filterfn: callable(ui, originalchunks) -> (chunks, opts) that lets
    the user pick hunks (e.g. recordfilter).
    """
    from . import merge as mergemod
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # generate a git-style diff with function context; hunks parsed
        # from it are what the user selects from
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers have files(); individual hunks do not
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # write only the selected hunks of backed-up files to a buffer
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        extra={"suffix": ".diff"})
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            # remove newly-added-and-modified files so the revert below
            # leaves a clean base to apply the filtered patch onto
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                        False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; leftover backups are not fatal
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # take the working-directory lock around the whole record session
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
288 288
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            # prefix match against any alias
            for alias in aliases:
                if alias.startswith(cmd):
                    found = alias
                    break
        if found is None:
            continue
        if aliases[0].startswith("debug") or found.startswith("debug"):
            debugchoice[found] = (aliases, table[entry])
        else:
            choice[found] = (aliases, table[entry])

    # debug commands only surface when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
326 326
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string.

    Raises AmbiguousCommand when several commands match cmd and
    UnknownCommand when none does.
    """
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # sorted() instead of keys()+sort(): dict.keys() is a view on
        # Python 3 and has no sort(); sorted() behaves identically on 2
        clist = sorted(choice)
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        # next(iter(...)) instead of values()[0]: dict views are not
        # indexable on Python 3; same element is returned on Python 2
        return next(iter(choice.values()))

    raise error.UnknownCommand(cmd, allcmds)
343 343
def findrepo(p):
    """Walk upward from directory p looking for a '.hg' directory.

    Returns the first ancestor (or p itself) that contains '.hg', or
    None once the filesystem root is reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # reached the root without finding a repository
            return None
        p = parent
    return p
351 351
def bailifchanged(repo, merge=True):
    """Abort when the working directory (or any subrepo) has uncommitted
    changes; with merge=True also abort on an uncommitted merge."""
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'))
    # modified, added, removed, deleted
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged()
361 361
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                          'exclusive'))
    if logfile and not message:
        try:
            if logfile == '-':
                # read the message from stdin
                message = ui.fin.read()
            else:
                # normalize line endings while reading the file
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, inst.strerror))
    return message
380 380
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    suffix = ".merge" if ismerge else ".normal"
    return baseformname + suffix
397 397
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forceeditor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forceeditor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
428 428
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    # a falsy value (missing, None, 0, '') means "no limit"
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
442 442
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in output filename pattern 'pat'.

    Supported escapes: %% literal percent, %b repo basename, and (when
    the corresponding argument is given) %H/%h/%R/%r node forms,
    %m sanitized desc, %N/%n totals/sequence numbers, %s/%d/%p pathname
    parts.  Unknown escapes abort.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        'm': lambda: re.sub('[^\w]', '_', str(desc))
        }
    mapping = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            mapping.update(node_expander)
            mapping['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            mapping['N'] = lambda: str(total)
        if seqno is not None:
            mapping['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # zero-pad the sequence number to the width of the total
            mapping['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            mapping['s'] = lambda: os.path.basename(pathname)
            mapping['d'] = lambda: os.path.dirname(pathname) or '.'
            mapping['p'] = lambda: pathname

        pieces = []
        i = 0
        end = len(pat)
        while i < end:
            ch = pat[i]
            if ch == '%':
                i += 1
                pieces.append(mapping[pat[i]]())
            else:
                pieces.append(ch)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
488 488
489 489 class _unclosablefile(object):
490 490 def __init__(self, fp):
491 491 self._fp = fp
492 492
493 493 def close(self):
494 494 pass
495 495
496 496 def __iter__(self):
497 497 return iter(self._fp)
498 498
499 499 def __getattr__(self, attr):
500 500 return getattr(self._fp, attr)
501 501
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return a file-like object for pattern 'pat'.

    '-' or an empty pattern means the ui's stdio stream, wrapped so that
    close() is ignored.  A file-like object passed as 'pat' is returned
    as-is.  Otherwise the pattern is expanded via makefilename and the
    resulting path is opened.
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)
    # a ready-made file object was handed in directly
    if writable and util.safehasattr(pat, 'write'):
        return pat
    if 'r' in mode and util.safehasattr(pat, 'read'):
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        # first writer truncates, subsequent writers append
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
524 524
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    # validate option combinations first; msg stays None when they are fine
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            # unfiltered so hidden changesets are reachable too
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            dirlog = repo.dirlog(file_)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        # no repo (or empty log): open the '.i' revlog file straight from
        # disk, without path auditing
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
569 569
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files matching pats to the last
    element of pats; returns True when any per-file operation failed.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # collect (abs, rel, exact) sources for one pattern, warning on
        # unmanaged or removed files that were named explicitly
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy/rename; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after: only record the copy, the file must already exist
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # rename through a temporary to change only the case
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist under
                    # dest; used to pick the more plausible strip length
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
797 797
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With --daemon, the current process re-execs itself detached (passing
    --daemon-postexec to the child) and waits for the child to signal
    startup by unlinking a lock file; the child then redirects stdio and
    runs runfn.  initfn runs before, parentfn (if given) receives the
    child pid in the parent.
    '''

    def writepid(pid):
        # record our pid when --pid-file was requested
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_postexec']:
        # parent side of daemonization
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-postexec=unlink:%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # '--cwd' and its value are two separate arguments
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child startup is complete once it unlinked the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise error.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(util.getpid())

    if opts['daemon_postexec']:
        # child side of daemonization: detach from the controlling
        # terminal, then execute the instructions passed by the parent
        try:
            os.setsid()
        except AttributeError:
            # platform without setsid (e.g. Windows)
            pass
        for inst in opts['daemon_postexec']:
            if inst.startswith('unlink:'):
                lockpath = inst[7:]
                os.unlink(lockpath)
            elif inst.startswith('chdir:'):
                os.chdir(inst[6:])
            elif inst != 'none':
                raise error.Abort(_('invalid value for --daemon-postexec: %s')
                                  % inst)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # redirect stdin to /dev/null and stdout/stderr to the log file
        # (or /dev/null when no log file was given)
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
884 884
## facility to let extension process additional data into an import patch
# (extensions append an identifier to the list below and register the
# matching function in the corresponding map)
# list of identifier to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass an ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
905 905
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple where ``msg`` is a status
    string (or None when the hunk contained no patch), ``node`` is the new
    changeset's node (or None when nothing was committed) and ``rejects``
    is True when a --partial apply produced reject files.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    # patch.extract() yields no filename when the input contained no patch
    if not tmpname:
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            # --exact requires the recorded node and first parent
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply the patch to the working directory, then commit it
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                # with --partial a failed apply still commits what landed
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                try:
                    if partial:
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                    for idfunc in extrapostimport:
                        extrapostimportmap[idfunc](repo[n])
                finally:
                    # restore 'ui.allowemptycommit' whatever happened above
                    repo.ui.restoreconfig(allowemptyback)
        else:
            # --bypass: build the commit in memory without touching the
            # working directory
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            user,
                                            date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        # the extracted patch lives in a temp file; always clean it up
        os.unlink(tmpname)
1075 1075
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1083 1083
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    :revs: revisions to export (must be non-empty)
    :template: filename template used when no file object is given
    :fp: optional file object all patches are written to; when absent,
         one file per revision is created from ``template`` (or output
         goes to the ui when the template is empty)
    :switch_parent: diff against the second parent instead of the first
    :opts: diff options forwarded to patch.diffui
    :match: matcher restricting which files are diffed
    '''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])
    # shared across single() calls so all files get consistent modes
    filemode = {}

    def single(rev, seqno, fp):
        # write one revision, opening (and later closing) its own file
        # object when none was passed in
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            shouldclose = True
        if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        # emit the standard "HG changeset patch" header block
        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        # let extensions append extra header lines (see extraexport above)
        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1148 1148
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    Writes a diff (or, with ``stat=True``, a diffstat) between ``node1``
    and ``node2`` to ``fp`` if given, otherwise to the ui.  With
    ``listsubrepos`` the diff of each subrepository is appended.
    '''
    # write either to the provided file object or to the ui
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat does not need any context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1206 1206
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-rev buffered output, flushed in caller-chosen order
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        """write buffered header/hunk for ctx; return 1 if a hunk was
        written, 0 otherwise"""
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            # avoid repeating an identical header for consecutive revs
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        """write the accumulated footer, if any"""
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """show ctx, either immediately or buffered per-rev for flush()"""
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # modified/added/removed file lists
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        """write diffstat and/or patch for ctx according to diffopts"""
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write("\n")
1376 1376
class jsonchangeset(changeset_printer):
    '''format changeset information.

    Emits a JSON array with one object per changeset; the stream is
    hand-built, so close() must be called to terminate the array.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # tracks whether the opening "[" has been written yet
        self._first = True

    def close(self):
        if not self._first:
            self.ui.write("\n]\n")
        else:
            # no changeset was ever shown: emit an empty array
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        # the working directory context has rev/node None -> JSON null
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write('\n "rev": %s' % jrev)
            self.ui.write(',\n "node": %s' % jnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n "rev": %s' % jrev)
        self.ui.write(',\n "node": %s' % jnode)
        self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write(',\n "manifest": %s' % jmanifestnode)

            self.ui.write(',\n "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # modified/added/removed relative to the first parent
            files = ctx.p1().status(ctx)
            self.ui.write(',\n "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # capture diffstat output into a JSON string field
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1474 1474
class changeset_templater(changeset_printer):
    '''format changeset information.

    Renders changesets through a templater built either from a map file
    or from a template string (exactly one of the two may be given).
    '''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # full nodes in debug mode, 12-digit short form otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        filters = {'formatnode': formatnode}
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        assert not (tmpl and mapfile)
        if mapfile:
            self.t = templater.templater.frommapfile(mapfile, filters=filters,
                                                     cache=defaulttempl)
        else:
            self.t = formatter.maketemplater(ui, 'changeset', tmpl,
                                             filters=filters,
                                             cache=defaulttempl)

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        # map part name -> template name; mode-specific variants such as
        # 'changeset_debug' override the plain name when available
        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts['changeset']
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1559 1559
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.

    Returns a ``(template, mapfile)`` pair, at most one of which is set.
    """
    if not tmpl and not style:
        # neither given explicitly: consult the ui configuration, where a
        # configured template wins over a configured style
        configured = ui.config('ui', 'logtemplate')
        if configured:
            return templater.unquotestring(configured), None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if tmpl:
        return formatter.lookuptemplate(ui, 'changeset', tmpl)

    if style:
        mapfile = style
        # a bare name is looked up on the template path, with and without
        # the conventional 'map-cmdline.' prefix
        if not os.path.split(mapfile)[0]:
            found = (templater.templatepath('map-cmdline.' + mapfile)
                     or templater.templatepath(mapfile))
            if found:
                mapfile = found
        return None, mapfile

    return None, None
1586 1586
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a match function is only needed when a patch or diffstat is requested
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)
    else:
        matchfn = None

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
    if tmpl or mapfile:
        return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                   buffered)
    return changeset_printer(ui, repo, matchfn, opts, buffered)
1612 1612
def showmarker(ui, marker, index=None):
    """print a human-oriented representation of an obsolescence marker

    To be used by debug function."""
    if index is not None:
        ui.write("%i " % index)
    ui.write(hex(marker.precnode()))
    for successor in marker.succnodes():
        ui.write(' ')
        ui.write(hex(successor))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    # metadata entries, sorted by key, with the date entry filtered out
    metadata = sorted(marker.metadata().items())
    ui.write('{%s}' % ', '.join('%r: %r' % item for item in metadata
                                if item[0] != 'date'))
    ui.write('\n')
1632 1632
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    matched = {}

    def prep(ctx, fns):
        # record the date of every walked changeset matching the spec
        ctxdate = ctx.date()
        if datematch(ctxdate[0]):
            matched[ctx.rev()] = ctxdate

    for ctx in walkchangerevs(repo, matcher, {'rev': None}, prep):
        rev = ctx.rev()
        if rev not in matched:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, util.datestr(matched[rev])))
        return str(rev)

    raise error.Abort(_("revision matching date not found"))
1653 1653
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double each step, settling on a constant
    value once the limit has been reached."""
    size = windowsize
    # grow phase: double until the limit stops us
    while size < sizelimit:
        yield size
        size *= 2
    # steady phase: repeat the final size forever
    while True:
        yield size
1659 1659
class FileWalkError(Exception):
    """Raised when the file history can't be walked using filelogs alone."""
    pass
1662 1662
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    As a side effect, fncache is populated with the matched filenames
    per wanted rev.
    '''
    wanted = set()
    # (filename, filenode) pairs of rename sources discovered while
    # following; walked after the explicitly matched files
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) for matched files, then for
        # any rename sources appended to 'copies' during the walk
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1759 1759
class _followfilter(object):
    """Incrementally decide whether revisions belong to a --follow walk.

    The first revision passed to match() becomes the starting point; later
    calls return True for revisions connected to it (descendants when
    walking forward, ancestors when walking backward), maintaining the
    frontier in self.roots as the walk progresses.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        # with onlyfirst, only first parents are considered
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # first call: anchor the walk at this revision
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1797 1797
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    # 'wanted' collects the revisions to yield; 'fncache' maps rev -> changed
    # filenames matching the file filters (filled only when cheap to do so).
    wanted = set()
    # patterns, or --removed with exact/prefix matches, cannot be served by
    # the filelog fast path below
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): relies on 'wanted' supporting subtraction of a
                # plain list (smartset-style '-'); a builtin set would raise a
                # TypeError here -- confirm wanted's type on this path
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # first pass, forward order: let callers gather data via prepare()
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    # NOTE(review): fns_generator closes over 'ctx', which is
                    # rebound on each loop iteration; prepare() is expected to
                    # consume fns before the next iteration
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # second pass, window order (usually backwards): yield contexts
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1935 1935
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision. We replay the --follow revset traversal and record, per
    # linkrev, the paths of FILE's ancestors (not "correct" but good
    # enough), then build a matcher from that mapping on demand.
    linkrevfiles = {}
    populated = [False]  # one-element list so the closure can mutate it
    wparent = repo['.']

    def _record(fctx):
        linkrevfiles.setdefault(fctx.linkrev(), set()).add(fctx.path())

    def _populate():
        for name in files:
            fctx = wparent[name]
            _record(fctx)
            for ancestor in fctx.ancestors(followfirst=followfirst):
                _record(ancestor)

    def filematcher(rev):
        # Lazy initialization: walk the ancestry only on first use
        if not populated[0]:
            populated[0] = True
            _populate()
        return scmutil.matchfiles(repo, linkrevfiles.get(rev, []))

    return filematcher
1962 1962
1963 1963 def _makenofollowlogfilematcher(repo, pats, opts):
1964 1964 '''hook for extensions to override the filematcher for non-follow cases'''
1965 1965 return None
1966 1966
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # map of option name -> (revset template, joiner for list values)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    # use the next() builtin (as the line below and walkchangerevs() already
    # do) rather than the Python 2-only iterator .next() method
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # Combine all active options into a single conjunction of revset terms.
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2118 2118
def _logrevs(repo, opts):
    """Return the default set of revisions for log-like commands."""
    # An explicit --rev always wins.
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    if opts.get('follow') or opts.get('follow_first'):
        # following from an unborn working directory parent yields nothing
        if repo.dirstate.p1() == nullid:
            return revset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = revset.spanset(repo)
    allrevs.reverse()
    return allrevs
2133 2133
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        # nothing to show; keep the (revs, expr, filematcher) shape
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # returns the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # apply --limit by keeping only the first 'limit' revisions
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2170 2170
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        # nothing to show; keep the (revs, expr, filematcher) shape
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        if not opts.get('rev'):
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # returns the revision matching A then the revision matching B. Sort
        # again to fix that.
        fixopts = ['branch', 'only_branch', 'keyword', 'user']
        oldrevs = revs
        revs = matcher(repo, revs)
        if not opts.get('rev'):
            revs.sort(reverse=True)
        elif len(pats) > 1 or any(len(opts.get(op, [])) > 1 for op in fixopts):
            # XXX "A or B" is known to change the order; fix it by filtering
            # matched set again (issue5100)
            revs = oldrevs & revs
    if limit is not None:
        # apply --limit by keeping only the first 'limit' revisions
        limitedrevs = []
        for idx, r in enumerate(revs):
            if limit <= idx:
                break
            limitedrevs.append(r)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2211 2211
def _graphnodeformatter(ui, displayer):
    """Return a callable (repo, ctx) -> str rendering the graph node symbol."""
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode # fast path for "{graphnode}"

    templ = formatter.gettemplater(ui, 'graphnode', spec)
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache # reuse cache of slow templates
    else:
        cache = {}
    # a single props dict is shared and updated across formatnode() calls
    props = templatekw.keywords.copy()
    props['templ'] = templ
    props['cache'] = cache

    def formatnode(repo, ctx):
        props.update(ctx=ctx, repo=repo, ui=repo.ui, revcache={})
        return templater.stringify(templ('graphnode', **props))

    return formatnode
2231 2231
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Render 'dag' as an ASCII revision graph through 'displayer'.

    'edgefn' converts each node's rendered lines into edge drawing data,
    'getrenamed' (optional) resolves copy/rename info per file, and
    'filematcher' (optional) narrows the files detailed per revision.
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # ctx.rev() is falsy for the null/working revisions; skip those
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # displayer buffers its output into hunk[rev]; pop it for drawing
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2279 2279
def graphlog(ui, repo, *pats, **opts):
    """Show a revision graph; parameters are identical to the log command."""
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # with --rev, only look for renames up to the highest requested rev
        userrevs = opts.get('rev')
        endrev = scmutil.revrange(repo, userrevs).max() + 1 if userrevs else None
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2294 2294
def checkunsupportedgraphflags(pats, opts):
    """Abort if any option incompatible with -G/--graph was requested."""
    for op in ["newest_first"]:
        if opts.get(op):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % op.replace("_", "-"))
2300 2300
def graphrevs(repo, nodes, opts):
    """Return a graphmod DAG over *nodes* (reversed in place), honoring --limit.

    The limit slice builds a new list, so the caller's list is only mutated
    by the reversal.
    """
    nodes.reverse()
    maxcount = loglimit(opts)
    shown = nodes if maxcount is None else nodes[:maxcount]
    return graphmod.nodes(repo, shown)
2307 2307
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule matched files (and subrepo files) for tracking.

    Returns the list of file names that could not be added. With
    'explicitonly', only files named exactly on the command line are added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # remember files the matcher rejected while still reporting them upstream
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # check for case collisions against already-audited names
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    # recurse into subrepositories (explicit-only unless --subrepos)
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2350 2350
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking matched files without removing them from disk.

    Returns (bad, forgot): names that could not be forgotten and names that
    were actually forgotten (including subrepo-qualified paths).
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # remember files the matcher rejected while still reporting them upstream
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # status tuple indexes: modified, added, deleted, clean
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2398 2398
def files(ui, ctx, m, fm, fmt, subrepos):
    """List files of 'ctx' matching 'm' through formatter 'fm'.

    Returns 0 if at least one file was listed (here or in a subrepo),
    1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # for the working directory, skip files marked as removed
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        def matchessubrepo(subpath):
            # explicit subrepo name, or a pattern file inside it
            return (m.exact(subpath)
                    or any(f.startswith(subpath + '/') for f in m.files()))

        if subrepos or matchessubrepo(subpath):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2432 2432
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Schedule matched files for removal from tracking (and disk).

    'after' records deletions already made; 'force' removes regardless of
    state. When 'warnings' is supplied by a recursive (subrepo) call, warning
    messages are appended to it instead of being printed here. Returns 1 if
    any file could not be removed, else 0.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        # recursive call: the outermost caller prints the warnings
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        def matchessubrepo(matcher, subpath):
            if matcher.exact(subpath):
                return True
            for f in matcher.files():
                if f.startswith(subpath):
                    return True
            return False

        count += 1
        if subrepos or matchessubrepo(m, subpath):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))

            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == '.' or insubrepo():
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    # NOTE: 'list' shadows the builtin here (historical naming)
    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after only records files already gone; warn about the rest
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        # default: refuse to remove modified or added files without -f
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file has been marked for add'
                              ' (use forget to undo)\n') % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2558 2558
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write the contents of matched files in 'ctx' to the output sink.

    Returns 0 if at least one file was written (here or in a subrepo),
    1 otherwise.
    """
    err = 1

    def write(path):
        # open the per-file output (honors --output templates), write data
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        matcher.bad(path, msg)

    for abs in ctx.walk(matchmod.badmatch(matcher, badfn)):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            # sub.cat returns nonzero on failure; 0/False means success
            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2605 2605
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    if opts.get('date'):
        opts['date'] = util.parsedate(opts.get('date'))
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove') and scmutil.addremove(repo, matcher, "", opts) != 0:
        raise error.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2622 2622
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset ``old``, folding in any working directory changes.

    A temporary commit is first made on top of ``old`` to capture working
    directory changes (if any); the result is then recreated as a single
    changeset on top of ``old``'s first parent.  Returns the node of the
    amended changeset (or ``old``'s own node when nothing changed).
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # bugfix: the original tested the bound method `old.p2`,
                # which is always truthy; only consult p2 copies when old
                # actually is a merge (same test as for `files` above)
                if len(old.parents()) > 1:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

                user = opts.get('user') or old.user()
                date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This is not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            #commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid
2817 2817
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, spawning the commit editor when empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2823 2823
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Run the commit message editor for ctx and return the edited text.

    The initial text comes from the most specific matching
    [committemplate] entry for ``editform`` (falling back to the plain
    built-in layout).  Raises Abort on an empty message, or — when
    ``unchangedmessagedetection`` is set — when the user saved the
    template text unmodified.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # try progressively shorter prefixes of editform ('changeset.a.b',
    # 'changeset.a', 'changeset') until a committemplate entry matches;
    # the while/else falls through to the non-template layout
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending)
    # strip the 'HG: ...' helper lines the user was shown
    text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2864 2864
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render ``tmpl`` (a committemplate) for ctx and return the text."""
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    displayer = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)

    # make every other [committemplate] item available to the template
    for key, value in ui.configitems('committemplate'):
        if key == 'changeset':
            continue
        displayer.t.cache[key] = value

    ui.pushbuffer()
    # extramsg must be a string, never None
    displayer.show(ctx, extramsg=extramsg or '')
    return ui.popbuffer()
2881 2881
def hgprefix(msg):
    """Prefix each non-empty line of msg with 'HG: '."""
    prefixed = ["HG: %s" % line for line in msg.split("\n") if line]
    return "\n".join(prefixed)
2884 2884
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for ctx."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(hgprefix(_("Enter commit message."
                            " Lines beginning with 'HG:' are removed.")))
    lines.append(hgprefix(extramsg))
    lines.append("HG: --")
    lines.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_("branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    lines.extend(hgprefix(_("subrepo %s") % s) for s in subs)
    lines.extend(hgprefix(_("added %s") % f) for f in added)
    lines.extend(hgprefix(_("changed %s") % f) for f in modified)
    lines.extend(hgprefix(_("removed %s") % f) for f in removed)
    if not (added or modified or removed):
        lines.append(hgprefix(_("no files changed")))
    lines.append("")

    return "\n".join(lines)
2912 2912
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print post-commit status messages for the new changeset ``node``.

    Emits 'created new head' / 'reopening closed branch head' notices and,
    under --debug/--verbose, the committed changeset id.  ``bheads`` is the
    list of branch head nodes before the commit (assumed — confirm with
    callers); ``opts`` are the command options.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    # new head: the node is not a known branch head and none of its parents
    # was a head of the same branch (amend never creates a new head)
    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2960 2960
def postcommitstatus(repo, pats, opts):
    """Return the working directory status restricted to pats/opts."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
2963 2963
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore files in the working directory to their state in ``ctx``.

    ``parents`` is the (p1, p2) pair of working directory parents.  The
    function classifies every matched file by comparing the working copy,
    its dirstate status and the target revision, then dispatches each file
    to an action (revert/add/remove/forget/undelete/...) with an optional
    backup, and finally delegates to _performrevert().
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress errors for paths already collected or covered
                # by a subrepo/directory prefix
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted changesets
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = dsmodified - smf
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backup = check = discard

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified,      actions['revert'],   discard),
            # Modified compared to target, but local file is deleted
            (deleted,       actions['revert'],   discard),
            # Modified compared to target, local change
            (dsmodified,    actions['revert'],   backup),
            # Added since target
            (added,         actions['remove'],   discard),
            # Added in working directory
            (dsadded,       actions['forget'],   discard),
            # Added since target, have local modification
            (modadded,      backupanddel,        backup),
            # Added since target but file is missing in working directory
            (deladded,      actions['drop'],     discard),
            # Removed since  target, before working copy parent
            (removed,       actions['add'],      discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk,      actions['add'],      check),
            # Removed since targe, marked as such in working copy parent
            (dsremoved,     actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk,    actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean,         actions['noop'],     discard),
            # Existing file, not tracked anywhere
            (unknown,       actions['unknown'],  discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup and (backup <= dobackup
                                     or wctx[abs].cmp(ctx[abs])):
                        bakname = scmutil.origpath(ui, repo, rel)
                        ui.note(_('saving current version of %s as %s\n') %
                                (rel, bakname))
                        if not opts.get('dry_run'):
                            if interactive:
                                util.copyfile(target, bakname)
                            else:
                                util.rename(target, bakname)
                if ui.verbose or not exact:
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
3232 3232
3233 3233 def _revertprefetch(repo, ctx, *files):
3234 3234 """Let extension changing the storage layer prefetch content"""
3235 3235 pass
3236 3236
def _performrevert(repo, parents, ctx, actions, interactive=False):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = parents
    node = ctx.node()
    # files the interactive session decided to leave untouched
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write f's content (and flags) from the target revision
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        if interactive:
            choice = \
                repo.ui.promptchoice(
                    _("forget added file %s (yn)?$$ &Yes $$ &No")
                    % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        # 'drop': forget from dirstate but leave the file on disk untouched
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        reversehunks = repo.ui.configbool('experimental',
                                          'revertalternateinteractivemode',
                                          True)
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        # Apply changes
        fp = stringio()
        for c in chunks:
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # restore copy records so reverted renames stay renames
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3351 3351
def command(table):
    """Returns a function object to be used as a decorator for making commands.

    This function receives a command table as its argument. The table should
    be a dict.

    The returned function can be used as a decorator for adding commands
    to that command table. This function accepts multiple arguments to define
    a command.

    The first argument is the command name.

    The options argument is an iterable of tuples defining command arguments.
    See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.

    The synopsis argument defines a short, one line summary of how to use the
    command. This shows up in the help output.

    The norepo argument defines whether the command does not require a
    local repository. Most commands operate against a repository, thus the
    default is False.

    The optionalrepo argument defines whether the command optionally requires
    a local repository.

    The inferrepo argument defines whether to try to find a repository from the
    command line arguments. If True, arguments will be examined for potential
    repository locations. See ``findrepo()``. If a repository is found, it
    will be used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            # repository requirements are attached to the function itself
            func.norepo = norepo
            func.optionalrepo = optionalrepo
            func.inferrepo = inferrepo
            entry = (func, list(options))
            if synopsis:
                entry += (synopsis,)
            table[name] = entry
            return func
        return decorator
    return cmd
3396 3396
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is the "missing" attribute of the result
# of "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl,   destbranch,   destpeer,   outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Each entry is a tuple: (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3425 3425
def checkunfinished(repo, commit=False):
    '''Abort when an unfinished multistep operation (like graft) is found.

    It's probably good to check this right before bailifchanged().  With
    commit=True, states whose 'allowcommit' flag is set are tolerated.
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            continue
        if repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3436 3436
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.
    '''
    # first pass: a non-clearable state aborts before anything is removed
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # second pass: remove everything that is safe to clear
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.join(statefile))
3447 3447
# a list of (state file, how-to-continue message) pairs consulted by
# howtocontinue() below
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3452 3452
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates tupples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple. 'msg' is a string (or None when there
    is nothing to continue) and 'warning' is a boolean.
    '''
    contmsg = _("continue: %s")
    for statefile, cmdmsg in afterresolvedstates:
        if repo.vfs.exists(statefile):
            return contmsg % cmdmsg, True
    wctx = repo[None]
    # no state file: suggest 'hg commit' when something is dirty
    if any(repo.status()) or any(wctx.sub(s).dirty() for s in wctx.substate):
        return contmsg % _("hg commit"), False
    return None, None
3473 3473
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a matching afterresolvedstates, the message goes through
    repo.ui.warn; otherwise repo.ui.note is used.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3488 3488
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task; when it reports a
    warning, its message becomes the abort hint.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
3503 3503
class dirstateguard(object):
    '''Restore dirstate at unexpected failure.

    At the construction, this class does:

    - write current ``repo.dirstate`` out, and
    - save ``.hg/dirstate`` into the backup file

    This restores ``.hg/dirstate`` from backup file, if ``release()``
    is invoked before ``close()``.

    This just removes the backup file at ``close()`` before ``release()``.
    '''
    # NOTE: this span previously carried both the old private
    # (``_savebackup``) and the renamed public (``savebackup``) dirstate
    # backup calls from the rename diff; the public names are kept.

    def __init__(self, repo, name):
        self._repo = repo
        # id(self) makes the backup name unique even when several guards
        # with the same 'name' are alive at once
        self._suffix = '.backup.%s.%d' % (name, id(self))
        repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
        self._active = True
        self._closed = False

    def __del__(self):
        if self._active: # still active
            # this may occur, even if this class is used correctly:
            # for example, releasing other resources like transaction
            # may raise exception before ``dirstateguard.release`` in
            # ``release(tr, ....)``.
            self._abort()

    def close(self):
        """Discard the backup; the guarded operation succeeded."""
        if not self._active: # already inactivated
            msg = (_("can't close already inactivated backup: dirstate%s")
                   % self._suffix)
            raise error.Abort(msg)

        self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
                                        self._suffix)
        self._active = False
        self._closed = True

    def _abort(self):
        # restore .hg/dirstate from the backup and deactivate the guard
        self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
                                          self._suffix)
        self._active = False

    def release(self):
        """Restore the backup unless close() already discarded it."""
        if not self._closed:
            if not self._active: # already inactivated
                msg = (_("can't release already inactivated backup:"
                         " dirstate%s")
                       % self._suffix)
                raise error.Abort(msg)
            self._abort()
@@ -1,1242 +1,1242
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import nullid
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 match as matchmod,
21 21 osutil,
22 22 parsers,
23 23 pathutil,
24 24 scmutil,
25 25 util,
26 26 )
27 27
28 28 propertycache = util.propertycache
29 29 filecache = scmutil.filecache
30 30 _rangemask = 0x7fffffff
31 31
32 32 dirstatetuple = parsers.dirstatetuple
33 33
34 34 class repocache(filecache):
35 35 """filecache for files in .hg/"""
36 36 def join(self, obj, fname):
37 37 return obj._opener.join(fname)
38 38
39 39 class rootcache(filecache):
40 40 """filecache for files in the repository root"""
41 41 def join(self, obj, fname):
42 42 return obj._join(fname)
43 43
44 44 def _getfsnow(vfs):
45 45 '''Get "now" timestamp on filesystem'''
46 46 tmpfd, tmpname = vfs.mkstemp()
47 47 try:
48 48 return os.fstat(tmpfd).st_mtime
49 49 finally:
50 50 os.close(tmpfd)
51 51 vfs.unlink(tmpname)
52 52
53 53 def nonnormalentries(dmap):
54 54 '''Compute the nonnormal dirstate entries from the dmap'''
55 55 try:
56 56 return parsers.nonnormalentries(dmap)
57 57 except AttributeError:
58 58 return set(fname for fname, e in dmap.iteritems()
59 59 if e[0] != 'n' or e[3] == -1)
60 60
61 61 def _trypending(root, vfs, filename):
62 62 '''Open file to be read according to HG_PENDING environment variable
63 63
64 64 This opens '.pending' of specified 'filename' only when HG_PENDING
65 65 is equal to 'root'.
66 66
67 67 This returns '(fp, is_pending_opened)' tuple.
68 68 '''
69 69 if root == os.environ.get('HG_PENDING'):
70 70 try:
71 71 return (vfs('%s.pending' % filename), True)
72 72 except IOError as inst:
73 73 if inst.errno != errno.ENOENT:
74 74 raise
75 75 return (vfs(filename), False)
76 76
77 77 class dirstate(object):
78 78
79 79 def __init__(self, opener, ui, root, validate):
80 80 '''Create a new dirstate object.
81 81
82 82 opener is an open()-like callable that can be used to open the
83 83 dirstate file; root is the root of the directory tracked by
84 84 the dirstate.
85 85 '''
86 86 self._opener = opener
87 87 self._validate = validate
88 88 self._root = root
89 89 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
90 90 # UNC path pointing to root share (issue4557)
91 91 self._rootdir = pathutil.normasprefix(root)
92 92 # internal config: ui.forcecwd
93 93 forcecwd = ui.config('ui', 'forcecwd')
94 94 if forcecwd:
95 95 self._cwd = forcecwd
96 96 self._dirty = False
97 97 self._dirtypl = False
98 98 self._lastnormaltime = 0
99 99 self._ui = ui
100 100 self._filecache = {}
101 101 self._parentwriters = 0
102 102 self._filename = 'dirstate'
103 103 self._pendingfilename = '%s.pending' % self._filename
104 104
105 105 # for consistent view between _pl() and _read() invocations
106 106 self._pendingmode = None
107 107
108 108 def beginparentchange(self):
109 109 '''Marks the beginning of a set of changes that involve changing
110 110 the dirstate parents. If there is an exception during this time,
111 111 the dirstate will not be written when the wlock is released. This
112 112 prevents writing an incoherent dirstate where the parent doesn't
113 113 match the contents.
114 114 '''
115 115 self._parentwriters += 1
116 116
117 117 def endparentchange(self):
118 118 '''Marks the end of a set of changes that involve changing the
119 119 dirstate parents. Once all parent changes have been marked done,
120 120 the wlock will be free to write the dirstate on release.
121 121 '''
122 122 if self._parentwriters > 0:
123 123 self._parentwriters -= 1
124 124
125 125 def pendingparentchange(self):
126 126 '''Returns true if the dirstate is in the middle of a set of changes
127 127 that modify the dirstate parent.
128 128 '''
129 129 return self._parentwriters > 0
130 130
131 131 @propertycache
132 132 def _map(self):
133 133 '''Return the dirstate contents as a map from filename to
134 134 (state, mode, size, time).'''
135 135 self._read()
136 136 return self._map
137 137
138 138 @propertycache
139 139 def _copymap(self):
140 140 self._read()
141 141 return self._copymap
142 142
143 143 @propertycache
144 144 def _nonnormalset(self):
145 145 return nonnormalentries(self._map)
146 146
147 147 @propertycache
148 148 def _filefoldmap(self):
149 149 try:
150 150 makefilefoldmap = parsers.make_file_foldmap
151 151 except AttributeError:
152 152 pass
153 153 else:
154 154 return makefilefoldmap(self._map, util.normcasespec,
155 155 util.normcasefallback)
156 156
157 157 f = {}
158 158 normcase = util.normcase
159 159 for name, s in self._map.iteritems():
160 160 if s[0] != 'r':
161 161 f[normcase(name)] = name
162 162 f['.'] = '.' # prevents useless util.fspath() invocation
163 163 return f
164 164
165 165 @propertycache
166 166 def _dirfoldmap(self):
167 167 f = {}
168 168 normcase = util.normcase
169 169 for name in self._dirs:
170 170 f[normcase(name)] = name
171 171 return f
172 172
173 173 @repocache('branch')
174 174 def _branch(self):
175 175 try:
176 176 return self._opener.read("branch").strip() or "default"
177 177 except IOError as inst:
178 178 if inst.errno != errno.ENOENT:
179 179 raise
180 180 return "default"
181 181
182 182 @propertycache
183 183 def _pl(self):
184 184 try:
185 185 fp = self._opendirstatefile()
186 186 st = fp.read(40)
187 187 fp.close()
188 188 l = len(st)
189 189 if l == 40:
190 190 return st[:20], st[20:40]
191 191 elif l > 0 and l < 40:
192 192 raise error.Abort(_('working directory state appears damaged!'))
193 193 except IOError as err:
194 194 if err.errno != errno.ENOENT:
195 195 raise
196 196 return [nullid, nullid]
197 197
198 198 @propertycache
199 199 def _dirs(self):
200 200 return util.dirs(self._map, 'r')
201 201
202 202 def dirs(self):
203 203 return self._dirs
204 204
205 205 @rootcache('.hgignore')
206 206 def _ignore(self):
207 207 files = self._ignorefiles()
208 208 if not files:
209 209 return util.never
210 210
211 211 pats = ['include:%s' % f for f in files]
212 212 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
213 213
214 214 @propertycache
215 215 def _slash(self):
216 216 return self._ui.configbool('ui', 'slash') and os.sep != '/'
217 217
218 218 @propertycache
219 219 def _checklink(self):
220 220 return util.checklink(self._root)
221 221
222 222 @propertycache
223 223 def _checkexec(self):
224 224 return util.checkexec(self._root)
225 225
226 226 @propertycache
227 227 def _checkcase(self):
228 228 return not util.checkcase(self._join('.hg'))
229 229
230 230 def _join(self, f):
231 231 # much faster than os.path.join()
232 232 # it's safe because f is always a relative path
233 233 return self._rootdir + f
234 234
235 235 def flagfunc(self, buildfallback):
236 236 if self._checklink and self._checkexec:
237 237 def f(x):
238 238 try:
239 239 st = os.lstat(self._join(x))
240 240 if util.statislink(st):
241 241 return 'l'
242 242 if util.statisexec(st):
243 243 return 'x'
244 244 except OSError:
245 245 pass
246 246 return ''
247 247 return f
248 248
249 249 fallback = buildfallback()
250 250 if self._checklink:
251 251 def f(x):
252 252 if os.path.islink(self._join(x)):
253 253 return 'l'
254 254 if 'x' in fallback(x):
255 255 return 'x'
256 256 return ''
257 257 return f
258 258 if self._checkexec:
259 259 def f(x):
260 260 if 'l' in fallback(x):
261 261 return 'l'
262 262 if util.isexec(self._join(x)):
263 263 return 'x'
264 264 return ''
265 265 return f
266 266 else:
267 267 return fallback
268 268
269 269 @propertycache
270 270 def _cwd(self):
271 271 return os.getcwd()
272 272
273 273 def getcwd(self):
274 274 '''Return the path from which a canonical path is calculated.
275 275
276 276 This path should be used to resolve file patterns or to convert
277 277 canonical paths back to file paths for display. It shouldn't be
278 278 used to get real file paths. Use vfs functions instead.
279 279 '''
280 280 cwd = self._cwd
281 281 if cwd == self._root:
282 282 return ''
283 283 # self._root ends with a path separator if self._root is '/' or 'C:\'
284 284 rootsep = self._root
285 285 if not util.endswithsep(rootsep):
286 286 rootsep += os.sep
287 287 if cwd.startswith(rootsep):
288 288 return cwd[len(rootsep):]
289 289 else:
290 290 # we're outside the repo. return an absolute path.
291 291 return cwd
292 292
293 293 def pathto(self, f, cwd=None):
294 294 if cwd is None:
295 295 cwd = self.getcwd()
296 296 path = util.pathto(self._root, cwd, f)
297 297 if self._slash:
298 298 return util.pconvert(path)
299 299 return path
300 300
301 301 def __getitem__(self, key):
302 302 '''Return the current state of key (a filename) in the dirstate.
303 303
304 304 States are:
305 305 n normal
306 306 m needs merging
307 307 r marked for removal
308 308 a marked for addition
309 309 ? not tracked
310 310 '''
311 311 return self._map.get(key, ("?",))[0]
312 312
313 313 def __contains__(self, key):
314 314 return key in self._map
315 315
316 316 def __iter__(self):
317 317 for x in sorted(self._map):
318 318 yield x
319 319
320 320 def iteritems(self):
321 321 return self._map.iteritems()
322 322
323 323 def parents(self):
324 324 return [self._validate(p) for p in self._pl]
325 325
326 326 def p1(self):
327 327 return self._validate(self._pl[0])
328 328
329 329 def p2(self):
330 330 return self._validate(self._pl[1])
331 331
332 332 def branch(self):
333 333 return encoding.tolocal(self._branch)
334 334
335 335 def setparents(self, p1, p2=nullid):
336 336 """Set dirstate parents to p1 and p2.
337 337
338 338 When moving from two parents to one, 'm' merged entries a
339 339 adjusted to normal and previous copy records discarded and
340 340 returned by the call.
341 341
342 342 See localrepo.setparents()
343 343 """
344 344 if self._parentwriters == 0:
345 345 raise ValueError("cannot set dirstate parent without "
346 346 "calling dirstate.beginparentchange")
347 347
348 348 self._dirty = self._dirtypl = True
349 349 oldp2 = self._pl[1]
350 350 self._pl = p1, p2
351 351 copies = {}
352 352 if oldp2 != nullid and p2 == nullid:
353 353 for f, s in self._map.iteritems():
354 354 # Discard 'm' markers when moving away from a merge state
355 355 if s[0] == 'm':
356 356 if f in self._copymap:
357 357 copies[f] = self._copymap[f]
358 358 self.normallookup(f)
359 359 # Also fix up otherparent markers
360 360 elif s[0] == 'n' and s[2] == -2:
361 361 if f in self._copymap:
362 362 copies[f] = self._copymap[f]
363 363 self.add(f)
364 364 return copies
365 365
366 366 def setbranch(self, branch):
367 367 self._branch = encoding.fromlocal(branch)
368 368 f = self._opener('branch', 'w', atomictemp=True)
369 369 try:
370 370 f.write(self._branch + '\n')
371 371 f.close()
372 372
373 373 # make sure filecache has the correct stat info for _branch after
374 374 # replacing the underlying file
375 375 ce = self._filecache['_branch']
376 376 if ce:
377 377 ce.refresh()
378 378 except: # re-raises
379 379 f.discard()
380 380 raise
381 381
382 382 def _opendirstatefile(self):
383 383 fp, mode = _trypending(self._root, self._opener, self._filename)
384 384 if self._pendingmode is not None and self._pendingmode != mode:
385 385 fp.close()
386 386 raise error.Abort(_('working directory state may be '
387 387 'changed parallelly'))
388 388 self._pendingmode = mode
389 389 return fp
390 390
391 391 def _read(self):
392 392 self._map = {}
393 393 self._copymap = {}
394 394 try:
395 395 fp = self._opendirstatefile()
396 396 try:
397 397 st = fp.read()
398 398 finally:
399 399 fp.close()
400 400 except IOError as err:
401 401 if err.errno != errno.ENOENT:
402 402 raise
403 403 return
404 404 if not st:
405 405 return
406 406
407 407 if util.safehasattr(parsers, 'dict_new_presized'):
408 408 # Make an estimate of the number of files in the dirstate based on
409 409 # its size. From a linear regression on a set of real-world repos,
410 410 # all over 10,000 files, the size of a dirstate entry is 85
411 411 # bytes. The cost of resizing is significantly higher than the cost
412 412 # of filling in a larger presized dict, so subtract 20% from the
413 413 # size.
414 414 #
415 415 # This heuristic is imperfect in many ways, so in a future dirstate
416 416 # format update it makes sense to just record the number of entries
417 417 # on write.
418 418 self._map = parsers.dict_new_presized(len(st) / 71)
419 419
420 420 # Python's garbage collector triggers a GC each time a certain number
421 421 # of container objects (the number being defined by
422 422 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
423 423 # for each file in the dirstate. The C version then immediately marks
424 424 # them as not to be tracked by the collector. However, this has no
425 425 # effect on when GCs are triggered, only on what objects the GC looks
426 426 # into. This means that O(number of files) GCs are unavoidable.
427 427 # Depending on when in the process's lifetime the dirstate is parsed,
428 428 # this can get very expensive. As a workaround, disable GC while
429 429 # parsing the dirstate.
430 430 #
431 431 # (we cannot decorate the function directly since it is in a C module)
432 432 parse_dirstate = util.nogc(parsers.parse_dirstate)
433 433 p = parse_dirstate(self._map, self._copymap, st)
434 434 if not self._dirtypl:
435 435 self._pl = p
436 436
437 437 def invalidate(self):
438 438 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
439 439 "_pl", "_dirs", "_ignore", "_nonnormalset"):
440 440 if a in self.__dict__:
441 441 delattr(self, a)
442 442 self._lastnormaltime = 0
443 443 self._dirty = False
444 444 self._parentwriters = 0
445 445
446 446 def copy(self, source, dest):
447 447 """Mark dest as a copy of source. Unmark dest if source is None."""
448 448 if source == dest:
449 449 return
450 450 self._dirty = True
451 451 if source is not None:
452 452 self._copymap[dest] = source
453 453 elif dest in self._copymap:
454 454 del self._copymap[dest]
455 455
456 456 def copied(self, file):
457 457 return self._copymap.get(file, None)
458 458
459 459 def copies(self):
460 460 return self._copymap
461 461
462 462 def _droppath(self, f):
463 463 if self[f] not in "?r" and "_dirs" in self.__dict__:
464 464 self._dirs.delpath(f)
465 465
466 466 if "_filefoldmap" in self.__dict__:
467 467 normed = util.normcase(f)
468 468 if normed in self._filefoldmap:
469 469 del self._filefoldmap[normed]
470 470
471 471 def _addpath(self, f, state, mode, size, mtime):
472 472 oldstate = self[f]
473 473 if state == 'a' or oldstate == 'r':
474 474 scmutil.checkfilename(f)
475 475 if f in self._dirs:
476 476 raise error.Abort(_('directory %r already in dirstate') % f)
477 477 # shadows
478 478 for d in util.finddirs(f):
479 479 if d in self._dirs:
480 480 break
481 481 if d in self._map and self[d] != 'r':
482 482 raise error.Abort(
483 483 _('file %r in dirstate clashes with %r') % (d, f))
484 484 if oldstate in "?r" and "_dirs" in self.__dict__:
485 485 self._dirs.addpath(f)
486 486 self._dirty = True
487 487 self._map[f] = dirstatetuple(state, mode, size, mtime)
488 488 if state != 'n' or mtime == -1:
489 489 self._nonnormalset.add(f)
490 490
491 491 def normal(self, f):
492 492 '''Mark a file normal and clean.'''
493 493 s = os.lstat(self._join(f))
494 494 mtime = s.st_mtime
495 495 self._addpath(f, 'n', s.st_mode,
496 496 s.st_size & _rangemask, mtime & _rangemask)
497 497 if f in self._copymap:
498 498 del self._copymap[f]
499 499 if f in self._nonnormalset:
500 500 self._nonnormalset.remove(f)
501 501 if mtime > self._lastnormaltime:
502 502 # Remember the most recent modification timeslot for status(),
503 503 # to make sure we won't miss future size-preserving file content
504 504 # modifications that happen within the same timeslot.
505 505 self._lastnormaltime = mtime
506 506
507 507 def normallookup(self, f):
508 508 '''Mark a file normal, but possibly dirty.'''
509 509 if self._pl[1] != nullid and f in self._map:
510 510 # if there is a merge going on and the file was either
511 511 # in state 'm' (-1) or coming from other parent (-2) before
512 512 # being removed, restore that state.
513 513 entry = self._map[f]
514 514 if entry[0] == 'r' and entry[2] in (-1, -2):
515 515 source = self._copymap.get(f)
516 516 if entry[2] == -1:
517 517 self.merge(f)
518 518 elif entry[2] == -2:
519 519 self.otherparent(f)
520 520 if source:
521 521 self.copy(source, f)
522 522 return
523 523 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
524 524 return
525 525 self._addpath(f, 'n', 0, -1, -1)
526 526 if f in self._copymap:
527 527 del self._copymap[f]
528 528 if f in self._nonnormalset:
529 529 self._nonnormalset.remove(f)
530 530
531 531 def otherparent(self, f):
532 532 '''Mark as coming from the other parent, always dirty.'''
533 533 if self._pl[1] == nullid:
534 534 raise error.Abort(_("setting %r to other parent "
535 535 "only allowed in merges") % f)
536 536 if f in self and self[f] == 'n':
537 537 # merge-like
538 538 self._addpath(f, 'm', 0, -2, -1)
539 539 else:
540 540 # add-like
541 541 self._addpath(f, 'n', 0, -2, -1)
542 542
543 543 if f in self._copymap:
544 544 del self._copymap[f]
545 545
546 546 def add(self, f):
547 547 '''Mark a file added.'''
548 548 self._addpath(f, 'a', 0, -1, -1)
549 549 if f in self._copymap:
550 550 del self._copymap[f]
551 551
552 552 def remove(self, f):
553 553 '''Mark a file removed.'''
554 554 self._dirty = True
555 555 self._droppath(f)
556 556 size = 0
557 557 if self._pl[1] != nullid and f in self._map:
558 558 # backup the previous state
559 559 entry = self._map[f]
560 560 if entry[0] == 'm': # merge
561 561 size = -1
562 562 elif entry[0] == 'n' and entry[2] == -2: # other parent
563 563 size = -2
564 564 self._map[f] = dirstatetuple('r', 0, size, 0)
565 565 self._nonnormalset.add(f)
566 566 if size == 0 and f in self._copymap:
567 567 del self._copymap[f]
568 568
569 569 def merge(self, f):
570 570 '''Mark a file merged.'''
571 571 if self._pl[1] == nullid:
572 572 return self.normallookup(f)
573 573 return self.otherparent(f)
574 574
575 575 def drop(self, f):
576 576 '''Drop a file from the dirstate'''
577 577 if f in self._map:
578 578 self._dirty = True
579 579 self._droppath(f)
580 580 del self._map[f]
581 581 if f in self._nonnormalset:
582 582 self._nonnormalset.remove(f)
583 583
584 584 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
585 585 if exists is None:
586 586 exists = os.path.lexists(os.path.join(self._root, path))
587 587 if not exists:
588 588 # Maybe a path component exists
589 589 if not ignoremissing and '/' in path:
590 590 d, f = path.rsplit('/', 1)
591 591 d = self._normalize(d, False, ignoremissing, None)
592 592 folded = d + "/" + f
593 593 else:
594 594 # No path components, preserve original case
595 595 folded = path
596 596 else:
597 597 # recursively normalize leading directory components
598 598 # against dirstate
599 599 if '/' in normed:
600 600 d, f = normed.rsplit('/', 1)
601 601 d = self._normalize(d, False, ignoremissing, True)
602 602 r = self._root + "/" + d
603 603 folded = d + "/" + util.fspath(f, r)
604 604 else:
605 605 folded = util.fspath(normed, self._root)
606 606 storemap[normed] = folded
607 607
608 608 return folded
609 609
610 610 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
611 611 normed = util.normcase(path)
612 612 folded = self._filefoldmap.get(normed, None)
613 613 if folded is None:
614 614 if isknown:
615 615 folded = path
616 616 else:
617 617 folded = self._discoverpath(path, normed, ignoremissing, exists,
618 618 self._filefoldmap)
619 619 return folded
620 620
621 621 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
622 622 normed = util.normcase(path)
623 623 folded = self._filefoldmap.get(normed, None)
624 624 if folded is None:
625 625 folded = self._dirfoldmap.get(normed, None)
626 626 if folded is None:
627 627 if isknown:
628 628 folded = path
629 629 else:
630 630 # store discovered result in dirfoldmap so that future
631 631 # normalizefile calls don't start matching directories
632 632 folded = self._discoverpath(path, normed, ignoremissing, exists,
633 633 self._dirfoldmap)
634 634 return folded
635 635
636 636 def normalize(self, path, isknown=False, ignoremissing=False):
637 637 '''
638 638 normalize the case of a pathname when on a casefolding filesystem
639 639
640 640 isknown specifies whether the filename came from walking the
641 641 disk, to avoid extra filesystem access.
642 642
643 643 If ignoremissing is True, missing path are returned
644 644 unchanged. Otherwise, we try harder to normalize possibly
645 645 existing path components.
646 646
647 647 The normalized case is determined based on the following precedence:
648 648
649 649 - version of name already stored in the dirstate
650 650 - version of name stored on disk
651 651 - version provided via command arguments
652 652 '''
653 653
654 654 if self._checkcase:
655 655 return self._normalize(path, isknown, ignoremissing)
656 656 return path
657 657
658 658 def clear(self):
659 659 self._map = {}
660 660 self._nonnormalset = set()
661 661 if "_dirs" in self.__dict__:
662 662 delattr(self, "_dirs")
663 663 self._copymap = {}
664 664 self._pl = [nullid, nullid]
665 665 self._lastnormaltime = 0
666 666 self._dirty = True
667 667
668 668 def rebuild(self, parent, allfiles, changedfiles=None):
669 669 if changedfiles is None:
670 670 # Rebuild entire dirstate
671 671 changedfiles = allfiles
672 672 lastnormaltime = self._lastnormaltime
673 673 self.clear()
674 674 self._lastnormaltime = lastnormaltime
675 675
676 676 for f in changedfiles:
677 677 mode = 0o666
678 678 if f in allfiles and 'x' in allfiles.flags(f):
679 679 mode = 0o777
680 680
681 681 if f in allfiles:
682 682 self._map[f] = dirstatetuple('n', mode, -1, 0)
683 683 else:
684 684 self._map.pop(f, None)
685 685 if f in self._nonnormalset:
686 686 self._nonnormalset.remove(f)
687 687
688 688 self._pl = (parent, nullid)
689 689 self._dirty = True
690 690
691 691 def write(self, tr=False):
692 692 if not self._dirty:
693 693 return
694 694
695 695 filename = self._filename
696 696 if tr is False: # not explicitly specified
697 697 self._ui.develwarn('use dirstate.write with '
698 698 'repo.currenttransaction()',
699 699 config='check-dirstate-write')
700 700
701 701 if self._opener.lexists(self._pendingfilename):
702 702 # if pending file already exists, in-memory changes
703 703 # should be written into it, because it has priority
704 704 # to '.hg/dirstate' at reading under HG_PENDING mode
705 705 filename = self._pendingfilename
706 706 elif tr:
707 707 # 'dirstate.write()' is not only for writing in-memory
708 708 # changes out, but also for dropping ambiguous timestamp.
709 709 # delayed writing re-raise "ambiguous timestamp issue".
710 710 # See also the wiki page below for detail:
711 711 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
712 712
713 713 # emulate dropping timestamp in 'parsers.pack_dirstate'
714 714 now = _getfsnow(self._opener)
715 715 dmap = self._map
716 716 for f, e in dmap.iteritems():
717 717 if e[0] == 'n' and e[3] == now:
718 718 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
719 719 self._nonnormalset.add(f)
720 720
721 721 # emulate that all 'dirstate.normal' results are written out
722 722 self._lastnormaltime = 0
723 723
724 724 # delay writing in-memory changes out
725 725 tr.addfilegenerator('dirstate', (self._filename,),
726 726 self._writedirstate, location='plain')
727 727 return
728 728
729 729 st = self._opener(filename, "w", atomictemp=True)
730 730 self._writedirstate(st)
731 731
732 732 def _writedirstate(self, st):
733 733 # use the modification time of the newly created temporary file as the
734 734 # filesystem's notion of 'now'
735 735 now = util.fstat(st).st_mtime & _rangemask
736 736
737 737 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
738 738 # timestamp of each entries in dirstate, because of 'now > mtime'
739 739 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
740 740 if delaywrite > 0:
741 741 # do we have any files to delay for?
742 742 for f, e in self._map.iteritems():
743 743 if e[0] == 'n' and e[3] == now:
744 744 import time # to avoid useless import
745 745 # rather than sleep n seconds, sleep until the next
746 746 # multiple of n seconds
747 747 clock = time.time()
748 748 start = int(clock) - (int(clock) % delaywrite)
749 749 end = start + delaywrite
750 750 time.sleep(end - clock)
751 751 break
752 752
753 753 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
754 754 self._nonnormalset = nonnormalentries(self._map)
755 755 st.close()
756 756 self._lastnormaltime = 0
757 757 self._dirty = self._dirtypl = False
758 758
759 759 def _dirignore(self, f):
760 760 if f == '.':
761 761 return False
762 762 if self._ignore(f):
763 763 return True
764 764 for p in util.finddirs(f):
765 765 if self._ignore(p):
766 766 return True
767 767 return False
768 768
769 769 def _ignorefiles(self):
770 770 files = []
771 771 if os.path.exists(self._join('.hgignore')):
772 772 files.append(self._join('.hgignore'))
773 773 for name, path in self._ui.configitems("ui"):
774 774 if name == 'ignore' or name.startswith('ignore.'):
775 775 # we need to use os.path.join here rather than self._join
776 776 # because path is arbitrary and user-specified
777 777 files.append(os.path.join(self._rootdir, util.expandpath(path)))
778 778 return files
779 779
780 780 def _ignorefileandline(self, f):
781 781 files = collections.deque(self._ignorefiles())
782 782 visited = set()
783 783 while files:
784 784 i = files.popleft()
785 785 patterns = matchmod.readpatternfile(i, self._ui.warn,
786 786 sourceinfo=True)
787 787 for pattern, lineno, line in patterns:
788 788 kind, p = matchmod._patsplit(pattern, 'glob')
789 789 if kind == "subinclude":
790 790 if p not in visited:
791 791 files.append(p)
792 792 continue
793 793 m = matchmod.match(self._root, '', [], [pattern],
794 794 warn=self._ui.warn)
795 795 if m(f):
796 796 return (i, lineno, line)
797 797 visited.add(i)
798 798 return (None, -1, "")
799 799
800 800 def _walkexplicit(self, match, subrepos):
801 801 '''Get stat data about the files explicitly specified by match.
802 802
803 803 Return a triple (results, dirsfound, dirsnotfound).
804 804 - results is a mapping from filename to stat result. It also contains
805 805 listings mapping subrepos and .hg to None.
806 806 - dirsfound is a list of files found to be directories.
807 807 - dirsnotfound is a list of files that the dirstate thinks are
808 808 directories and that were not found.'''
809 809
810 810 def badtype(mode):
811 811 kind = _('unknown')
812 812 if stat.S_ISCHR(mode):
813 813 kind = _('character device')
814 814 elif stat.S_ISBLK(mode):
815 815 kind = _('block device')
816 816 elif stat.S_ISFIFO(mode):
817 817 kind = _('fifo')
818 818 elif stat.S_ISSOCK(mode):
819 819 kind = _('socket')
820 820 elif stat.S_ISDIR(mode):
821 821 kind = _('directory')
822 822 return _('unsupported file type (type is %s)') % kind
823 823
824 824 matchedir = match.explicitdir
825 825 badfn = match.bad
826 826 dmap = self._map
827 827 lstat = os.lstat
828 828 getkind = stat.S_IFMT
829 829 dirkind = stat.S_IFDIR
830 830 regkind = stat.S_IFREG
831 831 lnkkind = stat.S_IFLNK
832 832 join = self._join
833 833 dirsfound = []
834 834 foundadd = dirsfound.append
835 835 dirsnotfound = []
836 836 notfoundadd = dirsnotfound.append
837 837
838 838 if not match.isexact() and self._checkcase:
839 839 normalize = self._normalize
840 840 else:
841 841 normalize = None
842 842
843 843 files = sorted(match.files())
844 844 subrepos.sort()
845 845 i, j = 0, 0
846 846 while i < len(files) and j < len(subrepos):
847 847 subpath = subrepos[j] + "/"
848 848 if files[i] < subpath:
849 849 i += 1
850 850 continue
851 851 while i < len(files) and files[i].startswith(subpath):
852 852 del files[i]
853 853 j += 1
854 854
855 855 if not files or '.' in files:
856 856 files = ['.']
857 857 results = dict.fromkeys(subrepos)
858 858 results['.hg'] = None
859 859
860 860 alldirs = None
861 861 for ff in files:
862 862 # constructing the foldmap is expensive, so don't do it for the
863 863 # common case where files is ['.']
864 864 if normalize and ff != '.':
865 865 nf = normalize(ff, False, True)
866 866 else:
867 867 nf = ff
868 868 if nf in results:
869 869 continue
870 870
871 871 try:
872 872 st = lstat(join(nf))
873 873 kind = getkind(st.st_mode)
874 874 if kind == dirkind:
875 875 if nf in dmap:
876 876 # file replaced by dir on disk but still in dirstate
877 877 results[nf] = None
878 878 if matchedir:
879 879 matchedir(nf)
880 880 foundadd((nf, ff))
881 881 elif kind == regkind or kind == lnkkind:
882 882 results[nf] = st
883 883 else:
884 884 badfn(ff, badtype(kind))
885 885 if nf in dmap:
886 886 results[nf] = None
887 887 except OSError as inst: # nf not found on disk - it is dirstate only
888 888 if nf in dmap: # does it exactly match a missing file?
889 889 results[nf] = None
890 890 else: # does it match a missing directory?
891 891 if alldirs is None:
892 892 alldirs = util.dirs(dmap)
893 893 if nf in alldirs:
894 894 if matchedir:
895 895 matchedir(nf)
896 896 notfoundadd(nf)
897 897 else:
898 898 badfn(ff, inst.strerror)
899 899
900 900 # Case insensitive filesystems cannot rely on lstat() failing to detect
901 901 # a case-only rename. Prune the stat object for any file that does not
902 902 # match the case in the filesystem, if there are multiple files that
903 903 # normalize to the same path.
904 904 if match.isexact() and self._checkcase:
905 905 normed = {}
906 906
907 907 for f, st in results.iteritems():
908 908 if st is None:
909 909 continue
910 910
911 911 nc = util.normcase(f)
912 912 paths = normed.get(nc)
913 913
914 914 if paths is None:
915 915 paths = set()
916 916 normed[nc] = paths
917 917
918 918 paths.add(f)
919 919
920 920 for norm, paths in normed.iteritems():
921 921 if len(paths) > 1:
922 922 for path in paths:
923 923 folded = self._discoverpath(path, norm, True, None,
924 924 self._dirfoldmap)
925 925 if path != folded:
926 926 results[path] = None
927 927
928 928 return results, dirsfound, dirsnotfound
929 929
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        The walk proceeds in three steps: (1) resolve the files/dirs the
        matcher names explicitly, (2) recurse into directories found in
        step 1, (3) sweep any dirstate entries not seen on disk yet.
        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select the ignore predicates for this walk: listing ignored files
        # disables ignoring entirely; listing neither unknown nor ignored
        # lets us ignore everything untracked (and skip step 2 below).
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Hoist hot attribute lookups into locals -- the traversal below
        # touches these once per directory entry.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        # On case-insensitive filesystems we must normalize names before
        # comparing against the dirstate (and can't skip step 3).
        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Iterative depth-first descent: 'work' is the stack of
            # still-unvisited directories.
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # Unreadable/vanished directories are reported through
                    # the matcher's bad() callback, not raised.
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            # a tracked name may exist on disk as a directory
                            # now; record it with no stat info
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # tracked name exists as something other than a
                            # regular file/symlink/dir (e.g. fifo)
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # Never report subrepo roots or the repository metadata directory;
        # the unconditional del implies step 1 always produced '.hg'.
        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results
1091 1091
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

        unsure:
          files that might have been modified since the dirstate was
          written, but need to be read to be sure (size is the same
          but mtime differs)
        status.modified:
          files that have definitely been modified since the dirstate
          was written (different size or mode)
        status.clean:
          files that have definitely not been modified since the
          dirstate was written
        '''
        # Stash the boolean request flags first: the parameter names are
        # deliberately reused below as result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # Bind hot attributes and bound methods to locals -- the loop below
        # runs once per file in the working copy.
        dmap = self._map
        ladd = lookup.append            # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                # untracked file: classify as ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in "nma":
                # tracked in the dirstate but walk() produced no stat result:
                # the file is gone from disk
                dadd(fn)
            elif state == 'n':
                # "normal" entry: compare recorded size/mode/mtime against the
                # on-disk stat to decide modified / unsure / clean
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1183 1183
1184 1184 def matches(self, match):
1185 1185 '''
1186 1186 return files in the dirstate (in whatever state) filtered by match
1187 1187 '''
1188 1188 dmap = self._map
1189 1189 if match.always():
1190 1190 return dmap.keys()
1191 1191 files = match.files()
1192 1192 if match.isexact():
1193 1193 # fast path -- filter the other way around, since typically files is
1194 1194 # much smaller than dmap
1195 1195 return [f for f in files if f in dmap]
1196 1196 if match.prefix() and all(fn in dmap for fn in files):
1197 1197 # fast path -- all the values are known to be files, so just return
1198 1198 # that
1199 1199 return list(files)
1200 1200 return [f for f in dmap if match(f)]
1201 1201
1202 1202 def _actualfilename(self, tr):
1203 1203 if tr:
1204 1204 return self._pendingfilename
1205 1205 else:
1206 1206 return self._filename
1207 1207
    def savebackup(self, tr, suffix):
        '''Save current dirstate into backup file with suffix

        tr is the currently running transaction, or None; suffix is
        appended to the (possibly pending) dirstate filename to form
        the backup file name.
        '''
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        self._writedirstate(self._opener(filename, "w", atomictemp=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        # finally copy the freshly written dirstate to its backup name
        self._opener.write(filename + suffix, self._opener.tryread(filename))
1231 def _restorebackup(self, tr, suffix):
1231 def restorebackup(self, tr, suffix):
1232 1232 '''Restore dirstate by backup file with suffix'''
1233 1233 # this "invalidate()" prevents "wlock.release()" from writing
1234 1234 # changes of dirstate out after restoring from backup file
1235 1235 self.invalidate()
1236 1236 filename = self._actualfilename(tr)
1237 1237 self._opener.rename(filename + suffix, filename)
1238 1238
1239 def _clearbackup(self, tr, suffix):
1239 def clearbackup(self, tr, suffix):
1240 1240 '''Clear backup file with suffix'''
1241 1241 filename = self._actualfilename(tr)
1242 1242 self._opener.unlink(filename + suffix)
General Comments 0
You need to be logged in to leave comments. Login now