##// END OF EJS Templates
manifest: remove manifest.find...
Durham Goode -
r30340:608ba935 default
parent child Browse files
Show More
@@ -1,3574 +1,3577 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import sys
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 )
24 24
25 25 from . import (
26 26 bookmarks,
27 27 changelog,
28 28 copies,
29 29 crecord as crecordmod,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 graphmod,
34 34 lock as lockmod,
35 35 match as matchmod,
36 36 obsolete,
37 37 patch,
38 38 pathutil,
39 39 phases,
40 40 repair,
41 41 revlog,
42 42 revset,
43 43 scmutil,
44 44 templatekw,
45 45 templater,
46 46 util,
47 47 )
48 48 stringio = util.stringio
49 49
def ishunk(x):
    """Return True if x is a patch hunk (curses or plain record flavor)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
53 53
def newandmodified(chunks, originalchunks):
    """Return filenames of hunks that create a new file and were modified
    (i.e. are not present verbatim in originalchunks)."""
    touched = set()
    for c in chunks:
        if (ishunk(c) and c.header.isnewfile()
                and c not in originalchunks):
            touched.add(c.header.filename())
    return touched
61 61
def parsealiases(cmd):
    """Split a command table key like "^log|history" into its alias list.

    The leading "^" (marking a command as shown in short help) is stripped.
    """
    names = cmd.lstrip("^")
    return names.split("|")
64 64
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original ui.write so the caller can restore it afterwards.
    """
    oldwrite = ui.write

    def wrapped(*args, **kwargs):
        label = kwargs.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + l)

    setattr(ui, 'write', wrapped)
    return oldwrite
77 77
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Run the hunk-selection UI over originalhunks.

    Uses the curses interface when usecurses is true (optionally driven by
    a test script file); otherwise falls back to the plain text prompter.
    """
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)
    if testfile:
        recordfn = crecordmod.testdecorator(testfile,
                                            crecordmod.testchunkselector)
    else:
        recordfn = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, recordfn)
90 90
def recordfilter(ui, originalhunks, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used to build ui messages to indicate to the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest', None)
    # temporarily wrap ui.write so diff output shown during selection is
    # labeled/colorized; the original write is restored in the finally below
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
                                          testfile, operation)
    finally:
        ui.write = oldwrite
    return newchunks, newopts
107 107
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
            filterfn, *pats, **opts):
    """Interactively select changes and commit them via 'commitfunc'.

    'filterfn' performs the hunk selection; 'cmdsuggest' is the command
    name suggested when the ui is non-interactive; 'backupall' backs up
    every changed file instead of only those actually touched by the
    selection.
    """
    from . import merge as mergemod
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # build a git-style diff of the working directory so new files and
        # renames show up as selectable hunks
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers have files(); bare hunks do not
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                    newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # serialize only the selected hunks of backed-up files into a
            # patch we can apply after reverting the working copies
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        extra={"suffix": ".diff"})
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            # newly-added files were backed up above; remove the working
            # copies so the filtered patch can recreate them
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                        False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup: a missing backup must not mask the
                # commit result (or the original exception)
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
288 288
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for key in keys:
        aliases = parsealiases(key)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        elif strict:
            found = None
        else:
            # accept an unambiguous prefix of any alias
            found = next((a for a in aliases if a.startswith(cmd)), None)
        if found is None:
            continue
        if aliases[0].startswith("debug") or found.startswith("debug"):
            debugchoice[found] = (aliases, table[key])
        else:
            choice[found] = (aliases, table[key])

    # fall back to debug commands only when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
326 326
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
343 343
def findrepo(p):
    """Walk upward from p looking for a directory containing ".hg".

    Returns the repository root, or None when the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() is a fixed point: we hit the filesystem root
            return None
        p = parent
    return p
351 351
def bailifchanged(repo, merge=True):
    """Abort if the working directory has uncommitted changes.

    With merge=True, an in-progress (uncommitted) merge also aborts.
    Subrepos are checked recursively.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'))
    # modified, added, removed, deleted
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged()
361 361
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if logfile and not message:
        try:
            if logfile == '-':
                # read the message from stdin
                message = ui.fin.read()
            else:
                # normalize line endings while reading the file
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, inst.strerror))
    return message
380 380
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
397 397
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcingeditor(r, c, s):
            return commitforceeditor(r, c, s, finishdesc=finishdesc,
                                     extramsg=extramsg, editform=editform)
        return forcingeditor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
428 428
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        # absent, empty, or zero all mean "no limit"
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
442 442
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output filename pattern.

    Supported escapes (each available only when the corresponding
    argument is provided): %% literal percent, %b repo basename,
    %H/%h/%R/%r node forms, %m sanitized desc, %N/%n total/sequence
    number, %s/%d/%p basename/dirname/full pathname.

    Raises error.Abort on an escape that is not available.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: str(repo.changelog.rev(node)),
                'h': lambda: short(node),
                'm': lambda: re.sub('[^\w]', '_', str(desc)),
                'r': lambda: str(repo.changelog.rev(node)).zfill(
                    revwidth or 0),
            })
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # zero-pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        pieces = []
        i = 0
        end = len(pat)
        while i < end:
            ch = pat[i]
            if ch == '%':
                i += 1
                ch = expander[pat[i]]()
            pieces.append(ch)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename")
                          % inst.args[0])
488 488
class _unclosablefile(object):
    """File-object proxy whose close() is a no-op.

    Used by makefileobj to hand out the ui's stdio streams without
    letting callers (including 'with' blocks) close them.
    """
    def __init__(self, fp):
        self._fp = fp

    def close(self):
        # deliberately ignore close requests
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        # delegate everything else (read, write, flush, ...) to the
        # wrapped file object
        return getattr(self._fp, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # do not close on context-manager exit either
        pass
507 507
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return a file-like object for the output pattern 'pat'.

    An empty pattern or '-' maps to the ui's stdio (wrapped so it cannot
    be closed); an already-open file object with a suitable method is
    returned as-is; otherwise the pattern is expanded via makefilename
    and opened, with 'modemap' switching subsequent writes to append.
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(fp)
    if writable and util.safehasattr(pat, 'write'):
        return pat
    if 'r' in mode and util.safehasattr(pat, 'read'):
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first write truncates; later writes to the same file append
            modemap[fn] = 'ab'
    return open(fn, mode)
530 530
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']

    # validate option combinations before touching the repo
    problem = None
    if cl and mf:
        problem = _('cannot specify --changelog and --manifest at the same '
                    'time')
    elif cl and dir:
        problem = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            problem = _('cannot specify filename with --changelog or '
                        '--manifest')
        elif not repo:
            problem = _('cannot specify --changelog or --manifest or --dir '
                        'without a repository')
    if problem:
        raise error.Abort(problem)

    rlog = None
    if repo:
        if cl:
            rlog = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dlog = repo.manifest.dirlog(dir)
            if len(dlog):
                rlog = dlog
        elif mf:
            rlog = repo.manifest
        elif file_:
            flog = repo.file(file_)
            if len(flog):
                rlog = flog
    if not rlog:
        # fall back to opening the revlog file directly (e.g. outside a repo)
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                             file_[:-2] + ".i")
    return rlog
575 575
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files matching 'pats' to the last
    pattern, which names the destination.  Returns True if any file failed.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into a list of (abs, rel, exact) sources,
        # warning about (and skipping) unmanaged or removed files
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy/rename one file; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                # case-only rename: same file under a normalized name
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # rename through a temp name so the case change sticks
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many already-existing targets this strip
                    # length would produce; higher score wins
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
821 821
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With --daemon, re-runs the current command detached (passing
    --daemon-postexec so the child signals startup by removing a lock
    file); the parent then returns parentfn(pid) if given.  In the child
    (or foreground), runs initfn, writes the pid file, performs any
    daemon_postexec instructions (detach, chdir, stdio redirection to
    logfile or devnull), then calls runfn.
    '''

    def writepid(pid):
        # record our pid in the configured pid file (append vs overwrite)
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_postexec']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-postexec=unlink:%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child startup is complete once it unlinks the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise error.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(util.getpid())

    if opts['daemon_postexec']:
        try:
            os.setsid()
        except AttributeError:
            # not available on this platform (e.g. Windows)
            pass
        for inst in opts['daemon_postexec']:
            if inst.startswith('unlink:'):
                lockpath = inst[7:]
                os.unlink(lockpath)
            elif inst.startswith('chdir:'):
                os.chdir(inst[6:])
            elif inst != 'none':
                raise error.Abort(_('invalid value for --daemon-postexec: %s')
                                  % inst)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # redirect stdio: stdin to devnull, stdout/stderr to the log file
        # (or devnull when no logfile was given)
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
908 908
## facility to let extensions process additional data into an import patch
# lists of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass an ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
929 929
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
        (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
        updatefunc(<repo>, <node>)

    Returns a (summary message, committed node, rejects flag) tuple.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    # command line options take precedence over values parsed from the
    # patch header
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # patch.extract found no patch content to apply
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply the patch to the working directory
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                # with --partial a failed hunk only produces reject files
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                try:
                    if partial:
                        # a partial import may apply no hunk at all; still
                        # record a changeset
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                    for idfunc in extrapostimport:
                        extrapostimportmap[idfunc](repo[n])
                finally:
                    repo.ui.restoreconfig(allowemptyback)
        else:
            # --bypass: commit straight to the store without touching the
            # working directory
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            user,
                                            date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1099 1099
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# the function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1107 1107
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    When fp is None and template is non-empty, one output file per
    changeset is opened via makefileobj; when both are unset, output goes
    through repo.ui.write. switch_parent reverses the parent order so the
    diff is taken against the second parent when one exists. opts are
    diff options forwarded to patch.diffui; match restricts the files
    included in each diff.
    '''

    total = len(revs)
    # width of the longest revision number, forwarded to makefileobj for
    # file name expansion
    revwidth = max([len(str(rev)) for rev in revs])
    filemode = {}

    def single(rev, seqno, fp):
        # write one changeset (header, description, diff) to fp
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            shouldclose = True
        if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        # let extensions contribute extra header lines
        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    # seqno is 1-based for file name expansion
    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1172 1172
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    Writes to fp when given, otherwise to ui. When stat is true a
    diffstat summary is produced instead of a full diff. root restricts
    the diff to paths under that (repo-relative) directory; with
    listsubrepos, diffs of matching subrepos are appended as well.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat needs no context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1230 1230
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # rev -> buffered header/body text (only used when buffered)
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        '''write buffered output for ctx; returns 1 if anything was shown'''
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            # avoid repeating an identical header for consecutive changesets
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        '''write the accumulated footer, if any'''
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        '''display ctx, buffering the output when requested'''
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # modified, added, removed relative to the first parent
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        '''write the diffstat and/or diff for ctx as configured in diffopts'''
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write("\n")
1400 1400
class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # true until the first entry has been written; controls the opening
        # '[' versus the ',' separator between array entries
        self._first = True

    def close(self):
        # terminate the JSON array; an empty result set prints '[]'
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            # working-directory context: serialize rev/node as JSON null
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write(('\n "rev": %s') % jrev)
            self.ui.write((',\n "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n "rev": %s') % jrev)
        self.ui.write((',\n "node": %s') % jnode)
        self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n "manifest": %s') % jmanifestnode)

            self.ui.write((',\n "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # status relative to the first parent: (modified, added, removed)
            files = ctx.p1().status(ctx)
            self.ui.write((',\n "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write((',\n "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # capture diffordiffstat output so it can be JSON-escaped
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1499 1499
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # full nodes in debug mode, 12-digit short form otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        filters = {'formatnode': formatnode}
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        # a template spec and a map file are mutually exclusive
        assert not (tmpl and mapfile)
        if mapfile:
            self.t = templater.templater.frommapfile(mapfile, filters=filters,
                                                     cache=defaulttempl)
        else:
            self.t = formatter.maketemplater(ui, 'changeset', tmpl,
                                             filters=filters,
                                             cache=defaulttempl)

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        # later (more specific) modes override earlier ones
        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        # append the document footer before changeset_printer emits it
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts['changeset']
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1584 1584
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.

    Returns a (template, mapfile) pair; at most one element is set.
    """
    if not tmpl and not style:
        # nothing requested explicitly: consult ui configuration
        # (an explicit template is stronger than a style)
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            return templater.unquotestring(tmpl), None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if tmpl:
        return formatter.lookuptemplate(ui, 'changeset', tmpl)

    if style:
        mapfile = style
        # a bare name refers to a bundled map-cmdline.<style> file
        if not os.path.split(mapfile)[0]:
            found = (templater.templatepath('map-cmdline.' + mapfile)
                     or templater.templatepath(mapfile))
            if found:
                mapfile = found
        return None, mapfile

    return None, None
1611 1611
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a matcher is only needed when a patch or diffstat will be shown
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)
    else:
        matchfn = None

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
    if tmpl or mapfile:
        return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                   buffered)
    return changeset_printer(ui, repo, matchfn, opts, buffered)
1637 1637
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('precnode', '%s ', hex(marker.precnode()))
    successors = marker.succnodes()
    fm.condwrite(successors, 'succnodes', '%s ',
                 fm.formatlist([hex(s) for s in successors], name='node'))
    fm.write('flag', '%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist([hex(p) for p in parentnodes], name='node',
                               sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the date is shown separately above, so drop it from the metadata dict
    metadata = marker.metadata().copy()
    metadata.pop('date', None)
    fm.write('metadata', '{%s}',
             fm.formatdict(metadata, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1658 1658
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    matched = {}

    def prep(ctx, fns):
        # record every revision whose commit timestamp satisfies the spec
        ctxdate = ctx.date()
        if datematch(ctxdate[0]):
            matched[ctx.rev()] = ctxdate

    # report the first matching revision the walk yields
    for ctx in walkchangerevs(repo, matcher, {'rev': None}, prep):
        rev = ctx.rev()
        if rev not in matched:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, util.datestr(matched[rev])))
        return str(rev)

    raise error.Abort(_("revision matching date not found"))
1679 1679
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes, doubling until sizelimit is reached.

    Once the size is at least sizelimit, the same value is yielded
    forever (the last doubling may overshoot sizelimit; it is not
    clamped).
    """
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
1685 1685
class FileWalkError(Exception):
    """Raised by walkfilerevs when file history cannot be walked using
    filelogs alone; callers then fall back to the slow changelog walk."""
    pass
1688 1688
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    As a side effect, fncache is filled with, per returned rev, the list
    of matched files touched by that rev.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) for all matched files, then
        # for any rename sources collected in 'copies' during the walk
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1785 1785
class _followfilter(object):
    # Stateful predicate deciding whether a revision belongs to the
    # ancestor/descendant chain started at the first rev passed to match().
    # With onlyfirst, only first parents are considered.

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        # set lazily on the first match() call
        self.startrev = nullrev
        # frontier of revisions whose relatives we are tracking
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        '''return True if rev is related to startrev; updates internal state'''
        def realparents(rev):
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            # first call defines the reference revision
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1823 1823
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # Patterns (or --removed with exact/prefix matches) force the slow
    # path: scanning every changeset's file list instead of filelogs.
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # Evaluate this revision once, caching the outcome.
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): 'wanted - [x]' requires that wanted
                # support difference with a list operand; confirm this
                # holds for the plain-set fast path above.
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            # Gather up to windowsize wanted revisions from the stream.
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # Prepare contexts in forward (ascending) order...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ...then yield them in the requested (original) order.
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1961 1961
def _makefollowlogfilematcher(repo, files, followfirst):
    """Return a rev -> matcher callable tracking 'files' across renames.

    'files' must be paths relative to the repository root.
    """
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating revs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # Single-element list acts as a mutable flag writable from the
    # closure below.
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        # Record, for every revision reached while following each file
        # back from the working directory parent, the path(s) it had.
        for fn in files:
            fctx = pctx[fn]
            fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
            for c in fctx.ancestors(followfirst=followfirst):
                fcache.setdefault(c.rev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
1989 1989
1990 1990 def _makenofollowlogfilematcher(repo, pats, opts):
1991 1991 '''hook for extensions to override the filematcher for non-follow cases'''
1992 1992 return None
1993 1993
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Map each supported log option to a revset fragment plus, for
    # list-valued options, the operator joining multiple values.
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    # Copy so the synthetic '_'-prefixed keys added below do not leak
    # back to the caller's dict.
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    # Ascending first two revs means we are walking towards descendants.
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # Translate every recognized, non-empty option into a revset
    # fragment and AND them all together.
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2145 2145
def _logrevs(repo, opts):
    """Return the initial set of revisions 'log' should consider."""
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        # Explicit --rev wins over everything else.
        revs = scmutil.revrange(repo, opts['rev'])
    elif follow and repo.dirstate.p1() == nullid:
        # --follow from an unborn working-directory parent: nothing.
        revs = revset.baseset()
    elif follow:
        # Ancestors of the working directory parent, newest first.
        revs = repo.revs('reverse(:.)')
    else:
        # All repository revisions, newest first.
        revs = revset.spanset(repo)
        revs.reverse()
    return revs
2160 2160
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        if not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # Apply --limit lazily, without forcing the whole revset.
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2191 2191
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Filter the candidate revisions through the assembled revset.
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # Apply --limit lazily so we never force the full revset.
        picked = []
        for seen, rev in enumerate(revs):
            if seen >= limit:
                break
            picked.append(rev)
        revs = revset.baseset(picked)

    return revs, expr, filematcher
2217 2217
def _graphnodeformatter(ui, displayer):
    """Return a (repo, ctx) -> str callable rendering a graph node label.

    Honors the 'ui.graphnodetemplate' config when set; otherwise uses
    the stock {graphnode} template keyword.
    """
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode # fast path for "{graphnode}"

    templ = formatter.gettemplater(ui, 'graphnode', spec)
    cache = {}
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache # reuse cache of slow templates
    # Shared props dict is mutated per call in formatnode below.
    props = templatekw.keywords.copy()
    props['templ'] = templ
    props['cache'] = cache
    def formatnode(repo, ctx):
        props['ctx'] = ctx
        props['repo'] = repo
        props['ui'] = repo.ui
        # Fresh per-revision cache for template keywords.
        props['revcache'] = {}
        return templater.stringify(templ('graphnode', **props))
    return formatnode
2237 2237
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Render 'dag' as an ASCII revision graph through 'displayer'.

    getrenamed, when given, reports copies per changeset; filematcher,
    when given, limits which files the displayer details per revision.
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        if getrenamed and ctx.rev():
            # Collect (dest, source) rename pairs for this changeset.
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # Render the changeset into the displayer's buffer, then pull
        # the text back out so it can be laid out next to graph edges.
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2285 2285
def graphlog(ui, repo, *pats, **opts):
    """Show revision history alongside an ASCII revision graph."""
    # Parameters are identical to log command ones
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # Resolve renames up to one past the highest requested revision
        # (or the whole history when --rev is not given).
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2300 2300
def checkunsupportedgraphflags(pats, opts):
    """Abort when opts enables a flag that -G/--graph cannot honor."""
    unsupported = ["newest_first"]
    for name in unsupported:
        if opts.get(name):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % name.replace("_", "-"))
2306 2306
def graphrevs(repo, nodes, opts):
    """Return a graphmod DAG walker over 'nodes', newest first.

    'nodes' is reversed in place; --limit (when set) caps how many
    entries are walked.
    """
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is None:
        shown = nodes
    else:
        shown = nodes[:maxcount]
    return graphmod.nodes(repo, shown)
2313 2313
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule matched files for addition; return the list of bad paths.

    prefix is prepended to names when reporting inside subrepos;
    explicitonly restricts implicit additions to exactly-named files.
    Honors the 'subrepos' and 'dry_run' keys in opts.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # Record paths the matcher rejects while still delegating to the
    # matcher's own bad-file reporting.
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # Check for filename case collisions on this addition.
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            # Without --subrepos, recurse in explicit-only mode so only
            # files named exactly are added inside subrepos.
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2356 2356
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking matched files; return (bad, forgot) path lists."""
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # Candidates: modified, added, deleted and clean files from status.
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # Warn about explicitly-named files that are not tracked.
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2404 2404
def files(ui, ctx, m, fm, fmt, subrepos):
    """List files of ctx matching m through formatter fm.

    Returns 0 when at least one file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # When listing the working directory (rev is None), skip files
        # marked for removal in the dirstate.
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2434 2434
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Remove matched files from tracking (and from disk unless 'after').

    Returns 0 on success, 1 when any file could not be removed.  When a
    caller supplies 'warnings', messages are appended to it and not
    printed here; otherwise they are printed just before returning.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        # Nested (subrepo) invocation: the outermost caller prints.
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        # Tracked files, directories, '.' and subrepo paths are handled
        # elsewhere; only warn about genuinely unremovable names here.
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only forget files that are already gone from disk.
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        # Default: refuse to remove modified or freshly-added files.
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2552 2552
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write out the data of files in ctx selected by matcher.

    Output is routed through makefileobj (honoring opts['output']) with
    optional keyword decoding (opts['decode']).  Matching subrepos are
    visited as well.  Returns 0 when at least one file was written, 1
    otherwise.

    Note: the SOURCE span contained interleaved old/new diff lines from
    changeset 30340 ("manifest: remove manifest.find"); this body is the
    resolved post-change version using repo.manifestlog.
    """
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                write(file)
                return 0
        except KeyError:
            # File absent from this manifest: fall through to the
            # generic walk, which reports bad paths via the matcher.
            pass

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2592 2595
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    # Normalize --date early so commitfunc receives a parsed value.
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        if scmutil.addremove(repo, matcher, "", opts) != 0:
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

    # Delegate the actual commit to the caller-provided commitfunc.
    return commitfunc(ui, repo, message, matcher, opts)
2609 2612
def samefile(f, ctx1, ctx2):
    """Report whether file f is identical between ctx1 and ctx2.

    True when f is absent from both contexts, or present in both with
    equal contents and flags; False in every other case.
    """
    present1 = f in ctx1.manifest()
    present2 = f in ctx2.manifest()
    if not present1:
        # Absent on both sides counts as "same".
        return not present2
    if not present2:
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2621 2624
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset ``old``, folding in current working-directory
    changes matched by ``pats``/``opts``.

    Strategy: first make a throwaway "intermediate" commit of the working
    directory on top of ``old`` (hooks and the active bookmark suppressed),
    then build a memctx combining ``old`` and the intermediate commit on top
    of ``old``'s parent and commit it.  The rewritten changesets are then
    either obsoleted (if createmarkers is enabled) or stripped.

    Returns the node of the amended changeset, or ``old.node()`` when
    nothing would change.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # Only consult the second parent when one really exists.
                # (The previous check, `if old.p2:`, tested a bound method,
                # which is always truthy, so copies were also traced from
                # the null revision on non-merge amends.)
                if old.p2().rev() != nullrev:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())
                files = [f for f in files if not samefile(f, ctx, base)]

                def filectxfn(repo, ctx_, path):
                    # serve file content from the intermediate commit
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        # path was deleted relative to base
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

                user = opts.get('user') or old.user()
                date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            #commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid
2804 2807
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's existing description, or run the commit editor when the
    description is empty (aborting if the user leaves it unchanged)."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2810 2813
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Open the user's editor to obtain a commit message for ctx.

    subs is the list of dirty subrepos to mention in the skeleton text.
    finishdesc, if given, post-processes the edited text.  extramsg is an
    extra instruction line shown in the skeleton.  editform selects an
    editform-specific [committemplate] entry.  When
    unchangedmessagedetection is set, abort if the user saved the template
    without modification.

    Raises error.Abort on an empty (or unchanged) message.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # look up [committemplate] from the most specific editform key
    # ('changeset.foo.bar', then 'changeset.foo', then 'changeset')
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        # no template configured: use the built-in skeleton
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending)
    # strip the "HG:" helper lines from the edited result
    text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2851 2854
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit-message skeleton for ctx from a [committemplate]
    template and return it as a string."""
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)

    # expose every other [committemplate] key to the template engine
    for key, value in ui.configitems('committemplate'):
        if key == 'changeset':
            continue
        t.t.cache[key] = value

    extramsg = extramsg or ''  # ensure that extramsg is a string

    # capture the rendered template instead of writing it to the terminal
    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2868 2871
def hgprefix(msg):
    """Prefix every non-empty line of msg with "HG: ", dropping blank lines."""
    kept = [line for line in msg.split("\n") if line]
    return "\n".join("HG: " + line for line in kept)
2871 2874
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (template-less) commit editor skeleton for ctx:
    the current description followed by "HG:"-prefixed helper lines."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()

    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(hgprefix(_("Enter commit message."
                            " Lines beginning with 'HG:' are removed.")))
    lines.append(hgprefix(extramsg))
    lines.append("HG: --")
    lines.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_("branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    lines.extend(hgprefix(_("subrepo %s") % s) for s in subs)
    lines.extend(hgprefix(_("added %s") % f) for f in added)
    lines.extend(hgprefix(_("changed %s") % f) for f in modified)
    lines.extend(hgprefix(_("removed %s") % f) for f in removed)
    if not (added or modified or removed):
        lines.append(hgprefix(_("no files changed")))
    lines.append("")

    return "\n".join(lines)
2899 2902
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Report the outcome of a commit on the ui: print 'created new head'
    when appropriate, note reopened closed branch heads, and show the new
    changeset id in verbose/debug mode.

    node is the freshly committed changeset; bheads, when provided, are the
    heads of branch as they were *before* the commit.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2947 2950
def postcommitstatus(repo, pats, opts):
    """Return the working-directory status restricted to files matching
    pats/opts (used to report what remains dirty after a commit)."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
2950 2953
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore files matching pats/opts to their state in changeset ctx.

    parents is the (parent, p2) pair of the working directory.  Every
    matched file is classified against both the dirstate and the target
    revision into disjoint sets (modified/added/removed/... and their
    dirstate-side variants), each set is mapped to an action through a
    dispatch table, and _performrevert() finally applies the actions.
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress "not found" warnings for paths already collected,
                # for subrepos, and for directories that contain matches
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted changesets
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = dsmodified - smf
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                   }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2 # unconditionally do backup
        check = 1 # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], dsmodifiedbackup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since targe, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, basestring):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
3229 3232
3230 3233 def _revertprefetch(repo, ctx, *files):
3231 3234 """Let extension changing the storage layer prefetch content"""
3232 3235 pass
3233 3236
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    actions maps action names ('forget', 'remove', 'drop', 'revert', 'add',
    'undelete', ...) to (file list, message) pairs as built by revert().
    In interactive mode the 'revert' action is applied hunk by hunk through
    the record/crecord filter; tobackup lists files needing a .orig backup
    before a hunk is applied.
    """
    parent, p2 = parents
    node = ctx.node()
    excluded_files = []
    # files the user declines to forget are excluded from the matcher below
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write the target revision's version of f into the working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        if interactive:
            choice = \
                repo.ui.promptchoice(
                    _("forget added file %s (yn)?$$ &Yes $$ &No")
                    % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            # file may already be gone; removal from dirstate still applies
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        reversehunks = repo.ui.configbool('experimental',
                                          'revertalternateinteractivemode',
                                          True)
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)
        operation = 'discard' if node == parent else 'revert'

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
            repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3360 3363
def command(table):
    """Return a decorator factory that registers commands into table.

    table must be a dict mapping command name to its entry.  The returned
    callable takes:

    - name: the command name (first positional argument)
    - options: an iterable of argument tuples, as accepted by
      ``mercurial.fancyopts.fancyopts()``
    - synopsis: an optional one-line usage summary shown in help output
    - norepo: True if the command needs no local repository (default False)
    - optionalrepo: True if a local repository is optional
    - inferrepo: True to infer a repository from command-line arguments
      (see ``findrepo()``)

    and returns a decorator that stamps the repo flags onto the decorated
    function and stores ``(func, options[, synopsis])`` in table under name.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            func.norepo = norepo
            func.optionalrepo = optionalrepo
            func.inferrepo = inferrepo
            entry = (func, list(options))
            if synopsis:
                entry = entry + (synopsis,)
            table[name] = entry
            return func
        return decorator
    return cmd
3405 3408
def checkunresolved(ms):
    """Abort when merge state ms still has unresolved or driver-resolved
    files, or the merge driver has not finished."""
    unresolved = list(ms.unresolved())
    if unresolved:
        raise error.Abort(_("unresolved merge conflicts "
                            "(see 'hg help resolve')"))
    if ms.mdstate() != 's' or list(ms.driverresolved()):
        raise error.Abort(_('driver-resolved merge conflicts'),
                          hint=_('run "hg resolve --all" to resolve'))
3413 3416
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
# Consumed by checkunfinished()/clearunfinished() below.
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3442 3445
def checkunfinished(repo, commit=False):
    '''Look for an unfinished multistep operation, like graft, and abort
    if found. It's probably good to check this right before
    bailifchanged().
    '''
    for statefile, _clearable, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            # this operation tolerates committing on top of it
            continue
        if repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3453 3456
3454 3457 def clearunfinished(repo):
3455 3458 '''Check for unfinished operations (as above), and clear the ones
3456 3459 that are clearable.
3457 3460 '''
3458 3461 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3459 3462 if not clearable and repo.vfs.exists(f):
3460 3463 raise error.Abort(msg, hint=hint)
3461 3464 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3462 3465 if clearable and repo.vfs.exists(f):
3463 3466 util.unlink(repo.join(f))
3464 3467
3465 3468 afterresolvedstates = [
3466 3469 ('graftstate',
3467 3470 _('hg graft --continue')),
3468 3471 ]
3469 3472
3470 3473 def howtocontinue(repo):
3471 3474 '''Check for an unfinished operation and return the command to finish
3472 3475 it.
3473 3476
3474 3477 afterresolvedstates tuples define a .hg/{file} and the corresponding
3475 3478 command needed to finish it.
3476 3479
3477 3480 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3478 3481 a boolean.
3479 3482 '''
3480 3483 contmsg = _("continue: %s")
3481 3484 for f, msg in afterresolvedstates:
3482 3485 if repo.vfs.exists(f):
3483 3486 return contmsg % msg, True
3484 3487 workingctx = repo[None]
3485 3488 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3486 3489 for s in workingctx.substate)
3487 3490 if dirty:
3488 3491 return contmsg % _("hg commit"), False
3489 3492 return None, None
3490 3493
3491 3494 def checkafterresolved(repo):
3492 3495 '''Inform the user about the next action after completing hg resolve
3493 3496
3494 3497 If there's a matching afterresolvedstates, howtocontinue will yield
3495 3498 repo.ui.warn as the reporter.
3496 3499
3497 3500 Otherwise, it will yield repo.ui.note.
3498 3501 '''
3499 3502 msg, warning = howtocontinue(repo)
3500 3503 if msg is not None:
3501 3504 if warning:
3502 3505 repo.ui.warn("%s\n" % msg)
3503 3506 else:
3504 3507 repo.ui.note("%s\n" % msg)
3505 3508
3506 3509 def wrongtooltocontinue(repo, task):
3507 3510 '''Raise an abort suggesting how to properly continue if there is an
3508 3511 active task.
3509 3512
3510 3513 Uses howtocontinue() to find the active task.
3511 3514
3512 3515 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3513 3516 a hint.
3514 3517 '''
3515 3518 after = howtocontinue(repo)
3516 3519 hint = None
3517 3520 if after[1]:
3518 3521 hint = after[0]
3519 3522 raise error.Abort(_('no %s in progress') % task, hint=hint)
3520 3523
3521 3524 class dirstateguard(object):
3522 3525 '''Restore dirstate at unexpected failure.
3523 3526
3524 3527 At the construction, this class does:
3525 3528
3526 3529 - write current ``repo.dirstate`` out, and
3527 3530 - save ``.hg/dirstate`` into the backup file
3528 3531
3529 3532 This restores ``.hg/dirstate`` from backup file, if ``release()``
3530 3533 is invoked before ``close()``.
3531 3534
3532 3535 This just removes the backup file at ``close()`` before ``release()``.
3533 3536 '''
3534 3537
3535 3538 def __init__(self, repo, name):
3536 3539 self._repo = repo
3537 3540 self._active = False
3538 3541 self._closed = False
3539 3542 self._suffix = '.backup.%s.%d' % (name, id(self))
3540 3543 repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
3541 3544 self._active = True
3542 3545
3543 3546 def __del__(self):
3544 3547 if self._active: # still active
3545 3548 # this may occur, even if this class is used correctly:
3546 3549 # for example, releasing other resources like transaction
3547 3550 # may raise exception before ``dirstateguard.release`` in
3548 3551 # ``release(tr, ....)``.
3549 3552 self._abort()
3550 3553
3551 3554 def close(self):
3552 3555 if not self._active: # already inactivated
3553 3556 msg = (_("can't close already inactivated backup: dirstate%s")
3554 3557 % self._suffix)
3555 3558 raise error.Abort(msg)
3556 3559
3557 3560 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
3558 3561 self._suffix)
3559 3562 self._active = False
3560 3563 self._closed = True
3561 3564
3562 3565 def _abort(self):
3563 3566 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
3564 3567 self._suffix)
3565 3568 self._active = False
3566 3569
3567 3570 def release(self):
3568 3571 if not self._closed:
3569 3572 if not self._active: # already inactivated
3570 3573 msg = (_("can't release already inactivated backup:"
3571 3574 " dirstate%s")
3572 3575 % self._suffix)
3573 3576 raise error.Abort(msg)
3574 3577 self._abort()
@@ -1,1982 +1,1984 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 wdirid,
23 23 )
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 fileset,
28 28 match as matchmod,
29 29 mdiff,
30 30 obsolete as obsmod,
31 31 patch,
32 32 phases,
33 33 repoview,
34 34 revlog,
35 35 scmutil,
36 36 subrepo,
37 37 util,
38 38 )
39 39
40 40 propertycache = util.propertycache
41 41
42 42 # Phony node value to stand-in for new files in some uses of
43 43 # manifests. Manifests support 21-byte hashes for nodes which are
44 44 # dirty in the working copy.
45 45 _newnode = '!' * 21
46 46
47 47 nonascii = re.compile(r'[^\x21-\x7f]').search
48 48
49 49 class basectx(object):
50 50 """A basectx object represents the common logic for its children:
51 51 changectx: read-only context that is already present in the repo,
52 52 workingctx: a context that represents the working directory and can
53 53 be committed,
54 54 memctx: a context that represents changes in-memory and can also
55 55 be committed."""
56 56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 57 if isinstance(changeid, basectx):
58 58 return changeid
59 59
60 60 o = super(basectx, cls).__new__(cls)
61 61
62 62 o._repo = repo
63 63 o._rev = nullrev
64 64 o._node = nullid
65 65
66 66 return o
67 67
68 68 def __str__(self):
69 69 return short(self.node())
70 70
71 71 def __int__(self):
72 72 return self.rev()
73 73
74 74 def __repr__(self):
75 75 return "<%s %s>" % (type(self).__name__, str(self))
76 76
77 77 def __eq__(self, other):
78 78 try:
79 79 return type(self) == type(other) and self._rev == other._rev
80 80 except AttributeError:
81 81 return False
82 82
83 83 def __ne__(self, other):
84 84 return not (self == other)
85 85
86 86 def __contains__(self, key):
87 87 return key in self._manifest
88 88
89 89 def __getitem__(self, key):
90 90 return self.filectx(key)
91 91
92 92 def __iter__(self):
93 93 return iter(self._manifest)
94 94
95 95 def _manifestmatches(self, match, s):
96 96 """generate a new manifest filtered by the match argument
97 97
98 98 This method is for internal use only and mainly exists to provide an
99 99 object oriented way for other contexts to customize the manifest
100 100 generation.
101 101 """
102 102 return self.manifest().matches(match)
103 103
104 104 def _matchstatus(self, other, match):
105 105 """return match.always if match is none
106 106
107 107 This internal method provides a way for child objects to override the
108 108 match operator.
109 109 """
110 110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111 111
112 112 def _buildstatus(self, other, s, match, listignored, listclean,
113 113 listunknown):
114 114 """build a status with respect to another context"""
115 115 # Load earliest manifest first for caching reasons. More specifically,
116 116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 119 # delta to what's in the cache. So that's one full reconstruction + one
120 120 # delta application.
121 121 if self.rev() is not None and self.rev() < other.rev():
122 122 self.manifest()
123 123 mf1 = other._manifestmatches(match, s)
124 124 mf2 = self._manifestmatches(match, s)
125 125
126 126 modified, added = [], []
127 127 removed = []
128 128 clean = []
129 129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 130 deletedset = set(deleted)
131 131 d = mf1.diff(mf2, clean=listclean)
132 132 for fn, value in d.iteritems():
133 133 if fn in deletedset:
134 134 continue
135 135 if value is None:
136 136 clean.append(fn)
137 137 continue
138 138 (node1, flag1), (node2, flag2) = value
139 139 if node1 is None:
140 140 added.append(fn)
141 141 elif node2 is None:
142 142 removed.append(fn)
143 143 elif flag1 != flag2:
144 144 modified.append(fn)
145 145 elif node2 != _newnode:
146 146 # When comparing files between two commits, we save time by
147 147 # not comparing the file contents when the nodeids differ.
148 148 # Note that this means we incorrectly report a reverted change
149 149 # to a file as a modification.
150 150 modified.append(fn)
151 151 elif self[fn].cmp(other[fn]):
152 152 modified.append(fn)
153 153 else:
154 154 clean.append(fn)
155 155
156 156 if removed:
157 157 # need to filter files if they are already reported as removed
158 158 unknown = [fn for fn in unknown if fn not in mf1]
159 159 ignored = [fn for fn in ignored if fn not in mf1]
160 160 # if they're deleted, don't report them as removed
161 161 removed = [fn for fn in removed if fn not in deletedset]
162 162
163 163 return scmutil.status(modified, added, removed, deleted, unknown,
164 164 ignored, clean)
165 165
166 166 @propertycache
167 167 def substate(self):
168 168 return subrepo.state(self, self._repo.ui)
169 169
170 170 def subrev(self, subpath):
171 171 return self.substate[subpath][1]
172 172
173 173 def rev(self):
174 174 return self._rev
175 175 def node(self):
176 176 return self._node
177 177 def hex(self):
178 178 return hex(self.node())
179 179 def manifest(self):
180 180 return self._manifest
181 181 def repo(self):
182 182 return self._repo
183 183 def phasestr(self):
184 184 return phases.phasenames[self.phase()]
185 185 def mutable(self):
186 186 return self.phase() > phases.public
187 187
188 188 def getfileset(self, expr):
189 189 return fileset.getfileset(self, expr)
190 190
191 191 def obsolete(self):
192 192 """True if the changeset is obsolete"""
193 193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194 194
195 195 def extinct(self):
196 196 """True if the changeset is extinct"""
197 197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198 198
199 199 def unstable(self):
200 200 """True if the changeset is not obsolete but it's ancestor are"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202 202
203 203 def bumped(self):
204 204 """True if the changeset try to be a successor of a public changeset
205 205
206 206 Only non-public and non-obsolete changesets may be bumped.
207 207 """
208 208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209 209
210 210 def divergent(self):
211 211 """Is a successors of a changeset with multiple possible successors set
212 212
213 213 Only non-public and non-obsolete changesets may be divergent.
214 214 """
215 215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216 216
217 217 def troubled(self):
218 218 """True if the changeset is either unstable, bumped or divergent"""
219 219 return self.unstable() or self.bumped() or self.divergent()
220 220
221 221 def troubles(self):
222 222 """return the list of troubles affecting this changesets.
223 223
224 224 Troubles are returned as strings. possible values are:
225 225 - unstable,
226 226 - bumped,
227 227 - divergent.
228 228 """
229 229 troubles = []
230 230 if self.unstable():
231 231 troubles.append('unstable')
232 232 if self.bumped():
233 233 troubles.append('bumped')
234 234 if self.divergent():
235 235 troubles.append('divergent')
236 236 return troubles
237 237
238 238 def parents(self):
239 239 """return contexts for each parent changeset"""
240 240 return self._parents
241 241
242 242 def p1(self):
243 243 return self._parents[0]
244 244
245 245 def p2(self):
246 246 parents = self._parents
247 247 if len(parents) == 2:
248 248 return parents[1]
249 249 return changectx(self._repo, nullrev)
250 250
251 251 def _fileinfo(self, path):
252 252 if '_manifest' in self.__dict__:
253 253 try:
254 254 return self._manifest[path], self._manifest.flags(path)
255 255 except KeyError:
256 256 raise error.ManifestLookupError(self._node, path,
257 257 _('not found in manifest'))
258 258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 259 if path in self._manifestdelta:
260 260 return (self._manifestdelta[path],
261 261 self._manifestdelta.flags(path))
262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
263 if not node:
262 mfl = self._repo.manifestlog
263 try:
264 node, flag = mfl[self._changeset.manifest].find(path)
265 except KeyError:
264 266 raise error.ManifestLookupError(self._node, path,
265 267 _('not found in manifest'))
266 268
267 269 return node, flag
268 270
269 271 def filenode(self, path):
270 272 return self._fileinfo(path)[0]
271 273
272 274 def flags(self, path):
273 275 try:
274 276 return self._fileinfo(path)[1]
275 277 except error.LookupError:
276 278 return ''
277 279
278 280 def sub(self, path, allowcreate=True):
279 281 '''return a subrepo for the stored revision of path, never wdir()'''
280 282 return subrepo.subrepo(self, path, allowcreate=allowcreate)
281 283
282 284 def nullsub(self, path, pctx):
283 285 return subrepo.nullsubrepo(self, path, pctx)
284 286
285 287 def workingsub(self, path):
286 288 '''return a subrepo for the stored revision, or wdir if this is a wdir
287 289 context.
288 290 '''
289 291 return subrepo.subrepo(self, path, allowwdir=True)
290 292
291 293 def match(self, pats=[], include=None, exclude=None, default='glob',
292 294 listsubrepos=False, badfn=None):
293 295 r = self._repo
294 296 return matchmod.match(r.root, r.getcwd(), pats,
295 297 include, exclude, default,
296 298 auditor=r.nofsauditor, ctx=self,
297 299 listsubrepos=listsubrepos, badfn=badfn)
298 300
299 301 def diff(self, ctx2=None, match=None, **opts):
300 302 """Returns a diff generator for the given contexts and matcher"""
301 303 if ctx2 is None:
302 304 ctx2 = self.p1()
303 305 if ctx2 is not None:
304 306 ctx2 = self._repo[ctx2]
305 307 diffopts = patch.diffopts(self._repo.ui, opts)
306 308 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
307 309
308 310 def dirs(self):
309 311 return self._manifest.dirs()
310 312
311 313 def hasdir(self, dir):
312 314 return self._manifest.hasdir(dir)
313 315
314 316 def dirty(self, missing=False, merge=True, branch=True):
315 317 return False
316 318
317 319 def status(self, other=None, match=None, listignored=False,
318 320 listclean=False, listunknown=False, listsubrepos=False):
319 321 """return status of files between two nodes or node and working
320 322 directory.
321 323
322 324 If other is None, compare this node with working directory.
323 325
324 326 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 327 """
326 328
327 329 ctx1 = self
328 330 ctx2 = self._repo[other]
329 331
330 332 # This next code block is, admittedly, fragile logic that tests for
331 333 # reversing the contexts and wouldn't need to exist if it weren't for
332 334 # the fast (and common) code path of comparing the working directory
333 335 # with its first parent.
334 336 #
335 337 # What we're aiming for here is the ability to call:
336 338 #
337 339 # workingctx.status(parentctx)
338 340 #
339 341 # If we always built the manifest for each context and compared those,
340 342 # then we'd be done. But the special case of the above call means we
341 343 # just copy the manifest of the parent.
342 344 reversed = False
343 345 if (not isinstance(ctx1, changectx)
344 346 and isinstance(ctx2, changectx)):
345 347 reversed = True
346 348 ctx1, ctx2 = ctx2, ctx1
347 349
348 350 match = ctx2._matchstatus(ctx1, match)
349 351 r = scmutil.status([], [], [], [], [], [], [])
350 352 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 353 listunknown)
352 354
353 355 if reversed:
354 356 # Reverse added and removed. Clear deleted, unknown and ignored as
355 357 # these make no sense to reverse.
356 358 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 359 r.clean)
358 360
359 361 if listsubrepos:
360 362 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 363 try:
362 364 rev2 = ctx2.subrev(subpath)
363 365 except KeyError:
364 366 # A subrepo that existed in node1 was deleted between
365 367 # node1 and node2 (inclusive). Thus, ctx2's substate
366 368 # won't contain that subpath. The best we can do ignore it.
367 369 rev2 = None
368 370 submatch = matchmod.subdirmatcher(subpath, match)
369 371 s = sub.status(rev2, match=submatch, ignored=listignored,
370 372 clean=listclean, unknown=listunknown,
371 373 listsubrepos=True)
372 374 for rfiles, sfiles in zip(r, s):
373 375 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374 376
375 377 for l in r:
376 378 l.sort()
377 379
378 380 return r
379 381
380 382
381 383 def makememctx(repo, parents, text, user, date, branch, files, store,
382 384 editor=None, extra=None):
383 385 def getfilectx(repo, memctx, path):
384 386 data, mode, copied = store.getfile(path)
385 387 if data is None:
386 388 return None
387 389 islink, isexec = mode
388 390 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
389 391 copied=copied, memctx=memctx)
390 392 if extra is None:
391 393 extra = {}
392 394 if branch:
393 395 extra['branch'] = encoding.fromlocal(branch)
394 396 ctx = memctx(repo, parents, text, files, getfilectx, user,
395 397 date, extra, editor)
396 398 return ctx
397 399
398 400 class changectx(basectx):
399 401 """A changecontext object makes access to data related to a particular
400 402 changeset convenient. It represents a read-only context already present in
401 403 the repo."""
402 404 def __init__(self, repo, changeid=''):
403 405 """changeid is a revision number, node, or tag"""
404 406
405 407 # since basectx.__new__ already took care of copying the object, we
406 408 # don't need to do anything in __init__, so we just exit here
407 409 if isinstance(changeid, basectx):
408 410 return
409 411
410 412 if changeid == '':
411 413 changeid = '.'
412 414 self._repo = repo
413 415
414 416 try:
415 417 if isinstance(changeid, int):
416 418 self._node = repo.changelog.node(changeid)
417 419 self._rev = changeid
418 420 return
419 421 if isinstance(changeid, long):
420 422 changeid = str(changeid)
421 423 if changeid == 'null':
422 424 self._node = nullid
423 425 self._rev = nullrev
424 426 return
425 427 if changeid == 'tip':
426 428 self._node = repo.changelog.tip()
427 429 self._rev = repo.changelog.rev(self._node)
428 430 return
429 431 if changeid == '.' or changeid == repo.dirstate.p1():
430 432 # this is a hack to delay/avoid loading obsmarkers
431 433 # when we know that '.' won't be hidden
432 434 self._node = repo.dirstate.p1()
433 435 self._rev = repo.unfiltered().changelog.rev(self._node)
434 436 return
435 437 if len(changeid) == 20:
436 438 try:
437 439 self._node = changeid
438 440 self._rev = repo.changelog.rev(changeid)
439 441 return
440 442 except error.FilteredRepoLookupError:
441 443 raise
442 444 except LookupError:
443 445 pass
444 446
445 447 try:
446 448 r = int(changeid)
447 449 if str(r) != changeid:
448 450 raise ValueError
449 451 l = len(repo.changelog)
450 452 if r < 0:
451 453 r += l
452 454 if r < 0 or r >= l:
453 455 raise ValueError
454 456 self._rev = r
455 457 self._node = repo.changelog.node(r)
456 458 return
457 459 except error.FilteredIndexError:
458 460 raise
459 461 except (ValueError, OverflowError, IndexError):
460 462 pass
461 463
462 464 if len(changeid) == 40:
463 465 try:
464 466 self._node = bin(changeid)
465 467 self._rev = repo.changelog.rev(self._node)
466 468 return
467 469 except error.FilteredLookupError:
468 470 raise
469 471 except (TypeError, LookupError):
470 472 pass
471 473
472 474 # lookup bookmarks through the name interface
473 475 try:
474 476 self._node = repo.names.singlenode(repo, changeid)
475 477 self._rev = repo.changelog.rev(self._node)
476 478 return
477 479 except KeyError:
478 480 pass
479 481 except error.FilteredRepoLookupError:
480 482 raise
481 483 except error.RepoLookupError:
482 484 pass
483 485
484 486 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 487 if self._node is not None:
486 488 self._rev = repo.changelog.rev(self._node)
487 489 return
488 490
489 491 # lookup failed
490 492 # check if it might have come from damaged dirstate
491 493 #
492 494 # XXX we could avoid the unfiltered if we had a recognizable
493 495 # exception for filtered changeset access
494 496 if changeid in repo.unfiltered().dirstate.parents():
495 497 msg = _("working directory has unknown parent '%s'!")
496 498 raise error.Abort(msg % short(changeid))
497 499 try:
498 500 if len(changeid) == 20 and nonascii(changeid):
499 501 changeid = hex(changeid)
500 502 except TypeError:
501 503 pass
502 504 except (error.FilteredIndexError, error.FilteredLookupError,
503 505 error.FilteredRepoLookupError):
504 506 if repo.filtername.startswith('visible'):
505 507 msg = _("hidden revision '%s'") % changeid
506 508 hint = _('use --hidden to access hidden revisions')
507 509 raise error.FilteredRepoLookupError(msg, hint=hint)
508 510 msg = _("filtered revision '%s' (not in '%s' subset)")
509 511 msg %= (changeid, repo.filtername)
510 512 raise error.FilteredRepoLookupError(msg)
511 513 except IndexError:
512 514 pass
513 515 raise error.RepoLookupError(
514 516 _("unknown revision '%s'") % changeid)
515 517
516 518 def __hash__(self):
517 519 try:
518 520 return hash(self._rev)
519 521 except AttributeError:
520 522 return id(self)
521 523
522 524 def __nonzero__(self):
523 525 return self._rev != nullrev
524 526
525 527 @propertycache
526 528 def _changeset(self):
527 529 return self._repo.changelog.changelogrevision(self.rev())
528 530
529 531 @propertycache
530 532 def _manifest(self):
531 533 return self._repo.manifestlog[self._changeset.manifest].read()
532 534
533 535 @propertycache
534 536 def _manifestdelta(self):
535 537 mfnode = self._changeset.manifest
536 538 return self._repo.manifestlog[mfnode].readdelta()
537 539
538 540 @propertycache
539 541 def _parents(self):
540 542 repo = self._repo
541 543 p1, p2 = repo.changelog.parentrevs(self._rev)
542 544 if p2 == nullrev:
543 545 return [changectx(repo, p1)]
544 546 return [changectx(repo, p1), changectx(repo, p2)]
545 547
546 548 def changeset(self):
547 549 c = self._changeset
548 550 return (
549 551 c.manifest,
550 552 c.user,
551 553 c.date,
552 554 c.files,
553 555 c.description,
554 556 c.extra,
555 557 )
556 558 def manifestnode(self):
557 559 return self._changeset.manifest
558 560
559 561 def user(self):
560 562 return self._changeset.user
561 563 def date(self):
562 564 return self._changeset.date
563 565 def files(self):
564 566 return self._changeset.files
565 567 def description(self):
566 568 return self._changeset.description
567 569 def branch(self):
568 570 return encoding.tolocal(self._changeset.extra.get("branch"))
569 571 def closesbranch(self):
570 572 return 'close' in self._changeset.extra
571 573 def extra(self):
572 574 return self._changeset.extra
573 575 def tags(self):
574 576 return self._repo.nodetags(self._node)
575 577 def bookmarks(self):
576 578 return self._repo.nodebookmarks(self._node)
577 579 def phase(self):
578 580 return self._repo._phasecache.phase(self._repo, self._rev)
579 581 def hidden(self):
580 582 return self._rev in repoview.filterrevs(self._repo, 'visible')
581 583
582 584 def children(self):
583 585 """return contexts for each child changeset"""
584 586 c = self._repo.changelog.children(self._node)
585 587 return [changectx(self._repo, x) for x in c]
586 588
587 589 def ancestors(self):
588 590 for a in self._repo.changelog.ancestors([self._rev]):
589 591 yield changectx(self._repo, a)
590 592
591 593 def descendants(self):
592 594 for d in self._repo.changelog.descendants([self._rev]):
593 595 yield changectx(self._repo, d)
594 596
595 597 def filectx(self, path, fileid=None, filelog=None):
596 598 """get a file context from this changeset"""
597 599 if fileid is None:
598 600 fileid = self.filenode(path)
599 601 return filectx(self._repo, path, fileid=fileid,
600 602 changectx=self, filelog=filelog)
601 603
602 604 def ancestor(self, c2, warn=False):
603 605 """return the "best" ancestor context of self and c2
604 606
605 607 If there are multiple candidates, it will show a message and check
606 608 merge.preferancestor configuration before falling back to the
607 609 revlog ancestor."""
608 610 # deal with workingctxs
609 611 n2 = c2._node
610 612 if n2 is None:
611 613 n2 = c2._parents[0]._node
612 614 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
613 615 if not cahs:
614 616 anc = nullid
615 617 elif len(cahs) == 1:
616 618 anc = cahs[0]
617 619 else:
618 620 # experimental config: merge.preferancestor
619 621 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
620 622 try:
621 623 ctx = changectx(self._repo, r)
622 624 except error.RepoLookupError:
623 625 continue
624 626 anc = ctx.node()
625 627 if anc in cahs:
626 628 break
627 629 else:
628 630 anc = self._repo.changelog.ancestor(self._node, n2)
629 631 if warn:
630 632 self._repo.ui.status(
631 633 (_("note: using %s as ancestor of %s and %s\n") %
632 634 (short(anc), short(self._node), short(n2))) +
633 635 ''.join(_(" alternatively, use --config "
634 636 "merge.preferancestor=%s\n") %
635 637 short(n) for n in sorted(cahs) if n != anc))
636 638 return changectx(self._repo, anc)
637 639
638 640 def descendant(self, other):
639 641 """True if other is descendant of this changeset"""
640 642 return self._repo.changelog.descendant(self._rev, other._rev)
641 643
642 644 def walk(self, match):
643 645 '''Generates matching file names.'''
644 646
645 647 # Wrap match.bad method to have message with nodeid
646 648 def bad(fn, msg):
647 649 # The manifest doesn't know about subrepos, so don't complain about
648 650 # paths into valid subrepos.
649 651 if any(fn == s or fn.startswith(s + '/')
650 652 for s in self.substate):
651 653 return
652 654 match.bad(fn, _('no such file in rev %s') % self)
653 655
654 656 m = matchmod.badmatch(match, bad)
655 657 return self._manifest.walk(m)
656 658
657 659 def matches(self, match):
658 660 return self.walk(match)
659 661
660 662 class basefilectx(object):
661 663 """A filecontext object represents the common logic for its children:
662 664 filectx: read-only access to a filerevision that is already present
663 665 in the repo,
664 666 workingfilectx: a filecontext that represents files from the working
665 667 directory,
666 668 memfilectx: a filecontext that represents files in-memory."""
667 669 def __new__(cls, repo, path, *args, **kwargs):
668 670 return super(basefilectx, cls).__new__(cls)
669 671
670 672 @propertycache
671 673 def _filelog(self):
672 674 return self._repo.file(self._path)
673 675
674 676 @propertycache
675 677 def _changeid(self):
676 678 if '_changeid' in self.__dict__:
677 679 return self._changeid
678 680 elif '_changectx' in self.__dict__:
679 681 return self._changectx.rev()
680 682 elif '_descendantrev' in self.__dict__:
681 683 # this file context was created from a revision with a known
682 684 # descendant, we can (lazily) correct for linkrev aliases
683 685 return self._adjustlinkrev(self._descendantrev)
684 686 else:
685 687 return self._filelog.linkrev(self._filerev)
686 688
687 689 @propertycache
688 690 def _filenode(self):
689 691 if '_fileid' in self.__dict__:
690 692 return self._filelog.lookup(self._fileid)
691 693 else:
692 694 return self._changectx.filenode(self._path)
693 695
694 696 @propertycache
695 697 def _filerev(self):
696 698 return self._filelog.rev(self._filenode)
697 699
698 700 @propertycache
699 701 def _repopath(self):
700 702 return self._path
701 703
702 704 def __nonzero__(self):
703 705 try:
704 706 self._filenode
705 707 return True
706 708 except error.LookupError:
707 709 # file is missing
708 710 return False
709 711
710 712 def __str__(self):
711 713 try:
712 714 return "%s@%s" % (self.path(), self._changectx)
713 715 except error.LookupError:
714 716 return "%s@???" % self.path()
715 717
716 718 def __repr__(self):
717 719 return "<%s %s>" % (type(self).__name__, str(self))
718 720
719 721 def __hash__(self):
720 722 try:
721 723 return hash((self._path, self._filenode))
722 724 except AttributeError:
723 725 return id(self)
724 726
725 727 def __eq__(self, other):
726 728 try:
727 729 return (type(self) == type(other) and self._path == other._path
728 730 and self._filenode == other._filenode)
729 731 except AttributeError:
730 732 return False
731 733
732 734 def __ne__(self, other):
733 735 return not (self == other)
734 736
735 737 def filerev(self):
736 738 return self._filerev
737 739 def filenode(self):
738 740 return self._filenode
739 741 def flags(self):
740 742 return self._changectx.flags(self._path)
741 743 def filelog(self):
742 744 return self._filelog
743 745 def rev(self):
744 746 return self._changeid
745 747 def linkrev(self):
746 748 return self._filelog.linkrev(self._filerev)
747 749 def node(self):
748 750 return self._changectx.node()
749 751 def hex(self):
750 752 return self._changectx.hex()
751 753 def user(self):
752 754 return self._changectx.user()
753 755 def date(self):
754 756 return self._changectx.date()
755 757 def files(self):
756 758 return self._changectx.files()
757 759 def description(self):
758 760 return self._changectx.description()
759 761 def branch(self):
760 762 return self._changectx.branch()
761 763 def extra(self):
762 764 return self._changectx.extra()
763 765 def phase(self):
764 766 return self._changectx.phase()
765 767 def phasestr(self):
766 768 return self._changectx.phasestr()
767 769 def manifest(self):
768 770 return self._changectx.manifest()
769 771 def changectx(self):
770 772 return self._changectx
771 773 def repo(self):
772 774 return self._repo
773 775
774 776 def path(self):
775 777 return self._path
776 778
777 779 def isbinary(self):
778 780 try:
779 781 return util.binary(self.data())
780 782 except IOError:
781 783 return False
782 784 def isexec(self):
783 785 return 'x' in self.flags()
784 786 def islink(self):
785 787 return 'l' in self.flags()
786 788
787 789 def isabsent(self):
788 790 """whether this filectx represents a file not in self._changectx
789 791
790 792 This is mainly for merge code to detect change/delete conflicts. This is
791 793 expected to be True for all subclasses of basectx."""
792 794 return False
793 795
794 796 _customcmp = False
795 797 def cmp(self, fctx):
796 798 """compare with other file context
797 799
798 800 returns True if different than fctx.
799 801 """
800 802 if fctx._customcmp:
801 803 return fctx.cmp(self)
802 804
803 805 if (fctx._filenode is None
804 806 and (self._repo._encodefilterpats
805 807 # if file data starts with '\1\n', empty metadata block is
806 808 # prepended, which adds 4 bytes to filelog.size().
807 809 or self.size() - 4 == fctx.size())
808 810 or self.size() == fctx.size()):
809 811 return self._filelog.cmp(self._filenode, fctx.data())
810 812
811 813 return True
812 814
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        # use the unfiltered changelog: linkrevs may point at filtered revs
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
858 860
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # with no associated changeset, the plain linkrev is all we have
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        """return parent filectxs, substituting rename source when present"""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # synthesize a null filectx when there is no second parent
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
919 921
    def annotate(self, follow=False, linenumber=False, diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            # number of lines; text without a trailing newline still counts
            # its last partial line
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        def pair(parent, child):
            # copy annotations for unchanged ('=') blocks from the parent
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                # reference-count how many children still need p's annotation
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                for p in pl:
                    curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        # free parent annotations as soon as no child needs
                        # them, to bound memory usage
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1036 1038
    def ancestors(self, followfirst=False):
        """generate ancestor filectxs, newest linkrev first

        With followfirst=True only the first parent of each revision is
        followed.
        """
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # max() over (linkrev, filenode) keys yields the newest pending
            # ancestor next
            c = visit.pop(max(visit))
            yield c
1052 1054
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be provided
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only pre-populate the lazy attributes we were actually given;
        # the rest are computed on demand by propertycache
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        """return the file content; honors the censor.policy setting"""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # if either parent already has this exact file node, the "rename"
        # is not new in this changeset, so report no copy
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1150 1152
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # not committed yet: no revision number or node id
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            # default the branch to the dirstate's current branch
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __str__(self):
        # e.g. "badc0ffee+": first parent plus a dirty marker
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """
        parents = self.parents()

        man1 = parents[0].manifest()
        man = man1.copy()
        if len(parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        # no manifest is stored until commit
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # the commit-to-be can never be more public than its parents
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached synthetic manifest when already built
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        """yield parent contexts, then all changelog ancestors"""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
1376 1378
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
       or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # tracked iff dirstate status is neither unknown ('?') nor removed
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        # drop a null second parent so non-merge states have one parent
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """schedule the listed files for addition; return rejected names"""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but do not reject, very large files
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # re-adding a removed file: mark for lookup instead
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """stop tracking the given files; return the names not tracked"""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # file was only added, never committed: just drop it
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """restore files marked removed from a parent revision"""
        pctxs = self.parents()
        with self._repo.wlock():
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    # NOTE(review): and/or idiom relies on pctxs[0][f] being
                    # truthy; confirm filectx defines no falsy __nonzero__
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """record that dest is a copy of source in the dirstate"""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1502 1504
1503 1505 def match(self, pats=[], include=None, exclude=None, default='glob',
1504 1506 listsubrepos=False, badfn=None):
1505 1507 r = self._repo
1506 1508
1507 1509 # Only a case insensitive filesystem needs magic to translate user input
1508 1510 # to actual case in the filesystem.
1509 1511 if not util.fscasesensitive(r.root):
1510 1512 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1511 1513 exclude, default, r.auditor, self,
1512 1514 listsubrepos=listsubrepos,
1513 1515 badfn=badfn)
1514 1516 return matchmod.match(r.root, r.getcwd(), pats,
1515 1517 include, exclude, default,
1516 1518 auditor=r.auditor, ctx=self,
1517 1519 listsubrepos=listsubrepos, badfn=badfn)
1518 1520
    def _filtersuspectsymlink(self, files):
        """drop files whose symlink placeholder content looks like real data"""
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """split possibly-clean files into (modified, fixup) lists

        fixup lists files that turned out to be clean; their dirstate
        entries are refreshed opportunistically below.
        """
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    normal = self._repo.dirstate.normal
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    self._repo.dirstate.write(self._repo.currenttransaction())
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            # _newnode is a sentinel node id for uncommitted content
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1662 1664
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # no stored revision yet: this content is not committed
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid when the file is absent from the parent's manifest
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copy source replaces the first parent; filelog is unknown (None)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1707 1709
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        # read straight from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (sourcepath, sourcenode) if dirstate records a copy,
        else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file vanished from the working dir: fall back to ctx date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1753 1755
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE: super(workingctx, ...) deliberately skips
        # workingctx.__init__ and initializes the grandparent class
        # directly with the precomputed 'changes' status
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # everything in the manifest that this commit does not touch
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        # ignored/unknown/deleted are always reported empty here
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
1791 1793
def makecachingfilectxfn(func):
    """Wrap *func* in a filectxfn that memoizes results per path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx; only the path is used as the key here.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # call through at most once per path; cache whatever comes back
        try:
            return memo[path]
        except KeyError:
            result = func(repo, memctx, path)
            memo[path] = result
            return result

    return getfilectx
1807 1809
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # replace missing parents with the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        # an empty branch name means the default branch
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no parent file nodes
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a context: file exists in this commit
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
1944 1946
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # flags string: 'l' for symlink, 'x' for executable (may combine)
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        # (sourcepath, nullid) tuple set in __init__, or None
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
@@ -1,1562 +1,1559 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import array
11 11 import heapq
12 12 import os
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from . import (
17 17 error,
18 18 mdiff,
19 19 parsers,
20 20 revlog,
21 21 util,
22 22 )
23 23
24 24 propertycache = util.propertycache
25 25
def _parsev1(data):
    """Yield (path, binnode, flags) tuples from a v1 manifest text.

    This method does a little bit of excessive-looking precondition
    checking. This is so that the behavior of this class exactly matches
    its C counterpart to try and help prevent surprise breakage for
    anyone that develops against the pure version.
    """
    if data and not data.endswith('\n'):
        raise ValueError('Manifest did not end in a newline.')
    lastline = None
    for line in data.splitlines():
        if lastline is not None and lastline > line:
            raise ValueError('Manifest lines not in sorted order.')
        lastline = line
        path, hexnode = line.split('\0')
        # anything past the 40 hex digits of the node is the flags field
        yield path, revlog.bin(hexnode[:40]), hexnode[40:]
45 45 def _parsev2(data):
46 46 metadataend = data.find('\n')
47 47 # Just ignore metadata for now
48 48 pos = metadataend + 1
49 49 prevf = ''
50 50 while pos < len(data):
51 51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
52 52 if end == -1:
53 53 raise ValueError('Manifest ended with incomplete file entry.')
54 54 stemlen = ord(data[pos])
55 55 items = data[pos + 1:end].split('\0')
56 56 f = prevf[:stemlen] + items[0]
57 57 if prevf > f:
58 58 raise ValueError('Manifest entries not in sorted order.')
59 59 fl = items[1]
60 60 # Just ignore metadata (items[2:] for now)
61 61 n = data[end + 1:end + 21]
62 62 yield f, n, fl
63 63 pos = end + 22
64 64 prevf = f
65 65
def _parse(data):
    """Generates (path, node, flags) tuples from a manifest text"""
    # a leading NUL byte marks the v2 format; v1 starts with a filename
    parser = _parsev2 if data.startswith('\0') else _parsev1
    return iter(parser(data))
def _text(it, usemanifestv2):
    """Given an iterator over (path, node, flags) tuples, returns a manifest
    text in the v2 format when requested, v1 otherwise."""
    serializer = _textv2 if usemanifestv2 else _textv1
    return serializer(it)
def _textv1(it):
    """Serialize (path, node, flags) tuples as a v1 manifest text."""
    paths = []
    lines = []
    hexfn = revlog.hex
    for path, node, flags in it:
        paths.append(path)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append("%s\0%s%s\n" % (path, hexfn(node), flags))

    _checkforbidden(paths)
    return ''.join(lines)
def _textv2(it):
    """Serialize (path, node, flags) tuples as a v2 manifest text."""
    paths = []
    # leading NUL plus an (empty) metadata line identify the v2 format
    lines = ['\0\n']
    lastpath = ''
    for path, node, flags in it:
        paths.append(path)
        # stem-compress each path against the previous one; the stem
        # length is stored in a single byte, so it is capped at 255
        stem = os.path.commonprefix([lastpath, path])
        stemlen = min(len(stem), 255)
        lines.append("%c%s\0%s\n%s\n" % (stemlen, path[stemlen:], flags, node))
        lastpath = path
    _checkforbidden(paths)
    return ''.join(lines)
class lazymanifestiter(object):
    """Iterator over the file names stored in a _lazymanifest."""
    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        # IndexError from _get means we walked off the positions table
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            # entry lives in extradata as a (path, node, flags) tuple
            self.pos += 1
            return data[0]
        self.pos += 1
        # entry lives in the raw text: the path runs up to the NUL byte
        zeropos = data.find('\x00', pos)
        return data[pos:zeropos]
class lazymanifestiterentries(object):
    """Iterator over (path, node, flags) tuples of a _lazymanifest."""
    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        # IndexError from _get means we walked off the positions table
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            # entry lives in extradata and is already a full tuple
            self.pos += 1
            return data
        # entry lives in the raw text: path, NUL, 40 hex chars, flags
        zeropos = data.find('\x00', pos)
        hashval = unhexlify(data, self.lm.extrainfo[self.pos],
                            zeropos + 1, 40)
        flags = self.lm._getflags(data, self.pos, zeropos)
        self.pos += 1
        return (data[pos:zeropos], hashval, flags)
def unhexlify(data, extra, pos, length):
    # Decode 'length' hex characters at data[pos:] into a binary node
    # (Python 2 str.decode('hex')). When 'extra' is nonzero, its low
    # byte is appended as a 21st node byte (see _compact/extrainfo).
    s = data[pos:pos + length].decode('hex')
    if extra:
        s += chr(extra & 0xff)
    return s
156 156 def _cmp(a, b):
157 157 return (a > b) - (a < b)
158 158
class _lazymanifest(object):
    """Pure-Python lazy manifest: a sorted "path\\0hex40flags\\n" text plus
    an index of line positions; edits are buffered in 'extradata' (negative
    positions) and folded back into the text by _compact().  Overridden by
    parsers.lazymanifest (C) when available; see the try/except below."""
    def __init__(self, data, positions=None, extrainfo=None, extradata=None):
        if positions is None:
            # fresh parse of a manifest text
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
        else:
            # copy constructor path (see copy())
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data

    def findlines(self, data):
        # Return the offset of every line start, validating format:
        # text must end in a newline and paths must be sorted.
        if not data:
            return []
        pos = data.find("\n")
        if pos == -1 or data[-1] != '\n':
            raise ValueError("Manifest did not end in a newline.")
        positions = [0]
        prev = data[:data.find('\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1:data.find('\x00', pos + 1)]
            if nexts < prev:
                raise ValueError("Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find("\n", pos + 1)
        return positions

    def _get(self, index):
        # get the position encoded in pos:
        # positive number is an index in 'data'
        # negative number is in extrapieces
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        # path for the entry at encoded position 'pos'
        if pos >= 0:
            return self.data[pos:self.data.find('\x00', pos + 1)]
        return self.extradata[-pos - 1][0]

    def bsearch(self, key):
        # binary search; returns index of 'key' or -1 when absent
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        # same as the above, but will always return the position
        # done for performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def _getflags(self, data, needle, pos):
        # flags follow the path NUL (at 'pos') and the 40 hex node chars
        start = pos + 41
        end = data.find("\n", start)
        if end == -1:
            end = len(data) - 1
        if start == end:
            return ''
        return self.data[start:end]

    def __getitem__(self, key):
        """Return (binnode, flags) for 'key'; raise KeyError if absent."""
        if not isinstance(key, str):
            raise TypeError("getitem: manifest keys must be a string.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            # extradata entry: (path, node, flags) tuple
            return (data[1], data[2])
        zeropos = data.find('\x00', pos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
        flags = self._getflags(data, needle, zeropos)
        return (hashval, flags)

    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1:]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
        if cur >= 0:
            # mark the removed line in the raw text; _compact skips it later
            self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]

    def __setitem__(self, key, value):
        """Store 'value' = (node, flags) for 'key' in the edit buffer."""
        if not isinstance(key, str):
            raise TypeError("setitem: manifest keys must be a string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError("Manifest values must be a tuple of (node, flags).")
        hashval = value[0]
        if not isinstance(hashval, str) or not 20 <= len(hashval) <= 22:
            raise TypeError("node must be a 20-byte string")
        flags = value[1]
        if len(hashval) == 22:
            # drop the 22nd byte; only 21 bytes of node are ever kept
            hashval = hashval[:-1]
        if not isinstance(flags, str) or len(flags) > 1:
            raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
        needle, found = self.bsearch2(key)
        if found:
            # put the item
            pos = self.positions[needle]
            if pos < 0:
                self.extradata[-pos - 1] = (key, hashval, value[1])
            else:
                # just don't bother
                self.extradata.append((key, hashval, value[1]))
                self.positions[needle] = -len(self.extradata)
        else:
            # not found, put it in with extra positions
            self.extradata.append((key, hashval, value[1]))
            self.positions = (self.positions[:needle] + [-len(self.extradata)]
                              + self.positions[needle:])
            self.extrainfo = (self.extrainfo[:needle] + [0] +
                              self.extrainfo[needle:])

    def copy(self):
        # XXX call _compact like in C?
        return _lazymanifest(self.data, self.positions, self.extrainfo,
                             self.extradata)

    def _compact(self):
        # Fold buffered extradata edits back into a single sorted text.
        # hopefully not called TOO often
        if len(self.extradata) == 0:
            return
        l = []
        last_cut = 0
        i = 0
        offset = 0
        self.extrainfo = [0] * len(self.positions)
        while i < len(self.positions):
            if self.positions[i] >= 0:
                # run of entries still living in the raw text
                cur = self.positions[i]
                last_cut = cur
                while True:
                    self.positions[i] = offset
                    i += 1
                    if i == len(self.positions) or self.positions[i] < 0:
                        break
                    offset += self.positions[i] - cur
                    cur = self.positions[i]
                end_cut = self.data.find('\n', cur)
                if end_cut != -1:
                    end_cut += 1
                offset += end_cut - cur
                l.append(self.data[last_cut:end_cut])
            else:
                # run of buffered entries; serialize each with _pack
                while i < len(self.positions) and self.positions[i] < 0:
                    cur = self.positions[i]
                    t = self.extradata[-cur - 1]
                    l.append(self._pack(t))
                    self.positions[i] = offset
                    if len(t[1]) > 20:
                        # remember the 21st node byte (text only holds 20)
                        self.extrainfo[i] = ord(t[1][21])
                    offset += len(l[-1])
                    i += 1
        self.data = ''.join(l)
        self.extradata = []

    def _pack(self, d):
        # serialize a (path, node, flags) tuple as one manifest line
        return d[0] + '\x00' + d[1][:20].encode('hex') + d[2] + '\n'

    def text(self):
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, '')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, ''), (e2, flags)

        return diff

    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        # XXX should be optimized
        c = _lazymanifest('')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c
# Prefer the C implementation of lazymanifest when the parsers module
# provides one; otherwise the pure-Python class above remains in use.
try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass
405 405
class manifestdict(object):
    """Mapping-style wrapper around _lazymanifest: path -> node, with
    flags kept alongside (see flags()/setflag()/find())."""
    def __init__(self, data=''):
        if data.startswith('\0'):
            #_lazymanifest can not parse v2
            self._lm = _lazymanifest('')
            for f, n, fl in _parsev2(data):
                self._lm[f] = n, fl
        else:
            self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        # node only; use find() to get (node, flags)
        return self._lm[key][0]

    def find(self, key):
        # (node, flags) tuple; raises KeyError like __getitem__
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    def __setitem__(self, key, node):
        # setting a node preserves any existing flags for the path
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        diff = self.diff(m2)
        # hashflags[1][0] is the node on the m2 side; None means absent
        files = set(filepath
                    for filepath, hashflags in diff.iteritems()
                    if hashflags[1][0] is None)
        return files

    @propertycache
    def _dirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return (len(files) < 100 and (match.isexact() or
            (match.prefix() and all(fn in self for fn in files))))

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # whatever is left in fset matched nothing; report it as bad
        # unless it names a directory that does exist
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        return self._lm.diff(m2._lm, clean)

    def setflag(self, key, flag):
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def iteritems(self):
        # (path, node) pairs; flags are dropped
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        # full (path, node, flags) tuples
        return self._lm.iterentries()

    def text(self, usemanifestv2=False):
        if usemanifestv2:
            return _textv2(self._lm.iterentries())
        else:
            # use (probably) native version for v1
            return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                    l = ""
                # merge adjacent/overlapping spans into one delta chunk
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = array.array('c', self.text())
            deltatext = mdiff.textdiff(base, arraytext)

        return arraytext, deltatext
627 627
628 628 def _msearch(m, s, lo=0, hi=None):
629 629 '''return a tuple (start, end) that says where to find s within m.
630 630
631 631 If the string is found m[start:end] are the line containing
632 632 that string. If start == end the string was not found and
633 633 they indicate the proper sorted insertion point.
634 634
635 635 m should be a buffer or a string
636 636 s is a string'''
637 637 def advance(i, c):
638 638 while i < lenm and m[i] != c:
639 639 i += 1
640 640 return i
641 641 if not s:
642 642 return (lo, lo)
643 643 lenm = len(m)
644 644 if not hi:
645 645 hi = lenm
646 646 while lo < hi:
647 647 mid = (lo + hi) // 2
648 648 start = mid
649 649 while start > 0 and m[start - 1] != '\n':
650 650 start -= 1
651 651 end = advance(start, '\0')
652 652 if m[start:end] < s:
653 653 # we know that after the null there are 40 bytes of sha1
654 654 # this translates to the bisect lo = mid + 1
655 655 lo = advance(end + 40, '\n') + 1
656 656 else:
657 657 # this translates to the bisect hi = mid
658 658 hi = start
659 659 end = advance(lo, '\0')
660 660 found = m[lo:end]
661 661 if s == found:
662 662 # we know that after the null there are 40 bytes of sha1
663 663 end = advance(end + 40, '\n')
664 664 return (lo, end + 1)
665 665 else:
666 666 return (lo, lo)
667 667
def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for name in l:
        if '\n' not in name and '\r' not in name:
            continue
        # newline/CR would corrupt the line-oriented manifest format
        raise error.RevlogError(
            _("'\\n' and '\\r' disallowed in filenames: %r") % name)
674 674
675 675
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    """Apply delta chunks to a manifest text and build the binary delta.

    addlist: the base manifest text as a py2 ``array.array('c')``
    x: list of (start, end, content) chunks, in ascending order

    Returns (deltatext, newaddlist): the packed binary delta (each chunk
    prefixed by ">lll" start/end/length headers) and the patched text.
    """
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = array.array('c')

    for start, end, content in x:
        # copy the untouched span before this chunk, then the replacement
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += array.array('c', content)

        # skip over the replaced region of the base text
        currentposition = end

    # remainder of the base text after the last chunk
    newaddlist += addlist[currentposition:]

    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist
696 696
697 697 def _splittopdir(f):
698 698 if '/' in f:
699 699 dir, subpath = f.split('/', 1)
700 700 return dir + '/', subpath
701 701 else:
702 702 return '', f
703 703
# default do-nothing callback used for treemanifest's lazy
# _loadfunc/_copyfunc hooks ("nothing pending")
_noop = lambda s: None
705 705
class treemanifest(object):
    """A manifest stored as a tree of per-directory submanifests.

    Each instance represents one directory (``self._dir``, with a
    trailing '/', or '' for the repo root).  Files directly inside the
    directory live in the plain dicts ``_files``/``_flags``;
    subdirectories are child treemanifest instances in ``_dirs``.
    Loading and copying of contents can be deferred via the
    ``_loadfunc``/``_copyfunc`` hooks until data is first needed.
    """
    def __init__(self, dir='', text=''):
        self._dir = dir
        self._node = revlog.nullid
        # lazy hooks; _noop means "already loaded" / "no pending copy"
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        self._dirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:
            def readsubtree(subdir, subm):
                raise AssertionError('treemanifest constructor only accepts '
                                     'flat manifests')
            self.parse(text, readsubtree)
            self._dirty = True # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        # full path of an entry relative to the repo root
        return self._dir + path

    def __len__(self):
        """Total number of files in this tree, recursively."""
        self._load()
        size = len(self._files)
        for m in self._dirs.values():
            size += m.__len__()
        return size

    def _isempty(self):
        # True when neither this dir nor any subdir contains a file
        self._load() # for consistency; already loaded by all callers
        return (not self._files and (not self._dirs or
                all(m._isempty() for m in self._dirs.values())))

    def __repr__(self):
        return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
                (self._dir, revlog.hex(self._node),
                 bool(self._loadfunc is _noop),
                 self._dirty, id(self)))

    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        return self._dir

    def node(self):
        '''This node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read or written from a revlog.
        '''
        assert not self._dirty
        return self._node

    def setnode(self, node):
        # record the revlog node this tree corresponds to; it is now clean
        self._node = node
        self._dirty = False

    def iterentries(self):
        """Yield (path, node, flags) for every file, in sorted order."""
        self._load()
        for p, n in sorted(self._dirs.items() + self._files.items()):
            if p in self._files:
                yield self._subpath(p), n, self._flags.get(p, '')
            else:
                # p is a subdirectory; recurse into it
                for x in n.iterentries():
                    yield x

    def iteritems(self):
        """Yield (path, node) for every file, in sorted order."""
        self._load()
        for p, n in sorted(self._dirs.items() + self._files.items()):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in n.iteritems():
                    yield f, sn

    def iterkeys(self):
        """Yield every file path in this tree, in sorted order."""
        self._load()
        for p in sorted(self._dirs.keys() + self._files.keys()):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p].iterkeys():
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, f):
        if f is None:
            return False
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return False
            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

    def get(self, f, default=None):
        """Return the node for file f, or default if absent."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        """Return the flags for file f, '' if none (or f is a directory)."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return ''
            return self._dirs[dir].flags(subpath)
        else:
            if f in self._dirs:
                return ''
            return self._flags.get(f, '')

    def find(self, f):
        """Return (node, flags) for file f; raises KeyError if absent."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, '')

    def __delitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True

    def __setitem__(self, f, n):
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            self._files[f] = n[:21] # to match manifestdict's behavior
        self._dirty = True

    def _load(self):
        # run the pending lazy-load or lazy-copy hook, at most once
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True

    def copy(self):
        """Return a copy of this tree; contents are copied lazily."""
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                for d in self._dirs:
                    s._dirs[d] = self._dirs[d].copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                # already loaded: copy eagerly
                _copyfunc(copy)
            else:
                # defer the copy until the new tree is first used
                copy._copyfunc = _copyfunc
        else:
            # chain to our own pending copy hook
            copy._copyfunc = self._copyfunc
        return copy

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        files = set()
        def _filesnotin(t1, t2):
            # identical, clean subtrees cannot differ; skip them
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    # whole subtree is missing from t2
                    files.update(m1.iterkeys())

            for fn in t1._files.iterkeys():
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files

    @propertycache
    def _alldirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._alldirs

    def hasdir(self, dir):
        """True if dir (no trailing '/') exists anywhere in this tree."""
        self._load()
        topdir, subdir = _splittopdir(dir)
        if topdir:
            if topdir in self._dirs:
                return self._dirs[topdir].hasdir(subdir)
            return False
        return (dir + '/') in self._dirs

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        if not match.visitdir(self._dir[:-1] or '.'):
            return

        # yield this dir's files and walk its submanifests
        self._load()
        for p in sorted(self._dirs.keys() + self._files.keys()):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                for f in self._dirs[p]._walk(match):
                    yield f

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        return self._matches(match)

    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitdir(self._dir[:-1] or '.')
        if visit == 'all':
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            fullp = self._subpath(fn)
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        for dir, subm in self._dirs.iteritems():
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            # identical, clean subtrees cannot differ; skip them
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result

    def unmodifiedsince(self, m2):
        # clean trees with equal nodes must have identical contents
        return not self._dirty and not m2._dirty and self._node == m2._node

    def parse(self, text, readsubtree):
        """Populate this tree from manifest text; subtree entries (flag
        't') are resolved through the readsubtree callback."""
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                self._dirs[f] = readsubtree(self._subpath(f), n)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl

    def text(self, usemanifestv2=False):
        """Get the full data of this manifest as a bytestring."""
        self._load()
        return _text(self.iterentries(), usemanifestv2)

    def dirtext(self, usemanifestv2=False):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files), usemanifestv2)

    def read(self, gettext, readsubtree):
        """Arrange for this tree to be lazily parsed from gettext()."""
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            s._dirty = False
        self._loadfunc = _load_for_read

    def writesubtrees(self, m1, m2, writesubtree):
        """Invoke writesubtree(subm, subp1, subp2) for each subdirectory,
        pairing each subtree with its parents' corresponding nodes."""
        self._load() # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()
        for d, subm in self._dirs.iteritems():
            subp1 = m1._dirs.get(d, emptytree)._node
            subp2 = m2._dirs.get(d, emptytree)._node
            if subp1 == revlog.nullid:
                # prefer a non-null first parent
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2)
1130 1130
class manifestrevlog(revlog.revlog):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, dir='', dirlogcache=None):
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        usemanifestv2 = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            usemanifestv2 = opts.get('manifestv2', usemanifestv2)

        self._treeondisk = usetreemanifest
        self._usemanifestv2 = usemanifestv2

        # maps node -> full manifest text (as a py2 'c' array)
        self._fulltextcache = util.lrucachedict(cachesize)

        indexfile = "00manifest.i"
        if dir:
            # per-directory revlogs only exist for on-disk tree manifests
            assert self._treeondisk, 'opts is %r' % opts
            if not dir.endswith('/'):
                dir = dir + '/'
            indexfile = "meta/" + dir + "00manifest.i"
        self._dir = dir
        # The dirlogcache is kept on the root manifest log
        if dir:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        super(manifestrevlog, self).__init__(opener, indexfile,
                                             checkambig=bool(dir))

    @property
    def fulltextcache(self):
        return self._fulltextcache

    def clearcaches(self):
        super(manifestrevlog, self).clearcaches()
        self._fulltextcache.clear()
        self._dirlogcache = {'': self}

    def dirlog(self, dir):
        """Return (creating and caching if needed) the revlog for dir."""
        if dir:
            assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifestrevlog(self.opener, dir,
                                                    self._dirlogcache)
        return self._dirlogcache[dir]

    def add(self, m, transaction, link, p1, p2, added, removed):
        """Add manifest m as a new revision with parents p1/p2.

        added/removed are the file lists changed relative to p1; they
        enable the fastdelta path below.  Returns the new node.
        """
        if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
            and not self._usemanifestv2):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                m1 = self.read(p1)
                m2 = self.read(p2)
                n = self._addtree(m, transaction, link, m1, m2)
                arraytext = None
            else:
                text = m.text(self._usemanifestv2)
                n = self.addrevision(text, transaction, link, p1, p2)
                arraytext = array.array('c', text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2):
        """Recursively write tree manifest m (parents m1/m2); returns node."""
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
            return m.node()
        def writesubtree(subm, subp1, subp2):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None)
        m.writesubtrees(m1, m2, writesubtree)
        text = m.dirtext(self._usemanifestv2)
        # Double-check whether contents are unchanged to one parent
        if text == m1.dirtext(self._usemanifestv2):
            n = m1.node()
        elif text == m2.dirtext(self._usemanifestv2):
            n = m2.node()
        else:
            n = self.addrevision(text, transaction, link, m1.node(), m2.node())
        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n
1243 1243
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo):
        self._repo = repo

        usetreemanifest = False

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
        self._treeinmem = usetreemanifest

        self._oldmanifest = repo._constructmanifest()
        self._revlog = self._oldmanifest

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}

        # We'll separate this into its own cache once oldmanifest is no longer
        # used
        self._mancache = self._oldmanifest._mancache
        self._dirmancache[''] = self._mancache

        # A future patch makes this use the same config value as the existing
        # mancache
        self.cachesize = 4

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, dir, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        if node in self._dirmancache.get(dir, ()):
            cachemf = self._dirmancache[dir][node]
            # The old manifest may put non-ctx manifests in the cache, so
            # skip those since they don't implement the full api.
            if (isinstance(cachemf, manifestctx) or
                isinstance(cachemf, treemanifestctx)):
                return cachemf

        if dir:
            if self._revlog._treeondisk:
                dirlog = self._revlog.dirlog(dir)
                if node not in dirlog.nodemap:
                    raise LookupError(node, dirlog.indexfile,
                                      _('no node'))
                m = treemanifestctx(self._repo, dir, node)
            else:
                raise error.Abort(
                        _("cannot ask for manifest directory '%s' in a flat "
                          "manifest") % dir)
        else:
            if node not in self._revlog.nodemap:
                raise LookupError(node, self._revlog.indexfile,
                                  _('no node'))
            if self._treeinmem:
                m = treemanifestctx(self._repo, '', node)
            else:
                m = manifestctx(self._repo, node)

        # never cache the null revision's ctx
        if node != revlog.nullid:
            mancache = self._dirmancache.get(dir)
            if not mancache:
                mancache = util.lrucachedict(self.cachesize)
                self._dirmancache[dir] = mancache
            mancache[node] = m
        return m

    def add(self, m, transaction, link, p1, p2, added, removed):
        # delegate writing straight to the underlying revlog
        return self._revlog.add(m, transaction, link, p1, p2, added, removed)
1325 1325
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, repo, node):
        self._repo = repo
        # parsed manifestdict, filled in lazily by read()
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def node(self):
        return self._node

    def read(self):
        """Return the full manifestdict for this revision (cached)."""
        if not self._data:
            if self._node == revlog.nullid:
                self._data = manifestdict()
            else:
                rl = self._repo.manifestlog._revlog
                text = rl.revision(self._node)
                # populate the revlog's fulltext cache as a side effect
                arraytext = array.array('c', text)
                rl._fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        rl = self._repo.manifestlog._revlog
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        revlog = self._repo.manifestlog._revlog
        if revlog._usemanifestv2:
            # Need to perform a slow delta
            r0 = revlog.deltaparent(revlog.rev(self._node))
            m0 = manifestctx(self._repo, revlog.node(r0)).read()
            m1 = self.read()
            md = manifestdict()
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

        r = revlog.rev(self._node)
        # patch the stored delta back into a standalone manifest text
        d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        """Return (node, flags) for file key; raises KeyError if absent."""
        return self.read().find(key)
1399
class treemanifestctx(object):
    """A single revision of a tree manifest for one directory.

    Mirrors manifestctx's interface but reads from the per-directory
    revlog and parses into a treemanifest.
    """
    def __init__(self, repo, dir, node):
        self._repo = repo
        self._dir = dir
        # parsed treemanifest, filled in lazily by read()
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def _revlog(self):
        # the revlog backing this directory's manifests
        return self._repo.manifestlog._revlog.dirlog(self._dir)

    def read(self):
        """Return the full treemanifest for this revision (cached)."""
        if not self._data:
            rl = self._revlog()
            if self._node == revlog.nullid:
                self._data = treemanifest()
            elif rl._treeondisk:
                m = treemanifest(dir=self._dir)
                def gettext():
                    return rl.revision(self._node)
                def readsubtree(dir, subm):
                    return treemanifestctx(self._repo, dir, subm).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                # tree kept in memory only: parse the flat revlog text
                text = rl.revision(self._node)
                arraytext = array.array('c', text)
                rl.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        revlog = self._revlog()
        if shallow and not revlog._usemanifestv2:
            r = revlog.rev(self._node)
            d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
            return manifestdict(d)
        else:
            # Need to perform a slow delta
            r0 = revlog.deltaparent(revlog.rev(self._node))
            m0 = treemanifestctx(self._repo, self._dir, revlog.node(r0)).read()
            m1 = self.read()
            md = treemanifest(dir=self._dir)
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        rl = self._revlog()
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if (deltaparent != revlog.nullrev and
            deltaparent in rl.parentrevs(r)):
            return self.readdelta(shallow=shallow)

        if shallow:
            # flat view of just this directory's text
            return manifestdict(rl.revision(self._node))
        else:
            return self.read()

    def find(self, key):
        """Return (node, flags) for file key; raises KeyError if absent."""
        return self.read().find(key)
1494
class manifest(manifestrevlog):
    """Legacy manifest class combining revlog storage with a parsed-manifest
    cache; being superseded by manifestlog/manifestctx."""
    def __init__(self, opener, dir='', dirlogcache=None):
        '''The 'dir' and 'dirlogcache' arguments are for internal use by
        manifest.manifest only. External users should create a root manifest
        log with manifest.manifest(opener) and call dirlog() on it.
        '''
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
        # maps node -> parsed manifest (or ctx object)
        self._mancache = util.lrucachedict(cachesize)
        self._treeinmem = usetreemanifest
        super(manifest, self).__init__(opener, dir=dir, dirlogcache=dirlogcache)

    def _newmanifest(self, data=''):
        # choose in-memory representation: tree or flat dict
        if self._treeinmem:
            return treemanifest(self._dir, data)
        return manifestdict(data)

    def dirlog(self, dir):
        """This overrides the base revlog implementation to allow construction
        'manifest' types instead of manifestrevlog types. This is only needed
        until we migrate off the 'manifest' type."""
        if dir:
            assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifest(self.opener, dir,
                                              self._dirlogcache)
        return self._dirlogcache[dir]

    def read(self, node):
        """Return the parsed manifest for node, consulting the cache."""
        if node == revlog.nullid:
            return self._newmanifest() # don't upset local cache
        if node in self._mancache:
            cached = self._mancache[node]
            if (isinstance(cached, manifestctx) or
                isinstance(cached, treemanifestctx)):
                cached = cached.read()
            return cached
        if self._treeondisk:
            def gettext():
                return self.revision(node)
            def readsubtree(dir, subm):
                return self.dirlog(dir).read(subm)
            m = self._newmanifest()
            m.read(gettext, readsubtree)
            m.setnode(node)
            arraytext = None
        else:
            text = self.revision(node)
            m = self._newmanifest(text)
            arraytext = array.array('c', text)
        self._mancache[node] = m
        if arraytext is not None:
            self.fulltextcache[node] = arraytext
        return m

    # NOTE(review): this method is shown as removed in this changeset
    # ("manifest: remove manifest.find"), superseded by manifestctx.find /
    # treemanifestctx.find above.
    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        m = self.read(node)
        try:
            return m.find(f)
        except KeyError:
            return None, None

    def clearcaches(self):
        super(manifest, self).clearcaches()
        self._mancache.clear()
General Comments 0
You need to be logged in to leave comments. Login now