##// END OF EJS Templates
docs: add args/returns docs for some cmdutil, context, and registrar functions...
rlevasseur@google.com -
r35106:b22a0d9e default
parent child Browse files
Show More
@@ -1,3969 +1,3977
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import itertools
12 12 import os
13 13 import re
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 )
23 23
24 24 from . import (
25 25 bookmarks,
26 26 changelog,
27 27 copies,
28 28 crecord as crecordmod,
29 29 dagop,
30 30 dirstateguard,
31 31 encoding,
32 32 error,
33 33 formatter,
34 34 graphmod,
35 35 match as matchmod,
36 36 mdiff,
37 37 obsolete,
38 38 patch,
39 39 pathutil,
40 40 pycompat,
41 41 registrar,
42 42 revlog,
43 43 revset,
44 44 scmutil,
45 45 smartset,
46 46 templatekw,
47 47 templater,
48 48 util,
49 49 vfs as vfsmod,
50 50 )
51 51 stringio = util.stringio
52 52
53 53 # templates of common command options
54 54
# templates of common command options
#
# Each entry is an option tuple as consumed by the command tables:
# (short name, long name, default, help text[, value placeholder]).

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

# whitespace options are shared with the diff commands, hence the splice
diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
165 165
def ishunk(x):
    """Report whether *x* is a patch hunk (curses or plain record flavor)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
169 169
def newandmodified(chunks, originalchunks):
    """Return filenames of newly-added files whose hunks were edited.

    A file counts when one of its hunks introduces a new file and that
    hunk is not present verbatim in *originalchunks*.
    """
    touched = set()
    for c in chunks:
        if not ishunk(c):
            continue
        if c.header.isnewfile() and c not in originalchunks:
            touched.add(c.header.filename())
    return touched
177 177
def parsealiases(cmd):
    """Return the list of alias names encoded in a command table key.

    A leading '^' marker (if any) is dropped before splitting on '|'.
    """
    names = cmd.lstrip("^")
    return names.split("|")
180 180
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output passed through it gets labeled/colorized.

    Returns the original, unwrapped ui.write so the caller can restore it
    once the colorized output is done.
    """
    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        label = kw.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write
    def wrap(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)
    setattr(ui, 'write', wrap)
    return oldwrite
193 193
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Let the user select hunks, via the curses UI or the text prompt.

    *testfile*, when set, drives the curses chunk selector from a script
    instead of interactive input (used by tests; see recordfilter).
    *operation* is forwarded to the selector to word its prompts.
    """
    if usecurses:
        if testfile:
            recordfn = crecordmod.testdecorator(testfile,
                                                crecordmod.testchunkselector)
        else:
            recordfn = crecordmod.chunkselector

        return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)

    else:
        return patch.filterpatch(ui, originalhunks, operation)
206 206
def recordfilter(ui, originalhunks, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used to build ui messages to indicate the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).

    Returns the (newchunks, newopts) pair produced by the selection UI.
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    # diff output is labeled while the interactive UI runs; always restore
    # the original ui.write afterwards, even if the selector raises
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
                                          testfile, operation)
    finally:
        ui.write = oldwrite
    return newchunks, newopts
223 223
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Run a commit-like command interactively on a subset of changes.

    commitfunc is called as commitfunc(ui, repo, *newfiles, **opts) once
    the working directory holds only the selected changes.  cmdsuggest is
    the non-interactive command to suggest when the ui is not interactive.
    backupall forces backing up every changed file instead of only the
    ones being touched.  filterfn is the hunk-selection function (e.g.
    recordfilter).
    """
    from . import merge as mergemod
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # the interactive UI works on git-style diffs with function context
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # non-hunk chunks (e.g. headers) have no files()
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; leftover backups are not fatal
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
406 406
class dirnode(object):
    """
    Represent a directory in user working copy with information required for
    the purpose of tersing its status.

    path is the path to the directory

    statuses is a set of statuses of all files in this directory (this includes
    all the files in all the subdirectories too)

    files is a list of files which are direct child of this directory

    subdirs is a dictionary of sub-directory name as the key and its own
    dirnode object as the value
    """

    def __init__(self, dirpath):
        self.path = dirpath
        # status characters of every file at or below this directory
        self.statuses = set([])
        # (filename, status) pairs for direct children only
        self.files = []
        # subdirectory name -> dirnode
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not direct child of this directory, we traverse to the
        directory of which this file is a direct child of and add the file
        there.
        """

        # the filename contains a path separator, it means it's not the direct
        # child of this directory
        if '/' in filename:
            subdir, filep = filename.split('/', 1)

            # does the dirnode object for subdir exists
            if subdir not in self.subdirs:
                subdirpath = os.path.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            # try adding the file in subdir
            self.subdirs[subdir].addfile(filep, status)

        else:
            self._addfileindir(filename, status)

        # record the status on this node too, so ancestors know about
        # statuses present anywhere in their subtree
        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for f, st in self.files:
            yield st, os.path.join(self.path, f)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to terse
        that status. -> yield (status, dirpath)

        2) Otherwise, we do following:

                a) Yield (status, filepath) for all the files which are in this
                directory (only the ones in this directory, not the subdirs)

                b) Recurse the function on all the subdirectories of this
                directory
        """

        if len(self.statuses) == 1:
            # NOTE(review): pop() empties the one-element set; this appears
            # safe because each node is only walked once (see tersedir) —
            # confirm before reusing a dirnode tree
            onlyst = self.statuses.pop()

            # Making sure we terse only when the status abbreviation is
            # passed as terse argument
            if onlyst in terseargs:
                yield onlyst, self.path + pycompat.ossep
                return

        # add the files to status list
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        #recurse on the subdirs
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath
506 506
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory shares the same status.

    statuslist is scmutil.status() object which contains a list of files for
    each status.
    terseargs is string which is passed by the user as the argument to `--terse`
    flag.

    The function makes a tree of objects of dirnode class, and at each node it
    stores the information required to know whether we can terse a certain
    directory or not.

    Returns a list of file lists, one per status, in the order
    m, a, r, d, u, i, c; each inner list is sorted.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # checking the argument validity
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.Abort(_("'%s' not recognized") % s)

    # creating a dirnode object for the root of the repo
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        # status files are keyed by the first letter of the status name
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # we won't be tersing the root dir, so add files in it
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # process each sub-directory and build tersedict
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    tersedlist = []
    for st in allst:
        tersedict[st].sort()
        tersedlist.append(tersedict[st])

    return tersedlist
555 555
556 556 def _commentlines(raw):
557 557 '''Surround lineswith a comment char and a new line'''
558 558 lines = raw.splitlines()
559 559 commentedlines = ['# %s' % line for line in lines]
560 560 return '\n'.join(commentedlines) + '\n'
561 561
def _conflictsmsg(repo):
    """Return a commented message describing unresolved merge conflicts.

    Returns None when no merge state is active.
    """
    # avoid merge cycle
    from . import merge as mergemod
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    m = scmutil.match(repo[None])
    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
    if unresolvedlist:
        mergeliststr = '\n'.join(
            [' %s' % util.pathto(repo.root, pycompat.getcwd(), path)
             for path in unresolvedlist])
        msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
    else:
        msg = _('No unresolved merge conflicts.')

    return _commentlines(msg)
584 584
def _helpmessage(continuecmd, abortcmd):
    """Return a commented two-line blurb with continue/abort commands."""
    msg = _('To continue: %s\n'
            'To abort: %s') % (continuecmd, abortcmd)
    return _commentlines(msg)

def _rebasemsg():
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')

def _histeditmsg():
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')

def _unshelvemsg():
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')

def _updatecleanmsg(dest=None):
    """Return an 'hg update --clean' suggestion with a data-loss warning."""
    warning = _('warning: this will discard uncommitted changes')
    return 'hg update --clean %s (%s)' % (dest or '.', warning)

def _graftmsg():
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg graft --continue', _updatecleanmsg())

def _mergemsg():
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg commit', _updatecleanmsg())

def _bisectmsg():
    msg = _('To mark the changeset good: hg bisect --good\n'
            'To mark the changeset bad: hg bisect --bad\n'
            'To abort: hg bisect --reset\n')
    return _commentlines(msg)

def fileexistspredicate(filename):
    """Return a predicate checking whether 'filename' exists in repo.vfs."""
    return lambda repo: repo.vfs.exists(filename)

def _mergepredicate(repo):
    # more than one working-directory parent means a merge is in progress
    return len(repo[None].parents()) > 1
622 622
# Unfinished-operation states recognized by `hg status -v`/morestatus;
# _getrepostate() returns the first entry whose predicate fires.
STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
)
636 636
def _getrepostate(repo):
    """Return the first matching (state, predicate, msgfn) triple from STATES.

    States listed in the commands.status.skipstates config are ignored.
    Returns None when no unfinished state is detected.
    """
    # experimental config: commands.status.skipstates
    skip = set(repo.ui.configlist('commands', 'status.skipstates'))
    for state, statedetectionpredicate, msgfn in STATES:
        if state in skip:
            continue
        if statedetectionpredicate(repo):
            return (state, statedetectionpredicate, msgfn)
645 645
def morestatus(repo, fm):
    """Write extra status output (unfinished state, conflicts, help text)
    to formatter 'fm' when the repository is in an unfinished state."""
    statetuple = _getrepostate(repo)
    label = 'status.morestatus'
    if statetuple:
        fm.startitem()
        state, statedetectionpredicate, helpfulmsg = statetuple
        statemsg = _('The repository is in an unfinished *%s* state.') % state
        fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
        conmsg = _conflictsmsg(repo)
        if conmsg:
            fm.write('conflictsmsg', '%s\n', conmsg, label=label)
        if helpfulmsg:
            helpmsg = helpfulmsg()
            fm.write('helpmsg', '%s\n', helpmsg, label=label)
660 660
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.

    With strict=False, an unambiguous prefix of an alias also matches.
    Returns (choice, allcmds): 'choice' maps each matched name to its
    (aliases, entry) pair, 'allcmds' lists every alias considered.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for e in keys:
        aliases = parsealiases(e)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            # accept a prefix of any alias when not strict
            for a in aliases:
                if a.startswith(cmd):
                    found = a
                    break
        if found is not None:
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugchoice[found] = (aliases, table[e])
            else:
                choice[found] = (aliases, table[e])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
698 698
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string.

    Raises AmbiguousCommand when several commands match and
    UnknownCommand when none do.
    """
    choice, allcmds = findpossible(cmd, table, strict)

    try:
        # exact match wins outright
        return choice[cmd]
    except KeyError:
        pass

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        # exactly one candidate left
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
714 714
def findrepo(p):
    """Walk up from directory 'p' looking for one containing '.hg'.

    Returns the repository root path, or None when the filesystem root is
    reached without finding one.
    """
    while True:
        if os.path.isdir(os.path.join(p, ".hg")):
            return p
        parent = os.path.dirname(p)
        if parent == p:
            return None
        p = parent
722 722
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.

    Subrepositories are checked recursively; raises Abort on any
    uncommitted change.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise error.Abort(_('uncommitted changes'), hint=hint)
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged(hint=hint)
740 740
def logmessage(ui, opts):
    """ get the log message according to -m and -l option

    Returns the message string, or None when neither option is set.
    Raises Abort when both options are given or the logfile cannot be
    read.
    """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if not message and logfile:
        try:
            if isstdiofilename(logfile):
                # '-' means: read the message from stdin
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, encoding.strtolocal(inst.strerror)))
    return message
759 759
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a changectx is a merge when it has two parents
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
776 776
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.

    The returned callable takes (repo, ctx, subs) arguments.
    """
    if edit or finishdesc or extramsg:
        return lambda r, c, s: commitforceeditor(r, c, s,
                                                 finishdesc=finishdesc,
                                                 extramsg=extramsg,
                                                 editform=editform)
    elif editform:
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)
    else:
        return commiteditor
807 807
def loglimit(opts):
    """get the log limit according to option -l/--limit

    Returns the limit as a positive int, or None when no (or an empty)
    limit was given.  Raises Abort on a non-integer or non-positive
    value.
    """
    limit = opts.get('limit')
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
821 821
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand a '%'-format pattern into an output file name.

    Supported specifiers (each only when its source value is provided):
    %H/%h/%R/%r full hex/short hex/rev number/zero-padded rev of 'node';
    %m sanitized 'desc'; %N 'total'; %n 'seqno' (zero-padded to the width
    of 'total' when both are given); %s/%d/%p basename/dirname/full
    'pathname'; %b basename of the repo root; %% a literal '%'.

    Raises Abort on a specifier that is not available for the given
    arguments.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        # raw string: '\w' is an invalid escape in a plain literal
        'm': lambda: re.sub(r'[^\w]', '_', str(desc)),
    }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        if node:
            expander.update(node_expander)
            # %r needs revwidth, so it is not part of node_expander
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad %n to the width of the total count
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i:i + 1]
            if c == '%':
                i += 1
                c = pat[i:i + 1]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
867 867
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == '-'
871 871
class _unclosablefile(object):
    """File-object proxy whose close() is a no-op.

    Used to hand out shared streams (repo.ui.fout / repo.ui.fin in
    makefileobj) to code that closes whatever file it is given.
    """

    def __init__(self, fp):
        # the real underlying stream; all other attribute access is
        # delegated to it via __getattr__
        self._fp = fp

    def close(self):
        # deliberately do nothing: the wrapped stream must stay open
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        return getattr(self._fp, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # do not close on context-manager exit either
        pass
890 890
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open the file named by expanding 'pat' (see makefilename).

    A stdio pattern ('-' or empty) returns the ui's input/output stream
    wrapped so callers cannot close it.  'modemap', when given, overrides
    'mode' per expanded file name and flips 'wb' to 'ab' after the first
    open so later opens of the same name append instead of truncating.
    """

    writable = mode not in ('r', 'rb')

    if isstdiofilename(pat):
        if writable:
            fp = repo.ui.fout
        else:
            fp = repo.ui.fin
        return _unclosablefile(fp)
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
909 909
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    The target is selected by the --changelog/--manifest/--dir entries in
    'opts' or by 'file_'; invalid combinations raise Abort.  Outside a
    repository, falls back to opening the revlog index derived from
    'file_' directly.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        # open the index file corresponding to the given data/index path
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
954 954
def copy(ui, repo, pats, opts, rename=False):
    """Copy or rename the files matching pats[:-1] to destination pats[-1].

    Args:
      ui: ui instance used for all status/warning output.
      repo: the local repository; the caller must already hold the repo lock.
      pats: source patterns followed by the destination as the last element.
      opts: options dict; 'after', 'dry_run' and 'force' are honored here,
        the rest is forwarded to scmutil.match().
      rename: if True, record a rename (removing/forgetting the source)
        instead of a copy.

    Returns:
      True if at least one copy/rename failed, False otherwise.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # resolve one source pattern to a list of (abs, rel, exact) sources,
        # warning about (and skipping) unmanaged or removed files
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy/rename one file; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            # NOTE(review): srcexists is only assigned on this branch; the
            # 'rename and not dryrun' guard below keeps the later read safe —
            # verify before refactoring this control flow.
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                repo.wvfs.unlinkpath(abssrc)
            wctx.forget([abssrc])

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped source paths already exist at
                    # the destination, to pick the most plausible strip depth
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
1200 1200
## facility to let extension process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1221 1221
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, rejects flag)
    tuple, or (None, None, False) when the hunk contains no patch data.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # nothing was extracted from the hunk: not a patch
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
                if p2.node() == nullid:
                    ui.warn(_("warning: import the patch as a normal revision\n"
                              "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply the patch to the working directory and commit from there
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except error.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                overrides = {}
                if partial:
                    overrides[('ui', 'allowemptycommit')] = True
                with repo.ui.configoverride(overrides, 'import'):
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                for idfunc in extrapostimport:
                    extrapostimportmap[idfunc](repo[n])
        else:
            # --bypass: build the changeset in memory without touching the
            # working directory
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except error.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.memctx(repo, (p1.node(), p2.node()),
                                        message,
                                        files=files,
                                        filectxfn=store,
                                        user=user,
                                        date=date,
                                        branch=branch,
                                        editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        # always remove the temporary file the patch was extracted to
        os.unlink(tmpname)
1391 1391
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# the function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1399 1399
def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
    """Emit one changeset as an "HG changeset patch" through write().

    Args:
      ctx: the changectx being exported.
      match: matcher limiting which files appear in the diff.
      switch_parent: diff against the second parent (when present) instead
        of the first.
      seqno: position of this patch in the series; forwarded to the
        extraexport header hooks.
      write: output callable; must accept a ``label`` keyword (used for
        the diff chunks).
      diffopts: options forwarded to patch.diffui().
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        parents.reverse()

    # diff base: first (possibly switched) parent, or null for a root
    if parents:
        prev = parents[0]
    else:
        prev = nullid

    write("# HG changeset patch\n")
    write("# User %s\n" % ctx.user())
    write("# Date %d %d\n" % ctx.date())
    write("# %s\n" % util.datestr(ctx.date()))
    if branch and branch != 'default':
        write("# Branch %s\n" % branch)
    write("# Node ID %s\n" % hex(node))
    write("# Parent %s\n" % hex(prev))
    if len(parents) > 1:
        write("# Parent %s\n" % hex(parents[1]))

    # let extensions contribute extra header lines
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            write('# %s\n' % header)
    write(ctx.description().rstrip())
    write("\n\n")

    for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
        write(chunk, label=label)
1432 1432
def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      fntemplate: An optional string to use for generating patch file names.
      fp: An optional file-like object to which patches should be written.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fp is specified: All revs are written to the specified
                         file-like object.
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Neither fp nor template specified: All revs written to repo.ui.write()
    '''

    total = len(revs)
    # NOTE(review): assumes revs is non-empty — max() below raises ValueError
    # on an empty list; callers appear to guarantee this. Verify.
    revwidth = max(len(str(rev)) for rev in revs)
    filemode = {}

    write = None
    dest = '<unnamed>'
    if fp:
        dest = getattr(fp, 'name', dest)
        def write(s, **kw):
            fp.write(s)
    elif not fntemplate:
        write = repo.ui.write

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        fo = None
        if not fp and fntemplate:
            # one output file per revision, named from the template
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
                             total=total, seqno=seqno, revwidth=revwidth,
                             mode='wb', modemap=filemode)
            dest = fo.name
            def write(s, **kw):
                fo.write(s)
        if not dest.startswith('<'):
            repo.ui.note("%s\n" % dest)
        _exportsingle(
            repo, ctx, match, switch_parent, rev, seqno, write, opts)
        if fo is not None:
            fo.close()
1491 1491
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False, hunksfilterfn=None):
    '''show diff or diffstat.

    Writes the diff (or, with stat=True, a diffstat summary) between
    node1 and node2 to fp, or to the ui when fp is None. ``root``
    restricts output to paths under that directory; ``listsubrepos``
    additionally emits diffs for matching subrepos. ``hunksfilterfn``
    is forwarded to the patch machinery to filter individual hunks.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # drop any label keyword: plain file objects don't take it
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat needs no context lines; request none for speed
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot,
                            hunksfilterfn=hunksfilterfn)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot,
                                         hunksfilterfn=hunksfilterfn):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1550 1550
1551 1551 def _changesetlabels(ctx):
1552 1552 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1553 1553 if ctx.obsolete():
1554 1554 labels.append('changeset.obsolete')
1555 1555 if ctx.isunstable():
1556 1556 labels.append('changeset.unstable')
1557 1557 for instability in ctx.instabilities():
1558 1558 labels.append('instability.%s' % instability)
1559 1559 return ' '.join(labels)
1560 1560
class changeset_printer(object):
    '''show changeset information when templating not requested.

    Output is written to ``ui``; with ``buffered`` set, each changeset's
    output is captured per revision and replayed later via flush().
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-revision caches of buffered output, keyed by rev number
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        # write (and drop) any buffered header/hunk for ctx's revision;
        # returns 1 if a hunk was written, 0 otherwise
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                # only emit a header when it differs from the previous one
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # emit the accumulated footer, if any
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, hunksfilterfn=None,
             **props):
        # render one changeset, buffering the output per revision when
        # self.buffered is set
        props = pycompat.byteskwargs(props)
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, hunksfilterfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, hunksfilterfn, props)

    def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            # quiet mode: just the changeset id
            self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
                          label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %s\n") % scmutil.formatchangeid(ctx),
                      label=_changesetlabels(ctx))

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %s\n") % scmutil.formatchangeid(pctx),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            mrev = self.repo.manifestlog._revlog.rev(mnode)
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %s\n")
                          % scmutil.formatrevnode(self.ui, mrev, mnode),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if ctx.isunstable():
            # i18n: column positioning for "hg log"
            instabilities = ctx.instabilities()
            self.ui.write(_("instability: %s\n") % ', '.join(instabilities),
                          label='log.instability')

        elif ctx.obsolete():
            self._showobsfate(ctx)

        self._exthook(ctx)

        if self.ui.debugflag:
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, util.escapestr(value)),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)

    def _showobsfate(self, ctx):
        # display the fate of an obsolete changeset, one line per successor set
        obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)

        if obsfate:
            for obsfateline in obsfate:
                # i18n: column positioning for "hg log"
                self.ui.write(_("obsolete: %s\n") % obsfateline,
                              label='log.obsfate')

    def _exthook(self, ctx):
        '''empty method used by extension as a hook point
        '''

    def showpatch(self, ctx, matchfn, hunksfilterfn=None):
        # emit diffstat and/or the patch itself, as requested by diffopts
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True,
                               hunksfilterfn=hunksfilterfn)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False,
                               hunksfilterfn=hunksfilterfn)
            self.ui.write("\n")
1752 1752
class jsonchangeset(changeset_printer):
    '''format changeset information as a JSON array of objects.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # True until the first changeset is written; controls opening of
        # the JSON array and comma placement between items
        self._first = True

    def close(self):
        # finish the JSON document: close the array opened by the first
        # _show() call, or emit an empty array when nothing was displayed
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            # the working-directory revision has neither number nor hash
            jrev = jnode = 'null'
        else:
            jrev = '%d' % rev
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        # quiet mode: only the revision number and node hash
        if self.ui.quiet:
            self.ui.write(('\n "rev": %s') % jrev)
            self.ui.write((',\n "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n "rev": %s') % jrev)
        self.ui.write((',\n "node": %s') % jnode)
        self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            # debug mode adds the manifest node, extras, and the
            # modified/added/removed file lists
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n "manifest": %s') % jmanifestnode)

            self.ui.write((',\n "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            files = ctx.p1().status(ctx)
            self.ui.write((',\n "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            # verbose mode shows the flat list of touched files and copies
            self.ui.write((',\n "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            # embed diffstat and/or diff as escaped strings when
            # --stat/--patch were requested; output is captured through
            # the ui buffer
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1851 1851
class changeset_templater(changeset_printer):
    '''format changeset information.

    Note: there are a variety of convenience functions to build a
    changeset_templater for common cases. See functions such as:
    makelogtemplater, show_changeset, buildcommittemplate, or other
    functions that use changesest_templater.
    '''

    # Arguments before "buffered" used to be positional. Consider not
    # adding/removing arguments before "buffered" to not break callers.
    def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
                 buffered=False):
        diffopts = diffopts or {}

        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.t = formatter.loadtemplater(ui, tmplspec,
                                         cache=templatekw.defaulttempl)
        self._counter = itertools.count()
        self.cache = {}

        self._tref = tmplspec.ref
        # map of template part name -> name of the template to render;
        # an empty string means "part not available"
        self._parts = {'header': '', 'footer': '',
                       tmplspec.ref: tmplspec.ref,
                       'docheader': '', 'docfooter': '',
                       'separator': ''}
        if tmplspec.mapfile:
            # find correct templates for current mode, for backward
            # compatibility with 'log -v/-q/--debug' using a mapfile
            tmplmodes = [
                (True, ''),
                (self.ui.verbose, '_verbose'),
                (self.ui.quiet, '_quiet'),
                (self.ui.debugflag, '_debug'),
            ]
            for mode, postfix in tmplmodes:
                for t in self._parts:
                    cur = t + postfix
                    if mode and cur in self.t:
                        self._parts[t] = cur
        else:
            partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
            m = formatter.templatepartsmap(tmplspec, self.t, partnames)
            self._parts.update(m)

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        # append the document footer (if any) to the regular footer
        # before letting the base class flush it
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['index'] = index = next(self._counter)
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache
        props = pycompat.strkwargs(props)

        # write separator, which wouldn't work well with the header part below
        # since there's inherently a conflict between header (across items) and
        # separator (per item)
        if self._parts['separator'] and index > 0:
            self.ui.write(templater.stringify(self.t(self._parts['separator'])))

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts[self._tref]
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1939 1945
def logtemplatespec(tmpl, mapfile):
    """Build a template spec for the log machinery; a map file (style)
    gets the 'changeset' reference, a literal template gets none."""
    if not mapfile:
        return formatter.templatespec('', tmpl, None)
    return formatter.templatespec('changeset', tmpl, mapfile)
1945 1951
def _lookuplogtemplate(ui, tmpl, style):
    """Find the template matching the given template spec or style

    See formatter.lookuptemplate() for details.
    """
    # neither given explicitly: fall back to the [ui] configuration,
    # where a configured template is stronger than a configured style
    if not tmpl and not style:
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            return logtemplatespec(templater.unquotestring(tmpl), None)
        style = util.expandpath(ui.config('ui', 'style'))

    if not tmpl and style:
        mapfile = style
        # a bare style name is resolved against the templates directory
        if not os.path.split(mapfile)[0]:
            found = (templater.templatepath('map-cmdline.' + mapfile)
                     or templater.templatepath(mapfile))
            if found:
                mapfile = found
        return logtemplatespec(None, mapfile)

    if not tmpl:
        return logtemplatespec(None, None)

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1973 1979
def makelogtemplater(ui, repo, tmpl, buffered=False):
    """Create a changeset_templater from a literal template 'tmpl'
    byte-string."""
    spec = logtemplatespec(tmpl, None)
    return changeset_templater(ui, repo, spec, buffered=buffered)
1978 1985
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a matcher is only needed when diffs/diffstats will be shown
    if opts.get('patch') or opts.get('stat'):
        match = scmutil.matchall(repo)
    else:
        match = None

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, match, opts, buffered)

    spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
    if spec.ref or spec.tmpl or spec.mapfile:
        return changeset_templater(ui, repo, spec, match, opts, buffered)
    return changeset_printer(ui, repo, match, opts, buffered)
2004 2011
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    # condwrite only emits the successor list when it is non-empty
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    meta = marker.metadata().copy()
    # the date is already displayed above; drop it from the metadata dict
    meta.pop('date', None)
    fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
2025 2032
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matched = {}

    def prep(ctx, fns):
        # record the date of every changeset whose timestamp matches
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    m = scmutil.matchall(repo)
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
2046 2053
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield an endless stream of revision-window sizes, doubling from
    'windowsize' each time until 'sizelimit' is reached, then repeating
    the last size forever."""
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
2052 2059
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone."""
2055 2062
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    # rename sources discovered while walking; iterfiles() re-yields them
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode) for each explicitly matched file,
        # then for each rename source appended to 'copies' by the walk
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2152 2159
class _followfilter(object):
    '''stateful predicate matching revisions connected to the first
    revision passed to match()'''

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        # nullrev until the first match() call fixes the starting point
        self.startrev = nullrev
        # revisions currently known to connect to startrev
        self.roots = set()
        # when True, only walk first parents
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parents of 'rev' excluding nullrev; just the first parent
            # when onlyfirst is set
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            # first call: remember and accept the starting revision
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
2190 2197
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    # rev -> list of changed filenames matching the file conditions
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            # gather the next window of wanted revisions
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # call prepare() on each context in forward (ascending) order
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # then yield the contexts in the window's original order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2328 2335
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating revs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # one-element list so the nested closure can mutate the flag
    # (this file targets python 2, which has no 'nonlocal')
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        # map each relevant revision to the set of followed file paths
        # it carries along the traversal
        for fn in files:
            fctx = pctx[fn]
            fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
            for c in fctx.ancestors(followfirst=followfirst):
                fcache.setdefault(c.rev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
2356 2363
def _makenofollowlogfilematcher(repo, pats, opts):
    '''hook for extensions to override the filematcher for non-follow cases

    Returns None by default; callers fall back to their own matcher when
    no extension provides one.
    '''
    return None
2360 2367
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # log option name -> (revset template, join operator for list values)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # translate the accumulated options into a single revset expression
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2512 2519
def _logrevs(repo, opts):
    """Resolve the set of revisions 'hg log' should visit from opts."""
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if follow:
        if repo.dirstate.p1() == nullid:
            # following from the null working-directory parent: nothing
            return smartset.baseset()
        return repo.revs('reverse(:.)')
    revs = smartset.spanset(repo)
    revs.reverse()
    return revs
2527 2534
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        if not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        matcher = revset.match(repo.ui, expr)
        revs = matcher(repo, revs)
    if limit is not None:
        # honor --limit by keeping only the first 'limit' revisions
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = smartset.baseset(limitedrevs)

    return revs, expr, filematcher
2558 2565
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        revs = revset.match(repo.ui, expr)(repo, revs)
    if limit is not None:
        # honor --limit by truncating to the first 'limit' revisions
        revs = smartset.baseset(list(itertools.islice(revs, limit)))
    return revs, expr, filematcher
2584 2591
def _parselinerangelogopt(repo, opts):
    """Parse --line-range log option and return a list of tuples (filename,
    (fromline, toline)).
    """
    result = []
    for spec in opts.get('line_range', []):
        # each spec is "PATTERN,FROM:TO"
        try:
            pat, rangespec = spec.rsplit(',', 1)
        except ValueError:
            raise error.Abort(_('malformatted line-range pattern %s') % spec)
        try:
            fromline, toline = map(int, rangespec.split(':'))
        except ValueError:
            raise error.Abort(_("invalid line range for %s") % pat)
        msg = _("line range pattern '%s' must match exactly one file") % pat
        fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
        result.append((fname, util.processlinerange(fromline, toline)))
    return result
2604 2611
def getloglinerangerevs(repo, userrevs, opts):
    """Return (revs, filematcher, hunksfilter).

    "revs" are revisions obtained by processing "line-range" log options and
    walking block ancestors of each specified file/line-range.

    "filematcher(rev) -> match" is a factory function returning a match object
    for a given revision for file patterns specified in --line-range option.
    If neither --stat nor --patch options are passed, "filematcher" is None.

    "hunksfilter(rev) -> filterfn(fctx, hunks)" is a factory function
    returning a hunks filtering function.
    If neither --stat nor --patch options are passed, "filterhunks" is None.
    """
    wctx = repo[None]

    # Two-levels map of "rev -> file ctx -> [line range]".
    linerangesbyrev = {}
    for fname, (fromline, toline) in _parselinerangelogopt(repo, opts):
        if fname not in wctx:
            raise error.Abort(_('cannot follow file not in parent '
                                'revision: "%s"') % fname)
        fctx = wctx.filectx(fname)
        # Walk the block's ancestry; keep only revisions the caller asked
        # for (userrevs), recording the line ranges touched at each.
        for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
            rev = fctx.introrev()
            if rev not in userrevs:
                continue
            linerangesbyrev.setdefault(
                rev, {}).setdefault(
                    fctx.path(), []).append(linerange)

    filematcher = None
    hunksfilter = None
    if opts.get('patch') or opts.get('stat'):

        def nofilterhunksfn(fctx, hunks):
            # Identity filter, used for revisions with no recorded ranges.
            return hunks

        def hunksfilter(rev):
            # Return a filter keeping only hunks that overlap one of the
            # line ranges recorded for 'rev'.
            fctxlineranges = linerangesbyrev.get(rev)
            if fctxlineranges is None:
                return nofilterhunksfn

            def filterfn(fctx, hunks):
                lineranges = fctxlineranges.get(fctx.path())
                if lineranges is not None:
                    for hr, lines in hunks:
                        if hr is None: # binary
                            yield hr, lines
                            continue
                        if any(mdiff.hunkinrange(hr[2:], lr)
                               for lr in lineranges):
                            yield hr, lines
                else:
                    # File not constrained by any range: keep all hunks.
                    for hunk in hunks:
                        yield hunk

            return filterfn

        def filematcher(rev):
            # Match exactly the files recorded for 'rev'.
            files = list(linerangesbyrev.get(rev, []))
            return scmutil.matchfiles(repo, files)

    revs = sorted(linerangesbyrev, reverse=True)

    return revs, filematcher, hunksfilter
2671 2678
def _graphnodeformatter(ui, displayer):
    """Return a callable rendering the graph node character of a changeset."""
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        # No custom template configured: use the fast built-in keyword.
        return templatekw.showgraphnode

    templ = formatter.maketemplater(ui, templater.unquotestring(spec))
    if isinstance(displayer, changeset_templater):
        # Share the displayer's cache so slow templates are reused.
        cache = displayer.cache
    else:
        cache = {}
    props = templatekw.keywords.copy()
    props.update({'templ': templ, 'cache': cache})

    def formatnode(repo, ctx):
        props['repo'] = repo
        props['ui'] = repo.ui
        props['ctx'] = ctx
        props['revcache'] = {}
        return templ.render(props)

    return formatnode
2692 2699
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None, props=None):
    """Render 'dag' as an ASCII graph alongside each changeset's text.

    'dag' yields (rev, type, ctx, parents) tuples.  'displayer' must be a
    buffered changeset displayer; 'edgefn' (e.g. graphmod.asciiedges)
    computes the graph edges.  'getrenamed' and 'filematcher' are optional
    callables used to compute copies and per-revision file matchers passed
    to the displayer.
    """
    props = props or {}
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

        # experimental config: experimental.graphshorten
        state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # NOTE(review): 'ctx.rev()' is falsy for both the working directory
        # (None) and revision 0, so copies are skipped for both; confirm
        # rev 0 is intentionally excluded.
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        edges = edgefn(type, char, state, rev, parents)
        # The first edge carries the graph width used to lay out the text.
        firstedge = next(edges)
        width = firstedge[2]
        displayer.show(ctx, copies=copies, matchfn=revmatchfn,
                       _graphwidth=width, **props)
        # The displayer is buffered: pull the rendered text back out of it.
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        # Only the first edge is drawn with the changeset text; subsequent
        # edges get an empty 'lines' list.
        for type, char, width, coldata in itertools.chain([firstedge], edges):
            graphmod.ascii(ui, state, type, char, lines, coldata)
            lines = []
    displayer.close()
2745 2752
def graphlog(ui, repo, pats, opts):
    """Show revisions with an ASCII graph.

    Parameters are identical to log command ones.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        else:
            endrev = None
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    ui.pager('log')
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges,
                 getrenamed, filematcher)
2762 2769
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option incompatible with -G/--graph was supplied."""
    unsupported = ["newest_first"]
    for op in unsupported:
        if opts.get(op):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % op.replace("_", "-"))
2768 2775
def graphrevs(repo, nodes, opts):
    """Turn a list of nodes into a graph walker, newest first.

    Note that 'nodes' is reversed in place (side effect visible to the
    caller).
    """
    maxrevs = loglimit(opts)
    nodes.reverse()
    if maxrevs is not None:
        nodes = nodes[:maxrevs]
    return graphmod.nodes(repo, nodes)
2775 2782
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule matching files for addition, recursing into subrepos.

    'prefix' is the path of this (sub)repo relative to the top-level repo,
    used for reporting.  When 'explicitonly' is true, only files explicitly
    named by 'match' are added here (subrepos are still visited).  Returns
    the list of files that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # record files the matcher reports as bad, then delegate to the
    # original bad() callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit file names for case collisions / portability problems
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            # with --subrepos, recurse with explicitonly=False so all
            # matches inside the subrepo are added as well
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2818 2825
def addwebdirpath(repo, serverpath, webconf):
    """Register 'repo' (and, recursively, its subrepos) in a hgweb config.

    'webconf' maps server paths to repository roots and is updated in
    place.
    """
    webconf[serverpath] = repo.root
    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))

    # Recurse into every subrepo ever recorded in .hgsub history.
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2827 2834
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking matching files without deleting them.

    'prefix' is the path of this (sub)repo relative to the top-level repo,
    used for reporting.  When 'explicitonly' is true only files explicitly
    named by 'match' are forgotten here.  Returns a (bad, forgot) pair:
    'bad' lists files that could not be forgotten, 'forgot' those that
    were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # record files the matcher reports as bad, then delegate to the
    # original bad() callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2875 2882
def files(ui, ctx, m, fm, fmt, subrepos):
    """List files of 'ctx' matching 'm' through formatter 'fm'.

    'fmt' is the format string applied to each path.  When 'subrepos' is
    true, recurse into all subrepositories.  Returns 0 if at least one
    file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # in the working copy (rev is None), skip files marked as removed
        # ('r') in the dirstate
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2905 2912
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Schedule matching files for removal, recursing into subrepos.

    'after' records deletions that already happened in the working copy;
    'force' removes even modified/added files.  'warnings' is used when
    recursing into subrepos: messages are accumulated into it and printed
    only by the top-level call.  Returns 1 if any warning was issued, 0
    otherwise.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    # status tuple indices: 0=modified, 1=added, 3=deleted, 6=clean
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    # only the outermost call (warnings is None) prints the warnings
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        # is f inside one of this repo's subrepos?
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
            # missing files will generate a warning elsewhere
            ret = 1
    ui.progress(_('deleting'), None)

    # NOTE(review): 'list' shadows the builtin throughout the rest of this
    # function.
    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only forget files already gone from the working copy
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
3023 3030
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write the contents of matching files from 'ctx', recursing into
    subrepos.

    When 'fntemplate' is set, each file is written to a file named from
    the template instead of the formatter 'basefm'.  Returns 0 if at
    least one file was written, 1 otherwise.
    """
    err = 1

    def write(path):
        # emit one file, either into a templated filename or through basefm
        filename = None
        if fntemplate:
            filename = makefilename(repo, fntemplate, ctx.node(),
                                    pathname=os.path.join(prefix, path))
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename, opts) as fm:
            data = ctx[path].data()
            if opts.get('decode'):
                data = repo.wwritedata(path, data)
            fm.startitem()
            fm.write('data', '%s', data)
            fm.data(abspath=path, path=matcher.rel(path))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                write(file)
                return 0
        except KeyError:
            # fast path failed; fall through to the full walk below
            pass

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path), **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
3075 3082
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    if opts.get('date'):
        opts['date'] = util.parsedate(opts.get('date'))
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    else:
        dsguard = None
    with dsguard or util.nullcontextmanager():
        if dsguard:
            if scmutil.addremove(repo, matcher, "", opts) != 0:
                raise error.Abort(
                    _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
3096 3103
def samefile(f, ctx1, ctx2):
    """Return True if file 'f' is identical (content and flags) in both
    contexts.

    A file absent from both contexts also counts as "same".
    """
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if not in1:
        # same only when the file is absent on both sides
        return not in2
    if not in2:
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
3108 3115
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset 'old' to include matching working-copy changes.

    Args:
      ui: the user interface to report on.
      repo: the repository containing 'old'.
      old: the changectx to amend.
      extra: dict of extra commit metadata; updated from 'old' and the
        working context before being recorded.
      pats: file patterns selecting which working-copy files to fold in.
      opts: command options ('user', 'date', 'addremove', 'secret',
        'note', message options, ...).

    Returns:
      The node of the new amended changeset, or old.node() when nothing
      would change and no new commit is created.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction('amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        user = opts.get('user') or old.user()
        date = opts.get('date') or old.date()

        # Parse the date to allow comparison between date and old.date()
        date = util.parsedate(date)

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            files = set([fn for st in repo.status(base, old)[:3]
                         for fn in st])
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        if (opts.get('addremove')
            and scmutil.addremove(repo, matcher, "", opts)):
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

        filestoamend = set(f for f in wctx.files() if matcher(f))

        changes = (len(filestoamend) > 0)
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # Bugfix: 'old.p2' without the call is a bound method and is
            # always truthy; call it so the second-parent copy tracing
            # only runs when 'old' actually is a merge (a null context is
            # falsy, same idiom as 'ctx.p2()' in buildcommittext).
            if old.p2():
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was deleted, it's no longer relevant
            files.update(filestoamend)
            files = [f for f in files if not samefile(f, wctx, base)]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    fctx = wctx[path]

                    # Return None for removed files.
                    if not fctx.exists():
                        return None

                    flags = fctx.flags()
                    mctx = context.memfilectx(repo,
                                              fctx.path(), fctx.data(),
                                              islink='l' in flags,
                                              isexec='x' in flags,
                                              copied=copied.get(path))
                    return mctx
                except KeyError:
                    return None
        else:
            ui.note(_('copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, 'commit.amend')
        editor = getcommiteditor(editform=editform,
                                 **pycompat.strkwargs(opts))

        if not message:
            editor = getcommiteditor(edit=True, editform=editform)
            message = old.description()

        pureextra = extra.copy()
        extra['amend_source'] = old.hex()

        new = context.memctx(repo,
                             parents=[base.node(), old.p2().node()],
                             text=message,
                             files=files,
                             filectxfn=filectxfn,
                             user=user,
                             date=date,
                             extra=extra,
                             editor=editor)

        newdesc = changelog.stripdesc(new.description())
        if ((not changes)
            and newdesc == old.description()
            and user == old.user()
            and date == old.date()
            and pureextra == old.extra()):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        # preserve the phase of the amended changeset (or make it secret)
        if opts.get('secret'):
            commitphase = 'secret'
        else:
            commitphase = old.phase()
        overrides = {('phases', 'new-commit'): commitphase}
        with ui.configoverride(overrides, 'amend'):
            newid = repo.commitctx(new)

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get('note'):
            obsmetadata = {'note': opts['note']}
        scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and
        # and modified in the amend to "normal" in the dirstate.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normal(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
3286 3293
def commiteditor(repo, ctx, subs, editform=''):
    """Return the commit message, launching the editor only when empty."""
    description = ctx.description()
    if description:
        return description
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
3292 3299
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Invoke the user's editor to obtain a commit message for 'ctx'.

    The initial text comes from the most specific [committemplate] entry
    matching 'editform', or the default commit text otherwise.
    'finishdesc' may post-process the edited text.  Raises error.Abort
    when the resulting message is empty or, with
    'unchangedmessagedetection', identical to the seeded template text.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # look up the most specific [committemplate] entry, falling back from
    # e.g. 'changeset.commit.amend' down to 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        ref = '.'.join(forms)
        if repo.ui.config('committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path, action='commit')
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop editor helper lines ("HG: ...")
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
3342 3349
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the [committemplate] entry 'ref' into a commit editor text."""
    ui = repo.ui
    spec = formatter.templatespec(ref, None, None)
    templ = changeset_templater(ui, repo, spec, None, {}, False)
    # make every [committemplate] item available for template expansion
    for k, v in repo.ui.configitems('committemplate'):
        templ.t.cache[k] = templater.unquotestring(v)

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    ui.pushbuffer()
    templ.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3356 3363
def hgprefix(msg):
    """Prefix every non-empty line of 'msg' with "HG: ", dropping blanks."""
    lines = [line for line in msg.split("\n") if line]
    return "\n".join("HG: %s" % line for line in lines)
3359 3366
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for 'ctx'."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    description = ctx.description()
    if description:
        lines.append(description)
    lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(hgprefix(_("Enter commit message."
                            " Lines beginning with 'HG:' are removed.")))
    lines.append(hgprefix(extramsg))
    lines.append("HG: --")
    lines.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_("branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    for s in subs:
        lines.append(hgprefix(_("subrepo %s") % s))
    for f in added:
        lines.append(hgprefix(_("added %s") % f))
    for f in modified:
        lines.append(hgprefix(_("changed %s") % f))
    for f in removed:
        lines.append(hgprefix(_("removed %s") % f))
    if not (added or modified or removed):
        lines.append(hgprefix(_("no files changed")))
    lines.append("")

    return "\n".join(lines)
3387 3394
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Report status messages after committing 'node' on 'branch'.

    Prints 'created new head' when the commit added a head, a note when a
    closed branch head is reopened, and echoes the committed changeset in
    verbose/debug mode.  'bheads' are the branch heads as they were before
    the commit.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    # a new head was created iff the node is not among the old branch heads
    # and none of its parents was a head of the same branch
    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3435 3442
def postcommitstatus(repo, pats, opts):
    """Return the working-directory status for files matched by pats/opts."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
3438 3445
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore matched files in the working directory to their state in ctx.

    parents is a (parent, p2) pair of node ids — presumably the working
    directory's parents (they are compared against ctx.node() below).
    pats/opts select the files and the mode (interactive, no_backup,
    dry_run, ...).  This function only classifies every matched file into
    a revert action and decides the backup strategy; the actual file and
    dirstate changes are done by _performrevert().
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress warnings for paths already collected, subrepos,
                # and directories that contain collected files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all files in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set to avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified,      actions['revert'],   discard),
            # Modified compared to target, but local file is deleted
            (deleted,       actions['revert'],   discard),
            # Modified compared to target, local change
            (dsmodified,    actions['revert'],   dsmodifiedbackup),
            # Added since target
            (added,         actions['remove'],   discard),
            # Added in working directory
            (dsadded,       actions['forget'],   discard),
            # Added since target, have local modification
            (modadded,      backupanddel,        backup),
            # Added since target but file is missing in working directory
            (deladded,      actions['drop'],     discard),
            # Removed since  target, before working copy parent
            (removed,       actions['add'],      discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk,      actions['add'],      check),
            # Removed since target, marked as such in working copy parent
            (dsremoved,     actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk,    actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean,         actions['noop'],     discard),
            # Existing file, not tracked anywhere
            (unknown,       actions['unknown'],  discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, basestring):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
3721 3728
def _revertprefetch(repo, ctx, *files):
    """Hook for extensions that change the storage layer to prefetch content.

    The default implementation is a no-op.  'files' are the lists of file
    names that revert() collected for the actions that need file data.
    """
3724 3731
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """Actually perform all the actions computed for revert.

    This is an independent function to let extensions plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    parents is the (parent, p2) node pair passed through from revert();
    actions maps action names to ([files], message) pairs; tobackup is the
    set of files to back up when interactively modified.
    """
    parent, p2 = parents
    node = ctx.node()
    # files the user chose to keep during interactive prompts; excluded from
    # the matcher built below so the generated patch skips them
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write the target revision's content of f into the working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        try:
            repo.wvfs.unlinkpath(f)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            doremove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'apply'
            reversehunks = False
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
            repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3861 3868
class command(registrar.command):
    """deprecated: use registrar.command instead"""
    def _doregister(self, func, name, *args, **kwargs):
        # flag for deprecwarn in extensions.py
        func._deprecatedregistrar = True
        return super(command, self)._doregister(func, name, *args, **kwargs)
3866 3874
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Each entry is a tuple:
#   (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3895 3903
def checkunfinished(repo, commit=False):
    '''Look for an unfinished multistep operation, like graft, and abort
    if found. It's probably good to check this right before
    bailifchanged().

    With commit=True, states whose entry allows committing are skipped.
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            continue
        if repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3906 3914
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.
    '''
    # First pass: a present non-clearable state aborts the whole operation.
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # Second pass: remove every clearable state file that is present.
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.vfs.join(statefile))
3917 3925
# Pairs of (.hg/ state file, command that continues the interrupted
# operation); consumed by howtocontinue() below.
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3922 3930
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    '''
    contmsg = _("continue: %s")
    # An in-progress multistep operation wins over a plain dirty wdir.
    for statefile, cmdtext in afterresolvedstates:
        if repo.vfs.exists(statefile):
            return contmsg % cmdtext, True
    dirty = repo[None].dirty(missing=True, merge=False, branch=False)
    if dirty:
        return contmsg % _("hg commit"), False
    return None, None
3940 3948
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a matching afterresolvedstates, howtocontinue will yield
    repo.ui.warn as the reporter.

    Otherwise, it will yield repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3955 3963
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    msg, warning = howtocontinue(repo)
    # Only an in-progress operation (warning=True) yields a usable hint.
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,2608 +1,2619
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from .thirdparty import (
30 30 attr,
31 31 )
32 32 from . import (
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mdiff,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 revlog,
45 45 scmutil,
46 46 sparse,
47 47 subrepo,
48 48 util,
49 49 )
50 50
# convenience alias for the cached-property decorator defined in util
propertycache = util.propertycache

# matcher for any byte outside the printable ASCII range 0x21-0x7f
nonascii = re.compile(r'[^\x21-\x7f]').search
54 54
55 55 class basectx(object):
56 56 """A basectx object represents the common logic for its children:
57 57 changectx: read-only context that is already present in the repo,
58 58 workingctx: a context that represents the working directory and can
59 59 be committed,
60 60 memctx: a context that represents changes in-memory and can also
61 61 be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # If changeid is already a context object, reuse it as-is instead of
        # constructing a new one.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare equal when they are the same type and wrap the
        # same revision; AttributeError covers objects without _rev.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership == file tracked in this context's manifest
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for path in this revision
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match
114 114
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        other is the context to compare against; s is a pre-computed status
        (its deleted/unknown/ignored lists are reused); match filters the
        files considered.  Returns a scmutil.status tuple.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)
172 172
    @propertycache
    def substate(self):
        # lazily-computed subrepo state mapping for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        # revision number (int), or None/nullrev depending on subclass
        return self._rev
    def node(self):
        # binary node id
        return self._node
    def hex(self):
        # hex-encoded node id
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # human-readable phase name ('public', 'draft', ...)
        return phases.phasenames[self.phase()]
    def mutable(self):
        # any non-public phase is mutable
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        # deprecated alias kept for extension compatibility
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        # deprecated alias kept for extension compatibility
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        # deprecated alias kept for extension compatibility
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        # deprecated alias kept for extension compatibility
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities
287 287
288 288 def parents(self):
289 289 """return contexts for each parent changeset"""
290 290 return self._parents
291 291
292 292 def p1(self):
293 293 return self._parents[0]
294 294
295 295 def p2(self):
296 296 parents = self._parents
297 297 if len(parents) == 2:
298 298 return parents[1]
299 299 return changectx(self._repo, nullrev)
300 300
301 301 def _fileinfo(self, path):
302 302 if r'_manifest' in self.__dict__:
303 303 try:
304 304 return self._manifest[path], self._manifest.flags(path)
305 305 except KeyError:
306 306 raise error.ManifestLookupError(self._node, path,
307 307 _('not found in manifest'))
308 308 if r'_manifestdelta' in self.__dict__ or path in self.files():
309 309 if path in self._manifestdelta:
310 310 return (self._manifestdelta[path],
311 311 self._manifestdelta.flags(path))
312 312 mfl = self._repo.manifestlog
313 313 try:
314 314 node, flag = mfl[self._changeset.manifest].find(path)
315 315 except KeyError:
316 316 raise error.ManifestLookupError(self._node, path,
317 317 _('not found in manifest'))
318 318
319 319 return node, flag
320 320
321 321 def filenode(self, path):
322 322 return self._fileinfo(path)[0]
323 323
    def flags(self, path):
        """return the flags of ``path`` in this changeset ('' if absent)"""
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # a missing file has no flags
            return ''
329 329
    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)
333 333
    def nullsub(self, path, pctx):
        """return a null subrepo for ``path`` with parent context ``pctx``"""
        return subrepo.nullsubrepo(self, path, pctx)
336 336
    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        # NOTE(review): allowwdir presumably lets the lookup fall through to
        # the working directory for workingctx-like contexts -- confirm in
        # subrepo.subrepo.
        return subrepo.subrepo(self, path, allowwdir=True)
342 342
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """return a file matcher rooted at this repository, bound to self"""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)
350 350
    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # default to diffing against the first parent
            ctx2 = self.p1()
        if ctx2 is not None:
            # coerce a revision/node/tag into a changectx
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
359 359
    def dirs(self):
        """return the directories of this context's manifest"""
        return self._manifest.dirs()
362 362
    def hasdir(self, dir):
        """True if this context's manifest contains directory ``dir``"""
        return self._manifest.hasdir(dir)
365 365
    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        # sort each status list for deterministic output
        for l in r:
            l.sort()

        return r
429 429
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    name = repo.filtername
    if not name.startswith('visible'):
        # generic message for any other filter level
        msg = _("filtered revision '%s' (not in '%s' subset)")
        return error.FilteredRepoLookupError(msg % (changeid, name))
    return error.FilteredRepoLookupError(
        _("hidden revision '%s'") % changeid,
        hint=_('use --hidden to access hidden revisions'))
442 442
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag

        Raises RepoLookupError (or a Filtered* subclass) when the changeid
        cannot be resolved.
        """

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            # integer revision number
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            # 20-byte string: try it as a binary node id
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # a revision number spelled as a string
            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # a full 40-character hex node id
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: an unambiguous hex node id prefix
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make the error message hex-readable for binary node ids
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # _rev may be unset if __init__ bailed out early
            return id(self)

    def __nonzero__(self):
        # only the null revision is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        """parsed changelog entry for this revision"""
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        """manifest delta against the first parent"""
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        """return the raw changelog fields as a tuple"""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        """return the manifest node id for this changeset"""
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        """return the branch name, converted to the local encoding"""
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        """True if this changeset closes its branch"""
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        """return the phase of this changeset (see the phases module)"""
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        """True if this revision is filtered out of the 'visible' view"""
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        """yield a changectx for every ancestor of this changeset"""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
708 719
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        """filelog for this file's path"""
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        """changelog revision this file context is associated with"""
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            # _fileid may be a node or a filelog revision; lookup handles both
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        """filelog revision number of this file revision"""
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # fall back to identity when the filenode can't be resolved
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # note: may be shadowed by another changeset; see introrev()
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        """True if this file has the executable flag"""
        return 'x' in self.flags()
    def islink(self):
        """True if this file is a symlink"""
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # no associated changeset, or the linkrev already matches
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        """return parent filectxs, following renames when appropriate"""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            # number of lines, counting a trailing partial line as one
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([annotateline(fctx=rev, lineno=i)
                         for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([annotateline(fctx=rev)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    if needed[p] == 1:
                        # last user of this parent's annotation; free it
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))

    def ancestors(self, followfirst=False):
        """yield this file's ancestor filectxs.

        If followfirst is true, only the first parent of each revision is
        followed.
        """
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # always continue from the pending ancestor with the highest
            # (linkrev, filenode) key
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
1119 1130
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Immutable record tying one line to the filectx that introduced it."""
    fctx = attr.ib()
    # line number at first appearance in the file, or False when line
    # numbers were not requested (see basefilectx.annotate)
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1126 1137
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    # pair each parent's annotation with its diff blocks against the child
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                # unchanged block: copy the parent's annotations over
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    # not enough parent lines for a 1:1 fit; handle below
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1187 1198
1188 1199 class filectx(basefilectx):
1189 1200 """A filecontext object makes access to data related to a particular
1190 1201 filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid or changectx must be given; the
        others are derived lazily via propertycaches.
        """
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only seed the attributes we were given; missing ones are computed
        # on demand by the propertycaches on basefilectx
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
1213 1224
1214 1225 @propertycache
1215 1226 def _changectx(self):
1216 1227 try:
1217 1228 return changectx(self._repo, self._changeid)
1218 1229 except error.FilteredRepoLookupError:
1219 1230 # Linkrev may point to any revision in the repository. When the
1220 1231 # repository is filtered this may lead to `filectx` trying to build
1221 1232 # `changectx` for filtered revision. In such case we fallback to
1222 1233 # creating `changectx` on the unfiltered version of the reposition.
1223 1234 # This fallback should not be an issue because `changectx` from
1224 1235 # `filectx` are not used in complex operations that care about
1225 1236 # filtering.
1226 1237 #
1227 1238 # This fallback is a cheap and dirty fix that prevent several
1228 1239 # crashes. It does not ensure the behavior is correct. However the
1229 1240 # behavior was not correct before filtering either and "incorrect
1230 1241 # behavior" is seen as better as "crash"
1231 1242 #
1232 1243 # Linkrevs have several serious troubles with filtering that are
1233 1244 # complicated to solve. Proper handling of the issue here should be
1234 1245 # considered when solving linkrev issue are on the table.
1235 1246 return changectx(self._repo.unfiltered(), self._changeid)
1236 1247
1237 1248 def filectx(self, fileid, changeid=None):
1238 1249 '''opens an arbitrary revision of the file without
1239 1250 opening a new filelog'''
1240 1251 return filectx(self._repo, self._path, fileid=fileid,
1241 1252 filelog=self._filelog, changeid=changeid)
1242 1253
1243 1254 def rawdata(self):
1244 1255 return self._filelog.revision(self._filenode, raw=True)
1245 1256
1246 1257 def rawflags(self):
1247 1258 """low-level revlog flags"""
1248 1259 return self._filelog.flags(self._filerev)
1249 1260
1250 1261 def data(self):
1251 1262 try:
1252 1263 return self._filelog.read(self._filenode)
1253 1264 except error.CensoredNodeError:
1254 1265 if self._repo.ui.config("censor", "policy") == "ignore":
1255 1266 return ""
1256 1267 raise error.Abort(_("censored node: %s") % short(self._filenode),
1257 1268 hint=_("set censor.policy to ignore errors"))
1258 1269
1259 1270 def size(self):
1260 1271 return self._filelog.size(self._filerev)
1261 1272
1262 1273 @propertycache
1263 1274 def _copied(self):
1264 1275 """check if file was actually renamed in this changeset revision
1265 1276
1266 1277 If rename logged in file revision, we report copy for changeset only
1267 1278 if file revisions linkrev points back to the changeset in question
1268 1279 or both changeset parents contain different file revisions.
1269 1280 """
1270 1281
1271 1282 renamed = self._filelog.renamed(self._filenode)
1272 1283 if not renamed:
1273 1284 return renamed
1274 1285
1275 1286 if self.rev() == self.linkrev():
1276 1287 return renamed
1277 1288
1278 1289 name = self.path()
1279 1290 fnode = self._filenode
1280 1291 for p in self._changectx.parents():
1281 1292 try:
1282 1293 if fnode == p.filenode(name):
1283 1294 return None
1284 1295 except error.LookupError:
1285 1296 pass
1286 1297 return renamed
1287 1298
1288 1299 def children(self):
1289 1300 # hard for renames
1290 1301 c = self._filelog.children(self._filenode)
1291 1302 return [filectx(self._repo, self._path, fileid=x,
1292 1303 filelog=self._filelog) for x in c]
1293 1304
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # Not committed yet, so there is no rev/node.
        self._rev = None
        self._node = None
        self._text = text
        # user/date/status fall back to propertycaches below when not given.
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        # Ensure 'branch' is always present and UTF-8; empty means 'default'.
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __bytes__(self):
        # rendered as "<first parent>+" to mark the uncommitted state
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        # dirstate-provided flag lookup, falling back to _buildflagfunc
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default when no explicit 'changes' was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date allows tests to pin a deterministic date
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    # Simple accessors over the pending-commit state gathered above.
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # Prefer the already-built manifest if present; otherwise consult
        # the (possibly filesystem-backed) flag function.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then all of their changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        # a bare committablectx has no pending changes; workingctx overrides
        return False
1494 1505
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # yield every tracked file that is not marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # unknown ('?') and removed ('r') files do not count as present
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        # drop the null second parent so non-merges have a single parent
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """start tracking the given files; returns the rejected ones"""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                # warn (but still add) for files over ~10MB
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect rather than re-add
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """stop tracking the given files; returns the rejected ones"""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # freshly added files are simply dropped
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """restore files that are marked removed, from a parent revision"""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # take content from p1 if present there, else p2
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """record in the dirstate that dest was copied from source"""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def flushall(self):
        pass # For overlayworkingfilectx compatibility.

    def _filtersuspectsymlink(self, files):
        # No filtering needed when the OS/filesystem handles symlinks natively.
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """full-content compare of possibly-clean files

        Returns (modified, deleted, fixup) where 'fixup' lists files that
        turned out to be clean and whose dirstate entry can be refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        # stamp added/modified files with their sentinel nodeids
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        # let the sparse extension prune now-unneeded files
        sparse.aftercommit(self._repo, node)
1840 1851
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # Uncommitted, so there is no changeid or file revision yet.
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def manifestnode(pctx, fname):
            # absent files map to nullid, which is filtered out below
            return pctx._manifest.get(fname, nullid)

        fname = self._path
        flog = self._filelog
        parentctxs = self._changectx._parents

        rename = self.renamed()
        if rename:
            # (copysource, copynode) from the rename; filelog is unknown
            candidates = [rename + (None,)]
        else:
            candidates = [(fname, manifestnode(parentctxs[0], fname), flog)]
        candidates.extend((fname, manifestnode(pctx, fname), flog)
                          for pctx in parentctxs[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        # uncommitted file revisions cannot have children yet
        return []
1887 1898
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        """read the file's current contents from the working directory"""
        return self._repo.wread(self._path)

    def renamed(self):
        """return (source, sourcenode) when the dirstate records a copy"""
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        p1mf = self._changectx._parents[0]._manifest
        return source, p1mf.get(source, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """working-file mtime with the changeset timezone; falls back to the
        changeset date when the file is missing"""
        ctxtime, tzoffset = self._changectx.date()
        try:
            mtime = self._repo.wvfs.lstat(self._path).st_mtime
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (ctxtime, tzoffset)
        return (mtime, tzoffset)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful for normal/modified/added dirstate entries
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        target = self._path
        wvfs.audit(target)
        # a real directory (not a symlink to one) in the way: remove it
        if wvfs.isdir(target) and not wvfs.islink(target):
            wvfs.rmtree(target, forcibly=True)
        # a file occupying one of the ancestor directory names: remove the
        # deepest such entry and stop
        for ancestor in reversed(list(util.finddirs(target))):
            if wvfs.isfileorlink(ancestor):
                wvfs.unlink(ancestor)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1965 1976
class overlayworkingctx(workingctx):
    """Wraps another mutable context with a write-back cache that can be flushed
    at a later time.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo, wrappedctx):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self._wrappedctx = wrappedctx
        self._clean()

    def data(self, path):
        """return the (possibly cached) contents of ``path``

        Raises ProgrammingError if ``path`` was deleted in the cache.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                # Fixed: this object is a ctx, not a filectx, so it has no
                # ``_path`` attribute; the old ``self._path`` here raised
                # AttributeError instead of the intended error.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    def isinmemory(self):
        return True

    def filedate(self, path):
        """return the cached date for ``path``, or the wrapped context's"""
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def flags(self, path):
        """return the cached flags for ``path``, or the wrapped context's

        Raises ProgrammingError if ``path`` was deleted in the cache.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # Fixed: use ``path``; ``self._path`` does not exist on a ctx.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def write(self, path, data, flags=''):
        """cache a write of ``data`` (with ``flags``) to ``path``"""
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        """cache a flags-only change ('l' symlink, 'x' executable)"""
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        """cache a deletion of ``path``"""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']
        return self._wrappedctx[path].exists()

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']
        return self._wrappedctx[path].lexists()

    def size(self, path):
        """return the cached size of ``path``, or the wrapped context's

        Raises ProgrammingError if ``path`` was deleted in the cache.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # Fixed: use ``path``; ``self._path`` does not exist on a ctx.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def flushall(self):
        """replay all cached changes onto the wrapped context, in write order,
        then reset the cache"""
        for path in self._writeorder:
            entry = self._cache[path]
            if entry['exists']:
                self._wrappedctx[path].clearunknown()
                if entry['data'] is not None:
                    if entry['flags'] is None:
                        raise error.ProgrammingError('data set but not flags')
                    self._wrappedctx[path].write(
                        entry['data'],
                        entry['flags'])
                else:
                    self._wrappedctx[path].setflags(
                        'l' in entry['flags'],
                        'x' in entry['flags'])
            else:
                # NOTE(review): ``path`` lands in workingfilectx.remove()'s
                # ``ignoremissing`` parameter here (truthy), so missing files
                # are silently ignored -- confirm whether that is intended.
                self._wrappedctx[path].remove(path)
        self._clean()

    def isdirty(self, path):
        """True if ``path`` has a pending cached change"""
        return path in self._cache

    def _clean(self):
        # drop all cached edits and the order they were made in
        self._cache = {}
        self._writeorder = []

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        # first write to a path fixes its position in the flush order
        if path not in self._cache:
            self._writeorder.append(path)

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2101 2112
2102 2113 class overlayworkingfilectx(workingfilectx):
2103 2114 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2104 2115 cache, which can be flushed through later by calling ``flush()``."""
2105 2116
2106 2117 def __init__(self, repo, path, filelog=None, parent=None):
2107 2118 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2108 2119 parent)
2109 2120 self._repo = repo
2110 2121 self._parent = parent
2111 2122 self._path = path
2112 2123
2113 2124 def cmp(self, fctx):
2114 2125 return self.data() != fctx.data()
2115 2126
2116 2127 def ctx(self):
2117 2128 return self._parent
2118 2129
2119 2130 def data(self):
2120 2131 return self._parent.data(self._path)
2121 2132
2122 2133 def date(self):
2123 2134 return self._parent.filedate(self._path)
2124 2135
2125 2136 def exists(self):
2126 2137 return self.lexists()
2127 2138
2128 2139 def lexists(self):
2129 2140 return self._parent.exists(self._path)
2130 2141
2131 2142 def renamed(self):
2132 2143 # Copies are currently tracked in the dirstate as before. Straight copy
2133 2144 # from workingfilectx.
2134 2145 rp = self._repo.dirstate.copied(self._path)
2135 2146 if not rp:
2136 2147 return None
2137 2148 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
2138 2149
2139 2150 def size(self):
2140 2151 return self._parent.size(self._path)
2141 2152
2142 2153 def audit(self):
2143 2154 pass
2144 2155
2145 2156 def flags(self):
2146 2157 return self._parent.flags(self._path)
2147 2158
2148 2159 def setflags(self, islink, isexec):
2149 2160 return self._parent.setflags(self._path, islink, isexec)
2150 2161
2151 2162 def write(self, data, flags, backgroundclose=False):
2152 2163 return self._parent.write(self._path, data, flags)
2153 2164
2154 2165 def remove(self, ignoremissing=False):
2155 2166 return self._parent.remove(self._path)
2156 2167
2157 2168 class workingcommitctx(workingctx):
2158 2169 """A workingcommitctx object makes access to data related to
2159 2170 the revision being committed convenient.
2160 2171
2161 2172 This hides changes in the working directory, if they aren't
2162 2173 committed in this context.
2163 2174 """
2164 2175 def __init__(self, repo, changes,
2165 2176 text="", user=None, date=None, extra=None):
2166 2177 super(workingctx, self).__init__(repo, text, user, date, extra,
2167 2178 changes)
2168 2179
2169 2180 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2170 2181 """Return matched files only in ``self._status``
2171 2182
2172 2183 Uncommitted files appear "clean" via this context, even if
2173 2184 they aren't actually so in the working directory.
2174 2185 """
2175 2186 if clean:
2176 2187 clean = [f for f in self._manifest if f not in self._changedset]
2177 2188 else:
2178 2189 clean = []
2179 2190 return scmutil.status([f for f in self._status.modified if match(f)],
2180 2191 [f for f in self._status.added if match(f)],
2181 2192 [f for f in self._status.removed if match(f)],
2182 2193 [], [], [], clean)
2183 2194
2184 2195 @propertycache
2185 2196 def _changedset(self):
2186 2197 """Return the set of files changed in this context
2187 2198 """
2188 2199 changed = set(self._status.modified)
2189 2200 changed.update(self._status.added)
2190 2201 changed.update(self._status.removed)
2191 2202 return changed
2192 2203
2193 2204 def makecachingfilectxfn(func):
2194 2205 """Create a filectxfn that caches based on the path.
2195 2206
2196 2207 We can't use util.cachefunc because it uses all arguments as the cache
2197 2208 key and this creates a cycle since the arguments include the repo and
2198 2209 memctx.
2199 2210 """
2200 2211 cache = {}
2201 2212
2202 2213 def getfilectx(repo, memctx, path):
2203 2214 if path not in cache:
2204 2215 cache[path] = func(repo, memctx, path)
2205 2216 return cache[path]
2206 2217
2207 2218 return getfilectx
2208 2219
2209 2220 def memfilefromctx(ctx):
2210 2221 """Given a context return a memfilectx for ctx[path]
2211 2222
2212 2223 This is a convenience method for building a memctx based on another
2213 2224 context.
2214 2225 """
2215 2226 def getfilectx(repo, memctx, path):
2216 2227 fctx = ctx[path]
2217 2228 # this is weird but apparently we only keep track of one parent
2218 2229 # (why not only store that instead of a tuple?)
2219 2230 copied = fctx.renamed()
2220 2231 if copied:
2221 2232 copied = copied[0]
2222 2233 return memfilectx(repo, path, fctx.data(),
2223 2234 islink=fctx.islink(), isexec=fctx.isexec(),
2224 2235 copied=copied, memctx=memctx)
2225 2236
2226 2237 return getfilectx
2227 2238
2228 2239 def memfilefrompatch(patchstore):
2229 2240 """Given a patch (e.g. patchstore object) return a memfilectx
2230 2241
2231 2242 This is a convenience method for building a memctx based on a patchstore.
2232 2243 """
2233 2244 def getfilectx(repo, memctx, path):
2234 2245 data, mode, copied = patchstore.getfile(path)
2235 2246 if data is None:
2236 2247 return None
2237 2248 islink, isexec = mode
2238 2249 return memfilectx(repo, path, data, islink=islink,
2239 2250 isexec=isexec, copied=copied,
2240 2251 memctx=memctx)
2241 2252
2242 2253 return getfilectx
2243 2254
2244 2255 class memctx(committablectx):
2245 2256 """Use memctx to perform in-memory commits via localrepo.commitctx().
2246 2257
2247 2258 Revision information is supplied at initialization time while
2248 2259 related files data is made available through a callback
2249 2260 mechanism. 'repo' is the current localrepo, 'parents' is a
2250 2261 sequence of two parent revisions identifiers (pass None for every
2251 2262 missing parent), 'text' is the commit message and 'files' lists
2252 2263 names of files touched by the revision (normalized and relative to
2253 2264 repository root).
2254 2265
2255 2266 filectxfn(repo, memctx, path) is a callable receiving the
2256 2267 repository, the current memctx object and the normalized path of
2257 2268 requested file, relative to repository root. It is fired by the
2258 2269 commit function for every file in 'files', but calls order is
2259 2270 undefined. If the file is available in the revision being
2260 2271 committed (updated or added), filectxfn returns a memfilectx
2261 2272 object. If the file was removed, filectxfn returns None for recent
2262 2273 Mercurial. Moved files are represented by marking the source file
2263 2274 removed and the new file added with copy information (see
2264 2275 memfilectx).
2265 2276
2266 2277 user receives the committer name and defaults to current
2267 2278 repository username, date is the commit date in any format
2268 2279 supported by util.parsedate() and defaults to current date, extra
2269 2280 is a dictionary of metadata or is left empty.
2270 2281 """
2271 2282
2272 2283 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2273 2284 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2274 2285 # this field to determine what to do in filectxfn.
2275 2286 _returnnoneformissingfiles = True
2276 2287
2277 2288 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2278 2289 date=None, extra=None, branch=None, editor=False):
2279 2290 super(memctx, self).__init__(repo, text, user, date, extra)
2280 2291 self._rev = None
2281 2292 self._node = None
2282 2293 parents = [(p or nullid) for p in parents]
2283 2294 p1, p2 = parents
2284 2295 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2285 2296 files = sorted(set(files))
2286 2297 self._files = files
2287 2298 if branch is not None:
2288 2299 self._extra['branch'] = encoding.fromlocal(branch)
2289 2300 self.substate = {}
2290 2301
2291 2302 if isinstance(filectxfn, patch.filestore):
2292 2303 filectxfn = memfilefrompatch(filectxfn)
2293 2304 elif not callable(filectxfn):
2294 2305 # if store is not callable, wrap it in a function
2295 2306 filectxfn = memfilefromctx(filectxfn)
2296 2307
2297 2308 # memoizing increases performance for e.g. vcs convert scenarios.
2298 2309 self._filectxfn = makecachingfilectxfn(filectxfn)
2299 2310
2300 2311 if editor:
2301 2312 self._text = editor(self._repo, self, [])
2302 2313 self._repo.savecommitmessage(self._text)
2303 2314
2304 2315 def filectx(self, path, filelog=None):
2305 2316 """get a file context from the working directory
2306 2317
2307 2318 Returns None if file doesn't exist and should be removed."""
2308 2319 return self._filectxfn(self._repo, self, path)
2309 2320
2310 2321 def commit(self):
2311 2322 """commit context to the repo"""
2312 2323 return self._repo.commitctx(self)
2313 2324
2314 2325 @propertycache
2315 2326 def _manifest(self):
2316 2327 """generate a manifest based on the return values of filectxfn"""
2317 2328
2318 2329 # keep this simple for now; just worry about p1
2319 2330 pctx = self._parents[0]
2320 2331 man = pctx.manifest().copy()
2321 2332
2322 2333 for f in self._status.modified:
2323 2334 p1node = nullid
2324 2335 p2node = nullid
2325 2336 p = pctx[f].parents() # if file isn't in pctx, check p2?
2326 2337 if len(p) > 0:
2327 2338 p1node = p[0].filenode()
2328 2339 if len(p) > 1:
2329 2340 p2node = p[1].filenode()
2330 2341 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2331 2342
2332 2343 for f in self._status.added:
2333 2344 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2334 2345
2335 2346 for f in self._status.removed:
2336 2347 if f in man:
2337 2348 del man[f]
2338 2349
2339 2350 return man
2340 2351
2341 2352 @propertycache
2342 2353 def _status(self):
2343 2354 """Calculate exact status from ``files`` specified at construction
2344 2355 """
2345 2356 man1 = self.p1().manifest()
2346 2357 p2 = self._parents[1]
2347 2358 # "1 < len(self._parents)" can't be used for checking
2348 2359 # existence of the 2nd parent, because "memctx._parents" is
2349 2360 # explicitly initialized by the list, of which length is 2.
2350 2361 if p2.node() != nullid:
2351 2362 man2 = p2.manifest()
2352 2363 managing = lambda f: f in man1 or f in man2
2353 2364 else:
2354 2365 managing = lambda f: f in man1
2355 2366
2356 2367 modified, added, removed = [], [], []
2357 2368 for f in self._files:
2358 2369 if not managing(f):
2359 2370 added.append(f)
2360 2371 elif self[f]:
2361 2372 modified.append(f)
2362 2373 else:
2363 2374 removed.append(f)
2364 2375
2365 2376 return scmutil.status(modified, added, removed, [], [], [], [])
2366 2377
2367 2378 class memfilectx(committablefilectx):
2368 2379 """memfilectx represents an in-memory file to commit.
2369 2380
2370 2381 See memctx and committablefilectx for more details.
2371 2382 """
2372 2383 def __init__(self, repo, path, data, islink=False,
2373 2384 isexec=False, copied=None, memctx=None):
2374 2385 """
2375 2386 path is the normalized file path relative to repository root.
2376 2387 data is the file content as a string.
2377 2388 islink is True if the file is a symbolic link.
2378 2389 isexec is True if the file is executable.
2379 2390 copied is the source file path if current file was copied in the
2380 2391 revision being committed, or None."""
2381 2392 super(memfilectx, self).__init__(repo, path, None, memctx)
2382 2393 self._data = data
2383 2394 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2384 2395 self._copied = None
2385 2396 if copied:
2386 2397 self._copied = (copied, nullid)
2387 2398
2388 2399 def data(self):
2389 2400 return self._data
2390 2401
2391 2402 def remove(self, ignoremissing=False):
2392 2403 """wraps unlink for a repo's working directory"""
2393 2404 # need to figure out what to do here
2394 2405 del self._changectx[self._path]
2395 2406
2396 2407 def write(self, data, flags):
2397 2408 """wraps repo.wwrite"""
2398 2409 self._data = data
2399 2410
2400 2411 class overlayfilectx(committablefilectx):
2401 2412 """Like memfilectx but take an original filectx and optional parameters to
2402 2413 override parts of it. This is useful when fctx.data() is expensive (i.e.
2403 2414 flag processor is expensive) and raw data, flags, and filenode could be
2404 2415 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2405 2416 """
2406 2417
2407 2418 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2408 2419 copied=None, ctx=None):
2409 2420 """originalfctx: filecontext to duplicate
2410 2421
2411 2422 datafunc: None or a function to override data (file content). It is a
2412 2423 function to be lazy. path, flags, copied, ctx: None or overridden value
2413 2424
2414 2425 copied could be (path, rev), or False. copied could also be just path,
2415 2426 and will be converted to (path, nullid). This simplifies some callers.
2416 2427 """
2417 2428
2418 2429 if path is None:
2419 2430 path = originalfctx.path()
2420 2431 if ctx is None:
2421 2432 ctx = originalfctx.changectx()
2422 2433 ctxmatch = lambda: True
2423 2434 else:
2424 2435 ctxmatch = lambda: ctx == originalfctx.changectx()
2425 2436
2426 2437 repo = originalfctx.repo()
2427 2438 flog = originalfctx.filelog()
2428 2439 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2429 2440
2430 2441 if copied is None:
2431 2442 copied = originalfctx.renamed()
2432 2443 copiedmatch = lambda: True
2433 2444 else:
2434 2445 if copied and not isinstance(copied, tuple):
2435 2446 # repo._filecommit will recalculate copyrev so nullid is okay
2436 2447 copied = (copied, nullid)
2437 2448 copiedmatch = lambda: copied == originalfctx.renamed()
2438 2449
2439 2450 # When data, copied (could affect data), ctx (could affect filelog
2440 2451 # parents) are not overridden, rawdata, rawflags, and filenode may be
2441 2452 # reused (repo._filecommit should double check filelog parents).
2442 2453 #
2443 2454 # path, flags are not hashed in filelog (but in manifestlog) so they do
2444 2455 # not affect reusable here.
2445 2456 #
2446 2457 # If ctx or copied is overridden to a same value with originalfctx,
2447 2458 # still consider it's reusable. originalfctx.renamed() may be a bit
2448 2459 # expensive so it's not called unless necessary. Assuming datafunc is
2449 2460 # always expensive, do not call it for this "reusable" test.
2450 2461 reusable = datafunc is None and ctxmatch() and copiedmatch()
2451 2462
2452 2463 if datafunc is None:
2453 2464 datafunc = originalfctx.data
2454 2465 if flags is None:
2455 2466 flags = originalfctx.flags()
2456 2467
2457 2468 self._datafunc = datafunc
2458 2469 self._flags = flags
2459 2470 self._copied = copied
2460 2471
2461 2472 if reusable:
2462 2473 # copy extra fields from originalfctx
2463 2474 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2464 2475 for attr_ in attrs:
2465 2476 if util.safehasattr(originalfctx, attr_):
2466 2477 setattr(self, attr_, getattr(originalfctx, attr_))
2467 2478
2468 2479 def data(self):
2469 2480 return self._datafunc()
2470 2481
2471 2482 class metadataonlyctx(committablectx):
2472 2483 """Like memctx but it's reusing the manifest of different commit.
2473 2484 Intended to be used by lightweight operations that are creating
2474 2485 metadata-only changes.
2475 2486
2476 2487 Revision information is supplied at initialization time. 'repo' is the
2477 2488 current localrepo, 'ctx' is original revision which manifest we're reuisng
2478 2489 'parents' is a sequence of two parent revisions identifiers (pass None for
2479 2490 every missing parent), 'text' is the commit message.
2480 2491
2481 2492 user receives the committer name and defaults to current repository
2482 2493 username, date is the commit date in any format supported by
2483 2494 util.parsedate() and defaults to current date, extra is a dictionary of
2484 2495 metadata or is left empty.
2485 2496 """
2486 2497 def __new__(cls, repo, originalctx, *args, **kwargs):
2487 2498 return super(metadataonlyctx, cls).__new__(cls, repo)
2488 2499
2489 2500 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2490 2501 date=None, extra=None, editor=False):
2491 2502 if text is None:
2492 2503 text = originalctx.description()
2493 2504 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2494 2505 self._rev = None
2495 2506 self._node = None
2496 2507 self._originalctx = originalctx
2497 2508 self._manifestnode = originalctx.manifestnode()
2498 2509 if parents is None:
2499 2510 parents = originalctx.parents()
2500 2511 else:
2501 2512 parents = [repo[p] for p in parents if p is not None]
2502 2513 parents = parents[:]
2503 2514 while len(parents) < 2:
2504 2515 parents.append(repo[nullid])
2505 2516 p1, p2 = self._parents = parents
2506 2517
2507 2518 # sanity check to ensure that the reused manifest parents are
2508 2519 # manifests of our commit parents
2509 2520 mp1, mp2 = self.manifestctx().parents
2510 2521 if p1 != nullid and p1.manifestnode() != mp1:
2511 2522 raise RuntimeError('can\'t reuse the manifest: '
2512 2523 'its p1 doesn\'t match the new ctx p1')
2513 2524 if p2 != nullid and p2.manifestnode() != mp2:
2514 2525 raise RuntimeError('can\'t reuse the manifest: '
2515 2526 'its p2 doesn\'t match the new ctx p2')
2516 2527
2517 2528 self._files = originalctx.files()
2518 2529 self.substate = {}
2519 2530
2520 2531 if editor:
2521 2532 self._text = editor(self._repo, self, [])
2522 2533 self._repo.savecommitmessage(self._text)
2523 2534
2524 2535 def manifestnode(self):
2525 2536 return self._manifestnode
2526 2537
2527 2538 @property
2528 2539 def _manifestctx(self):
2529 2540 return self._repo.manifestlog[self._manifestnode]
2530 2541
2531 2542 def filectx(self, path, filelog=None):
2532 2543 return self._originalctx.filectx(path, filelog=filelog)
2533 2544
2534 2545 def commit(self):
2535 2546 """commit context to the repo"""
2536 2547 return self._repo.commitctx(self)
2537 2548
2538 2549 @property
2539 2550 def _manifest(self):
2540 2551 return self._originalctx.manifest()
2541 2552
2542 2553 @propertycache
2543 2554 def _status(self):
2544 2555 """Calculate exact status from ``files`` specified in the ``origctx``
2545 2556 and parents manifests.
2546 2557 """
2547 2558 man1 = self.p1().manifest()
2548 2559 p2 = self._parents[1]
2549 2560 # "1 < len(self._parents)" can't be used for checking
2550 2561 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2551 2562 # explicitly initialized by the list, of which length is 2.
2552 2563 if p2.node() != nullid:
2553 2564 man2 = p2.manifest()
2554 2565 managing = lambda f: f in man1 or f in man2
2555 2566 else:
2556 2567 managing = lambda f: f in man1
2557 2568
2558 2569 modified, added, removed = [], [], []
2559 2570 for f in self._files:
2560 2571 if not managing(f):
2561 2572 added.append(f)
2562 2573 elif f in self:
2563 2574 modified.append(f)
2564 2575 else:
2565 2576 removed.append(f)
2566 2577
2567 2578 return scmutil.status(modified, added, removed, [], [], [], [])
2568 2579
2569 2580 class arbitraryfilectx(object):
2570 2581 """Allows you to use filectx-like functions on a file in an arbitrary
2571 2582 location on disk, possibly not in the working directory.
2572 2583 """
2573 2584 def __init__(self, path, repo=None):
2574 2585 # Repo is optional because contrib/simplemerge uses this class.
2575 2586 self._repo = repo
2576 2587 self._path = path
2577 2588
2578 2589 def cmp(self, fctx):
2579 2590 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2580 2591 # path if either side is a symlink.
2581 2592 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2582 2593 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2583 2594 # Add a fast-path for merge if both sides are disk-backed.
2584 2595 # Note that filecmp uses the opposite return values (True if same)
2585 2596 # from our cmp functions (True if different).
2586 2597 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2587 2598 return self.data() != fctx.data()
2588 2599
2589 2600 def path(self):
2590 2601 return self._path
2591 2602
2592 2603 def flags(self):
2593 2604 return ''
2594 2605
2595 2606 def data(self):
2596 2607 return util.readfile(self._path)
2597 2608
2598 2609 def decodeddata(self):
2599 2610 with open(self._path, "rb") as f:
2600 2611 return f.read()
2601 2612
2602 2613 def remove(self):
2603 2614 util.unlink(self._path)
2604 2615
2605 2616 def write(self, data, flags):
2606 2617 assert not flags
2607 2618 with open(self._path, "w") as f:
2608 2619 f.write(data)
@@ -1,403 +1,421
1 1 # registrar.py - utilities to register function for specific purpose
2 2 #
3 3 # Copyright FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 configitems,
12 12 error,
13 13 pycompat,
14 14 util,
15 15 )
16 16
17 17 # unlike the other registered items, config options are neither functions or
18 18 # classes. Registering the option is just small function call.
19 19 #
20 20 # We still add the official API to the registrar module for consistency with
21 21 # the other items extensions might want to register.
22 22 configitem = configitems.getitemregister
23 23
24 24 class _funcregistrarbase(object):
25 25 """Base of decorator to register a function for specific purpose
26 26
27 27 This decorator stores decorated functions into own dict 'table'.
28 28
29 29 The least derived class can be defined by overriding 'formatdoc',
30 30 for example::
31 31
32 32 class keyword(_funcregistrarbase):
33 33 _docformat = ":%s: %s"
34 34
35 35 This should be used as below:
36 36
37 37 keyword = registrar.keyword()
38 38
39 39 @keyword('bar')
40 40 def barfunc(*args, **kwargs):
41 41 '''Explanation of bar keyword ....
42 42 '''
43 43 pass
44 44
45 45 In this case:
46 46
47 47 - 'barfunc' is stored as 'bar' in '_table' of an instance 'keyword' above
48 48 - 'barfunc.__doc__' becomes ":bar: Explanation of bar keyword"
49 49 """
50 50 def __init__(self, table=None):
51 51 if table is None:
52 52 self._table = {}
53 53 else:
54 54 self._table = table
55 55
56 56 def __call__(self, decl, *args, **kwargs):
57 57 return lambda func: self._doregister(func, decl, *args, **kwargs)
58 58
59 59 def _doregister(self, func, decl, *args, **kwargs):
60 60 name = self._getname(decl)
61 61
62 62 if name in self._table:
63 63 msg = 'duplicate registration for name: "%s"' % name
64 64 raise error.ProgrammingError(msg)
65 65
66 66 if func.__doc__ and not util.safehasattr(func, '_origdoc'):
67 67 doc = pycompat.sysbytes(func.__doc__).strip()
68 68 func._origdoc = doc
69 69 func.__doc__ = pycompat.sysstr(self._formatdoc(decl, doc))
70 70
71 71 self._table[name] = func
72 72 self._extrasetup(name, func, *args, **kwargs)
73 73
74 74 return func
75 75
76 76 def _parsefuncdecl(self, decl):
77 77 """Parse function declaration and return the name of function in it
78 78 """
79 79 i = decl.find('(')
80 80 if i >= 0:
81 81 return decl[:i]
82 82 else:
83 83 return decl
84 84
85 85 def _getname(self, decl):
86 86 """Return the name of the registered function from decl
87 87
88 88 Derived class should override this, if it allows more
89 89 descriptive 'decl' string than just a name.
90 90 """
91 91 return decl
92 92
93 93 _docformat = None
94 94
95 95 def _formatdoc(self, decl, doc):
96 96 """Return formatted document of the registered function for help
97 97
98 98 'doc' is '__doc__.strip()' of the registered function.
99 99 """
100 100 return self._docformat % (decl, doc)
101 101
102 102 def _extrasetup(self, name, func):
102 103 """Execute extra setup for registered function, if needed
104 104 """
105 105
106 106 class command(_funcregistrarbase):
107 107 """Decorator to register a command function to table
108 108
109 109 This class receives a command table as its argument. The table should
110 110 be a dict.
111 111
112 112 The created object can be used as a decorator for adding commands to
113 113 that command table. This accepts multiple arguments to define a command.
114 114
115 The first argument is the command name.
115 The first argument is the command name (as bytes).
116 116
117 The options argument is an iterable of tuples defining command arguments.
118 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
117 The `options` keyword argument is an iterable of tuples defining command
118 arguments. See ``mercurial.fancyopts.fancyopts()`` for the format of each
119 tuple.
119 120
120 The synopsis argument defines a short, one line summary of how to use the
121 The `synopsis` argument defines a short, one line summary of how to use the
121 122 command. This shows up in the help output.
122 123
123 The norepo argument defines whether the command does not require a
124 There are three arguments that control what repository (if any) is found
125 and passed to the decorated function: `norepo`, `optionalrepo`, and
126 `inferrepo`.
127
128 The `norepo` argument defines whether the command does not require a
124 129 local repository. Most commands operate against a repository, thus the
125 default is False.
130 default is False. When True, no repository will be passed.
126 131
127 The optionalrepo argument defines whether the command optionally requires
128 a local repository.
132 The `optionalrepo` argument defines whether the command optionally requires
133 a local repository. If no repository can be found, None will be passed
134 to the decorated function.
129 135
130 The inferrepo argument defines whether to try to find a repository from the
131 command line arguments. If True, arguments will be examined for potential
132 repository locations. See ``findrepo()``. If a repository is found, it
133 will be used.
136 The `inferrepo` argument defines whether to try to find a repository from
137 the command line arguments. If True, arguments will be examined for
138 potential repository locations. See ``findrepo()``. If a repository is
139 found, it will be used and passed to the decorated function.
134 140
135 141 There are three constants in the class which tells what type of the command
136 142 that is. That information will be helpful at various places. It will be also
137 143 be used to decide what level of access the command has on hidden commits.
138 144 The constants are:
139 145
140 unrecoverablewrite is for those write commands which can't be recovered like
141 push.
142 recoverablewrite is for write commands which can be recovered like commit.
143 readonly is for commands which are read only.
146 `unrecoverablewrite` is for those write commands which can't be recovered
147 like push.
148 `recoverablewrite` is for write commands which can be recovered like commit.
149 `readonly` is for commands which are read only.
150
151 The signature of the decorated function looks like this:
152 def cmd(ui[, repo] [, <args>] [, <options>])
153
154 `repo` is required if `norepo` is False.
155 `<args>` are positional args (or `*args`) arguments, of non-option
156 arguments from the command line.
157 `<options>` are keyword arguments (or `**options`) of option arguments
158 from the command line.
159
160 See the WritingExtensions and MercurialApi documentation for more exhaustive
161 descriptions and examples.
144 162 """
145 163
146 164 unrecoverablewrite = "unrecoverable"
147 165 recoverablewrite = "recoverable"
148 166 readonly = "readonly"
149 167
150 168 possiblecmdtypes = {unrecoverablewrite, recoverablewrite, readonly}
151 169
152 170 def _doregister(self, func, name, options=(), synopsis=None,
153 171 norepo=False, optionalrepo=False, inferrepo=False,
154 172 cmdtype=unrecoverablewrite):
155 173
156 174 if cmdtype not in self.possiblecmdtypes:
157 175 raise error.ProgrammingError("unknown cmdtype value '%s' for "
158 176 "'%s' command" % (cmdtype, name))
159 177 func.norepo = norepo
160 178 func.optionalrepo = optionalrepo
161 179 func.inferrepo = inferrepo
162 180 func.cmdtype = cmdtype
163 181 if synopsis:
164 182 self._table[name] = func, list(options), synopsis
165 183 else:
166 184 self._table[name] = func, list(options)
167 185 return func
168 186
169 187 class revsetpredicate(_funcregistrarbase):
170 188 """Decorator to register revset predicate
171 189
172 190 Usage::
173 191
174 192 revsetpredicate = registrar.revsetpredicate()
175 193
176 194 @revsetpredicate('mypredicate(arg1, arg2[, arg3])')
177 195 def mypredicatefunc(repo, subset, x):
178 196 '''Explanation of this revset predicate ....
179 197 '''
180 198 pass
181 199
182 200 The first string argument is used also in online help.
183 201
184 202 Optional argument 'safe' indicates whether a predicate is safe for
185 203 DoS attack (False by default).
186 204
187 205 Optional argument 'takeorder' indicates whether a predicate function
188 206 takes ordering policy as the last argument.
189 207
190 208 Optional argument 'weight' indicates the estimated run-time cost, useful
191 209 for static optimization, default is 1. Higher weight means more expensive.
192 210 Usually, revsets that are fast and return only one revision has a weight of
193 211 0.5 (ex. a symbol); revsets with O(changelog) complexity and read only the
194 212 changelog have weight 10 (ex. author); revsets reading manifest deltas have
195 213 weight 30 (ex. adds); revset reading manifest contents have weight 100
196 214 (ex. contains). Note: those values are flexible. If the revset has a
197 215 same big-O time complexity as 'contains', but with a smaller constant, it
198 216 might have a weight of 90.
199 217
200 218 'revsetpredicate' instance in example above can be used to
201 219 decorate multiple functions.
202 220
203 221 Decorated functions are registered automatically at loading
204 222 extension, if an instance named as 'revsetpredicate' is used for
205 223 decorating in extension.
206 224
207 225 Otherwise, explicit 'revset.loadpredicate()' is needed.
208 226 """
209 227 _getname = _funcregistrarbase._parsefuncdecl
210 228 _docformat = "``%s``\n %s"
211 229
212 230 def _extrasetup(self, name, func, safe=False, takeorder=False, weight=1):
213 231 func._safe = safe
214 232 func._takeorder = takeorder
215 233 func._weight = weight
216 234
class filesetpredicate(_funcregistrarbase):
    """Decorator used to register a fileset predicate

    Usage::

        filesetpredicate = registrar.filesetpredicate()

        @filesetpredicate('mypredicate()')
        def mypredicatefunc(mctx, x):
            '''Explanation of this fileset predicate ....
            '''
            pass

    The first string argument is also used in online help.

    The optional keyword argument 'callstatus' tells whether the
    predicate implies 'matchctx.status()' at runtime (False by
    default).

    The optional keyword argument 'callexisting' tells whether the
    predicate implies 'matchctx.existing()' at runtime (False by
    default).

    A single 'filesetpredicate' instance can be used to decorate more
    than one function.

    Functions decorated this way are registered automatically when the
    extension is loaded, provided the decorating instance in the
    extension is named 'filesetpredicate'.

    Otherwise, an explicit 'fileset.loadpredicate()' call is required.
    """
    # a predicate is declared like a function, e.g. 'mypredicate()';
    # reuse the shared parser to extract the bare name
    _getname = _funcregistrarbase._parsefuncdecl
    # online help renders each predicate as a literal name plus its doc
    _docformat = "``%s``\n    %s"

    def _extrasetup(self, name, func, callstatus=False, callexisting=False):
        # stash the runtime hints on the function object itself so the
        # fileset evaluator can inspect them later
        func._callexisting = callexisting
        func._callstatus = callstatus
255 273
class _templateregistrarbase(_funcregistrarbase):
    """Base of decorator to register functions as template specific one
    """
    # template entries are rendered in online help as ":name: doc"
    _docformat = ":%s: %s"
260 278
class templatekeyword(_templateregistrarbase):
    """Decorator used to register a template keyword

    Usage::

        templatekeyword = registrar.templatekeyword()

        @templatekeyword('mykeyword')
        def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
            '''Explanation of this template keyword ....
            '''
            pass

    The first string argument is also used in online help.

    A single 'templatekeyword' instance can be used to decorate more
    than one function.

    Functions decorated this way are registered automatically when the
    extension is loaded, provided the decorating instance in the
    extension is named 'templatekeyword'.

    Otherwise, an explicit 'templatekw.loadkeyword()' call is required.
    """
285 303
class templatefilter(_templateregistrarbase):
    """Decorator to register template filter

    Usage::

        templatefilter = registrar.templatefilter()

        @templatefilter('myfilter')
        def myfilterfunc(text):
            '''Explanation of this template filter ....
            '''
            pass

    The first string argument is used also in online help.

    'templatefilter' instance in example above can be used to
    decorate multiple functions.

    Decorated functions are registered automatically at loading
    extension, if an instance named as 'templatefilter' is used for
    decorating in extension.

    Otherwise, explicit 'templatefilters.loadfilter()' is needed.
    """
310 328
class templatefunc(_templateregistrarbase):
    """Decorator to register template function

    Usage::

        templatefunc = registrar.templatefunc()

        @templatefunc('myfunc(arg1, arg2[, arg3])', argspec='arg1 arg2 arg3')
        def myfuncfunc(context, mapping, args):
            '''Explanation of this template function ....
            '''
            pass

    The first string argument is used also in online help.

    If optional 'argspec' is defined, the function will receive 'args' as
    a dict of named arguments. Otherwise 'args' is a list of positional
    arguments.

    'templatefunc' instance in example above can be used to
    decorate multiple functions.

    Decorated functions are registered automatically at loading
    extension, if an instance named as 'templatefunc' is used for
    decorating in extension.

    Otherwise, explicit 'templater.loadfunction()' is needed.
    """
    # the declaration 'myfunc(arg1, ...)' looks like a function
    # declaration; reuse the shared parser to extract the bare name
    _getname = _funcregistrarbase._parsefuncdecl

    def _extrasetup(self, name, func, argspec=None):
        # remember how (or whether) positional arguments should be
        # converted to named ones when the templater invokes the function
        func._argspec = argspec
343 361
class internalmerge(_funcregistrarbase):
    """Decorator to register in-process merge tool

    Usage::

        internalmerge = registrar.internalmerge()

        @internalmerge('mymerge', internalmerge.mergeonly,
                       onfailure=None, precheck=None)
        def mymergefunc(repo, mynode, orig, fcd, fco, fca,
                        toolconf, files, labels=None):
            '''Explanation of this internal merge tool ....
            '''
            return 1, False # means "conflicted", "no deletion needed"

    The first string argument is used to compose actual merge tool name,
    ":name" and "internal:name" (the latter is historical one).

    The second argument is one of merge types below:

    ========== ======== ======== =========
    merge type precheck premerge fullmerge
    ========== ======== ======== =========
    nomerge       x        x        x
    mergeonly     o        x        o
    fullmerge     o        o        o
    ========== ======== ======== =========

    Optional argument 'onfailure' is the format of warning message
    to be used at failure of merging (target filename is specified
    at formatting). Or, None or so, if warning message should be
    suppressed.

    Optional argument 'precheck' is the function to be used
    before actual invocation of internal merge tool itself.
    It takes as same arguments as internal merge tool does, other than
    'files' and 'labels'. If it returns false value, merging is aborted
    immediately (and file is marked as "unresolved").

    'internalmerge' instance in example above can be used to
    decorate multiple functions.

    Decorated functions are registered automatically at loading
    extension, if an instance named as 'internalmerge' is used for
    decorating in extension.

    Otherwise, explicit 'filemerge.loadinternalmerge()' is needed.
    """
    # online help renders each tool as ":name" plus its doc
    _docformat = "``:%s``\n    %s"

    # merge type definitions:
    nomerge = None
    mergeonly = 'mergeonly' # just the full merge, no premerge
    fullmerge = 'fullmerge' # both premerge and merge

    def _extrasetup(self, name, func, mergetype,
                    onfailure=None, precheck=None):
        # attach the tool metadata to the function object itself so the
        # merge machinery can look it up at merge time
        func.mergetype = mergetype
        func.onfailure = onfailure
        func.precheck = precheck
General Comments 0
You need to be logged in to leave comments. Login now