# Provenance: mercurial/cmdutil.py as of changeset r47600:ad878e3f (default
# branch), "refactor: prefer lookup by revision, even for null..." by
# Joerg Sonnenberger. Extracted from a web changeset view; diff line-number
# residue removed.
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy as copymod
11 11 import errno
12 12 import os
13 13 import re
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 nullrev,
19 20 short,
20 21 )
21 22 from .pycompat import (
22 23 getattr,
23 24 open,
24 25 setattr,
25 26 )
26 27 from .thirdparty import attr
27 28
28 29 from . import (
29 30 bookmarks,
30 31 changelog,
31 32 copies,
32 33 crecord as crecordmod,
33 34 dirstateguard,
34 35 encoding,
35 36 error,
36 37 formatter,
37 38 logcmdutil,
38 39 match as matchmod,
39 40 merge as mergemod,
40 41 mergestate as mergestatemod,
41 42 mergeutil,
42 43 obsolete,
43 44 patch,
44 45 pathutil,
45 46 phases,
46 47 pycompat,
47 48 repair,
48 49 revlog,
49 50 rewriteutil,
50 51 scmutil,
51 52 state as statemod,
52 53 subrepoutil,
53 54 templatekw,
54 55 templater,
55 56 util,
56 57 vfs as vfsmod,
57 58 )
58 59
59 60 from .utils import (
60 61 dateutil,
61 62 stringutil,
62 63 )
63 64
64 65 if pycompat.TYPE_CHECKING:
65 66 from typing import (
66 67 Any,
67 68 Dict,
68 69 )
69 70
70 71 for t in (Any, Dict):
71 72 assert t
72 73
73 74 stringio = util.stringio
74 75
# templates of common command options
#
# Each entry is a fancyopts-style tuple:
#   (short name, long name, default value, help text[, value placeholder])
# Commands concatenate these lists into their own option tables.

# --dry-run / -n
dryrunopts = [
    (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
]

# --confirm
confirmopts = [
    (b'', b'confirm', None, _(b'ask before applying actions')),
]

# options shared by commands that talk to a remote repository
remoteopts = [
    (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
    (
        b'',
        b'remotecmd',
        b'',
        _(b'specify hg command to run on the remote side'),
        _(b'CMD'),
    ),
    (
        b'',
        b'insecure',
        None,
        _(b'do not verify server certificate (ignoring web.cacerts config)'),
    ),
]

# -I/-X pattern filtering, shared by commands that walk the working copy
walkopts = [
    (
        b'I',
        b'include',
        [],
        _(b'include names matching the given patterns'),
        _(b'PATTERN'),
    ),
    (
        b'X',
        b'exclude',
        [],
        _(b'exclude names matching the given patterns'),
        _(b'PATTERN'),
    ),
]

# commit message sources (-m / -l)
commitopts = [
    (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
    (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
]

# explicit commit metadata (-d / -u)
commitopts2 = [
    (
        b'd',
        b'date',
        b'',
        _(b'record the specified date as commit date'),
        _(b'DATE'),
    ),
    (
        b'u',
        b'user',
        b'',
        _(b'record the specified user as committer'),
        _(b'USER'),
    ),
]

# "use the current date/user" shortcuts (-D / -U)
commitopts3 = [
    (b'D', b'currentdate', None, _(b'record the current date as commit date')),
    (b'U', b'currentuser', None, _(b'record the current user as committer')),
]

# bare -T for formatter-driven commands
formatteropts = [
    (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
]

# template selection for log-like commands (--style is deprecated)
templateopts = [
    (
        b'',
        b'style',
        b'',
        _(b'display using template map file (DEPRECATED)'),
        _(b'STYLE'),
    ),
    (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
]

# options common to log-like commands; includes templateopts
logopts = [
    (b'p', b'patch', None, _(b'show patch')),
    (b'g', b'git', None, _(b'use git extended diff format')),
    (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
    (b'M', b'no-merges', None, _(b'do not show merges')),
    (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
    (b'G', b'graph', None, _(b"show the revision DAG")),
] + templateopts

# basic diff output options
diffopts = [
    (b'a', b'text', None, _(b'treat all files as text')),
    (
        b'g',
        b'git',
        None,
        _(b'use git extended diff format (DEFAULT: diff.git)'),
    ),
    (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
    (b'', b'nodates', None, _(b'omit dates from diff headers')),
]

# whitespace-handling diff options
diffwsopts = [
    (
        b'w',
        b'ignore-all-space',
        None,
        _(b'ignore white space when comparing lines'),
    ),
    (
        b'b',
        b'ignore-space-change',
        None,
        _(b'ignore changes in the amount of white space'),
    ),
    (
        b'B',
        b'ignore-blank-lines',
        None,
        _(b'ignore changes whose lines are all blank'),
    ),
    (
        b'Z',
        b'ignore-space-at-eol',
        None,
        _(b'ignore changes in whitespace at EOL'),
    ),
]

# extended diff options; embeds diffwsopts between the two literal lists
diffopts2 = (
    [
        (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
        (
            b'p',
            b'show-function',
            None,
            _(
                b'show which function each change is in (DEFAULT: diff.showfunc)'
            ),
        ),
        (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
    ]
    + diffwsopts
    + [
        (
            b'U',
            b'unified',
            b'',
            _(b'number of lines of context to show'),
            _(b'NUM'),
        ),
        (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
        (
            b'',
            b'root',
            b'',
            _(b'produce diffs relative to subdirectory'),
            _(b'DIR'),
        ),
    ]
)

# -t/--tool for merge-capable commands
mergetoolopts = [
    (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
]

# -s/--similarity for rename detection (addremove, import)
similarityopts = [
    (
        b's',
        b'similarity',
        b'',
        _(b'guess renamed files by similarity (0<=s<=100)'),
        _(b'SIMILARITY'),
    )
]

# -S/--subrepos recursion flag
subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]

# revlog selection options for debug commands
debugrevlogopts = [
    (b'c', b'changelog', False, _(b'open changelog')),
    (b'm', b'manifest', False, _(b'open manifest')),
    (b'', b'dir', b'', _(b'open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = b"^HG: ------------------------ >8 ------------------------$"
267 268
268 269
def check_at_most_one_arg(opts, *args):
    """Abort when more than one of *args* is set in *opts*.

    Returns the single option name that was specified, or None when none
    of them were.
    """

    def to_display(name):
        # option names are stored with underscores; show them with dashes
        return pycompat.sysbytes(name).replace(b'_', b'-')

    winner = None
    for name in args:
        if not opts.get(name):
            continue
        if winner:
            raise error.InputError(
                _(b'cannot specify both --%s and --%s')
                % (to_display(winner), to_display(name))
            )
        winner = name
    return winner
288 289
289 290
def check_incompatible_arguments(opts, first, others):
    """Abort when *first* is given together with any option in *others*.

    Unlike check_at_most_one_arg(), `others` are not mutually exclusive
    among themselves; they are passed as a single collection.
    """
    for name in others:
        # delegate the pairwise exclusivity check (and its error message)
        check_at_most_one_arg(opts, first, name)
298 299
299 300
def resolvecommitoptions(ui, opts):
    """Normalize related commit options in the *opts* dict.

    Resolves --currentdate/--currentuser into concrete b'date'/b'user'
    values.  The return value indicates that ``rewrite.update-timestamp``
    is the reason the ``date`` option is set.
    """
    check_at_most_one_arg(opts, b'date', b'currentdate')
    check_at_most_one_arg(opts, b'user', b'currentuser')

    def stamp():
        # current wall-clock time in Mercurial's internal date format
        return b'%d %d' % dateutil.makedate()

    # was the date filled in only because of config? (date-only change
    # should then be ignored by callers)
    date_from_config = False

    if opts.get(b'currentdate'):
        opts[b'date'] = stamp()
    elif (
        not opts.get(b'date')
        and ui.configbool(b'rewrite', b'update-timestamp')
        and opts.get(b'currentdate') is None
    ):
        opts[b'date'] = stamp()
        date_from_config = True

    if opts.get(b'currentuser'):
        opts[b'user'] = ui.username()

    return date_from_config
325 326
326 327
def checknotesize(ui, opts):
    """Validate the --note option: at most 255 bytes and a single line."""

    note = opts.get(b'note')
    if note:
        if len(note) > 255:
            raise error.InputError(
                _(b"cannot store a note of more than 255 bytes")
            )
        if b'\n' in note:
            raise error.InputError(_(b"note cannot contain a newline"))
338 339
339 340
def ishunk(x):
    """Return True when *x* is a patch hunk (curses or plain record)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
343 344
344 345
def newandmodified(chunks, originalchunks):
    """Split selected *chunks* into newly-added-and-modified files.

    Returns a pair of sets: files that are new AND were modified during
    chunk selection, and companion files (e.g. rename sources) that must
    also be restored for patch application to work.
    """
    newfiles = set()
    alsorestore = set()
    for c in chunks:
        if not ishunk(c):
            continue
        # only hunks for new files that were edited after the original
        # diff was taken are interesting here
        if c.header.isnewfile() and c not in originalchunks:
            fname = c.header.filename()
            newfiles.add(fname)
            alsorestore.update(set(c.header.files()) - {fname})
    return newfiles, alsorestore
359 360
360 361
def parsealiases(cmd):
    """Expand a b"name|alias1|..." command-table key into a list of names.

    For every name containing a dash, a dash-less variant is appended as
    an extra alias (unless that spelling already exists).
    """
    names = cmd.split(b"|")
    seen = set(names)
    dashless = []
    for name in names:
        if b'-' not in name:
            continue
        squashed = name.replace(b'-', b'')
        if squashed not in seen:
            seen.add(squashed)
            dashless.append(squashed)
    names.extend(dashless)
    return names
373 374
374 375
def setupwrapcolorwrite(ui):
    """Replace ui.write with a wrapper that labels/colorizes diff output.

    Returns the original write method so the caller can restore it when
    done.
    """
    oldwrite = ui.write

    def labeledwrite(*args, **kwargs):
        label = kwargs.pop('label', b'')
        # difflabel yields (chunk, label-suffix) pairs for diff coloring
        for chunk, sub in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + sub)

    setattr(ui, 'write', labeledwrite)
    return oldwrite
389 390
390 391
def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
    """Let the user select hunks, via curses when enabled.

    Falls back to the text-mode selector when the curses UI raises
    fallbackerror, or when curses is not requested.
    """
    try:
        if usecurses:
            if testfile:
                # test mode: drive the chunk selector from a script file
                selector = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector
                )
            else:
                selector = crecordmod.chunkselector
            return crecordmod.filterpatch(ui, originalhunks, selector, operation)
    except crecordmod.fallbackerror as e:
        ui.warn(b'%s\n' % e)
        ui.warn(_(b'falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, match, operation)
409 410
410 411
def recordfilter(ui, originalhunks, match, operation=None):
    """Prompt the user to filter *originalhunks*; return (chunks, opts).

    *operation* is used to build ui messages telling the user what kind
    of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config(b'experimental', b'crecordtest')
    # temporarily wrap ui.write so the displayed diff is colorized
    oldwrite = setupwrapcolorwrite(ui)
    try:
        selection = filterchunks(
            ui, originalhunks, usecurses, testfile, match, operation
        )
    finally:
        # always restore the original write, even on error/abort
        ui.write = oldwrite
    newchunks, newopts = selection
    return newchunks, newopts
428 429
429 430
def dorecord(
    ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
):
    """Interactively select changes and hand them to *commitfunc*.

    *commitfunc* is the non-interactive commit entry point to delegate to
    (e.g. commands.commit); *cmdsuggest* is the command name suggested in
    the error message when the terminal is not interactive; *backupall*
    asks for every changed file to be backed up (not just the ones touched
    by the selection); *filterfn* selects hunks (e.g. recordfilter).
    Requires an interactive terminal.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _(b'running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _(b'running non-interactively')
        raise error.InputError(msg)

    # make sure username is set before going interactive
    if not opts.get(b'user'):
        ui.username()  # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        if not opts.get(b'interactive-unshelve'):
            checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            # partial selection of a merge is not supported
            raise error.InputError(
                _(
                    b'cannot partially commit a merge '
                    b'(use "hg commit" instead)'
                )
            )

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        force = opts.get(b'force')
        if not force:
            # report bad file patterns through fail() unless forced
            match = matchmod.badmatch(match, fail)

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status, so hand it a
            # deep-ish copy and keep the original intact
            tmpstatus = scmutil.status(
                copymod.copy(status.modified),
                copymod.copy(status.added),
                copymod.copy(status.removed),
                copymod.copy(status.deleted),
                copymod.copy(status.unknown),
                copymod.copy(status.ignored),
                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
            )

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True
            )
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, match, status, fail)
        diffopts = patch.difffeatureopts(
            ui,
            opts=opts,
            whitespace=True,
            section=b'commands',
            configprefix=b'commit.interactive.',
        )
        # git-style diffs with function context are required for hunk
        # selection to round-trip correctly
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks, match)
        except error.PatchError as err:
            raise error.InputError(_(b'error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir. We also will need to restore
        # files that were the sources of renames so that the patch application
        # works.
        newlyaddedandmodifiedfiles, alsorestore = newandmodified(
            chunks, originalchunks
        )
        # files touched by at least one selected chunk
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers without files() (e.g. non-hunk chunks) are skipped
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_(b'no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [
                f
                for f in newfiles
                if f in modified or f in newlyaddedandmodifiedfiles
            ]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join(b'record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # an existing backup dir from a previous run is fine
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(
                    prefix=os.path.basename(f) + b'.', dir=backupdir
                )
                os.close(fd)
                ui.debug(b'backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # serialize only the selected chunks for backed-up files
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get(b'review', False):
                patchtext = (
                    crecordmod.diffhelptext
                    + crecordmod.patchhelptext
                    + fp.read()
                )
                reviewedpatch = ui.edit(
                    patchtext, b"", action=b"diff", repopath=repo.path
                )
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            # remove newly added files from the working copy before revert
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
                mergemod.revert_to(repo[b'.'], matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug(b'applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.InputError(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in pycompat.iteritems(backups):
                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == b'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; a leftover backup is harmless
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # take the working-copy lock around the whole record operation
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
653 654
654 655
class dirnode(object):
    """
    A directory of the user's working copy, carrying what is needed to
    decide whether its status output can be tersed.

    path is the path to the directory, without a trailing '/'

    statuses is a set of statuses of all files in this directory (this
    includes all the files in all the subdirectories too)

    files is a list of files which are direct children of this directory

    subdirs maps a sub-directory name to its own dirnode object
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record *filename* as a direct child of this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not a direct child of this directory, walk down to
        the directory it belongs to (creating intermediate dirnodes as
        needed) and add it there.
        """
        if b'/' not in filename:
            # no path separator: the file lives directly here
            self._addfileindir(filename, status)
        else:
            subdir, remainder = filename.split(b'/', 1)

            # create the child dirnode lazily
            if subdir not in self.subdirs:
                self.subdirs[subdir] = dirnode(
                    pathutil.join(self.path, subdir)
                )

            self.subdirs[subdir].addfile(remainder, status)

        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, st in self.files:
            yield st, pathutil.join(self.path, name)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) pairs for this dirnode's subtree.

        terseargs is the string of status abbreviations the user passed
        with the `--terse` flag.

        Cases:

        1) Every file in this directory (recursively) shares one status
        and the user asked to terse it -> yield (status, dirpath) with a
        trailing '/'.

        2) Otherwise yield (status, filepath) for direct children, then
        recurse into each subdirectory.
        """
        if len(self.statuses) == 1:
            lone = self.statuses.pop()

            # terse only when this status abbreviation was requested
            if lone in terseargs:
                yield lone, self.path + b'/'
                return

        # direct children first...
        for entry in self.iterfilepaths():
            yield entry

        # ...then everything below
        for child in self.subdirs.values():
            for entry in child.tersewalk(terseargs):
                yield entry
754 755
755 756
def tersedir(statuslist, terseargs):
    """
    Terse the status when all files in a directory share the same status.

    statuslist is a scmutil.status() object holding a list of files per
    status.  terseargs is the string the user passed to the `--terse`
    flag.

    Builds a tree of dirnode objects; each node stores what is needed to
    decide whether that directory can be tersed.
    """
    # the order matters here as that is used to produce final list
    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # validate the user-supplied status abbreviations
    for ch in pycompat.bytestr(terseargs):
        if ch not in allst:
            raise error.InputError(_(b"'%s' not recognized") % ch)

    # dirnode for the repository root
    rootobj = dirnode(b'')
    pstatus = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    # one output bucket per status character
    tersedict = {name[0:1]: [] for name in pstatus}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)

    # the root dir itself is never tersed, so take its files directly
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # walk each sub-directory, tersing where possible
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    return scmutil.status(*[sorted(tersedict[st]) for st in allst])
811 812
812 813
813 814 def _commentlines(raw):
814 815 '''Surround lineswith a comment char and a new line'''
815 816 lines = raw.splitlines()
816 817 commentedlines = [b'# %s' % line for line in lines]
817 818 return b'\n'.join(commentedlines) + b'\n'
818 819
819 820
@attr.s(frozen=True)
class morestatus(object):
    """Extra `hg status` output describing unfinished state.

    Emits the "morestatus" footer: the unfinished-operation banner, its
    continuation hint, and the list of unresolved merge conflicts.
    """

    # absolute path of the repository root (used to print relative paths)
    reporoot = attr.ib()
    # name of the unfinished operation (from statemod), or None
    unfinishedop = attr.ib()
    # human-readable continuation hint for that operation, or None
    unfinishedmsg = attr.ib()
    # True when a merge is in progress
    activemerge = attr.ib()
    # paths with unresolved conflicts (only meaningful when activemerge)
    unresolvedpaths = attr.ib()
    # paths already emitted through formatfile().  Use a factory so every
    # instance gets its own set: a plain default=set() would be shared by
    # all morestatus instances (classic mutable-default pitfall).
    _formattedpaths = attr.ib(init=False, default=attr.Factory(set))
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        """Record that *path* was output; flag it when it is unresolved."""
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        """Emit the morestatus footer through formatter *fm*."""
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

            if self.unfinishedop:
                fm.data(unfinished=self.unfinishedop)
                statemsg = (
                    _(b'The repository is in an unfinished *%s* state.')
                    % self.unfinishedop
                )
                fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
            if self.unfinishedmsg:
                fm.data(unfinishedmsg=self.unfinishedmsg)

            # May also start new data items.
            self._formatconflicts(fm)

            if self.unfinishedmsg:
                fm.plain(
                    b'%s\n' % _commentlines(self.unfinishedmsg),
                    label=self._label,
                )

    def _formatconflicts(self, fm):
        # nothing to report unless a merge is in progress
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b'    %s'
                    % util.pathto(self.reporoot, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    b'''Unresolved merge conflicts:

%s

To mark files as resolved:  hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
897 898
898 899
def readmorestatus(repo):
    """Return a morestatus object when the repo has unfinished state,
    otherwise None."""
    statetuple = statemod.getrepostate(repo)
    mergestate = mergestatemod.mergestate.read(repo)
    activemerge = mergestate.active()
    if not (statetuple or activemerge):
        return None

    op = msg = unresolved = None
    if statetuple:
        op, msg = statetuple
    if activemerge:
        unresolved = sorted(mergestate.unresolved())
    return morestatus(repo.root, op, msg, activemerge, unresolved)
915 916
916 917
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command
    matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for key in keys:
        aliases = parsealiases(key)
        allcmds.extend(aliases)

        if cmd in aliases:
            found = cmd
        elif strict:
            found = None
        else:
            # prefix match against any alias
            found = next((a for a in aliases if a.startswith(cmd)), None)

        if found is None:
            continue
        # debug commands are segregated so they only win by default
        if aliases[0].startswith(b"debug") or found.startswith(b"debug"):
            debugchoice[found] = (aliases, table[key])
        else:
            choice[found] = (aliases, table[key])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
954 955
955 956
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact name wins outright
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        # a single (possibly prefix) match
        return next(iter(choice.values()))

    raise error.UnknownCommand(cmd, allcmds)
971 972
972 973
def changebranch(ui, repo, revs, label, opts):
    """Change the branch name of the given revs to *label*.

    Rewrites each changeset with memctx, records obsmarkers mapping old
    to new nodes, and moves bookmarks and (when applicable) the working
    copy onto the rewritten head.
    """

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.InputError(b"empty revision set")
        # only a single linear stack can be rewritten in one go
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            raise error.InputError(
                _(b"cannot change branch of non-linear revisions")
            )
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        # branches of the stack's parents; moving onto one of those is
        # always allowed without --force
        rpb = {parent.branch() for parent in root.parents()}
        if (
            not opts.get(b'force')
            and label not in rpb
            and label in repo.branchmap()
        ):
            raise error.InputError(
                _(b"a branch of the same name already exists")
            )

        if repo.revs(b'obsolete() and %ld', revs):
            raise error.InputError(
                _(b"cannot change branch of a obsolete changeset")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.InputError(
                _(b"cannot change branch in middle of a stack")
            )

        # maps old node -> (new node,) for rewritten changesets
        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            # closes over the current ctx; it is consumed by commitctx()
            # within this same iteration, so the late binding is safe
            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            # record the original node so the rewrite is traceable
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1079 1080
1080 1081
def findrepo(p):
    """Walk upward from path ``p`` looking for a repository root.

    Returns the first ancestor directory (including ``p`` itself) that
    contains a ``.hg`` directory, or None if the filesystem root is
    reached without finding one.
    """
    current = p
    while True:
        if os.path.isdir(os.path.join(current, b".hg")):
            return current
        parent = os.path.dirname(current)
        if parent == current:
            # reached the filesystem root without finding a repository
            return None
        current = parent
1088 1089
1089 1090
def bailifchanged(repo, merge=True, hint=None):
    """Raise StateError unless the working directory is clean.

    'merge' can be set to False if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is forwarded as the hint of any raised exception.

    Subrepositories are checked recursively.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)
    status = repo.status()
    dirty = (
        status.modified or status.added or status.removed or status.deleted
    )
    if dirty:
        raise error.StateError(_(b'uncommitted changes'), hint=hint)
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
1107 1108
1108 1109
def logmessage(ui, opts):
    """Extract the commit message from the -m/-l options.

    Returns the message given with -m, otherwise the content of the
    -l logfile ('-' meaning stdin), otherwise None.
    """

    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    if message or not logfile:
        return message

    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        # normalize line endings while reading the file
        return b'\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(
            _(b"can't read commit message '%s': %s")
            % (logfile, encoding.strtolocal(inst.strerror))
        )
1129 1130
1130 1131
def mergeeditform(ctxorbool, baseformname):
    """Return the editform name (referencing a committemplate).

    'ctxorbool' is either a ctx about to be committed, or a bool saying
    whether the commit is a merge.

    Returns 'baseformname' with '.merge' appended for a merge and
    '.normal' appended otherwise.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    if ismerge:
        return baseformname + b".merge"
    return baseformname + b".normal"
1147 1148
1148 1149
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """Return the commit message editor appropriate for '--edit'.

    'finishdesc' is called with the edited commit message (the new
    changeset's description) just after editing but before the
    empty-ness check; it returns the text actually stored into history,
    which allows rewriting the description before storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line; the 'HG: ' prefix and
    EOL are added automatically.

    'editform' is a dot-separated list of names distinguishing the
    purpose of the commit text editing.

    If either 'finishdesc' or 'extramsg' is given, a forcing editor is
    returned regardless of 'edit' (they are specific to MQ usage).
    """
    if edit or finishdesc or extramsg:

        def editor(repo, ctx, subs):
            return commitforceeditor(
                repo,
                ctx,
                subs,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return editor
    if editform:

        def editor(repo, ctx, subs):
            return commiteditor(repo, ctx, subs, editform=editform)

        return editor
    return commiteditor
1179 1180
1180 1181
def _escapecommandtemplate(tmpl):
    """Escape '\\' in the literal parts of a template string.

    Template syntax fragments are passed through untouched; only plain
    string segments are escaped.
    """
    pieces = []
    for kind, begin, finish in templater.scantemplate(tmpl, raw=True):
        segment = tmpl[begin:finish]
        if kind == b'string':
            segment = stringutil.escapestr(segment)
        pieces.append(segment)
    return b''.join(pieces)
1189 1190
1190 1191
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        return tmpl
    # escape backslashes in literal segments before compiling the template
    templ = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
    return templ.renderdefault(props)
1208 1209
1209 1210
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset.

    Each props item must be a stringify-able value or a callable
    returning such a value; bare lists or dicts are not supported.
    """
    repo = ctx.repo()
    resources = formatter.templateresources(repo.ui, repo)
    templ = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords, resources=resources
    )
    mapping = {b'ctx': ctx}
    mapping.update(props or {})
    return templ.renderdefault(mapping)
1225 1226
1226 1227
def format_changeset_summary(ui, ctx, command=None, default_spec=None):
    """Render a one-line summary of changeset 'ctx'.

    The template is looked up in this order: the per-command
    'command-templates.oneline-summary.<command>' config, the generic
    'command-templates.oneline-summary' config, 'default_spec', and
    finally a built-in default showing rev:node, names, and the first
    line of the description.
    """
    spec = None
    if command:
        spec = ui.config(
            b'command-templates', b'oneline-summary.%s' % command, None
        )
    spec = (
        spec
        or ui.config(b'command-templates', b'oneline-summary')
        or default_spec
        or (
            b'{separate(" ", '
            b'label("oneline-summary.changeset", "{rev}:{node|short}")'
            b', '
            b'join(filter(namespaces % "{ifeq(namespace, "branches", "", join(names % "{label("oneline-summary.{namespace}", name)}", " "))}"), " ")'
            b')} '
            b'"{label("oneline-summary.desc", desc|firstline)}"'
        )
    )
    rendered = rendertemplate(ctx, spec)
    return rendered.split(b'\n')[0]
1249 1250
1250 1251
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # mapping of %-spec character -> equivalent template fragment
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    # %N/%n are only available when the relevant counters were supplied;
    # with both, %n is zero-padded to the width of the total count
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    for typ, start, end in templater.scantemplate(pat, raw=True):
        # template syntax fragments pass through untouched; only literal
        # string segments are scanned for %-specs
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # no more %-specs: escape and emit the remaining literal
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                # '%' is the last character of the segment
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            c = pat[n + 1 : n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % c
                )
    return b''.join(newname)
1320 1321
1321 1322
def makefilename(ctx, pat, **props):
    """Expand an old-style %-format output filename against 'ctx'."""
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(
        ctx, _buildfntemplate(pat, **props), pycompat.byteskwargs(props)
    )
1330 1331
1331 1332
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout."""
    if not pat:
        return True
    return pat == b'-'
1335 1336
1336 1337
1337 1338 class _unclosablefile(object):
1338 1339 def __init__(self, fp):
1339 1340 self._fp = fp
1340 1341
1341 1342 def close(self):
1342 1343 pass
1343 1344
1344 1345 def __iter__(self):
1345 1346 return iter(self._fp)
1346 1347
1347 1348 def __getattr__(self, attr):
1348 1349 return getattr(self._fp, attr)
1349 1350
1350 1351 def __enter__(self):
1351 1352 return self
1352 1353
1353 1354 def __exit__(self, exc_type, exc_value, exc_tb):
1354 1355 pass
1355 1356
1356 1357
def makefileobj(ctx, pat, mode=b'wb', **props):
    """Open a file object for output pattern 'pat'.

    An empty pattern or '-' maps to the ui's stdio streams (wrapped so
    that close() is a no-op); anything else is expanded with
    makefilename() and opened with the given mode.
    """
    if isstdiofilename(pat):
        ui = ctx.repo().ui
        if mode in (b'r', b'rb'):
            stream = ui.fin
        else:
            stream = ui.fout
        return _unclosablefile(stream)
    return open(makefilename(ctx, pat, **props), mode)
1369 1370
1370 1371
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """Open the changelog, manifest, a filelog or a given revlog.

    The storage to open is selected by the mutually-exclusive
    --changelog/--manifest/--dir options in 'opts', or by 'file_'.
    If 'returnrevlog' is True, the result is unwrapped down to an actual
    revlog instance, and 'file_' may name a bare revlog index file on
    disk.  Raises InputError on conflicting or insufficient options.
    """
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    msg = None
    # validate option combinations first, collecting a single error message
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.InputError(msg)

    r = None
    if repo:
        if cl:
            # use the unfiltered changelog so hidden revisions are visible
            r = repo.unfiltered().changelog
        elif dir:
            if not scmutil.istreemanifest(repo):
                raise error.InputError(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.InputError(
                _(b'%r does not appear to be a revlog') % r
            )

    if not r:
        if not returnrevlog:
            raise error.InputError(_(b'cannot give path to non-revlog'))

        # fall back to opening 'file_' as a raw revlog index on disk
        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.InputError(_(b"revlog '%s' not found") % file_)
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
        )
    return r
1440 1441
1441 1442
def openrevlog(repo, cmd, file_, opts):
    """Obtain the revlog backing a storage item.

    Like ``openstorage()`` but always resolves down to a revlog
    instance.  Most callers want the main storage object instead; only
    code that must examine low-level revlog implementation details
    (e.g. debug commands) should use this.
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1453 1454
1454 1455
def copy(ui, repo, pats, opts, rename=False):
    """Implement 'hg copy' (and, with rename=True, 'hg rename').

    Handles three modes: un-marking copies (--forget), marking copies in
    an existing revision (--at-rev, which rewrites that revision), and
    the normal working-copy copy/rename.  Returns True if any per-file
    operation failed (used as the command's error status).
    """
    check_incompatible_arguments(opts, b'forget', [b'dry_run'])

    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    # abstarget -> abssrc, used to detect colliding destinations
    targets = {}
    forget = opts.get(b"forget")
    after = opts.get(b"after")
    dryrun = opts.get(b"dry_run")
    rev = opts.get(b'at_rev')
    if rev:
        if not forget and not after:
            # TODO: Remove this restriction and make it also create the copy
            # targets (and remove the rename source if rename==True).
            raise error.InputError(_(b'--at-rev requires --after'))
        ctx = scmutil.revsingle(repo, rev)
        if len(ctx.parents()) > 1:
            raise error.InputError(
                _(b'cannot mark/unmark copy in merge commit')
            )
    else:
        ctx = repo[None]

    pctx = ctx.p1()

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    if forget:
        # --forget: drop copy metadata, either in the working copy or by
        # rewriting the --at-rev changeset in memory
        if ctx.rev() is None:
            new_ctx = ctx
        else:
            if len(ctx.parents()) > 1:
                raise error.InputError(_(b'cannot unmark copy in merge commit'))
            # avoid cycle context -> subrepo -> cmdutil
            from . import context

            rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
            new_ctx = context.overlayworkingctx(repo)
            new_ctx.setbase(ctx.p1())
            mergemod.graft(repo, ctx, wctx=new_ctx)

        match = scmutil.match(ctx, pats, opts)

        current_copies = ctx.p1copies()
        current_copies.update(ctx.p2copies())

        uipathfn = scmutil.getuipathfn(repo)
        for f in ctx.walk(match):
            if f in current_copies:
                new_ctx[f].markcopied(None)
            elif match.exact(f):
                # only warn for names the user spelled out explicitly
                ui.warn(
                    _(
                        b'%s: not unmarking as copy - file is not marked as copied\n'
                    )
                    % uipathfn(f)
                )

        if ctx.rev() is not None:
            # commit the rewritten revision and retarget dirstate/bookmarks
            with repo.lock():
                mem_ctx = new_ctx.tomemctx_for_amend(ctx)
                new_node = mem_ctx.commit()

                if repo.dirstate.p1() == ctx.node():
                    with repo.dirstate.parentchange():
                        scmutil.movedirstate(repo, repo[new_node])
                replacements = {ctx.node(): [new_node]}
                scmutil.cleanupnodes(
                    repo, replacements, b'uncopy', fixphase=True
                )

        return

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.InputError(_(b'no source or destination specified'))
    if len(pats) == 1:
        raise error.InputError(_(b'no destination specified'))
    # the last pattern is the destination; the rest are sources
    dest = pats.pop()

    # expand one source pattern into (abs, rel, exact) triples, warning
    # about files that are removed or unmanaged
    def walkpat(pat):
        srcs = []
        # TODO: Inline and simplify the non-working-copy version of this code
        # since it shares very little with the working-copy version of it.
        ctx_to_walk = ctx if ctx.rev() is None else pctx
        m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
        for abs in ctx_to_walk.walk(m):
            rel = uipathfn(abs)
            exact = m.exact(abs)
            if abs not in ctx:
                if abs in pctx:
                    if not after:
                        if exact:
                            ui.warn(
                                _(
                                    b'%s: not copying - file has been marked '
                                    b'for remove\n'
                                )
                                % rel
                            )
                        continue
                else:
                    if exact:
                        ui.warn(
                            _(b'%s: not copying - file is not managed\n') % rel
                        )
                    continue

            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    if ctx.rev() is not None:
        # --at-rev: record the copy by rewriting the target revision
        rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
        absdest = pathutil.canonpath(repo.root, cwd, dest)
        if ctx.hasdir(absdest):
            raise error.InputError(
                _(b'%s: --at-rev does not support a directory as destination')
                % uipathfn(absdest)
            )
        if absdest not in ctx:
            raise error.InputError(
                _(b'%s: copy destination does not exist in %s')
                % (uipathfn(absdest), ctx)
            )

        # avoid cycle context -> subrepo -> cmdutil
        from . import context

        copylist = []
        for pat in pats:
            srcs = walkpat(pat)
            if not srcs:
                continue
            for abs, rel, exact in srcs:
                copylist.append(abs)

        if not copylist:
            raise error.InputError(_(b'no files to copy'))
        # TODO: Add support for `hg cp --at-rev . foo bar dir` and
        # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
        # existing functions below.
        if len(copylist) != 1:
            raise error.InputError(_(b'--at-rev requires a single source'))

        new_ctx = context.overlayworkingctx(repo)
        new_ctx.setbase(ctx.p1())
        mergemod.graft(repo, ctx, wctx=new_ctx)

        new_ctx.markcopied(absdest, copylist[0])

        with repo.lock():
            mem_ctx = new_ctx.tomemctx_for_amend(ctx)
            new_node = mem_ctx.commit()

            if repo.dirstate.p1() == ctx.node():
                with repo.dirstate.parentchange():
                    scmutil.movedirstate(repo, repo[new_node])
            replacements = {ctx.node(): [new_node]}
            scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)

        return

    # working-copy mode: copy/rename one file, updating the filesystem
    # (unless --after/--dry-run) and the dirstate; returns True on failure
    #
    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if b'/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit(b'/', 1)
            abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(
                _(b'%s: not overwriting - %s collides with %s\n')
                % (
                    reltarget,
                    repo.pathto(abssrc, cwd),
                    repo.pathto(prevsrc, cwd),
                )
            )
            return True  # report a failure

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
                abstarget
            ):
                if not rename:
                    ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
                    return True  # report a failure
                exists = False
                samefile = True

        if not after and exists or after and state in b'mn':
            if not opts[b'force']:
                if state in b'mn':
                    msg = _(b'%s: not overwriting - file already committed\n')
                    if after:
                        flags = b'--after --force'
                    else:
                        flags = b'--force'
                    if rename:
                        hint = (
                            _(
                                b"('hg rename %s' to replace the file by "
                                b'recording a rename)\n'
                            )
                            % flags
                        )
                    else:
                        hint = (
                            _(
                                b"('hg copy %s' to replace the file by "
                                b'recording a copy)\n'
                            )
                            % flags
                        )
                else:
                    msg = _(b'%s: not overwriting - file exists\n')
                    if rename:
                        hint = _(
                            b"('hg rename --after' to record the rename)\n"
                        )
                    else:
                        hint = _(b"('hg copy --after' to record the copy)\n")
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return True  # report a failure

        if after:
            if not exists:
                if rename:
                    ui.warn(
                        _(b'%s: not recording move - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                else:
                    ui.warn(
                        _(b'%s: not recording copy - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                return True  # report a failure
        elif not dryrun:
            # actually copy/move the file in the working directory
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or b'.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name
                    tmp = target + b"~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(
                        _(b'%s: cannot copy - %s\n')
                        % (relsrc, encoding.strtolocal(inst.strerror))
                    )
                    return True  # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(
            ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
        )
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
            ctx.forget([abssrc])

    # compute the target path for sources matched by 'pat' (non --after case)
    #
    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            res = lambda p: dest
        return res

    # compute the target path for sources matched by 'pat' in the --after
    # case, where the target already exists and must be guessed from it
    #
    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(
                        dest, os.path.basename(util.localpath(p))
                    )
                else:
                    res = lambda p: dest
        return res

    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.InputError(
                _(
                    b'with multiple sources, destination must be an '
                    b'existing directory'
                )
            )
        if util.endswithsep(dest):
            raise error.InputError(
                _(b'destination %s is not a directory') % dest
            )

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.InputError(_(b'no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    return errors != 0
1860 1861
1861 1862
## facility to let extensions process additional data into an import patch
# lists of identifiers to be executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mappings from identifier to the actual import function
#
# 'preimport' functions are run before the commit is made and are provided
# the following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' functions are run after the commit is made and are provided
# the following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1882 1883
1883 1884
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple: a status message, the new
    node (or None), and whether any hunks were rejected in --partial mode.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    # nothing to import (e.g. an empty patch file)
    if not tmpname:
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    # always work with two parent contexts; look up the virtual second
    # parent by revision (nullrev) rather than by node
    if len(parents) == 1:
        parents.append(repo[nullrev])
    if opts.get(b'exact'):
        # --exact requires the node/parent info recorded in the patch
        if not nodeid or not p1:
            raise error.InputError(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullrev]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullrev]
        except error.RepoError:
            # recorded parents are unknown locally; fall back to the
            # current working directory parents
            p1, p2 = parents
        if p2.rev() == nullrev:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    if update:
        # apply the patch to the working directory
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchError as e:
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                # --partial: keep going, but remember hunks were rejected
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            extra = {}
            # let extensions (via extrapreimportmap) mutate the extra dict
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass: build the commit in memory without touching the
        # working directory
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )

            overrides = {}
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects
2085 2086
2086 2087
2087 2088 # facility to let extensions include additional data in an exported patch
2088 2089 # list of identifiers to be executed in order
2089 2090 extraexport = []
2090 2091 # mapping from identifier to actual export function
2091 2092 # function as to return a string to be added to the header or None
2092 2093 # it is given two arguments (sequencenumber, changectx)
2093 2094 extraexportmap = {}
2094 2095
2095 2096
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Emit one changeset in "HG changeset patch" format through ``fm``.

    Writes the header lines (user, date, branch, node, parents), any
    extension-provided extra headers, the description, and finally the diff
    against the selected parent. ``seqno`` is the 1-based position of this
    changeset in the export batch and is passed to extra-header hooks.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against the second parent instead of the first
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        # root changeset: diff against the null node
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    # only emit the branch header for non-default branches
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        # plain output: stream labeled chunks directly
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
2139 2140
2140 2141
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file.

    ``dest`` names the destination; names wrapped in angle brackets
    (e.g. ``<unnamed>``) are synthetic and are not echoed to the user.
    """
    seqno = 0
    for rev in revs:
        seqno += 1
        ctx = repo[rev]
        if not dest.startswith(b'<'):
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)
2149 2150
2150 2151
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets to possibly multiple files.

    File names are produced from ``fntemplate``; several revisions may map
    to the same file, in which case they are appended in sequence order.
    """
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    # filename -> [(seqno, rev), ...]; sortdict preserves insertion order
    filemap = util.sortdict()

    seqno = 0
    for rev in revs:
        seqno += 1
        ctx = repo[rev]
        dest = makefilename(
            ctx, fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        if dest not in filemap:
            filemap[dest] = []
        filemap[dest].append((seqno, rev))

    for dest in filemap:
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note(b"%s\n" % dest)
            for seqno, rev in filemap[dest]:
                fm.startitem()
                ctx = repo[rev]
                _exportsingle(
                    repo, ctx, fm, match, switch_parent, seqno, diffopts
                )
2175 2176
2176 2177
def _prefetchchangedfiles(repo, revs, match):
    """Warm the prefetch cache for every file touched by ``revs``.

    Only files accepted by ``match`` are considered; a falsy ``match``
    means all changed files.
    """
    touched = set()
    for rev in revs:
        for path in repo[rev].files():
            if not match or match(path):
                touched.add(path)
    combined = scmutil.matchfiles(repo, touched)
    scmutil.prefetchfiles(repo, [(rev, combined) for rev in revs])
2186 2187
2187 2188
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    """export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
        Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named
          using the given template.
        Otherwise: All revs will be written to basefm.
    """
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
    else:
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
2229 2230
2230 2231
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream."""
    _prefetchchangedfiles(repo, revs, match)

    destname = getattr(fp, 'name', b'<unnamed>')
    with formatter.formatter(repo.ui, fp, b'export', {}) as fmtr:
        _exportfile(repo, revs, fmtr, destname, switch_parent, opts, match)
2238 2239
2239 2240
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    # successors may be empty (pruned changeset); only write when present
    fm.condwrite(
        succs,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, succs), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parents), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    # the date is shown separately above, so drop it from metadata output;
    # copy first to avoid mutating the marker's own metadata dict
    meta = marker.metadata().copy()
    meta.pop(b'date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
2270 2271
2271 2272
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec.

    Returns the revision number as bytes; raises InputError when no
    revision matches.
    """
    matching = repo.revs(b'date(%s)', date)
    try:
        tiprev = matching.max()
    except ValueError:
        raise error.InputError(_(b"revision matching date not found"))

    ui.status(
        _(b"found revision %d from %s\n")
        % (tiprev, dateutil.datestr(repo[tiprev].date()))
    )
    return b'%d' % tiprev
2285 2286
2286 2287
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule matched unknown files (and subrepo files) for addition.

    Returns the list of files that could not be added. ``explicitonly``
    restricts additions to files explicitly named by the match.
    """
    bad = []

    # record files the matcher reports as bad while still delegating to
    # the original bad handler
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit for case collisions on case-insensitive filesystems
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    # recurse into subrepositories; non-explicit recursion passes
    # explicitonly=True to the subrepo
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            if opts.get('subrepos'):
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        # only report rejections for files the user explicitly asked for
        bad.extend(f for f in rejected if f in match.files())
    return bad
2345 2346
2346 2347
def addwebdirpath(repo, serverpath, webconf):
    """Register ``repo`` under ``serverpath`` in ``webconf``, then do the
    same recursively for every subrepository ever recorded in .hgsub."""
    webconf[serverpath] = repo.root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))

    for rev in repo.revs(b'filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2355 2356
2356 2357
def forget(
    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Stop tracking matched files without deleting them.

    Returns a ``(bad, forgot)`` pair: files that could not be forgotten and
    files that were. Supports --dry-run and --interactive (mutually
    exclusive).
    """
    if dryrun and interactive:
        raise error.InputError(
            _(b"cannot specify both --dry-run and --interactive")
        )
    bad = []
    # record files the matcher reports as bad while still delegating to
    # the original bad handler
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    # recurse into subrepositories first
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(
                submatch,
                subprefix,
                subuipathfn,
                dryrun=dryrun,
                interactive=interactive,
            )
            bad.extend([subpath + b'/' + f for f in subbad])
            forgot.extend([subpath + b'/' + f for f in subforgot])
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not explicitonly:
        # warn about explicitly-requested files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(
                            _(
                                b'not removing %s: '
                                b'file is already untracked\n'
                            )
                            % uipathfn(f)
                        )
                    bad.append(f)

    if interactive:
        responses = _(
            b'[Ynsa?]'
            b'$$ &Yes, forget this file'
            b'$$ &No, skip this file'
            b'$$ &Skip remaining files'
            b'$$ Include &all remaining files'
            b'$$ &? (display help)'
        )
        # iterate over a copy since entries may be removed while looping
        for filename in forget[:]:
            r = ui.promptchoice(
                _(b'forget %s %s') % (uipathfn(filename), responses)
            )
            if r == 4:  # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(
                        _(b'forget %s %s') % (uipathfn(filename), responses)
                    )
            if r == 0:  # yes
                continue
            elif r == 1:  # no
                forget.remove(filename)
            elif r == 2:  # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3:  # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2456 2457
2457 2458
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """List files in ``ctx`` matching ``m`` through the formatter ``fm``.

    Returns 0 when at least one file was listed, 1 otherwise. Recurses
    into subrepositories when ``subrepos`` is set or the subrepo path is
    matched.
    """
    ret = 1

    # size/flags require a filectx lookup, which is only needed for
    # verbose output or when the formatter asked for those fields
    needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
    if fm.isplain() and not needsfctx:
        # Fast path. The speed-up comes from skipping the formatter, and batching
        # calls to ui.write.
        buf = []
        for f in ctx.matches(m):
            buf.append(fmt % uipathfn(f))
            if len(buf) > 100:
                ui.write(b''.join(buf))
                del buf[:]
            ret = 0
        if buf:
            ui.write(b''.join(buf))
    else:
        for f in ctx.matches(m):
            fm.startitem()
            fm.context(ctx=ctx)
            if needsfctx:
                fc = ctx[f]
                fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
            fm.data(path=f)
            fm.plain(fmt % uipathfn(f))
            ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if (
                    sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
                    == 0
                ):
                    ret = 0
            except error.LookupError:
                ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    return ret
2504 2505
2505 2506
def remove(
    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
):
    """Schedule matched files for removal (optionally deleting them).

    ``after`` only records already-deleted files; ``force`` removes even
    modified/added files. Returns 1 if any file was skipped with a
    warning, 0 otherwise. ``warnings`` may be passed by a recursive
    (subrepo) caller, in which case warnings are accumulated rather than
    printed here.
    """
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        # called recursively: let the outermost caller print warnings
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(
        _(b'searching'), total=len(subs), unit=_(b'subrepos')
    )
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(
                    submatch,
                    subprefix,
                    subuipathfn,
                    after,
                    force,
                    subrepos,
                    dryrun,
                    warnings,
                ):
                    ret = 1
            except error.LookupError:
                warnings.append(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = pathutil.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(
        _(b'deleting'), total=len(files), unit=_(b'files')
    )
    for f in files:

        def insubrepo():
            # True when f lives inside one of the subrepositories
            for subpath in wctx.substate:
                if f.startswith(subpath + b'/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(
                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
                )
            else:
                warnings.append(
                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
                )
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only record files already deleted from disk
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=len(remaining), unit=_(b'files')
        )
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(
                    _(b'not removing %s: file still exists\n') % uipathfn(f)
                )
                ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
        )
        for f in modified:
            progress.increment()
            warnings.append(
                _(
                    b'not removing %s: file is modified (use -f'
                    b' to force removal)\n'
                )
                % uipathfn(f)
            )
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(
                _(
                    b"not removing %s: file has been marked for add"
                    b" (use 'hg forget' to undo add)\n"
                )
                % uipathfn(f)
            )
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(
        _(b'deleting'), total=len(list), unit=_(b'files')
    )
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue  # we never unlink added files on remove
                    rmdir = repo.ui.configbool(
                        b'experimental', b'removeemptydirs'
                    )
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2655 2656
2656 2657
2657 2658 def _catfmtneedsdata(fm):
2658 2659 return not fm.datahint() or b'data' in fm.datahint()
2659 2660
2660 2661
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must
    call this method first."""
    # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
    # wasn't requested.
    if _catfmtneedsdata(fm):
        data = ctx[path].data()
        if decode:
            data = ctx.repo().wwritedata(path, data)
    else:
        data = b''
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write(b'data', b'%s', data)
    fm.data(path=path)
2678 2679
2679 2680
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Output matched file contents from ``ctx``, possibly to files.

    Returns 0 when at least one file was written, 1 otherwise. Recurses
    into subrepositories for matched subrepo paths.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        # write one file, either to the base formatter or to a file named
        # from fntemplate
        filename = None
        if fntemplate:
            filename = makefilename(
                ctx, fntemplate, pathname=os.path.join(prefix, path)
            )
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
                write(file)
                return 0
        except KeyError:
            pass

    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            # subrepo cat returns 0 on success, matching our convention
            if not sub.cat(
                submatch,
                basefm,
                fntemplate,
                subprefix,
                **pycompat.strkwargs(opts)
            ):
                err = 0
        except error.RepoLookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return err
2740 2741
2741 2742
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    Optionally runs addremove first (guarded by a dirstateguard so a
    failed commit rolls the dirstate back), then delegates the actual
    commit to ``commitfunc(ui, repo, message, matcher, opts)``.
    '''
    date = opts.get(b'date')
    if date:
        opts[b'date'] = dateutil.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    dsguard = None
    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get(b'addremove'):
        dsguard = dirstateguard.dirstateguard(repo, b'commit')
    with dsguard or util.nullcontextmanager():
        if dsguard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
                raise error.Abort(
                    _(b"failed to mark all new/missing files as added/removed")
                )

        return commitfunc(ui, repo, message, matcher, opts)
2765 2766
2766 2767
def samefile(f, ctx1, ctx2):
    """Return True if file ``f`` is identical in ``ctx1`` and ``ctx2``.

    Identical means same content and same flags; a file absent from both
    manifests also counts as identical."""
    if f not in ctx1.manifest():
        # absent on one side: identical only if absent on both
        return f not in ctx2.manifest()
    if f not in ctx2.manifest():
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    # cmp() is True when contents differ
    return not a.cmp(b) and a.flags() == b.flags()
2777 2778
2778 2779
2779 2780 def amend(ui, repo, old, extra, pats, opts):
2780 2781 # avoid cycle context -> subrepo -> cmdutil
2781 2782 from . import context
2782 2783
2783 2784 # amend will reuse the existing user if not specified, but the obsolete
2784 2785 # marker creation requires that the current user's name is specified.
2785 2786 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2786 2787 ui.username() # raise exception if username not set
2787 2788
2788 2789 ui.note(_(b'amending changeset %s\n') % old)
2789 2790 base = old.p1()
2790 2791
2791 2792 with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
2792 2793 # Participating changesets:
2793 2794 #
2794 2795 # wctx o - workingctx that contains changes from working copy
2795 2796 # | to go into amending commit
2796 2797 # |
2797 2798 # old o - changeset to amend
2798 2799 # |
2799 2800 # base o - first parent of the changeset to amend
2800 2801 wctx = repo[None]
2801 2802
2802 2803 # Copy to avoid mutating input
2803 2804 extra = extra.copy()
2804 2805 # Update extra dict from amended commit (e.g. to preserve graft
2805 2806 # source)
2806 2807 extra.update(old.extra())
2807 2808
2808 2809 # Also update it from the from the wctx
2809 2810 extra.update(wctx.extra())
2810 2811
2811 2812 # date-only change should be ignored?
2812 2813 datemaydiffer = resolvecommitoptions(ui, opts)
2813 2814
2814 2815 date = old.date()
2815 2816 if opts.get(b'date'):
2816 2817 date = dateutil.parsedate(opts.get(b'date'))
2817 2818 user = opts.get(b'user') or old.user()
2818 2819
2819 2820 if len(old.parents()) > 1:
2820 2821 # ctx.files() isn't reliable for merges, so fall back to the
2821 2822 # slower repo.status() method
2822 2823 st = base.status(old)
2823 2824 files = set(st.modified) | set(st.added) | set(st.removed)
2824 2825 else:
2825 2826 files = set(old.files())
2826 2827
2827 2828 # add/remove the files to the working copy if the "addremove" option
2828 2829 # was specified.
2829 2830 matcher = scmutil.match(wctx, pats, opts)
2830 2831 relative = scmutil.anypats(pats, opts)
2831 2832 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
2832 2833 if opts.get(b'addremove') and scmutil.addremove(
2833 2834 repo, matcher, b"", uipathfn, opts
2834 2835 ):
2835 2836 raise error.Abort(
2836 2837 _(b"failed to mark all new/missing files as added/removed")
2837 2838 )
2838 2839
2839 2840 # Check subrepos. This depends on in-place wctx._status update in
2840 2841 # subrepo.precommit(). To minimize the risk of this hack, we do
2841 2842 # nothing if .hgsub does not exist.
2842 2843 if b'.hgsub' in wctx or b'.hgsub' in old:
2843 2844 subs, commitsubs, newsubstate = subrepoutil.precommit(
2844 2845 ui, wctx, wctx._status, matcher
2845 2846 )
2846 2847 # amend should abort if commitsubrepos is enabled
2847 2848 assert not commitsubs
2848 2849 if subs:
2849 2850 subrepoutil.writestate(repo, newsubstate)
2850 2851
2851 2852 ms = mergestatemod.mergestate.read(repo)
2852 2853 mergeutil.checkunresolved(ms)
2853 2854
2854 2855 filestoamend = {f for f in wctx.files() if matcher(f)}
2855 2856
2856 2857 changes = len(filestoamend) > 0
2857 2858 if changes:
2858 2859 # Recompute copies (avoid recording a -> b -> a)
2859 2860 copied = copies.pathcopies(base, wctx, matcher)
2860 2861 if old.p2:
2861 2862 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
2862 2863
2863 2864 # Prune files which were reverted by the updates: if old
2864 2865 # introduced file X and the file was renamed in the working
2865 2866 # copy, then those two files are the same and
2866 2867 # we can discard X from our list of files. Likewise if X
2867 2868 # was removed, it's no longer relevant. If X is missing (aka
2868 2869 # deleted), old X must be preserved.
2869 2870 files.update(filestoamend)
2870 2871 files = [
2871 2872 f
2872 2873 for f in files
2873 2874 if (f not in filestoamend or not samefile(f, wctx, base))
2874 2875 ]
2875 2876
2876 2877 def filectxfn(repo, ctx_, path):
2877 2878 try:
2878 2879 # If the file being considered is not amongst the files
2879 2880 # to be amended, we should return the file context from the
2880 2881 # old changeset. This avoids issues when only some files in
2881 2882 # the working copy are being amended but there are also
2882 2883 # changes to other files from the old changeset.
2883 2884 if path not in filestoamend:
2884 2885 return old.filectx(path)
2885 2886
2886 2887 # Return None for removed files.
2887 2888 if path in wctx.removed():
2888 2889 return None
2889 2890
2890 2891 fctx = wctx[path]
2891 2892 flags = fctx.flags()
2892 2893 mctx = context.memfilectx(
2893 2894 repo,
2894 2895 ctx_,
2895 2896 fctx.path(),
2896 2897 fctx.data(),
2897 2898 islink=b'l' in flags,
2898 2899 isexec=b'x' in flags,
2899 2900 copysource=copied.get(path),
2900 2901 )
2901 2902 return mctx
2902 2903 except KeyError:
2903 2904 return None
2904 2905
2905 2906 else:
2906 2907 ui.note(_(b'copying changeset %s to %s\n') % (old, base))
2907 2908
2908 2909 # Use version of files as in the old cset
2909 2910 def filectxfn(repo, ctx_, path):
2910 2911 try:
2911 2912 return old.filectx(path)
2912 2913 except KeyError:
2913 2914 return None
2914 2915
2915 2916 # See if we got a message from -m or -l, if not, open the editor with
2916 2917 # the message of the changeset to amend.
2917 2918 message = logmessage(ui, opts)
2918 2919
2919 2920 editform = mergeeditform(old, b'commit.amend')
2920 2921
2921 2922 if not message:
2922 2923 message = old.description()
2923 2924 # Default if message isn't provided and --edit is not passed is to
2924 2925 # invoke editor, but allow --no-edit. If somehow we don't have any
2925 2926 # description, let's always start the editor.
2926 2927 doedit = not message or opts.get(b'edit') in [True, None]
2927 2928 else:
2928 2929 # Default if message is provided is to not invoke editor, but allow
2929 2930 # --edit.
2930 2931 doedit = opts.get(b'edit') is True
2931 2932 editor = getcommiteditor(edit=doedit, editform=editform)
2932 2933
2933 2934 pureextra = extra.copy()
2934 2935 extra[b'amend_source'] = old.hex()
2935 2936
2936 2937 new = context.memctx(
2937 2938 repo,
2938 2939 parents=[base.node(), old.p2().node()],
2939 2940 text=message,
2940 2941 files=files,
2941 2942 filectxfn=filectxfn,
2942 2943 user=user,
2943 2944 date=date,
2944 2945 extra=extra,
2945 2946 editor=editor,
2946 2947 )
2947 2948
2948 2949 newdesc = changelog.stripdesc(new.description())
2949 2950 if (
2950 2951 (not changes)
2951 2952 and newdesc == old.description()
2952 2953 and user == old.user()
2953 2954 and (date == old.date() or datemaydiffer)
2954 2955 and pureextra == old.extra()
2955 2956 ):
2956 2957 # nothing changed. continuing here would create a new node
2957 2958 # anyway because of the amend_source noise.
2958 2959 #
        # This is not what we expect from amend.
2960 2961 return old.node()
2961 2962
2962 2963 commitphase = None
2963 2964 if opts.get(b'secret'):
2964 2965 commitphase = phases.secret
2965 2966 newid = repo.commitctx(new)
2966 2967 ms.reset()
2967 2968
2968 2969 # Reroute the working copy parent to the new changeset
2969 2970 repo.setparents(newid, nullid)
2970 2971
2971 2972 # Fixing the dirstate because localrepo.commitctx does not update
2972 2973 # it. This is rather convenient because we did not need to update
2973 2974 # the dirstate for all the files in the new commit which commitctx
2974 2975 # could have done if it updated the dirstate. Now, we can
2975 2976 # selectively update the dirstate only for the amended files.
2976 2977 dirstate = repo.dirstate
2977 2978
2978 2979 # Update the state of the files which were added and modified in the
2979 2980 # amend to "normal" in the dirstate. We need to use "normallookup" since
2980 2981 # the files may have changed since the command started; using "normal"
2981 2982 # would mark them as clean but with uncommitted contents.
2982 2983 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
2983 2984 for f in normalfiles:
2984 2985 dirstate.normallookup(f)
2985 2986
2986 2987 # Update the state of files which were removed in the amend
2987 2988 # to "removed" in the dirstate.
2988 2989 removedfiles = set(wctx.removed()) & filestoamend
2989 2990 for f in removedfiles:
2990 2991 dirstate.drop(f)
2991 2992
2992 2993 mapping = {old.node(): (newid,)}
2993 2994 obsmetadata = None
2994 2995 if opts.get(b'note'):
2995 2996 obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
2996 2997 backup = ui.configbool(b'rewrite', b'backup-bundle')
2997 2998 scmutil.cleanupnodes(
2998 2999 repo,
2999 3000 mapping,
3000 3001 b'amend',
3001 3002 metadata=obsmetadata,
3002 3003 fixphase=True,
3003 3004 targetphase=commitphase,
3004 3005 backup=backup,
3005 3006 )
3006 3007
3007 3008 return newid
3008 3009
3009 3010
def commiteditor(repo, ctx, subs, editform=b''):
    """Return the commit message for ctx, editing interactively if empty.

    A non-empty description already present on the changeset context is
    used verbatim; otherwise the user is sent through commitforceeditor(),
    which aborts when the template is left unchanged.
    """
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(
        repo, ctx, subs, editform=editform, unchangedmessagedetection=True
    )
3016 3017
3017 3018
def commitforceeditor(
    repo,
    ctx,
    subs,
    finishdesc=None,
    extramsg=None,
    editform=b'',
    unchangedmessagedetection=False,
):
    """Always invoke the user's editor to obtain a commit message for ctx.

    The editor buffer is pre-filled with either the most specific matching
    ``[committemplate]`` configuration (looked up from ``editform`` by
    progressively dropping trailing ``.``-separated components) or the
    default skeleton built by buildcommittext().

    ``extramsg`` replaces the default "Leave message empty to abort commit."
    hint; ``finishdesc``, if given, post-processes the edited text before
    validation.

    Raises InputError when the resulting message is empty or — when
    ``unchangedmessagedetection`` is set — when a configured template was
    returned from the editor unmodified.
    """
    if not extramsg:
        extramsg = _(b"Leave message empty to abort commit.")

    # Look for the most specific [committemplate] entry: try the full
    # editform (prefixed with 'changeset'), then keep chopping off the last
    # dotted component until a configured template is found.
    forms = [e for e in editform.split(b'.') if e]
    forms.insert(0, b'changeset')
    templatetext = None
    while forms:
        ref = b'.'.join(forms)
        if repo.ui.config(b'committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref
            )
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = encoding.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    # 'pending' is repo.root when the transaction has pending writes,
    # otherwise falsy; ui.edit() forwards it to editor hooks.
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(
        committext,
        ctx.user(),
        ctx.extra(),
        editform=editform,
        pending=pending,
        repopath=repo.path,
        action=b'commit',
    )
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[: stripbelow.start()]

    # drop the 'HG: ...' helper lines added by buildcommittext/templates
    text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.InputError(_(b"empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.InputError(_(b"commit message unchanged"))

    return text
3081 3082
3082 3083
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the [committemplate] template named by ``ref`` for ctx.

    Template definitions from the [committemplate] config section are
    injected into the templater's cache so they can reference each other.
    Returns the rendered text as bytes.
    """
    ui = repo.ui
    spec = formatter.reference_templatespec(ref)
    tmpl = logcmdutil.changesettemplater(ui, repo, spec)
    overrides = (
        (k, templater.unquotestring(v))
        for k, v in ui.configitems(b'committemplate')
    )
    tmpl.t.cache.update(overrides)

    # templates expect bytes for extramsg, never None
    extramsg = extramsg or b''

    ui.pushbuffer()
    tmpl.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3098 3099
3099 3100
def hgprefix(msg):
    """Prefix every non-empty line of ``msg`` with ``HG: ``.

    Blank lines are dropped entirely; the result is a bytes string with no
    trailing newline.
    """
    lines = msg.split(b"\n")
    return b"\n".join(b"HG: " + line for line in lines if line)
3102 3103
3103 3104
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default plain-text skeleton shown in the commit editor.

    The skeleton is the current description (if any) followed by 'HG: '
    comment lines summarizing user, branch, bookmark, subrepos and the
    files touched by the commit.
    """
    modified = ctx.modified()
    added = ctx.added()
    removed = ctx.removed()

    lines = []
    if ctx.description():
        lines.append(ctx.description())
    lines.append(b"")
    # Empty line between message and comments.
    lines.append(b"")
    lines.append(
        hgprefix(
            _(
                b"Enter commit message."
                b" Lines beginning with 'HG:' are removed."
            )
        )
    )
    lines.append(hgprefix(extramsg))
    lines.append(b"HG: --")
    lines.append(hgprefix(_(b"user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_(b"branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
    for s in subs:
        lines.append(hgprefix(_(b"subrepo %s") % s))
    for f in added:
        lines.append(hgprefix(_(b"added %s") % f))
    for f in modified:
        lines.append(hgprefix(_(b"changed %s") % f))
    for f in removed:
        lines.append(hgprefix(_(b"removed %s") % f))
    if not (added or modified or removed):
        lines.append(hgprefix(_(b"no files changed")))
    lines.append(b"")

    return b"\n".join(lines)
3137 3138
3138 3139
def commitstatus(repo, node, branch, bheads=None, tip=None, opts=None):
    """Report the outcome of a just-created commit to the user.

    Warns when the commit recreated a changeset that already existed
    (repository tip unchanged), reports "created new head" or the
    reopening of a closed branch head, and echoes the committed changeset
    id in verbose/debug mode.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if tip is not None and repo.changelog.tip() == tip:
        # The tip did not move: we recommitted an existing changeset.
        # Warn instead of claiming a new head was created.
        repo.ui.warn(_(b"warning: commit already existed in the repository!\n"))
    elif not opts.get(b'amend') and bheads and node not in bheads:
        # "created new head" is printed whenever the new changeset is not
        # a branch head itself and none of its parents was already a head
        # of this branch: additional topo/branch roots, new topo heads,
        # branch merges and merge-created heads all qualify. Merging with
        # an existing head of the current branch (the usual case) does
        # not, nor do initial branch roots (bheads empty).
        parentishead = False
        for p in parents:
            if p.node() in bheads and p.branch() == branch:
                parentishead = True
                break
        if not parentishead:
            repo.ui.status(_(b'created new head\n'))

    if not opts.get(b'close_branch'):
        for p in parents:
            if p.closesbranch() and p.branch() == branch:
                repo.ui.status(
                    _(b'reopening closed branch head %d\n') % p.rev()
                )

    if repo.ui.debugflag:
        repo.ui.write(
            _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
        )
    elif repo.ui.verbose:
        repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3201 3202
3202 3203
def postcommitstatus(repo, pats, opts):
    """Return the working directory status limited to pats/opts."""
    wctx = repo[None]
    matcher = scmutil.match(wctx, pats, opts)
    return repo.status(match=matcher)
3205 3206
3206 3207
def revert(ui, repo, ctx, *pats, **opts):
    """Restore files matched by pats/opts to their state in ``ctx``.

    Classifies every matched file by how the working copy differs from
    both the target revision and the working directory parent, maps each
    file onto exactly one revert action (revert/add/remove/drop/forget/
    undelete/noop/unknown) via a dispatch table, and hands the resulting
    action lists to _performrevert().  Matched subrepositories are
    reverted recursively.
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target
    # revision. The mapping is in the form:
    #   <abs path in repo> -> <exactly specified by matcher?>
    names = {}
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get(b'interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress "file not found" noise for paths already known
                # (in `names`, a subrepo, or a directory prefix of a match)
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + b'/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn(b"%s: %s\n" % (uipathfn(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.exact(abs)

            # Find status of all files in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(
                node1=node, match=m, unknown=True, ignored=True, clean=True
            )
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded  # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == b'r':
                dsremoved.add(src)
                names[src] = True

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != b'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == b'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of files>, <message>) tuple
        actions = {
            b'revert': ([], _(b'reverting %s\n')),
            b'add': ([], _(b'adding %s\n')),
            b'remove': ([], _(b'removing %s\n')),
            b'drop': ([], _(b'removing %s\n')),
            b'forget': ([], _(b'forgetting %s\n')),
            b'undelete': ([], _(b'undeleting %s\n')),
            b'noop': (None, _(b'no changes needed to %s\n')),
            b'unknown': (None, _(b'file not managed: %s\n')),
        }

        # "constants" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set, to avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3  # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1  # check if the existing file differs from target
        discard = 0  # never do backup
        if opts.get(b'no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions[b'remove']
        if not opts.get(b'no_backup'):
            backupanddel = actions[b'drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup
            ## Sets whose members will result in file changes on disk
            # Modified compared to target, no local change
            (modified, actions[b'revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions[b'revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions[b'revert'], dsmodifiedbackup),
            # Added since target
            (added, actions[b'remove'], discard),
            # Added in working directory
            (dsadded, actions[b'forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions[b'drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions[b'add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions[b'add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions[b'undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions[b'undelete'], check),
            ## the following sets do not result in any file changes
            # File with no modification
            (clean, actions[b'noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions[b'unknown'], discard),
        )

        for abs, exact in sorted(names.items()):
            # target file to be touched on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                if dobackup:
                    # If in interactive mode, don't automatically create
                    # .orig files (issue4793)
                    if dobackup == backupinteractive:
                        tobackup.add(abs)
                    elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
                        absbakname = scmutil.backuppath(ui, repo, abs)
                        bakname = os.path.relpath(
                            absbakname, start=repo.root
                        )
                        ui.note(
                            _(b'saving current version of %s as %s\n')
                            % (uipathfn(abs), uipathfn(bakname))
                        )
                        if not opts.get(b'dry_run'):
                            if interactive:
                                util.copyfile(target, absbakname)
                            else:
                                util.rename(target, absbakname)
                if opts.get(b'dry_run'):
                    if ui.verbose or not exact:
                        ui.status(msg % uipathfn(abs))
                elif exact:
                    ui.warn(msg % uipathfn(abs))
                break

        if not opts.get(b'dry_run'):
            needdata = (b'revert', b'add', b'undelete')
            oplist = [actions[name][0] for name in needdata]
            prefetch = scmutil.prefetchfiles
            matchfiles = scmutil.matchfiles(
                repo, [f for sublist in oplist for f in sublist]
            )
            prefetch(
                repo,
                [(ctx.rev(), matchfiles)],
            )
            match = scmutil.match(repo[None], pats)
            _performrevert(
                repo,
                ctx,
                names,
                uipathfn,
                actions,
                match,
                interactive,
                tobackup,
            )

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(
                        ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
                    )
                except KeyError:
                    raise error.Abort(
                        b"subrepository '%s' does not exist in %s!"
                        % (sub, short(ctx.node()))
                    )
3517 3518
3518 3519
def _performrevert(
    repo,
    ctx,
    names,
    uipathfn,
    actions,
    match,
    interactive=False,
    tobackup=None,
):
    """Apply all the revert actions computed by revert().

    This is an independent function so extensions can plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this
    function.
    """
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()
    excluded_files = []

    # write the target revision's version of f into the working copy
    def checkout(f):
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    # delete f from disk (best-effort) and mark it removed in the dirstate
    def doremove(f):
        try:
            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            pass
        repo.dirstate.remove(f)

    # print the per-action status message unless the file was named
    # exactly on the command line (and we're not verbose)
    def prntstatusmsg(action, f):
        exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % uipathfn(f))

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions[b'forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'forget', f)
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'forget', f)
            repo.dirstate.drop(f)
    for f in actions[b'remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'remove', f)
            doremove(f)
    for f in actions[b'drop'][0]:
        audit_path(f)
        prntstatusmsg(b'drop', f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(
            repo.ui,
            whitespace=True,
            section=b'commands',
            configprefix=b'revert.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        # 'apply' replays target->wdir hunks; when reverting to the parent
        # the user either selects hunks to 'keep' or to 'discard'.
        operation = b'apply'
        if node == parent:
            if repo.ui.configbool(
                b'experimental', b'revert.interactive.select-to-keep'
            ):
                operation = b'keep'
            else:
                operation = b'discard'

        if operation == b'apply':
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        else:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(
                repo.ui, originalchunks, match, operation=operation
            )
            if operation == b'discard':
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)

        # FIXME: when doing an interactive revert of a copy, there's no way of
        # performing a partial revert of the added file, the only option is
        # "remove added file <name> (Yn)?", so we don't need to worry about the
        # alsorestore value. Ideally we'd be able to partially revert
        # copied/renamed files.
        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
            chunks, originalchunks
        )
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
            prntstatusmsg(b'revert', f)
        files = set()
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.backuppath(repo.ui, repo, abs)
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
                if abs not in files:
                    files.add(abs)
                    if operation == b'keep':
                        checkout(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        for f in actions[b'revert'][0]:
            prntstatusmsg(b'revert', f)
            checkout(f)
            if normal:
                normal(f)

    for f in actions[b'add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            prntstatusmsg(b'add', f)
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions[b'undelete'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
            )
            if choice == 0:
                prntstatusmsg(b'undelete', f)
                checkout(f)
                normal(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'undelete', f)
            checkout(f)
            normal(f)

    # restore copy/rename metadata for files we just (re)created
    copied = copies.pathcopies(repo[parent], ctx)

    for f in (
        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
    ):
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3717 3718
3718 3719
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return a tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
3736 3737
3737 3738
def checkunfinished(repo, commit=False, skipmerge=False):
    """Abort if an unfinished multistep operation (like graft) is found.

    It's probably good to call this right before bailifchanged().
    """
    # Scan non-clearable states first so that things like rebase take
    # precedence over update.
    for unfinished in statemod._unfinishedstates:
        skippable = (
            unfinished._clearable
            or (commit and unfinished._allowcommit)
            or unfinished._reportonly
        )
        if skippable:
            continue
        if unfinished.isunfinished(repo):
            raise error.StateError(unfinished.msg(), hint=unfinished.hint())

    # Second pass: the clearable states.
    for unfinished in statemod._unfinishedstates:
        skippable = (
            not unfinished._clearable
            or (commit and unfinished._allowcommit)
            or (unfinished._opname == b'merge' and skipmerge)
            or unfinished._reportonly
        )
        if skippable:
            continue
        if unfinished.isunfinished(repo):
            raise error.StateError(unfinished.msg(), hint=unfinished.hint())
3765 3766
3766 3767
def clearunfinished(repo):
    """Check for unfinished operations (as above), and clear the ones
    that are clearable.

    Raises StateError if a non-clearable operation is in progress.
    """
    # First pass: refuse to clear anything while a non-clearable
    # operation is unfinished.
    for state in statemod._unfinishedstates:
        if state._reportonly:
            continue
        if not state._clearable and state.isunfinished(repo):
            raise error.StateError(state.msg(), hint=state.hint())

    # Second pass: remove the state files of clearable operations.
    for s in statemod._unfinishedstates:
        # BUGFIX: this previously tested `state._reportonly`, accidentally
        # reusing the loop variable left over from the pass above (so every
        # candidate was judged by the *last* state's flag). Each state must
        # be checked for its own report-only flag.
        if s._opname == b'merge' or s._reportonly:
            continue
        if s._clearable and s.isunfinished(repo):
            util.unlink(repo.vfs.join(s._fname))
3782 3783
3783 3784
def getunfinishedstate(repo):
    """Return the statecheck object of an in-progress operation, or None."""
    return next(
        (s for s in statemod._unfinishedstates if s.isunfinished(repo)),
        None,
    )
3791 3792
3792 3793
def howtocontinue(repo):
    """Check for an unfinished operation and return the command to finish
    it.

    statemod._unfinishedstates is scanned for an unfinished operation, and
    the message telling how to finish it is generated if the operation
    supports being continued.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    """
    contmsg = _(b"continue: %s")
    continuable = (s for s in statemod._unfinishedstates if s._continueflag)
    for unfinished in continuable:
        if unfinished.isunfinished(repo):
            return contmsg % unfinished.continuemsg(), True
    # No multistep operation: a dirty working directory still suggests
    # 'hg commit' as the next step (no warning in that case).
    wctx = repo[None]
    if wctx.dirty(missing=True, merge=False, branch=False):
        return contmsg % _(b"hg commit"), False
    return None, None
3813 3814
3814 3815
def checkafterresolved(repo):
    """Inform the user about the next action after completing hg resolve.

    When an unfinished operation supports the continue flag, the message
    is reported through repo.ui.warn; otherwise through repo.ui.note.
    """
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter(b"%s\n" % msg)
3829 3830
3830 3831
def wrongtooltocontinue(repo, task):
    """Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), no hint is offered.
    """
    msg, warning = howtocontinue(repo)
    # Only surface the continue message as a hint when it would have been
    # reported as a warning (i.e. a real unfinished operation exists).
    hint = msg if warning else None
    raise error.StateError(_(b'no %s in progress') % task, hint=hint)
3845 3846
3846 3847
def abortgraft(ui, repo, graftstate):
    """abort the interrupted graft and roll back to the state before the
    interrupted graft

    Returns 0 on success; raises StateError/Abort when there is no graft
    to abort or the state file is too old to carry the needed data.
    """
    if not graftstate.exists():
        raise error.StateError(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # an old graft state which does not have all the data required to
        # abort the graft
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which graft operation was started
    if len(newnodes) > 0:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo[b'.']
    # whether to strip or not
    cleanup = False

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # checking that none of the newnodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            cleanup = False

        # checking that no new nodes are created on top of grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            cleanup = False

    if cleanup:
        # update away from the grafted nodes before stripping them
        with repo.wlock(), repo.lock():
            mergemod.clean_update(startctx)
            # stripping the new nodes created
            strippoints = [
                c.node() for c in repo.set(b"roots(%ld)", newnodes)
            ]
            repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        mergemod.clean_update(startctx)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
3909 3910
3910 3911
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """read the graft state file and return a dict of the data stored in it"""
    try:
        return graftstate.read()
    except error.CorruptedState:
        # Old-format state file: a bare list of node hashes, one per line.
        return {b'nodes': repo.vfs.read(b'graftstate').splitlines()}
3919 3920
3920 3921
def hgabortgraft(ui, repo):
    """abort logic for aborting graft using 'hg abort'"""
    with repo.wlock():
        state = statemod.cmdstate(repo, b'graftstate')
        return abortgraft(ui, repo, state)
@@ -1,3113 +1,3113 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 dagop,
32 32 encoding,
33 33 error,
34 34 fileset,
35 35 match as matchmod,
36 36 mergestate as mergestatemod,
37 37 metadata,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 scmutil,
45 45 sparse,
46 46 subrepo,
47 47 subrepoutil,
48 48 util,
49 49 )
50 50 from .utils import (
51 51 dateutil,
52 52 stringutil,
53 53 )
54 54
55 55 propertycache = util.propertycache
56 56
57 57
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        """Short hex form of this context's node."""
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of the same concrete type are equal iff their revisions
        # match; objects without a _rev never compare equal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        """True if file 'key' exists in this context's manifest."""
        return key in self._manifest

    def __getitem__(self, key):
        """Return the filectx for path 'key'."""
        return self.filectx(key)

    def __iter__(self):
        """Iterate over the file names in the manifest."""
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                # diff reported the file as clean
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # subrepo state (path -> (source, revision, kind)) for this context
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        """Return the recorded revision of the subrepo at 'subpath'."""
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        """Full hex string of this context's node."""
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        """Phase of this context as a byte string (e.g. b'public')."""
        return phases.phasenames[self.phase()]

    def mutable(self):
        """True unless the changeset is in the public phase."""
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        """Build a matcher from fileset expression 'expr'."""
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """First parent context."""
        return self._parents[0]

    def p2(self):
        """Second parent context, or the null context if there is none."""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for 'path'.

        Tries, in order: an already-loaded manifest, the manifest delta,
        and finally the full manifest from the manifest log. Raises
        ManifestLookupError if the path is not in the manifest.
        """
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        """Return the file node for 'path' in this context."""
        return self._fileinfo(path)[0]

    def flags(self, path):
        """Return the flags for 'path', or b'' if the file is absent."""
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from filelog metadata
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        """Copies (dest -> source) relative to the first parent."""
        return self._copies[0]

    def p2copies(self):
        """Copies (dest -> source) relative to the second parent."""
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        """Return a null (empty) subrepo for 'path'."""
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a file matcher for this context from patterns/includes."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # default to diffing against the first parent
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        """Directories present in this context's manifest."""
        return self._manifest.dirs()

    def hasdir(self, dir):
        """True if 'dir' is a directory in this context's manifest."""
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

        (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

        s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # to ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        """True if committing this context would produce an empty commit."""
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
501 501
502 502
503 503 class changectx(basectx):
504 504 """A changecontext object makes access to data related to a particular
505 505 changeset convenient. It represents a read-only context already present in
506 506 the repo."""
507 507
508 508 def __init__(self, repo, rev, node, maybe_filtered=True):
509 509 super(changectx, self).__init__(repo)
510 510 self._rev = rev
511 511 self._node = node
512 512 # When maybe_filtered is True, the revision might be affected by
513 513 # changelog filtering and operation through the filtered changelog must be used.
514 514 #
515 515 # When maybe_filtered is False, the revision has already been checked
516 516 # against filtering and is not filtered. Operation through the
517 517 # unfiltered changelog might be used in some case.
518 518 self._maybe_filtered = maybe_filtered
519 519
520 520 def __hash__(self):
521 521 try:
522 522 return hash(self._rev)
523 523 except AttributeError:
524 524 return id(self)
525 525
526 526 def __nonzero__(self):
527 527 return self._rev != nullrev
528 528
529 529 __bool__ = __nonzero__
530 530
531 531 @propertycache
532 532 def _changeset(self):
533 533 if self._maybe_filtered:
534 534 repo = self._repo
535 535 else:
536 536 repo = self._repo.unfiltered()
537 537 return repo.changelog.changelogrevision(self.rev())
538 538
539 539 @propertycache
540 540 def _manifest(self):
541 541 return self._manifestctx.read()
542 542
543 543 @property
544 544 def _manifestctx(self):
545 545 return self._repo.manifestlog[self._changeset.manifest]
546 546
547 547 @propertycache
548 548 def _manifestdelta(self):
549 549 return self._manifestctx.readdelta()
550 550
551 551 @propertycache
552 552 def _parents(self):
553 553 repo = self._repo
554 554 if self._maybe_filtered:
555 555 cl = repo.changelog
556 556 else:
557 557 cl = repo.unfiltered().changelog
558 558
559 559 p1, p2 = cl.parentrevs(self._rev)
560 560 if p2 == nullrev:
561 561 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
562 562 return [
563 563 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
564 564 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
565 565 ]
566 566
567 567 def changeset(self):
568 568 c = self._changeset
569 569 return (
570 570 c.manifest,
571 571 c.user,
572 572 c.date,
573 573 c.files,
574 574 c.description,
575 575 c.extra,
576 576 )
577 577
578 578 def manifestnode(self):
579 579 return self._changeset.manifest
580 580
581 581 def user(self):
582 582 return self._changeset.user
583 583
584 584 def date(self):
585 585 return self._changeset.date
586 586
587 587 def files(self):
588 588 return self._changeset.files
589 589
590 590 def filesmodified(self):
591 591 modified = set(self.files())
592 592 modified.difference_update(self.filesadded())
593 593 modified.difference_update(self.filesremoved())
594 594 return sorted(modified)
595 595
596 596 def filesadded(self):
597 597 filesadded = self._changeset.filesadded
598 598 compute_on_none = True
599 599 if self._repo.filecopiesmode == b'changeset-sidedata':
600 600 compute_on_none = False
601 601 else:
602 602 source = self._repo.ui.config(b'experimental', b'copies.read-from')
603 603 if source == b'changeset-only':
604 604 compute_on_none = False
605 605 elif source != b'compatibility':
606 606 # filelog mode, ignore any changelog content
607 607 filesadded = None
608 608 if filesadded is None:
609 609 if compute_on_none:
610 610 filesadded = metadata.computechangesetfilesadded(self)
611 611 else:
612 612 filesadded = []
613 613 return filesadded
614 614
615 615 def filesremoved(self):
616 616 filesremoved = self._changeset.filesremoved
617 617 compute_on_none = True
618 618 if self._repo.filecopiesmode == b'changeset-sidedata':
619 619 compute_on_none = False
620 620 else:
621 621 source = self._repo.ui.config(b'experimental', b'copies.read-from')
622 622 if source == b'changeset-only':
623 623 compute_on_none = False
624 624 elif source != b'compatibility':
625 625 # filelog mode, ignore any changelog content
626 626 filesremoved = None
627 627 if filesremoved is None:
628 628 if compute_on_none:
629 629 filesremoved = metadata.computechangesetfilesremoved(self)
630 630 else:
631 631 filesremoved = []
632 632 return filesremoved
633 633
634 634 @propertycache
635 635 def _copies(self):
636 636 p1copies = self._changeset.p1copies
637 637 p2copies = self._changeset.p2copies
638 638 compute_on_none = True
639 639 if self._repo.filecopiesmode == b'changeset-sidedata':
640 640 compute_on_none = False
641 641 else:
642 642 source = self._repo.ui.config(b'experimental', b'copies.read-from')
643 643 # If config says to get copy metadata only from changeset, then
644 644 # return that, defaulting to {} if there was no copy metadata. In
645 645 # compatibility mode, we return copy data from the changeset if it
646 646 # was recorded there, and otherwise we fall back to getting it from
647 647 # the filelogs (below).
648 648 #
649 649 # If we are in compatiblity mode and there is not data in the
650 650 # changeset), we get the copy metadata from the filelogs.
651 651 #
652 652 # otherwise, when config said to read only from filelog, we get the
653 653 # copy metadata from the filelogs.
654 654 if source == b'changeset-only':
655 655 compute_on_none = False
656 656 elif source != b'compatibility':
657 657 # filelog mode, ignore any changelog content
658 658 p1copies = p2copies = None
659 659 if p1copies is None:
660 660 if compute_on_none:
661 661 p1copies, p2copies = super(changectx, self)._copies
662 662 else:
663 663 if p1copies is None:
664 664 p1copies = {}
665 665 if p2copies is None:
666 666 p2copies = {}
667 667 return p1copies, p2copies
668 668
669 669 def description(self):
670 670 return self._changeset.description
671 671
672 672 def branch(self):
673 673 return encoding.tolocal(self._changeset.extra.get(b"branch"))
674 674
675 675 def closesbranch(self):
676 676 return b'close' in self._changeset.extra
677 677
678 678 def extra(self):
679 679 """Return a dict of extra information."""
680 680 return self._changeset.extra
681 681
682 682 def tags(self):
683 683 """Return a list of byte tag names"""
684 684 return self._repo.nodetags(self._node)
685 685
686 686 def bookmarks(self):
687 687 """Return a list of byte bookmark names."""
688 688 return self._repo.nodebookmarks(self._node)
689 689
690 690 def phase(self):
691 691 return self._repo._phasecache.phase(self._repo, self._rev)
692 692
693 693 def hidden(self):
694 694 return self._rev in repoview.filterrevs(self._repo, b'visible')
695 695
696 696 def isinmemory(self):
697 697 return False
698 698
699 699 def children(self):
700 700 """return list of changectx contexts for each child changeset.
701 701
702 702 This returns only the immediate child changesets. Use descendants() to
703 703 recursively walk children.
704 704 """
705 705 c = self._repo.changelog.children(self._node)
706 706 return [self._repo[x] for x in c]
707 707
708 708 def ancestors(self):
709 709 for a in self._repo.changelog.ancestors([self._rev]):
710 710 yield self._repo[a]
711 711
712 712 def descendants(self):
713 713 """Recursively yield all children of the changeset.
714 714
715 715 For just the immediate children, use children()
716 716 """
717 717 for d in self._repo.changelog.descendants([self._rev]):
718 718 yield self._repo[d]
719 719
720 720 def filectx(self, path, fileid=None, filelog=None):
721 721 """get a file context from this changeset"""
722 722 if fileid is None:
723 723 fileid = self.filenode(path)
724 724 return filectx(
725 725 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
726 726 )
727 727
728 728 def ancestor(self, c2, warn=False):
729 729 """return the "best" ancestor context of self and c2
730 730
731 731 If there are multiple candidates, it will show a message and check
732 732 merge.preferancestor configuration before falling back to the
733 733 revlog ancestor."""
734 734 # deal with workingctxs
735 735 n2 = c2._node
736 736 if n2 is None:
737 737 n2 = c2._parents[0]._node
738 738 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
739 739 if not cahs:
740 740 anc = nullid
741 741 elif len(cahs) == 1:
742 742 anc = cahs[0]
743 743 else:
744 744 # experimental config: merge.preferancestor
745 745 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
746 746 try:
747 747 ctx = scmutil.revsymbol(self._repo, r)
748 748 except error.RepoLookupError:
749 749 continue
750 750 anc = ctx.node()
751 751 if anc in cahs:
752 752 break
753 753 else:
754 754 anc = self._repo.changelog.ancestor(self._node, n2)
755 755 if warn:
756 756 self._repo.ui.status(
757 757 (
758 758 _(b"note: using %s as ancestor of %s and %s\n")
759 759 % (short(anc), short(self._node), short(n2))
760 760 )
761 761 + b''.join(
762 762 _(
763 763 b" alternatively, use --config "
764 764 b"merge.preferancestor=%s\n"
765 765 )
766 766 % short(n)
767 767 for n in sorted(cahs)
768 768 if n != anc
769 769 )
770 770 )
771 771 return self._repo[anc]
772 772
773 773 def isancestorof(self, other):
774 774 """True if this changeset is an ancestor of other"""
775 775 return self._repo.changelog.isancestorrev(self._rev, other._rev)
776 776
777 777 def walk(self, match):
778 778 '''Generates matching file names.'''
779 779
780 780 # Wrap match.bad method to have message with nodeid
781 781 def bad(fn, msg):
782 782 # The manifest doesn't know about subrepos, so don't complain about
783 783 # paths into valid subrepos.
784 784 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
785 785 return
786 786 match.bad(fn, _(b'no such file in rev %s') % self)
787 787
788 788 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
789 789 return self._manifest.walk(m)
790 790
    def matches(self, match):
        """Return the matching file names; thin alias for walk()."""
        return self.walk(match)
793 793
794 794
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory,
    """

    @propertycache
    def _filelog(self):
        # per-file revision log backing this file
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            # _fileid may be a file revision number or a node; resolve it
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # file revision number within the filelog
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # stay hashable even when the file node cannot be resolved
            return id(self)

    def __eq__(self, other):
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        # raw linkrev as recorded in the filelog; it may point at a
        # changeset that is not an ancestor of the context this filectx
        # was created from (see _adjustlinkrev/introrev)
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        # copy source path, or None/False-y when the file was not copied
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # subclasses set this to True when their cmp() must be preferred over
    # the revlog-backed comparison below
    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size() or self.flags() == b'l':
                # size() matches: need to compare content
                # issue6456: Always compare symlinks because size can represent
                # encrypted string for EXT-4 encryption(fscrypt).
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True

    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # the linkrev is itself the source revision: nothing to adjust
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # walked past the floor without finding an introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision"""
        if self.linkrev() >= changelogrev:
            # cheap path: the raw linkrev already proves it
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # no second parent: return a filectx for the null file revision
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )

    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            # only walk the first parent at each step
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # pop the pending ancestor with the highest (linkrev, filenode)
            # key so ancestors are yielded newest-first
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
1232 1232
1233 1233
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be provided
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        # pre-populate the lazy (propertycache) attributes that the caller
        # already knows, so they are not recomputed later
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        # revlog data without filters/flag processing applied
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content: either hide it as empty or abort,
            # depending on the censor.policy configuration
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            # linkrev points back at this changeset: the rename happened here
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # a parent carries the same file revision: no copy to
                    # report for this changeset
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1358 1358
1359 1359
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # a committable context has no revision/node until committed
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            # fall back to the default branch name
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        # rendered as "<parent>+" to signal uncommitted changes
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # lazily computed repository status when none was supplied
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic date
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases matching the committed-context naming convention
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # union of all parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # never commit in a phase lower than the configured new-commit
        # phase or any parent's phase
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            # a manifest was already built: trust it for flags
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # the (uncommitted) parents first, then their changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1524 1524
1525 1525
1526 1526 class workingctx(committablectx):
1527 1527 """A workingctx object makes access to data related to
1528 1528 the current working directory convenient.
1529 1529 date - any valid date string or (unixtime, offset), or None.
1530 1530 user - username string, or None.
1531 1531 extra - a dictionary of extra values, or None.
1532 1532 changes - a list of file lists as returned by localrepo.status()
1533 1533 or None to use the repository status.
1534 1534 """
1535 1535
    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        # read the branch from the dirstate unless the caller supplied one
        # via extra
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1548 1548
1549 1549 def __iter__(self):
1550 1550 d = self._repo.dirstate
1551 1551 for f in d:
1552 1552 if d[f] != b'r':
1553 1553 yield f
1554 1554
    def __contains__(self, key):
        # tracked unless the dirstate marks it unknown ('?') or removed ('r')
        return self._repo.dirstate[key] not in b"?r"
1557 1557
    def hex(self):
        # the working directory is identified by a fixed pseudo hash
        return wdirhex
1560 1560
    @propertycache
    def _parents(self):
        """Working directory parents as a list of one or two changectx."""
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # second parent unset: single-parent working directory
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1574 1574
    def setparents(self, p1node, p2node=nullid):
        """Set the working directory parents and fix up copy records."""
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == nullid:
                # dropping the second parent: forget copy records whose
                # file and source are both absent from the first parent
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1591 1591
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1596 1596
1597 1597 def _buildflagfunc(self):
1598 1598 # Create a fallback function for getting file flags when the
1599 1599 # filesystem doesn't support them
1600 1600
1601 1601 copiesget = self._repo.dirstate.copies().get
1602 1602 parents = self.parents()
1603 1603 if len(parents) < 2:
1604 1604 # when we have one parent, it's easy: copy from parent
1605 1605 man = parents[0].manifest()
1606 1606
1607 1607 def func(f):
1608 1608 f = copiesget(f, f)
1609 1609 return man.flags(f)
1610 1610
1611 1611 else:
1612 1612 # merges are tricky: we try to reconstruct the unstored
1613 1613 # result from the merge (issue1802)
1614 1614 p1, p2 = parents
1615 1615 pa = p1.ancestor(p2)
1616 1616 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1617 1617
1618 1618 def func(f):
1619 1619 f = copiesget(f, f) # may be wrong for merges with copies
1620 1620 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1621 1621 if fl1 == fl2:
1622 1622 return fl1
1623 1623 if fl1 == fla:
1624 1624 return fl2
1625 1625 if fl2 == fla:
1626 1626 return fl1
1627 1627 return b'' # punt for conflicts
1628 1628
1629 1629 return func
1630 1630
    @propertycache
    def _flagfunc(self):
        # the dirstate picks between filesystem-provided flags and the
        # fallback built by _buildflagfunc
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1634 1634
    def flags(self, path):
        """Return the flags string for path in the working directory."""
        try:
            return self._flagfunc(path)
        except OSError:
            # file vanished or is unreadable: report no flags
            return b''
1640 1640
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1646 1646
    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified"""
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        # NOTE: returns the first truthy term, not necessarily a bool;
        # callers only use the truth value
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )
1662 1662
1663 1663 def add(self, list, prefix=b""):
1664 1664 with self._repo.wlock():
1665 1665 ui, ds = self._repo.ui, self._repo.dirstate
1666 1666 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1667 1667 rejected = []
1668 1668 lstat = self._repo.wvfs.lstat
1669 1669 for f in list:
1670 1670 # ds.pathto() returns an absolute file when this is invoked from
1671 1671 # the keyword extension. That gets flagged as non-portable on
1672 1672 # Windows, since it contains the drive letter and colon.
1673 1673 scmutil.checkportable(ui, os.path.join(prefix, f))
1674 1674 try:
1675 1675 st = lstat(f)
1676 1676 except OSError:
1677 1677 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1678 1678 rejected.append(f)
1679 1679 continue
1680 1680 limit = ui.configbytes(b'ui', b'large-file-limit')
1681 1681 if limit != 0 and st.st_size > limit:
1682 1682 ui.warn(
1683 1683 _(
1684 1684 b"%s: up to %d MB of RAM may be required "
1685 1685 b"to manage this file\n"
1686 1686 b"(use 'hg revert %s' to cancel the "
1687 1687 b"pending addition)\n"
1688 1688 )
1689 1689 % (f, 3 * st.st_size // 1000000, uipath(f))
1690 1690 )
1691 1691 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1692 1692 ui.warn(
1693 1693 _(
1694 1694 b"%s not added: only files and symlinks "
1695 1695 b"supported currently\n"
1696 1696 )
1697 1697 % uipath(f)
1698 1698 )
1699 1699 rejected.append(f)
1700 1700 elif ds[f] in b'amn':
1701 1701 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1702 1702 elif ds[f] == b'r':
1703 1703 ds.normallookup(f)
1704 1704 else:
1705 1705 ds.add(f)
1706 1706 return rejected
1707 1707
1708 1708 def forget(self, files, prefix=b""):
1709 1709 with self._repo.wlock():
1710 1710 ds = self._repo.dirstate
1711 1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1712 1712 rejected = []
1713 1713 for f in files:
1714 1714 if f not in ds:
1715 1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1716 1716 rejected.append(f)
1717 1717 elif ds[f] != b'a':
1718 1718 ds.remove(f)
1719 1719 else:
1720 1720 ds.drop(f)
1721 1721 return rejected
1722 1722
    def copy(self, source, dest):
        """Record in the dirstate that *dest* is a copy of *source*.

        The destination must exist in the working directory and be a
        regular file or symlink; otherwise a warning is printed and the
        dirstate is left untouched.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # only a missing destination is tolerated; re-raise anything else
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                # start tracking dest if it is unknown or marked removed
                if ds[dest] in b'?':
                    ds.add(dest)
                elif ds[dest] in b'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)
1746 1746
1747 1747 def match(
1748 1748 self,
1749 1749 pats=None,
1750 1750 include=None,
1751 1751 exclude=None,
1752 1752 default=b'glob',
1753 1753 listsubrepos=False,
1754 1754 badfn=None,
1755 1755 cwd=None,
1756 1756 ):
1757 1757 r = self._repo
1758 1758 if not cwd:
1759 1759 cwd = r.getcwd()
1760 1760
1761 1761 # Only a case insensitive filesystem needs magic to translate user input
1762 1762 # to actual case in the filesystem.
1763 1763 icasefs = not util.fscasesensitive(r.root)
1764 1764 return matchmod.match(
1765 1765 r.root,
1766 1766 cwd,
1767 1767 pats,
1768 1768 include,
1769 1769 exclude,
1770 1770 default,
1771 1771 auditor=r.auditor,
1772 1772 ctx=self,
1773 1773 listsubrepos=listsubrepos,
1774 1774 badfn=badfn,
1775 1775 icasefs=icasefs,
1776 1776 )
1777 1777
1778 1778 def _filtersuspectsymlink(self, files):
1779 1779 if not files or self._repo.dirstate._checklink:
1780 1780 return files
1781 1781
1782 1782 # Symlink placeholders may get non-symlink-like contents
1783 1783 # via user error or dereferencing by NFS or Samba servers,
1784 1784 # so we filter out any placeholders that don't look like a
1785 1785 # symlink
1786 1786 sane = []
1787 1787 for f in files:
1788 1788 if self.flags(f) == b'l':
1789 1789 d = self[f].data()
1790 1790 if (
1791 1791 d == b''
1792 1792 or len(d) >= 1024
1793 1793 or b'\n' in d
1794 1794 or stringutil.binary(d)
1795 1795 ):
1796 1796 self._repo.ui.debug(
1797 1797 b'ignoring suspect symlink placeholder "%s"\n' % f
1798 1798 )
1799 1799 continue
1800 1800 sane.append(f)
1801 1801 return sane
1802 1802
    def _checklookup(self, files):
        """Recheck *files* whose dirstate entry was ambiguous.

        Returns a ``(modified, deleted, fixup)`` triple: files that truly
        differ from p1 (content or flags), files that became inaccessible,
        and files that are actually clean and only need their dirstate
        entry refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1835 1835
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # remember the dirstate identity so a concurrent rewrite
                # between now and acquiring the lock can be detected
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # best-effort: if the lock can't be taken, skip the update
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1876 1876
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Files the dirstate could not classify are rechecked via
        ``_checklookup``; entries found to be clean are written back to the
        dirstate through ``_poststatusfixup``.
        '''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1909 1909
    @propertycache
    def _copies(self):
        """Compute ``(p1copies, p2copies)`` from dirstate copy records.

        Only destinations that were added/modified and fall inside the
        narrowspec are reported; each source is attributed to whichever
        parent manifest contains it (p1 takes precedence).
        """
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
1927 1927
1928 1928 @propertycache
1929 1929 def _manifest(self):
1930 1930 """generate a manifest corresponding to the values in self._status
1931 1931
1932 1932 This reuse the file nodeid from parent, but we use special node
1933 1933 identifiers for added and modified files. This is used by manifests
1934 1934 merge to see that files are different and by update logic to avoid
1935 1935 deleting newly added files.
1936 1936 """
1937 1937 return self._buildstatusmanifest(self._status)
1938 1938
1939 1939 def _buildstatusmanifest(self, status):
1940 1940 """Builds a manifest that includes the given status results."""
1941 1941 parents = self.parents()
1942 1942
1943 1943 man = parents[0].manifest().copy()
1944 1944
1945 1945 ff = self._flagfunc
1946 1946 for i, l in (
1947 1947 (addednodeid, status.added),
1948 1948 (modifiednodeid, status.modified),
1949 1949 ):
1950 1950 for f in l:
1951 1951 man[f] = i
1952 1952 try:
1953 1953 man.setflag(f, ff(f))
1954 1954 except OSError:
1955 1955 pass
1956 1956
1957 1957 for f in status.deleted + status.removed:
1958 1958 if f in man:
1959 1959 del man[f]
1960 1960
1961 1961 return man
1962 1962
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # not comparing against the parent: fall back to the generic
            # manifest-based status computation
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1983 1983
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.

        Note: the matcher is mutated in place (its ``bad`` callback is
        replaced) and then returned.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match
2006 2006
2007 2007 def walk(self, match):
2008 2008 '''Generates matching file names.'''
2009 2009 return sorted(
2010 2010 self._repo.dirstate.walk(
2011 2011 self._repo.narrowmatch(match),
2012 2012 subrepos=sorted(self.substate),
2013 2013 unknown=True,
2014 2014 ignored=False,
2015 2015 )
2016 2016 )
2017 2017
2018 2018 def matches(self, match):
2019 2019 match = self._repo.narrowmatch(match)
2020 2020 ds = self._repo.dirstate
2021 2021 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2022 2022
    def markcommitted(self, node):
        """Update the dirstate to reflect that this context was committed
        as *node*: clean entries, drop removals, and move the parent."""
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
2038 2038
2039 2039 def mergestate(self, clean=False):
2040 2040 if clean:
2041 2041 return mergestatemod.mergestate.clean(self._repo)
2042 2042 return mergestatemod.mergestate.read(self._repo)
2043 2043
2044 2044
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        # no changeid/filenode yet: the file lives in an uncommitted context
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, filenode in p1)`` if this file was
        copied/renamed, or None otherwise."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file does not exist (nullid sentinel)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        # an uncommitted file has no committed descendants
        return []
2102 2102
2103 2103
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to the working directory context
        return workingctx(self._repo)

    def data(self):
        """Return the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return ``(mtime, tzoffset)``; fall back to the changectx date
        when the file is missing from disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
2196 2196
2197 2197
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay applies its cached writes on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=nullid):
        # p1 must stay the wrapped context; only p2 may be changed
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return the file data for *path*, preferring cached writes."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # p1's manifest plus this overlay's cached adds/modifications/removals
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # flags come straight from the write-back cache
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # dirty, present here, and present in the parent -> modified
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # dirty, present here, but absent from the parent -> added
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # dirty, absent here, but present in the parent -> removed
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record *path* as copied from *origin* in the cache."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Cache a write of *data* to *path* after auditing for conflicts."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        # symlink flag wins over exec flag, mirroring manifest flag encoding
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx reusing *precursor*'s metadata (for amend)."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        return path in self._cache

    def clean(self):
        # reset the write-back cache and any in-memory merge state
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        # the cached manifest no longer reflects the cache contents
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2613 2613
2614 2614
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's data differs from *fctx*'s."""
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        # all reads/writes are delegated to the owning overlayworkingctx
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit: no filesystem access happens in-memory
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no working directory, so there is nothing conflicting to clear
        pass
2669 2669
2670 2670
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        s = self._status
        if clean:
            # everything tracked but untouched by this commit reads as clean
            cleanfiles = [f for f in self._manifest if f not in self._changedset]
        else:
            cleanfiles = []
        return scmutil.status(
            list(filter(match, s.modified)),
            list(filter(match, s.added)),
            list(filter(match, s.removed)),
            [],
            [],
            [],
            cleanfiles,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        s = self._status
        return set(s.modified) | set(s.added) | set(s.removed)
2713 2713
2714 2714
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: a hit skips the callable entirely; a miss computes and stores.
        try:
            return cache[path]
        except KeyError:
            result = func(repo, memctx, path)
            cache[path] = result
            return result

    return getfilectx
2730 2730
2731 2731
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        # pull everything we need straight off the source file context
        fctx = ctx[path]
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=fctx.copysource(),
        )

    return getfilectx
2753 2753
2754 2754
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # the patch removed this file; signal that with None
            return None
        # mode is only unpackable when the file exists, hence after the check
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2777 2777
2778 2778
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        """See the class docstring for the meaning of each argument.

        ``filectxfn`` may be a callable, a ``patch.filestore``, or any
        context-like mapping; non-callables are adapted below.
        """
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # not committed yet, so no revision number or node assigned
        self._rev = None
        self._node = None
        # a None entry in `parents` stands for the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # normalize filectxfn to a callable(repo, memctx, path)
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # sentinel nodeids mark entries whose real hash is not known yet
        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not in any parent: a brand new file
                added.append(f)
            elif self[f]:
                # filectxfn produced content for a tracked file
                modified.append(f)
            else:
                # filectxfn returned nothing: the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # hide the null second parent, matching changectx.parents() behavior
        if self._parents[1].node() == nullid:
            return [self._parents[0]]
        return self._parents
2909 2909
2910 2910
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # encode the file type as a manifest flag: link beats exec
        if islink:
            flags = b'l'
        elif isexec:
            flags = b'x'
        else:
            flags = b''
        self._flags = flags
        self._copysource = copysource

    def copysource(self):
        """Return the copy source path recorded at construction, or None."""
        return self._copysource

    def cmp(self, fctx):
        """True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2961 2961
2962 2962
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        """``originalctx`` supplies the manifest (and, by default, the
        description and parents); every other field may be overridden."""
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # not committed yet, so no revision number or node assigned
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad to exactly two parents, looking up the null revision by rev
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): `p1`/`p2` are changectx objects, so comparing them
        # against the `nullid` node looks vacuously true and the guard never
        # short-circuits; for a null parent both sides of the manifestnode
        # comparison should be nullid anyway — confirm the intended guard.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node of the reused manifest."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are untouched; delegate to the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not tracked by either parent: newly added
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3071 3071
3072 3072
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks, which `cmp` must not, so the fast path is
        # only taken when neither side is a symlink and both are disk-backed.
        if (
            b'l' not in self.flags()
            and b'l' not in fctx.flags()
            and isinstance(fctx, workingfilectx)
            and self._repo
        ):
            # filecmp returns True when contents match, the opposite of
            # our cmp convention (True means "different").
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        # arbitrary files carry no manifest flags
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        # flags (symlink/exec) are not representable for arbitrary paths
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now