refactor: prefer checks against nullrev over nullid...
Joerg Sonnenberger
r47601:728d89f6 default
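For context: ``nullid`` is the 20-byte all-zero hash that names the null revision, while ``nullrev`` is the same revision's integer number, -1. Comparing the integer sidesteps any node-hash lookup, which is what this change does at each call site. A minimal sketch of the pattern being swapped, assuming ``ctx`` is any changectx (the helper names are hypothetical, not part of the change):

    from mercurial.node import nullid, nullrev

    def is_null_old(ctx):
        # old style: resolve the node and compare 20-byte hashes
        return ctx.node() == nullid

    def is_null_new(ctx):
        # new style: compare small integers; no node lookup needed
        return ctx.rev() == nullrev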
@@ -1,803 +1,803 @@
1 1 # extdiff.py - external diff program support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to allow external programs to compare revisions
9 9
10 10 The extdiff Mercurial extension allows you to use external programs
11 11 to compare revisions, or revision with working directory. The external
12 12 diff programs are called with a configurable set of options and two
13 13 non-option arguments: paths to directories containing snapshots of
14 14 files to compare.
15 15
16 16 If there is more than one file being compared and the "child" revision
17 17 is the working directory, any modifications made in the external diff
18 18 program will be copied back to the working directory from the temporary
19 19 directory.
20 20
21 21 The extdiff extension also allows you to configure new diff commands, so
22 22 you do not always need to type :hg:`extdiff -p kdiff3`. ::
23 23
24 24 [extdiff]
25 25 # add new command that runs GNU diff(1) in 'context diff' mode
26 26 cdiff = gdiff -Nprc5
27 27 ## or the old way:
28 28 #cmd.cdiff = gdiff
29 29 #opts.cdiff = -Nprc5
30 30
31 31 # add new command called meld, runs meld (no need to name twice). If
32 32 # the meld executable is not available, the meld tool in [merge-tools]
33 33 # will be used, if available
34 34 meld =
35 35
36 36 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
37 37 # (see http://www.vim.org/scripts/script.php?script_id=102). Non-English
38 38 # users should put "let g:DirDiffDynamicDiffText = 1" in
39 39 # your .vimrc
40 40 vimdiff = gvim -f "+next" \\
41 41 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
42 42
43 43 Tool arguments can include variables that are expanded at runtime::
44 44
45 45 $parent1, $plabel1 - filename, descriptive label of first parent
46 46 $child, $clabel - filename, descriptive label of child revision
47 47 $parent2, $plabel2 - filename, descriptive label of second parent
48 48 $root - repository root
49 49 $parent is an alias for $parent1.
50 50
51 51 The extdiff extension will look in your [diff-tools] and [merge-tools]
52 52 sections for diff tool arguments when none are specified in [extdiff].
53 53
54 54 ::
55 55
56 56 [extdiff]
57 57 kdiff3 =
58 58
59 59 [diff-tools]
60 60 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
61 61
62 62 If a program has a graphical interface, it is worth telling
63 63 Mercurial about it. Doing so prevents the program from being mistakenly
64 64 used in a terminal-only environment (such as an SSH terminal session),
65 65 and makes :hg:`extdiff --per-file` open multiple file diffs at once
66 66 instead of one by one (if you still want to open file diffs one by one,
67 67 you can use the --confirm option).
68 68
69 69 Declaring that a tool has a graphical interface can be done with the
70 70 ``gui`` flag next to where ``diffargs`` are specified:
71 71
72 72 ::
73 73
74 74 [diff-tools]
75 75 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
76 76 kdiff3.gui = true
77 77
78 78 You can use -I/-X and a list of file or directory names as with the
79 79 normal :hg:`diff` command. The extdiff extension makes snapshots of
80 80 only the needed files, so running the external diff program will
81 81 actually be pretty fast (at least faster than comparing the entire tree).
82 82 '''
83 83
84 84 from __future__ import absolute_import
85 85
86 86 import os
87 87 import re
88 88 import shutil
89 89 import stat
90 90 import subprocess
91 91
92 92 from mercurial.i18n import _
93 93 from mercurial.node import (
94 nullid,
94 nullrev,
95 95 short,
96 96 )
97 97 from mercurial import (
98 98 archival,
99 99 cmdutil,
100 100 encoding,
101 101 error,
102 102 filemerge,
103 103 formatter,
104 104 pycompat,
105 105 registrar,
106 106 scmutil,
107 107 util,
108 108 )
109 109 from mercurial.utils import (
110 110 procutil,
111 111 stringutil,
112 112 )
113 113
114 114 cmdtable = {}
115 115 command = registrar.command(cmdtable)
116 116
117 117 configtable = {}
118 118 configitem = registrar.configitem(configtable)
119 119
120 120 configitem(
121 121 b'extdiff',
122 122 br'opts\..*',
123 123 default=b'',
124 124 generic=True,
125 125 )
126 126
127 127 configitem(
128 128 b'extdiff',
129 129 br'gui\..*',
130 130 generic=True,
131 131 )
132 132
133 133 configitem(
134 134 b'diff-tools',
135 135 br'.*\.diffargs$',
136 136 default=None,
137 137 generic=True,
138 138 )
139 139
140 140 configitem(
141 141 b'diff-tools',
142 142 br'.*\.gui$',
143 143 generic=True,
144 144 )
145 145
146 146 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
147 147 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
148 148 # be specifying the version(s) of Mercurial they are tested with, or
149 149 # leave the attribute unspecified.
150 150 testedwith = b'ships-with-hg-core'
151 151
152 152
153 153 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
154 154 """snapshot files as of some revision
155 155 if not using snapshot, -I/-X does not work and recursive diff
156 156 in tools like kdiff3 and meld displays too many files."""
157 157 dirname = os.path.basename(repo.root)
158 158 if dirname == b"":
159 159 dirname = b"root"
160 160 if node is not None:
161 161 dirname = b'%s.%s' % (dirname, short(node))
162 162 base = os.path.join(tmproot, dirname)
163 163 os.mkdir(base)
164 164 fnsandstat = []
165 165
166 166 if node is not None:
167 167 ui.note(
168 168 _(b'making snapshot of %d files from rev %s\n')
169 169 % (len(files), short(node))
170 170 )
171 171 else:
172 172 ui.note(
173 173 _(b'making snapshot of %d files from working directory\n')
174 174 % (len(files))
175 175 )
176 176
177 177 if files:
178 178 repo.ui.setconfig(b"ui", b"archivemeta", False)
179 179
180 180 archival.archive(
181 181 repo,
182 182 base,
183 183 node,
184 184 b'files',
185 185 match=scmutil.matchfiles(repo, files),
186 186 subrepos=listsubrepos,
187 187 )
188 188
189 189 for fn in sorted(files):
190 190 wfn = util.pconvert(fn)
191 191 ui.note(b' %s\n' % wfn)
192 192
193 193 if node is None:
194 194 dest = os.path.join(base, wfn)
195 195
196 196 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
197 197 return dirname, fnsandstat
198 198
199 199
200 200 def formatcmdline(
201 201 cmdline,
202 202 repo_root,
203 203 do3way,
204 204 parent1,
205 205 plabel1,
206 206 parent2,
207 207 plabel2,
208 208 child,
209 209 clabel,
210 210 ):
211 211 # Function to quote file/dir names in the argument string.
212 212 # When not operating in 3-way mode, an empty string is
213 213 # returned for parent2
214 214 replace = {
215 215 b'parent': parent1,
216 216 b'parent1': parent1,
217 217 b'parent2': parent2,
218 218 b'plabel1': plabel1,
219 219 b'plabel2': plabel2,
220 220 b'child': child,
221 221 b'clabel': clabel,
222 222 b'root': repo_root,
223 223 }
224 224
225 225 def quote(match):
226 226 pre = match.group(2)
227 227 key = match.group(3)
228 228 if not do3way and key == b'parent2':
229 229 return pre
230 230 return pre + procutil.shellquote(replace[key])
231 231
232 232 # Match parent2 first, so 'parent1?' will match both parent1 and parent
233 233 regex = (
234 234 br'''(['"]?)([^\s'"$]*)'''
235 235 br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1'
236 236 )
237 237 if not do3way and not re.search(regex, cmdline):
238 238 cmdline += b' $parent1 $child'
239 239 return re.sub(regex, quote, cmdline)
240 240
241 241
242 242 def _systembackground(cmd, environ=None, cwd=None):
243 243 """like 'procutil.system', but returns the Popen object directly
244 244 so we don't have to wait on it.
245 245 """
246 246 env = procutil.shellenviron(environ)
247 247 proc = subprocess.Popen(
248 248 procutil.tonativestr(cmd),
249 249 shell=True,
250 250 close_fds=procutil.closefds,
251 251 env=procutil.tonativeenv(env),
252 252 cwd=pycompat.rapply(procutil.tonativestr, cwd),
253 253 )
254 254 return proc
255 255
256 256
257 257 def _runperfilediff(
258 258 cmdline,
259 259 repo_root,
260 260 ui,
261 261 guitool,
262 262 do3way,
263 263 confirm,
264 264 commonfiles,
265 265 tmproot,
266 266 dir1a,
267 267 dir1b,
268 268 dir2,
269 269 rev1a,
270 270 rev1b,
271 271 rev2,
272 272 ):
273 273 # Note that we need to sort the list of files because it was
274 274 # built in an "unstable" way and it's annoying to get files in a
275 275 # random order, especially when "confirm" mode is enabled.
276 276 waitprocs = []
277 277 totalfiles = len(commonfiles)
278 278 for idx, commonfile in enumerate(sorted(commonfiles)):
279 279 path1a = os.path.join(dir1a, commonfile)
280 280 label1a = commonfile + rev1a
281 281 if not os.path.isfile(path1a):
282 282 path1a = pycompat.osdevnull
283 283
284 284 path1b = b''
285 285 label1b = b''
286 286 if do3way:
287 287 path1b = os.path.join(dir1b, commonfile)
288 288 label1b = commonfile + rev1b
289 289 if not os.path.isfile(path1b):
290 290 path1b = pycompat.osdevnull
291 291
292 292 path2 = os.path.join(dir2, commonfile)
293 293 label2 = commonfile + rev2
294 294
295 295 if confirm:
296 296 # Prompt before showing this diff
297 297 difffiles = _(b'diff %s (%d of %d)') % (
298 298 commonfile,
299 299 idx + 1,
300 300 totalfiles,
301 301 )
302 302 responses = _(
303 303 b'[Yns?]'
304 304 b'$$ &Yes, show diff'
305 305 b'$$ &No, skip this diff'
306 306 b'$$ &Skip remaining diffs'
307 307 b'$$ &? (display help)'
308 308 )
309 309 r = ui.promptchoice(b'%s %s' % (difffiles, responses))
310 310 if r == 3: # ?
311 311 while r == 3:
312 312 for c, t in ui.extractchoices(responses)[1]:
313 313 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
314 314 r = ui.promptchoice(b'%s %s' % (difffiles, responses))
315 315 if r == 0: # yes
316 316 pass
317 317 elif r == 1: # no
318 318 continue
319 319 elif r == 2: # skip
320 320 break
321 321
322 322 curcmdline = formatcmdline(
323 323 cmdline,
324 324 repo_root,
325 325 do3way=do3way,
326 326 parent1=path1a,
327 327 plabel1=label1a,
328 328 parent2=path1b,
329 329 plabel2=label1b,
330 330 child=path2,
331 331 clabel=label2,
332 332 )
333 333
334 334 if confirm or not guitool:
335 335 # Run the comparison program and wait for it to exit
336 336 # before we show the next file.
337 337 # This is because either we need to wait for confirmation
338 338 # from the user between each invocation, or because, as far
339 339 # as we know, the tool doesn't have a GUI, in which case
340 340 # we can't run multiple CLI programs at the same time.
341 341 ui.debug(
342 342 b'running %r in %s\n' % (pycompat.bytestr(curcmdline), tmproot)
343 343 )
344 344 ui.system(curcmdline, cwd=tmproot, blockedtag=b'extdiff')
345 345 else:
346 346 # Run the comparison program but don't wait, as we're
347 347 # going to rapid-fire each file diff and then wait on
348 348 # the whole group.
349 349 ui.debug(
350 350 b'running %r in %s (backgrounded)\n'
351 351 % (pycompat.bytestr(curcmdline), tmproot)
352 352 )
353 353 proc = _systembackground(curcmdline, cwd=tmproot)
354 354 waitprocs.append(proc)
355 355
356 356 if waitprocs:
357 357 with ui.timeblockedsection(b'extdiff'):
358 358 for proc in waitprocs:
359 359 proc.wait()
360 360
361 361
362 362 def diffpatch(ui, repo, node1, node2, tmproot, matcher, cmdline):
363 363 template = b'hg-%h.patch'
364 364 # write patches to temporary files
365 365 with formatter.nullformatter(ui, b'extdiff', {}) as fm:
366 366 cmdutil.export(
367 367 repo,
368 368 [repo[node1].rev(), repo[node2].rev()],
369 369 fm,
370 370 fntemplate=repo.vfs.reljoin(tmproot, template),
371 371 match=matcher,
372 372 )
373 373 label1 = cmdutil.makefilename(repo[node1], template)
374 374 label2 = cmdutil.makefilename(repo[node2], template)
375 375 file1 = repo.vfs.reljoin(tmproot, label1)
376 376 file2 = repo.vfs.reljoin(tmproot, label2)
377 377 cmdline = formatcmdline(
378 378 cmdline,
379 379 repo.root,
380 380 # no 3way while comparing patches
381 381 do3way=False,
382 382 parent1=file1,
383 383 plabel1=label1,
384 384 # while comparing patches, there is no second parent
385 385 parent2=None,
386 386 plabel2=None,
387 387 child=file2,
388 388 clabel=label2,
389 389 )
390 390 ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
391 391 ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
392 392 return 1
393 393
394 394
395 395 def diffrevs(
396 396 ui,
397 397 repo,
398 398 ctx1a,
399 399 ctx1b,
400 400 ctx2,
401 401 matcher,
402 402 tmproot,
403 403 cmdline,
404 404 do3way,
405 405 guitool,
406 406 opts,
407 407 ):
408 408
409 409 subrepos = opts.get(b'subrepos')
410 410
411 411 # calculate list of files changed between both revs
412 412 st = ctx1a.status(ctx2, matcher, listsubrepos=subrepos)
413 413 mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed)
414 414 if do3way:
415 415 stb = ctx1b.status(ctx2, matcher, listsubrepos=subrepos)
416 416 mod_b, add_b, rem_b = (
417 417 set(stb.modified),
418 418 set(stb.added),
419 419 set(stb.removed),
420 420 )
421 421 else:
422 422 mod_b, add_b, rem_b = set(), set(), set()
423 423 modadd = mod_a | add_a | mod_b | add_b
424 424 common = modadd | rem_a | rem_b
425 425 if not common:
426 426 return 0
427 427
428 428 # Always make a copy of ctx1a (and ctx1b, if applicable)
429 429 # dir1a should contain files which are:
430 430 # * modified or removed from ctx1a to ctx2
431 431 # * modified or added from ctx1b to ctx2
432 432 # (except files added from ctx1a to ctx2, as they were not present in
433 433 # ctx1a)
434 434 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
435 435 dir1a = snapshot(ui, repo, dir1a_files, ctx1a.node(), tmproot, subrepos)[0]
436 436 rev1a = b'' if ctx1a.rev() is None else b'@%d' % ctx1a.rev()
437 437 if do3way:
438 438 # file calculation criteria same as dir1a
439 439 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
440 440 dir1b = snapshot(
441 441 ui, repo, dir1b_files, ctx1b.node(), tmproot, subrepos
442 442 )[0]
443 443 rev1b = b'@%d' % ctx1b.rev()
444 444 else:
445 445 dir1b = None
446 446 rev1b = b''
447 447
448 448 fnsandstat = []
449 449
450 450 # If ctx2 is not the wc or there is >1 change, copy it
451 451 dir2root = b''
452 452 rev2 = b''
453 453 if ctx2.node() is not None:
454 454 dir2 = snapshot(ui, repo, modadd, ctx2.node(), tmproot, subrepos)[0]
455 455 rev2 = b'@%d' % ctx2.rev()
456 456 elif len(common) > 1:
457 457 # we only actually need to get the files to copy back to
458 458 # the working dir in this case (because the other cases
459 459 # are: diffing 2 revisions or single file -- in which case
460 460 # the file is already directly passed to the diff tool).
461 461 dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot, subrepos)
462 462 else:
463 463 # This lets the diff tool open the changed file directly
464 464 dir2 = b''
465 465 dir2root = repo.root
466 466
467 467 label1a = rev1a
468 468 label1b = rev1b
469 469 label2 = rev2
470 470
471 471 if not opts.get(b'per_file'):
472 472 # If only one change, diff the files instead of the directories
473 473 # Handle bogus modifies correctly by checking if the files exist
474 474 if len(common) == 1:
475 475 common_file = util.localpath(common.pop())
476 476 dir1a = os.path.join(tmproot, dir1a, common_file)
477 477 label1a = common_file + rev1a
478 478 if not os.path.isfile(dir1a):
479 479 dir1a = pycompat.osdevnull
480 480 if do3way:
481 481 dir1b = os.path.join(tmproot, dir1b, common_file)
482 482 label1b = common_file + rev1b
483 483 if not os.path.isfile(dir1b):
484 484 dir1b = pycompat.osdevnull
485 485 dir2 = os.path.join(dir2root, dir2, common_file)
486 486 label2 = common_file + rev2
487 487
488 488 # Run the external tool on the 2 temp directories or the patches
489 489 cmdline = formatcmdline(
490 490 cmdline,
491 491 repo.root,
492 492 do3way=do3way,
493 493 parent1=dir1a,
494 494 plabel1=label1a,
495 495 parent2=dir1b,
496 496 plabel2=label1b,
497 497 child=dir2,
498 498 clabel=label2,
499 499 )
500 500 ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
501 501 ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
502 502 else:
503 503 # Run the external tool once for each pair of files
504 504 _runperfilediff(
505 505 cmdline,
506 506 repo.root,
507 507 ui,
508 508 guitool=guitool,
509 509 do3way=do3way,
510 510 confirm=opts.get(b'confirm'),
511 511 commonfiles=common,
512 512 tmproot=tmproot,
513 513 dir1a=os.path.join(tmproot, dir1a),
514 514 dir1b=os.path.join(tmproot, dir1b) if do3way else None,
515 515 dir2=os.path.join(dir2root, dir2),
516 516 rev1a=rev1a,
517 517 rev1b=rev1b,
518 518 rev2=rev2,
519 519 )
520 520
521 521 for copy_fn, working_fn, st in fnsandstat:
522 522 cpstat = os.lstat(copy_fn)
523 523 # Some tools copy the file and attributes, so mtime may not detect
524 524 # all changes. A size check will detect more cases, but not all.
525 525 # The only certain way to detect every case is to diff all files,
526 526 # which could be expensive.
527 527 # copyfile() carries over the permissions, so the mode check could
528 528 # be in an 'elif' branch, but it stays in the 'or' chain to catch
529 529 # the case where the file has changed without affecting mtime or size.
530 530 if (
531 531 cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
532 532 or cpstat.st_size != st.st_size
533 533 or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)
534 534 ):
535 535 ui.debug(
536 536 b'file changed while diffing. '
537 537 b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn)
538 538 )
539 539 util.copyfile(copy_fn, working_fn)
540 540
541 541 return 1
542 542
543 543
544 544 def dodiff(ui, repo, cmdline, pats, opts, guitool=False):
545 545 """Do the actual diff:
546 546
547 547 - copy to a temp structure if diffing 2 internal revisions
548 548 - copy to a temp structure if diffing working revision with
549 549 another one and more than 1 file is changed
550 550 - just invoke the diff for a single file in the working dir
551 551 """
552 552
553 553 cmdutil.check_at_most_one_arg(opts, b'rev', b'change')
554 554 revs = opts.get(b'rev')
555 555 from_rev = opts.get(b'from')
556 556 to_rev = opts.get(b'to')
557 557 change = opts.get(b'change')
558 558 do3way = b'$parent2' in cmdline
559 559
560 560 if change:
561 561 ctx2 = scmutil.revsingle(repo, change, None)
562 562 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
563 563 elif from_rev or to_rev:
564 564 repo = scmutil.unhidehashlikerevs(
565 565 repo, [from_rev] + [to_rev], b'nowarn'
566 566 )
567 567 ctx1a = scmutil.revsingle(repo, from_rev, None)
568 ctx1b = repo[nullid]
568 ctx1b = repo[nullrev]
569 569 ctx2 = scmutil.revsingle(repo, to_rev, None)
570 570 else:
571 571 ctx1a, ctx2 = scmutil.revpair(repo, revs)
572 572 if not revs:
573 573 ctx1b = repo[None].p2()
574 574 else:
575 ctx1b = repo[nullid]
575 ctx1b = repo[nullrev]
576 576
577 577 # Disable 3-way merge if there is only one parent
578 578 if do3way:
579 if ctx1b.node() == nullid:
579 if ctx1b.rev() == nullrev:
580 580 do3way = False
581 581
582 582 matcher = scmutil.match(ctx2, pats, opts)
583 583
584 584 if opts.get(b'patch'):
585 585 if opts.get(b'subrepos'):
586 586 raise error.Abort(_(b'--patch cannot be used with --subrepos'))
587 587 if opts.get(b'per_file'):
588 588 raise error.Abort(_(b'--patch cannot be used with --per-file'))
589 589 if ctx2.node() is None:
590 590 raise error.Abort(_(b'--patch requires two revisions'))
591 591
592 592 tmproot = pycompat.mkdtemp(prefix=b'extdiff.')
593 593 try:
594 594 if opts.get(b'patch'):
595 595 return diffpatch(
596 596 ui, repo, ctx1a.node(), ctx2.node(), tmproot, matcher, cmdline
597 597 )
598 598
599 599 return diffrevs(
600 600 ui,
601 601 repo,
602 602 ctx1a,
603 603 ctx1b,
604 604 ctx2,
605 605 matcher,
606 606 tmproot,
607 607 cmdline,
608 608 do3way,
609 609 guitool,
610 610 opts,
611 611 )
612 612
613 613 finally:
614 614 ui.note(_(b'cleaning up temp directory\n'))
615 615 shutil.rmtree(tmproot)
616 616
617 617
618 618 extdiffopts = (
619 619 [
620 620 (
621 621 b'o',
622 622 b'option',
623 623 [],
624 624 _(b'pass option to comparison program'),
625 625 _(b'OPT'),
626 626 ),
627 627 (b'r', b'rev', [], _(b'revision (DEPRECATED)'), _(b'REV')),
628 628 (b'', b'from', b'', _(b'revision to diff from'), _(b'REV1')),
629 629 (b'', b'to', b'', _(b'revision to diff to'), _(b'REV2')),
630 630 (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')),
631 631 (
632 632 b'',
633 633 b'per-file',
634 634 False,
635 635 _(b'compare each file instead of revision snapshots'),
636 636 ),
637 637 (
638 638 b'',
639 639 b'confirm',
640 640 False,
641 641 _(b'prompt user before each external program invocation'),
642 642 ),
643 643 (b'', b'patch', None, _(b'compare patches for two revisions')),
644 644 ]
645 645 + cmdutil.walkopts
646 646 + cmdutil.subrepoopts
647 647 )
648 648
649 649
650 650 @command(
651 651 b'extdiff',
652 652 [
653 653 (b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),
654 654 ]
655 655 + extdiffopts,
656 656 _(b'hg extdiff [OPT]... [FILE]...'),
657 657 helpcategory=command.CATEGORY_FILE_CONTENTS,
658 658 inferrepo=True,
659 659 )
660 660 def extdiff(ui, repo, *pats, **opts):
661 661 """use external program to diff repository (or selected files)
662 662
663 663 Show differences between revisions for the specified files, using
664 664 an external program. The default program used is diff, with
665 665 default options "-Npru".
666 666
667 667 To select a different program, use the -p/--program option. The
668 668 program will be passed the names of two directories to compare,
669 669 unless the --per-file option is specified (see below). To pass
670 670 additional options to the program, use -o/--option. These will be
671 671 passed before the names of the directories or files to compare.
672 672
673 673 The --from, --to, and --change options work the same way they do for
674 674 :hg:`diff`.
675 675
676 676 The --per-file option runs the external program repeatedly on each
677 677 file to diff, instead of once on two directories. By default,
678 678 this happens one by one, where the next file diff is open in the
679 679 external program only once the previous external program (for the
680 680 previous file diff) has exited. If the external program has a
681 681 graphical interface, it can open all the file diffs at once instead
682 682 of one by one. See :hg:`help -e extdiff` for information about how
683 683 to tell Mercurial that a given program has a graphical interface.
684 684
685 685 The --confirm option will prompt the user before each invocation of
686 686 the external program. It is ignored if --per-file isn't specified.
687 687 """
688 688 opts = pycompat.byteskwargs(opts)
689 689 program = opts.get(b'program')
690 690 option = opts.get(b'option')
691 691 if not program:
692 692 program = b'diff'
693 693 option = option or [b'-Npru']
694 694 cmdline = b' '.join(map(procutil.shellquote, [program] + option))
695 695 return dodiff(ui, repo, cmdline, pats, opts)
696 696
697 697
698 698 class savedcmd(object):
699 699 """use external program to diff repository (or selected files)
700 700
701 701 Show differences between revisions for the specified files, using
702 702 the following program::
703 703
704 704 %(path)s
705 705
706 706 When two revision arguments are given, then changes are shown
707 707 between those revisions. If only one revision is specified then
708 708 that revision is compared to the working directory, and, when no
709 709 revisions are specified, the working directory files are compared
710 710 to its parent.
711 711 """
712 712
713 713 def __init__(self, path, cmdline, isgui):
714 714 # We can't pass non-ASCII through docstrings (and path is
715 715 # in an unknown encoding anyway), but avoid double separators on
716 716 # Windows
717 717 docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
718 718 self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))}
719 719 self._cmdline = cmdline
720 720 self._isgui = isgui
721 721
722 722 def __call__(self, ui, repo, *pats, **opts):
723 723 opts = pycompat.byteskwargs(opts)
724 724 options = b' '.join(map(procutil.shellquote, opts[b'option']))
725 725 if options:
726 726 options = b' ' + options
727 727 return dodiff(
728 728 ui, repo, self._cmdline + options, pats, opts, guitool=self._isgui
729 729 )
730 730
731 731
732 732 def _gettooldetails(ui, cmd, path):
733 733 """
734 734 returns the following things for a
735 735 ```
736 736 [extdiff]
737 737 <cmd> = <path>
738 738 ```
739 739 entry:
740 740
741 741 cmd: command/tool name
742 742 path: path to the tool
743 743 cmdline: the command which should be run
744 744 isgui: whether the tool uses a GUI or not
745 745
746 746 Reads all external-tool related config, whether it comes from the
747 747 extdiff section or the diff-tools/merge-tools sections, and whether
748 748 it is specified in the old format or the latest format.
749 749 """
750 750 path = util.expandpath(path)
751 751 if cmd.startswith(b'cmd.'):
752 752 cmd = cmd[4:]
753 753 if not path:
754 754 path = procutil.findexe(cmd)
755 755 if path is None:
756 756 path = filemerge.findexternaltool(ui, cmd) or cmd
757 757 diffopts = ui.config(b'extdiff', b'opts.' + cmd)
758 758 cmdline = procutil.shellquote(path)
759 759 if diffopts:
760 760 cmdline += b' ' + diffopts
761 761 isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
762 762 else:
763 763 if path:
764 764 # case "cmd = path opts"
765 765 cmdline = path
766 766 diffopts = len(pycompat.shlexsplit(cmdline)) > 1
767 767 else:
768 768 # case "cmd ="
769 769 path = procutil.findexe(cmd)
770 770 if path is None:
771 771 path = filemerge.findexternaltool(ui, cmd) or cmd
772 772 cmdline = procutil.shellquote(path)
773 773 diffopts = False
774 774 isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
775 775 # look for diff arguments in [diff-tools] then [merge-tools]
776 776 if not diffopts:
777 777 key = cmd + b'.diffargs'
778 778 for section in (b'diff-tools', b'merge-tools'):
779 779 args = ui.config(section, key)
780 780 if args:
781 781 cmdline += b' ' + args
782 782 if isgui is None:
783 783 isgui = ui.configbool(section, cmd + b'.gui') or False
784 784 break
785 785 return cmd, path, cmdline, isgui
786 786
787 787
788 788 def uisetup(ui):
789 789 for cmd, path in ui.configitems(b'extdiff'):
790 790 if cmd.startswith(b'opts.') or cmd.startswith(b'gui.'):
791 791 continue
792 792 cmd, path, cmdline, isgui = _gettooldetails(ui, cmd, path)
793 793 command(
794 794 cmd,
795 795 extdiffopts[:],
796 796 _(b'hg %s [OPTION]... [FILE]...') % cmd,
797 797 helpcategory=command.CATEGORY_FILE_CONTENTS,
798 798 inferrepo=True,
799 799 )(savedcmd(path, cmdline, isgui))
800 800
801 801
802 802 # tell hggettext to extract docstrings from these functions:
803 803 i18nfunctions = [savedcmd]
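Before moving on to the next file, a note on formatcmdline() above: on a POSIX shell its $-variable expansion should behave roughly as in this hedged sketch (the paths and labels are hypothetical; the result shown is what the substitution is expected to produce, not captured output):

    from hgext.extdiff import formatcmdline

    cmdline = formatcmdline(
        b"kdiff3 --L1 '$plabel1' --L2 '$clabel' $parent $child",
        b'/repo',
        do3way=False,
        parent1=b'/tmp/snap1/a', plabel1=b'a@42',
        parent2=None, plabel2=None,
        child=b'/tmp/snap2/a', clabel=b'a@43',
    )
    # expected:
    # b"kdiff3 --L1 'a@42' --L2 'a@43' '/tmp/snap1/a' '/tmp/snap2/a'"

Note that quoted variables such as '$plabel1' are re-quoted through procutil.shellquote, so labels containing spaces survive the shell.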
@@ -1,197 +1,197 @@
1 1 # split.py - split a changeset into smaller ones
2 2 #
3 3 # Copyright 2015 Laurent Charignon <lcharignon@fb.com>
4 4 # Copyright 2017 Facebook, Inc.
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """command to split a changeset into smaller ones (EXPERIMENTAL)"""
9 9
10 10 from __future__ import absolute_import
11 11
12 12 from mercurial.i18n import _
13 13
14 14 from mercurial.node import (
15 nullid,
15 nullrev,
16 16 short,
17 17 )
18 18
19 19 from mercurial import (
20 20 bookmarks,
21 21 cmdutil,
22 22 commands,
23 23 error,
24 24 hg,
25 25 pycompat,
26 26 registrar,
27 27 revsetlang,
28 28 rewriteutil,
29 29 scmutil,
30 30 util,
31 31 )
32 32
33 33 # allow people to use split without explicitly enabling rebase extension
34 34 from . import rebase
35 35
36 36 cmdtable = {}
37 37 command = registrar.command(cmdtable)
38 38
39 39 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
40 40 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
41 41 # be specifying the version(s) of Mercurial they are tested with, or
42 42 # leave the attribute unspecified.
43 43 testedwith = b'ships-with-hg-core'
44 44
45 45
46 46 @command(
47 47 b'split',
48 48 [
49 49 (b'r', b'rev', b'', _(b"revision to split"), _(b'REV')),
50 50 (b'', b'rebase', True, _(b'rebase descendants after split')),
51 51 ]
52 52 + cmdutil.commitopts2,
53 53 _(b'hg split [--no-rebase] [[-r] REV]'),
54 54 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
55 55 helpbasic=True,
56 56 )
57 57 def split(ui, repo, *revs, **opts):
58 58 """split a changeset into smaller ones
59 59
60 60 Repeatedly prompt for changes and a commit message for new changesets
61 61 until there is nothing left in the original changeset.
62 62
63 63 If --rev was not given, split the working directory parent.
64 64
65 65 By default, rebase connected non-obsoleted descendants onto the new
66 66 changeset. Use --no-rebase to avoid the rebase.
67 67 """
68 68 opts = pycompat.byteskwargs(opts)
69 69 revlist = []
70 70 if opts.get(b'rev'):
71 71 revlist.append(opts.get(b'rev'))
72 72 revlist.extend(revs)
73 73 with repo.wlock(), repo.lock():
74 74 tr = repo.transaction(b'split')
75 75 # If the rebase somehow runs into conflicts, make sure
76 76 # we close the transaction so the user can continue it.
77 77 with util.acceptintervention(tr):
78 78 revs = scmutil.revrange(repo, revlist or [b'.'])
79 79 if len(revs) > 1:
80 80 raise error.InputError(_(b'cannot split multiple revisions'))
81 81
82 82 rev = revs.first()
83 ctx = repo[rev]
84 # Handle nullid specially here (instead of leaving for precheck()
83 # Handle nullrev specially here (instead of leaving for precheck()
85 84 # below) so we get a nicer message and error code.
86 if rev is None or ctx.node() == nullid:
85 if rev is None or rev == nullrev:
87 86 ui.status(_(b'nothing to split\n'))
88 87 return 1
88 ctx = repo[rev]
89 89 if ctx.node() is None:
90 90 raise error.InputError(_(b'cannot split working directory'))
91 91
92 92 if opts.get(b'rebase'):
93 93 # Skip obsoleted descendants and their descendants so the rebase
94 94 # won't cause conflicts for sure.
95 95 descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
96 96 torebase = list(
97 97 repo.revs(
98 98 b'%ld - (%ld & obsolete())::', descendants, descendants
99 99 )
100 100 )
101 101 else:
102 102 torebase = []
103 103 rewriteutil.precheck(repo, [rev] + torebase, b'split')
104 104
105 105 if len(ctx.parents()) > 1:
106 106 raise error.InputError(_(b'cannot split a merge changeset'))
107 107
108 108 cmdutil.bailifchanged(repo)
109 109
110 110 # Deactivate bookmark temporarily so it won't get moved
111 111 # unintentionally
112 112 bname = repo._activebookmark
113 113 if bname and repo._bookmarks[bname] != ctx.node():
114 114 bookmarks.deactivate(repo)
115 115
116 116 wnode = repo[b'.'].node()
117 117 top = None
118 118 try:
119 119 top = dosplit(ui, repo, tr, ctx, opts)
120 120 finally:
121 121 # top is None: split failed, need update --clean recovery.
122 122 # wnode == ctx.node(): wnode split, no need to update.
123 123 if top is None or wnode != ctx.node():
124 124 hg.clean(repo, wnode, show_stats=False)
125 125 if bname:
126 126 bookmarks.activate(repo, bname)
127 127 if torebase and top:
128 128 dorebase(ui, repo, torebase, top)
129 129
130 130
131 131 def dosplit(ui, repo, tr, ctx, opts):
132 132 committed = [] # [ctx]
133 133
134 134 # Set working parent to ctx.p1(), and keep working copy as ctx's content
135 135 if ctx.node() != repo.dirstate.p1():
136 136 hg.clean(repo, ctx.node(), show_stats=False)
137 137 with repo.dirstate.parentchange():
138 138 scmutil.movedirstate(repo, ctx.p1())
139 139
140 140 # Any modified, added, removed, deleted result means split is incomplete
141 141 def incomplete(repo):
142 142 st = repo.status()
143 143 return any((st.modified, st.added, st.removed, st.deleted))
144 144
145 145 # Main split loop
146 146 while incomplete(repo):
147 147 if committed:
148 148 header = _(
149 149 b'HG: Splitting %s. So far it has been split into:\n'
150 150 ) % short(ctx.node())
151 151 # We don't want color codes in the commit message template, so
152 152 # disable the label() template function while we render it.
153 153 with ui.configoverride(
154 154 {(b'templatealias', b'label(l,x)'): b"x"}, b'split'
155 155 ):
156 156 for c in committed:
157 157 summary = cmdutil.format_changeset_summary(ui, c, b'split')
158 158 header += _(b'HG: - %s\n') % summary
159 159 header += _(
160 160 b'HG: Write commit message for the next split changeset.\n'
161 161 )
162 162 else:
163 163 header = _(
164 164 b'HG: Splitting %s. Write commit message for the '
165 165 b'first split changeset.\n'
166 166 ) % short(ctx.node())
167 167 opts.update(
168 168 {
169 169 b'edit': True,
170 170 b'interactive': True,
171 171 b'message': header + ctx.description(),
172 172 }
173 173 )
174 174 commands.commit(ui, repo, **pycompat.strkwargs(opts))
175 175 newctx = repo[b'.']
176 176 committed.append(newctx)
177 177
178 178 if not committed:
179 179 raise error.InputError(_(b'cannot split an empty revision'))
180 180
181 181 scmutil.cleanupnodes(
182 182 repo,
183 183 {ctx.node(): [c.node() for c in committed]},
184 184 operation=b'split',
185 185 fixphase=True,
186 186 )
187 187
188 188 return committed[-1]
189 189
190 190
191 191 def dorebase(ui, repo, src, destctx):
192 192 rebase.rebase(
193 193 ui,
194 194 repo,
195 195 rev=[revsetlang.formatspec(b'%ld', src)],
196 196 dest=revsetlang.formatspec(b'%d', destctx.rev()),
197 197 )
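The split.py hunk relies on the two sentinels naming the same changeset: repo[nullrev] and repo[nullid] both resolve to the null changectx, so checking the plain integer rev before constructing a context is equivalent to the old node comparison, and the reordering keeps an empty revset (rev is None) from ever reaching repo[rev]. A hedged sketch, assuming ``repo`` is any localrepository:

    from mercurial.node import nullid, nullrev

    # both sentinels address the same (null) changeset
    assert repo[nullrev].rev() == nullrev
    assert repo[nullrev].node() == nullid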
@@ -1,3113 +1,3113 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 dagop,
32 32 encoding,
33 33 error,
34 34 fileset,
35 35 match as matchmod,
36 36 mergestate as mergestatemod,
37 37 metadata,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 scmutil,
45 45 sparse,
46 46 subrepo,
47 47 subrepoutil,
48 48 util,
49 49 )
50 50 from .utils import (
51 51 dateutil,
52 52 stringutil,
53 53 )
54 54
55 55 propertycache = util.propertycache
56 56
57 57
58 58 class basectx(object):
59 59 """A basectx object represents the common logic for its children:
60 60 changectx: read-only context that is already present in the repo,
61 61 workingctx: a context that represents the working directory and can
62 62 be committed,
63 63 memctx: a context that represents changes in-memory and can also
64 64 be committed."""
65 65
66 66 def __init__(self, repo):
67 67 self._repo = repo
68 68
69 69 def __bytes__(self):
70 70 return short(self.node())
71 71
72 72 __str__ = encoding.strmethod(__bytes__)
73 73
74 74 def __repr__(self):
75 75 return "<%s %s>" % (type(self).__name__, str(self))
76 76
77 77 def __eq__(self, other):
78 78 try:
79 79 return type(self) == type(other) and self._rev == other._rev
80 80 except AttributeError:
81 81 return False
82 82
83 83 def __ne__(self, other):
84 84 return not (self == other)
85 85
86 86 def __contains__(self, key):
87 87 return key in self._manifest
88 88
89 89 def __getitem__(self, key):
90 90 return self.filectx(key)
91 91
92 92 def __iter__(self):
93 93 return iter(self._manifest)
94 94
95 95 def _buildstatusmanifest(self, status):
96 96 """Builds a manifest that includes the given status results, if this is
97 97 a working copy context. For non-working copy contexts, it just returns
98 98 the normal manifest."""
99 99 return self.manifest()
100 100
101 101 def _matchstatus(self, other, match):
102 102 """This internal method provides a way for child objects to override the
103 103 match operator.
104 104 """
105 105 return match
106 106
107 107 def _buildstatus(
108 108 self, other, s, match, listignored, listclean, listunknown
109 109 ):
110 110 """build a status with respect to another context"""
111 111 # Load earliest manifest first for caching reasons. More specifically,
112 112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
113 113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
114 114 # 1000 and cache it so that when you read 1001, we just need to apply a
115 115 # delta to what's in the cache. So that's one full reconstruction + one
116 116 # delta application.
117 117 mf2 = None
118 118 if self.rev() is not None and self.rev() < other.rev():
119 119 mf2 = self._buildstatusmanifest(s)
120 120 mf1 = other._buildstatusmanifest(s)
121 121 if mf2 is None:
122 122 mf2 = self._buildstatusmanifest(s)
123 123
124 124 modified, added = [], []
125 125 removed = []
126 126 clean = []
127 127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 128 deletedset = set(deleted)
129 129 d = mf1.diff(mf2, match=match, clean=listclean)
130 130 for fn, value in pycompat.iteritems(d):
131 131 if fn in deletedset:
132 132 continue
133 133 if value is None:
134 134 clean.append(fn)
135 135 continue
136 136 (node1, flag1), (node2, flag2) = value
137 137 if node1 is None:
138 138 added.append(fn)
139 139 elif node2 is None:
140 140 removed.append(fn)
141 141 elif flag1 != flag2:
142 142 modified.append(fn)
143 143 elif node2 not in wdirfilenodeids:
144 144 # When comparing files between two commits, we save time by
145 145 # not comparing the file contents when the nodeids differ.
146 146 # Note that this means we incorrectly report a reverted change
147 147 # to a file as a modification.
148 148 modified.append(fn)
149 149 elif self[fn].cmp(other[fn]):
150 150 modified.append(fn)
151 151 else:
152 152 clean.append(fn)
153 153
154 154 if removed:
155 155 # need to filter files if they are already reported as removed
156 156 unknown = [
157 157 fn
158 158 for fn in unknown
159 159 if fn not in mf1 and (not match or match(fn))
160 160 ]
161 161 ignored = [
162 162 fn
163 163 for fn in ignored
164 164 if fn not in mf1 and (not match or match(fn))
165 165 ]
166 166 # if they're deleted, don't report them as removed
167 167 removed = [fn for fn in removed if fn not in deletedset]
168 168
169 169 return scmutil.status(
170 170 modified, added, removed, deleted, unknown, ignored, clean
171 171 )
172 172
173 173 @propertycache
174 174 def substate(self):
175 175 return subrepoutil.state(self, self._repo.ui)
176 176
177 177 def subrev(self, subpath):
178 178 return self.substate[subpath][1]
179 179
180 180 def rev(self):
181 181 return self._rev
182 182
183 183 def node(self):
184 184 return self._node
185 185
186 186 def hex(self):
187 187 return hex(self.node())
188 188
189 189 def manifest(self):
190 190 return self._manifest
191 191
192 192 def manifestctx(self):
193 193 return self._manifestctx
194 194
195 195 def repo(self):
196 196 return self._repo
197 197
198 198 def phasestr(self):
199 199 return phases.phasenames[self.phase()]
200 200
201 201 def mutable(self):
202 202 return self.phase() > phases.public
203 203
204 204 def matchfileset(self, cwd, expr, badfn=None):
205 205 return fileset.match(self, cwd, expr, badfn=badfn)
206 206
207 207 def obsolete(self):
208 208 """True if the changeset is obsolete"""
209 209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
210 210
211 211 def extinct(self):
212 212 """True if the changeset is extinct"""
213 213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
214 214
215 215 def orphan(self):
216 216 """True if the changeset is not obsolete, but its ancestor is"""
217 217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
218 218
219 219 def phasedivergent(self):
220 220 """True if the changeset tries to be a successor of a public changeset
221 221
222 222 Only non-public and non-obsolete changesets may be phase-divergent.
223 223 """
224 224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
225 225
226 226 def contentdivergent(self):
227 227 """Is a successor of a changeset with multiple possible successor sets
228 228
229 229 Only non-public and non-obsolete changesets may be content-divergent.
230 230 """
231 231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
232 232
233 233 def isunstable(self):
234 234 """True if the changeset is either orphan, phase-divergent or
235 235 content-divergent"""
236 236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
237 237
238 238 def instabilities(self):
239 239 """return the list of instabilities affecting this changeset.
240 240
241 241 Instabilities are returned as strings. possible values are:
242 242 - orphan,
243 243 - phase-divergent,
244 244 - content-divergent.
245 245 """
246 246 instabilities = []
247 247 if self.orphan():
248 248 instabilities.append(b'orphan')
249 249 if self.phasedivergent():
250 250 instabilities.append(b'phase-divergent')
251 251 if self.contentdivergent():
252 252 instabilities.append(b'content-divergent')
253 253 return instabilities
254 254
255 255 def parents(self):
256 256 """return contexts for each parent changeset"""
257 257 return self._parents
258 258
259 259 def p1(self):
260 260 return self._parents[0]
261 261
262 262 def p2(self):
263 263 parents = self._parents
264 264 if len(parents) == 2:
265 265 return parents[1]
266 266 return self._repo[nullrev]
267 267
268 268 def _fileinfo(self, path):
269 269 if '_manifest' in self.__dict__:
270 270 try:
271 271 return self._manifest.find(path)
272 272 except KeyError:
273 273 raise error.ManifestLookupError(
274 274 self._node or b'None', path, _(b'not found in manifest')
275 275 )
276 276 if '_manifestdelta' in self.__dict__ or path in self.files():
277 277 if path in self._manifestdelta:
278 278 return (
279 279 self._manifestdelta[path],
280 280 self._manifestdelta.flags(path),
281 281 )
282 282 mfl = self._repo.manifestlog
283 283 try:
284 284 node, flag = mfl[self._changeset.manifest].find(path)
285 285 except KeyError:
286 286 raise error.ManifestLookupError(
287 287 self._node or b'None', path, _(b'not found in manifest')
288 288 )
289 289
290 290 return node, flag
291 291
292 292 def filenode(self, path):
293 293 return self._fileinfo(path)[0]
294 294
295 295 def flags(self, path):
296 296 try:
297 297 return self._fileinfo(path)[1]
298 298 except error.LookupError:
299 299 return b''
300 300
301 301 @propertycache
302 302 def _copies(self):
303 303 return metadata.computechangesetcopies(self)
304 304
305 305 def p1copies(self):
306 306 return self._copies[0]
307 307
308 308 def p2copies(self):
309 309 return self._copies[1]
310 310
311 311 def sub(self, path, allowcreate=True):
312 312 '''return a subrepo for the stored revision of path, never wdir()'''
313 313 return subrepo.subrepo(self, path, allowcreate=allowcreate)
314 314
315 315 def nullsub(self, path, pctx):
316 316 return subrepo.nullsubrepo(self, path, pctx)
317 317
318 318 def workingsub(self, path):
319 319 """return a subrepo for the stored revision, or wdir if this is a wdir
320 320 context.
321 321 """
322 322 return subrepo.subrepo(self, path, allowwdir=True)
323 323
324 324 def match(
325 325 self,
326 326 pats=None,
327 327 include=None,
328 328 exclude=None,
329 329 default=b'glob',
330 330 listsubrepos=False,
331 331 badfn=None,
332 332 cwd=None,
333 333 ):
334 334 r = self._repo
335 335 if not cwd:
336 336 cwd = r.getcwd()
337 337 return matchmod.match(
338 338 r.root,
339 339 cwd,
340 340 pats,
341 341 include,
342 342 exclude,
343 343 default,
344 344 auditor=r.nofsauditor,
345 345 ctx=self,
346 346 listsubrepos=listsubrepos,
347 347 badfn=badfn,
348 348 )
349 349
350 350 def diff(
351 351 self,
352 352 ctx2=None,
353 353 match=None,
354 354 changes=None,
355 355 opts=None,
356 356 losedatafn=None,
357 357 pathfn=None,
358 358 copy=None,
359 359 copysourcematch=None,
360 360 hunksfilterfn=None,
361 361 ):
362 362 """Returns a diff generator for the given contexts and matcher"""
363 363 if ctx2 is None:
364 364 ctx2 = self.p1()
365 365 if ctx2 is not None:
366 366 ctx2 = self._repo[ctx2]
367 367 return patch.diff(
368 368 self._repo,
369 369 ctx2,
370 370 self,
371 371 match=match,
372 372 changes=changes,
373 373 opts=opts,
374 374 losedatafn=losedatafn,
375 375 pathfn=pathfn,
376 376 copy=copy,
377 377 copysourcematch=copysourcematch,
378 378 hunksfilterfn=hunksfilterfn,
379 379 )
380 380
381 381 def dirs(self):
382 382 return self._manifest.dirs()
383 383
384 384 def hasdir(self, dir):
385 385 return self._manifest.hasdir(dir)
386 386
387 387 def status(
388 388 self,
389 389 other=None,
390 390 match=None,
391 391 listignored=False,
392 392 listclean=False,
393 393 listunknown=False,
394 394 listsubrepos=False,
395 395 ):
396 396 """return status of files between two nodes or node and working
397 397 directory.
398 398
399 399 If other is None, compare this node with working directory.
400 400
401 401 ctx1.status(ctx2) returns the status of change from ctx1 to ctx2
402 402
403 403 Returns a mercurial.scmutil.status object.
404 404
405 405 Data can be accessed using either tuple notation:
406 406
407 407 (modified, added, removed, deleted, unknown, ignored, clean)
408 408
409 409 or direct attribute access:
410 410
411 411 s.modified, s.added, ...
412 412 """
413 413
414 414 ctx1 = self
415 415 ctx2 = self._repo[other]
416 416
417 417 # This next code block is, admittedly, fragile logic that tests for
418 418 # reversing the contexts and wouldn't need to exist if it weren't for
419 419 # the fast (and common) code path of comparing the working directory
420 420 # with its first parent.
421 421 #
422 422 # What we're aiming for here is the ability to call:
423 423 #
424 424 # workingctx.status(parentctx)
425 425 #
426 426 # If we always built the manifest for each context and compared those,
427 427 # then we'd be done. But the special case of the above call means we
428 428 # just copy the manifest of the parent.
429 429 reversed = False
430 430 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
431 431 reversed = True
432 432 ctx1, ctx2 = ctx2, ctx1
433 433
434 434 match = self._repo.narrowmatch(match)
435 435 match = ctx2._matchstatus(ctx1, match)
436 436 r = scmutil.status([], [], [], [], [], [], [])
437 437 r = ctx2._buildstatus(
438 438 ctx1, r, match, listignored, listclean, listunknown
439 439 )
440 440
441 441 if reversed:
442 442 # Reverse added and removed. Clear deleted, unknown and ignored as
443 443 # these make no sense to reverse.
444 444 r = scmutil.status(
445 445 r.modified, r.removed, r.added, [], [], [], r.clean
446 446 )
447 447
448 448 if listsubrepos:
449 449 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
450 450 try:
451 451 rev2 = ctx2.subrev(subpath)
452 452 except KeyError:
453 453 # A subrepo that existed in node1 was deleted between
454 454 # node1 and node2 (inclusive). Thus, ctx2's substate
455 455 # won't contain that subpath. The best we can do is ignore it.
456 456 rev2 = None
457 457 submatch = matchmod.subdirmatcher(subpath, match)
458 458 s = sub.status(
459 459 rev2,
460 460 match=submatch,
461 461 ignored=listignored,
462 462 clean=listclean,
463 463 unknown=listunknown,
464 464 listsubrepos=True,
465 465 )
466 466 for k in (
467 467 'modified',
468 468 'added',
469 469 'removed',
470 470 'deleted',
471 471 'unknown',
472 472 'ignored',
473 473 'clean',
474 474 ):
475 475 rfiles, sfiles = getattr(r, k), getattr(s, k)
476 476 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
477 477
478 478 r.modified.sort()
479 479 r.added.sort()
480 480 r.removed.sort()
481 481 r.deleted.sort()
482 482 r.unknown.sort()
483 483 r.ignored.sort()
484 484 r.clean.sort()
485 485
486 486 return r
487 487
488 488 def mergestate(self, clean=False):
489 489 """Get a mergestate object for this context."""
490 490 raise NotImplementedError(
491 491 '%s does not implement mergestate()' % self.__class__
492 492 )
493 493
494 494 def isempty(self):
495 495 return not (
496 496 len(self.parents()) > 1
497 497 or self.branch() != self.p1().branch()
498 498 or self.closesbranch()
499 499 or self.files()
500 500 )
501 501
502 502
503 503 class changectx(basectx):
504 504 """A changecontext object makes access to data related to a particular
505 505 changeset convenient. It represents a read-only context already present in
506 506 the repo."""
507 507
508 508 def __init__(self, repo, rev, node, maybe_filtered=True):
509 509 super(changectx, self).__init__(repo)
510 510 self._rev = rev
511 511 self._node = node
512 512 # When maybe_filtered is True, the revision might be affected by
513 513 # changelog filtering and operation through the filtered changelog must be used.
514 514 #
515 515 # When maybe_filtered is False, the revision has already been checked
516 516 # against filtering and is not filtered. Operation through the
517 517 # unfiltered changelog might be used in some case.
518 518 self._maybe_filtered = maybe_filtered
519 519
520 520 def __hash__(self):
521 521 try:
522 522 return hash(self._rev)
523 523 except AttributeError:
524 524 return id(self)
525 525
526 526 def __nonzero__(self):
527 527 return self._rev != nullrev
528 528
529 529 __bool__ = __nonzero__
530 530
531 531 @propertycache
532 532 def _changeset(self):
533 533 if self._maybe_filtered:
534 534 repo = self._repo
535 535 else:
536 536 repo = self._repo.unfiltered()
537 537 return repo.changelog.changelogrevision(self.rev())
538 538
539 539 @propertycache
540 540 def _manifest(self):
541 541 return self._manifestctx.read()
542 542
543 543 @property
544 544 def _manifestctx(self):
545 545 return self._repo.manifestlog[self._changeset.manifest]
546 546
547 547 @propertycache
548 548 def _manifestdelta(self):
549 549 return self._manifestctx.readdelta()
550 550
551 551 @propertycache
552 552 def _parents(self):
553 553 repo = self._repo
554 554 if self._maybe_filtered:
555 555 cl = repo.changelog
556 556 else:
557 557 cl = repo.unfiltered().changelog
558 558
559 559 p1, p2 = cl.parentrevs(self._rev)
560 560 if p2 == nullrev:
561 561 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
562 562 return [
563 563 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
564 564 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
565 565 ]
566 566
567 567 def changeset(self):
568 568 c = self._changeset
569 569 return (
570 570 c.manifest,
571 571 c.user,
572 572 c.date,
573 573 c.files,
574 574 c.description,
575 575 c.extra,
576 576 )
577 577
578 578 def manifestnode(self):
579 579 return self._changeset.manifest
580 580
581 581 def user(self):
582 582 return self._changeset.user
583 583
584 584 def date(self):
585 585 return self._changeset.date
586 586
587 587 def files(self):
588 588 return self._changeset.files
589 589
590 590 def filesmodified(self):
591 591 modified = set(self.files())
592 592 modified.difference_update(self.filesadded())
593 593 modified.difference_update(self.filesremoved())
594 594 return sorted(modified)
595 595
596 596 def filesadded(self):
597 597 filesadded = self._changeset.filesadded
598 598 compute_on_none = True
599 599 if self._repo.filecopiesmode == b'changeset-sidedata':
600 600 compute_on_none = False
601 601 else:
602 602 source = self._repo.ui.config(b'experimental', b'copies.read-from')
603 603 if source == b'changeset-only':
604 604 compute_on_none = False
605 605 elif source != b'compatibility':
606 606 # filelog mode, ignore any changelog content
607 607 filesadded = None
608 608 if filesadded is None:
609 609 if compute_on_none:
610 610 filesadded = metadata.computechangesetfilesadded(self)
611 611 else:
612 612 filesadded = []
613 613 return filesadded
614 614
615 615 def filesremoved(self):
616 616 filesremoved = self._changeset.filesremoved
617 617 compute_on_none = True
618 618 if self._repo.filecopiesmode == b'changeset-sidedata':
619 619 compute_on_none = False
620 620 else:
621 621 source = self._repo.ui.config(b'experimental', b'copies.read-from')
622 622 if source == b'changeset-only':
623 623 compute_on_none = False
624 624 elif source != b'compatibility':
625 625 # filelog mode, ignore any changelog content
626 626 filesremoved = None
627 627 if filesremoved is None:
628 628 if compute_on_none:
629 629 filesremoved = metadata.computechangesetfilesremoved(self)
630 630 else:
631 631 filesremoved = []
632 632 return filesremoved
633 633
634 634 @propertycache
635 635 def _copies(self):
636 636 p1copies = self._changeset.p1copies
637 637 p2copies = self._changeset.p2copies
638 638 compute_on_none = True
639 639 if self._repo.filecopiesmode == b'changeset-sidedata':
640 640 compute_on_none = False
641 641 else:
642 642 source = self._repo.ui.config(b'experimental', b'copies.read-from')
643 643 # If config says to get copy metadata only from changeset, then
644 644 # return that, defaulting to {} if there was no copy metadata. In
645 645 # compatibility mode, we return copy data from the changeset if it
646 646 # was recorded there, and otherwise we fall back to getting it from
647 647 # the filelogs (below).
648 648 #
649 649 # If we are in compatibility mode and there is no data in the
650 650 # changeset, we get the copy metadata from the filelogs.
651 651 #
652 652 # Otherwise, when the config says to read only from the filelog, we
653 653 # get the copy metadata from the filelogs.
654 654 if source == b'changeset-only':
655 655 compute_on_none = False
656 656 elif source != b'compatibility':
657 657 # filelog mode, ignore any changelog content
658 658 p1copies = p2copies = None
659 659 if p1copies is None:
660 660 if compute_on_none:
661 661 p1copies, p2copies = super(changectx, self)._copies
662 662 else:
663 663 if p1copies is None:
664 664 p1copies = {}
665 665 if p2copies is None:
666 666 p2copies = {}
667 667 return p1copies, p2copies
668 668
669 669 def description(self):
670 670 return self._changeset.description
671 671
672 672 def branch(self):
673 673 return encoding.tolocal(self._changeset.extra.get(b"branch"))
674 674
675 675 def closesbranch(self):
676 676 return b'close' in self._changeset.extra
677 677
678 678 def extra(self):
679 679 """Return a dict of extra information."""
680 680 return self._changeset.extra
681 681
682 682 def tags(self):
683 683 """Return a list of byte tag names"""
684 684 return self._repo.nodetags(self._node)
685 685
686 686 def bookmarks(self):
687 687 """Return a list of byte bookmark names."""
688 688 return self._repo.nodebookmarks(self._node)
689 689
690 690 def phase(self):
691 691 return self._repo._phasecache.phase(self._repo, self._rev)
692 692
693 693 def hidden(self):
694 694 return self._rev in repoview.filterrevs(self._repo, b'visible')
695 695
696 696 def isinmemory(self):
697 697 return False
698 698
699 699 def children(self):
700 700 """return list of changectx contexts for each child changeset.
701 701
702 702 This returns only the immediate child changesets. Use descendants() to
703 703 recursively walk children.
704 704 """
705 705 c = self._repo.changelog.children(self._node)
706 706 return [self._repo[x] for x in c]
707 707
708 708 def ancestors(self):
709 709 for a in self._repo.changelog.ancestors([self._rev]):
710 710 yield self._repo[a]
711 711
712 712 def descendants(self):
713 713 """Recursively yield all children of the changeset.
714 714
715 715 For just the immediate children, use children()
716 716 """
717 717 for d in self._repo.changelog.descendants([self._rev]):
718 718 yield self._repo[d]
719 719
720 720 def filectx(self, path, fileid=None, filelog=None):
721 721 """get a file context from this changeset"""
722 722 if fileid is None:
723 723 fileid = self.filenode(path)
724 724 return filectx(
725 725 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
726 726 )
727 727
728 728 def ancestor(self, c2, warn=False):
729 729 """return the "best" ancestor context of self and c2
730 730
731 731 If there are multiple candidates, it will show a message and check
732 732 merge.preferancestor configuration before falling back to the
733 733 revlog ancestor."""
734 734 # deal with workingctxs
735 735 n2 = c2._node
736 736 if n2 is None:
737 737 n2 = c2._parents[0]._node
738 738 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
739 739 if not cahs:
740 740 anc = nullid
741 741 elif len(cahs) == 1:
742 742 anc = cahs[0]
743 743 else:
744 744 # experimental config: merge.preferancestor
745 745 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
746 746 try:
747 747 ctx = scmutil.revsymbol(self._repo, r)
748 748 except error.RepoLookupError:
749 749 continue
750 750 anc = ctx.node()
751 751 if anc in cahs:
752 752 break
753 753 else:
754 754 anc = self._repo.changelog.ancestor(self._node, n2)
755 755 if warn:
756 756 self._repo.ui.status(
757 757 (
758 758 _(b"note: using %s as ancestor of %s and %s\n")
759 759 % (short(anc), short(self._node), short(n2))
760 760 )
761 761 + b''.join(
762 762 _(
763 763 b" alternatively, use --config "
764 764 b"merge.preferancestor=%s\n"
765 765 )
766 766 % short(n)
767 767 for n in sorted(cahs)
768 768 if n != anc
769 769 )
770 770 )
771 771 return self._repo[anc]
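# Hedged example of steering the choice made above: when several common
# ancestor heads exist, a user can pass one of them explicitly, e.g.
#
#   hg merge somebranch --config merge.preferancestor=<rev>
#
# and ancestor() will pick <rev> if it is among the candidate heads,
# instead of the revlog's default answer.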
772 772
773 773 def isancestorof(self, other):
774 774 """True if this changeset is an ancestor of other"""
775 775 return self._repo.changelog.isancestorrev(self._rev, other._rev)
776 776
777 777 def walk(self, match):
778 778 '''Generates matching file names.'''
779 779
780 780 # Wrap match.bad method to have message with nodeid
781 781 def bad(fn, msg):
782 782 # The manifest doesn't know about subrepos, so don't complain about
783 783 # paths into valid subrepos.
784 784 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
785 785 return
786 786 match.bad(fn, _(b'no such file in rev %s') % self)
787 787
788 788 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
789 789 return self._manifest.walk(m)
790 790
791 791 def matches(self, match):
792 792 return self.walk(match)
793 793
794 794
795 795 class basefilectx(object):
796 796 """A filecontext object represents the common logic for its children:
797 797 filectx: read-only access to a filerevision that is already present
798 798 in the repo,
799 799 workingfilectx: a filecontext that represents files from the working
800 800 directory,
801 801 memfilectx: a filecontext that represents files in-memory,
802 802 """
803 803
804 804 @propertycache
805 805 def _filelog(self):
806 806 return self._repo.file(self._path)
807 807
808 808 @propertycache
809 809 def _changeid(self):
810 810 if '_changectx' in self.__dict__:
811 811 return self._changectx.rev()
812 812 elif '_descendantrev' in self.__dict__:
813 813 # this file context was created from a revision with a known
814 814 # descendant, so we can (lazily) correct for linkrev aliases
815 815 return self._adjustlinkrev(self._descendantrev)
816 816 else:
817 817 return self._filelog.linkrev(self._filerev)
818 818
819 819 @propertycache
820 820 def _filenode(self):
821 821 if '_fileid' in self.__dict__:
822 822 return self._filelog.lookup(self._fileid)
823 823 else:
824 824 return self._changectx.filenode(self._path)
825 825
826 826 @propertycache
827 827 def _filerev(self):
828 828 return self._filelog.rev(self._filenode)
829 829
830 830 @propertycache
831 831 def _repopath(self):
832 832 return self._path
833 833
834 834 def __nonzero__(self):
835 835 try:
836 836 self._filenode
837 837 return True
838 838 except error.LookupError:
839 839 # file is missing
840 840 return False
841 841
842 842 __bool__ = __nonzero__
843 843
844 844 def __bytes__(self):
845 845 try:
846 846 return b"%s@%s" % (self.path(), self._changectx)
847 847 except error.LookupError:
848 848 return b"%s@???" % self.path()
849 849
850 850 __str__ = encoding.strmethod(__bytes__)
851 851
852 852 def __repr__(self):
853 853 return "<%s %s>" % (type(self).__name__, str(self))
854 854
855 855 def __hash__(self):
856 856 try:
857 857 return hash((self._path, self._filenode))
858 858 except AttributeError:
859 859 return id(self)
860 860
861 861 def __eq__(self, other):
862 862 try:
863 863 return (
864 864 type(self) == type(other)
865 865 and self._path == other._path
866 866 and self._filenode == other._filenode
867 867 )
868 868 except AttributeError:
869 869 return False
870 870
871 871 def __ne__(self, other):
872 872 return not (self == other)
873 873
874 874 def filerev(self):
875 875 return self._filerev
876 876
877 877 def filenode(self):
878 878 return self._filenode
879 879
880 880 @propertycache
881 881 def _flags(self):
882 882 return self._changectx.flags(self._path)
883 883
884 884 def flags(self):
885 885 return self._flags
886 886
887 887 def filelog(self):
888 888 return self._filelog
889 889
890 890 def rev(self):
891 891 return self._changeid
892 892
893 893 def linkrev(self):
894 894 return self._filelog.linkrev(self._filerev)
895 895
896 896 def node(self):
897 897 return self._changectx.node()
898 898
899 899 def hex(self):
900 900 return self._changectx.hex()
901 901
902 902 def user(self):
903 903 return self._changectx.user()
904 904
905 905 def date(self):
906 906 return self._changectx.date()
907 907
908 908 def files(self):
909 909 return self._changectx.files()
910 910
911 911 def description(self):
912 912 return self._changectx.description()
913 913
914 914 def branch(self):
915 915 return self._changectx.branch()
916 916
917 917 def extra(self):
918 918 return self._changectx.extra()
919 919
920 920 def phase(self):
921 921 return self._changectx.phase()
922 922
923 923 def phasestr(self):
924 924 return self._changectx.phasestr()
925 925
926 926 def obsolete(self):
927 927 return self._changectx.obsolete()
928 928
929 929 def instabilities(self):
930 930 return self._changectx.instabilities()
931 931
932 932 def manifest(self):
933 933 return self._changectx.manifest()
934 934
935 935 def changectx(self):
936 936 return self._changectx
937 937
938 938 def renamed(self):
939 939 return self._copied
940 940
941 941 def copysource(self):
942 942 return self._copied and self._copied[0]
943 943
944 944 def repo(self):
945 945 return self._repo
946 946
947 947 def size(self):
948 948 return len(self.data())
949 949
950 950 def path(self):
951 951 return self._path
952 952
953 953 def isbinary(self):
954 954 try:
955 955 return stringutil.binary(self.data())
956 956 except IOError:
957 957 return False
958 958
959 959 def isexec(self):
960 960 return b'x' in self.flags()
961 961
962 962 def islink(self):
963 963 return b'l' in self.flags()
964 964
965 965 def isabsent(self):
966 966 """whether this filectx represents a file not in self._changectx
967 967
968 968 This is mainly for merge code to detect change/delete conflicts. It is
969 969 expected to be False for all subclasses of basefilectx."""
970 970 return False
971 971
972 972 _customcmp = False
973 973
974 974 def cmp(self, fctx):
975 975 """compare with other file context
976 976
977 977 returns True if different than fctx.
978 978 """
979 979 if fctx._customcmp:
980 980 return fctx.cmp(self)
981 981
982 982 if self._filenode is None:
983 983 raise error.ProgrammingError(
984 984 b'filectx.cmp() must be reimplemented if not backed by revlog'
985 985 )
986 986
987 987 if fctx._filenode is None:
988 988 if self._repo._encodefilterpats:
989 989 # can't rely on size() because wdir content may be decoded
990 990 return self._filelog.cmp(self._filenode, fctx.data())
991 991 if self.size() - 4 == fctx.size():
992 992 # size() can match:
993 993 # if file data starts with '\1\n', an empty metadata block is
994 994 # prepended, which adds 4 bytes to filelog.size().
995 995 return self._filelog.cmp(self._filenode, fctx.data())
996 996 if self.size() == fctx.size() or self.flags() == b'l':
997 997 # size() matches: need to compare content
998 998 # issue6456: always compare symlinks because the size can be that of
999 999 # an encrypted string under ext4 encryption (fscrypt).
1000 1000 return self._filelog.cmp(self._filenode, fctx.data())
1001 1001
1002 1002 # size() differs
1003 1003 return True
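# Worked sketch of the size() comparisons above, using the filelog
# metadata escaping described in the comments (illustrative bytes only):
#
#   data = b'\x01\nnot metadata'         # file content starts with \1\n
#   stored = b'\x01\n\x01\n' + data      # empty metadata block prepended
#   assert len(stored) == len(data) + 4  # hence the `size() - 4` branch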
1004 1004
1005 1005 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
1006 1006 """return the first ancestor of <srcrev> introducing <fnode>
1007 1007
1008 1008 If the linkrev of the file revision does not point to an ancestor of
1009 1009 srcrev, we'll walk down the ancestors until we find one introducing
1010 1010 this file revision.
1011 1011
1012 1012 :srcrev: the changeset revision we search ancestors from
1013 1013 :inclusive: if true, the src revision will also be checked
1014 1014 :stoprev: an optional revision to stop the walk at. If no introduction
1015 1015 of this file content could be found before this floor
1016 1016 revision, the function will return "None" and stop its
1017 1017 iteration.
1018 1018 """
1019 1019 repo = self._repo
1020 1020 cl = repo.unfiltered().changelog
1021 1021 mfl = repo.manifestlog
1022 1022 # fetch the linkrev
1023 1023 lkr = self.linkrev()
1024 1024 if srcrev == lkr:
1025 1025 return lkr
1026 1026 # hack to reuse ancestor computation when searching for renames
1027 1027 memberanc = getattr(self, '_ancestrycontext', None)
1028 1028 iteranc = None
1029 1029 if srcrev is None:
1030 1030 # wctx case, used by workingfilectx during mergecopy
1031 1031 revs = [p.rev() for p in self._repo[None].parents()]
1032 1032 inclusive = True # we skipped the real (revless) source
1033 1033 else:
1034 1034 revs = [srcrev]
1035 1035 if memberanc is None:
1036 1036 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1037 1037 # check if this linkrev is an ancestor of srcrev
1038 1038 if lkr not in memberanc:
1039 1039 if iteranc is None:
1040 1040 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1041 1041 fnode = self._filenode
1042 1042 path = self._path
1043 1043 for a in iteranc:
1044 1044 if stoprev is not None and a < stoprev:
1045 1045 return None
1046 1046 ac = cl.read(a) # get changeset data (we avoid object creation)
1047 1047 if path in ac[3]: # checking the 'files' field.
1048 1048 # The file has been touched, check if the content is
1049 1049 # similar to the one we search for.
1050 1050 if fnode == mfl[ac[0]].readfast().get(path):
1051 1051 return a
1052 1052 # In theory, we should never get out of that loop without a result.
1053 1053 # But if the manifest uses a buggy file revision (not a child of the
1054 1054 # one it replaces) we could. Such a buggy situation will likely
1055 1055 # result in a crash somewhere else at some point.
1056 1056 return lkr
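# Illustrative shape of the linkrev aliasing handled above (assumed
# repository history, not code): two changesets introducing identical
# file content share one filelog revision, whose linkrev names only the
# first of them:
#
#   rev 1 (branch A): adds f = b"x"   <- filelog linkrev points here
#   rev 5 (branch B): adds f = b"x"   <- same fnode, no new filelog entry
#
# _adjustlinkrev(5) notices rev 1 is not an ancestor of rev 5 and walks
# rev 5's ancestors to find the changeset that really introduced f there.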
1057 1057
1058 1058 def isintroducedafter(self, changelogrev):
1059 1059 """True if a filectx has been introduced after a given floor revision"""
1060 1060 if self.linkrev() >= changelogrev:
1061 1061 return True
1062 1062 introrev = self._introrev(stoprev=changelogrev)
1063 1063 if introrev is None:
1064 1064 return False
1065 1065 return introrev >= changelogrev
1066 1066
1067 1067 def introrev(self):
1068 1068 """return the rev of the changeset which introduced this file revision
1069 1069
1070 1070 This method is different from linkrev because it takes into account the
1071 1071 changeset the filectx was created from. It ensures the returned
1072 1072 revision is one of its ancestors. This prevents bugs from
1073 1073 'linkrev-shadowing' when a file revision is used by multiple
1074 1074 changesets.
1075 1075 """
1076 1076 return self._introrev()
1077 1077
1078 1078 def _introrev(self, stoprev=None):
1079 1079 """
1080 1080 Same as `introrev`, but with an extra argument to limit the changelog
1081 1081 iteration range in some internal use cases.
1082 1082
1083 1083 If `stoprev` is set, the `introrev` will not be searched past that
1084 1084 `stoprev` revision and "None" might be returned. This is useful to
1085 1085 limit the iteration range.
1086 1086 """
1087 1087 toprev = None
1088 1088 attrs = vars(self)
1089 1089 if '_changeid' in attrs:
1090 1090 # We have a cached value already
1091 1091 toprev = self._changeid
1092 1092 elif '_changectx' in attrs:
1093 1093 # We know which changelog entry we are coming from
1094 1094 toprev = self._changectx.rev()
1095 1095
1096 1096 if toprev is not None:
1097 1097 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1098 1098 elif '_descendantrev' in attrs:
1099 1099 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1100 1100 # be nice and cache the result of the computation
1101 1101 if introrev is not None:
1102 1102 self._changeid = introrev
1103 1103 return introrev
1104 1104 else:
1105 1105 return self.linkrev()
1106 1106
1107 1107 def introfilectx(self):
1108 1108 """Return filectx having identical contents, but pointing to the
1109 1109 changeset revision where this filectx was introduced"""
1110 1110 introrev = self.introrev()
1111 1111 if self.rev() == introrev:
1112 1112 return self
1113 1113 return self.filectx(self.filenode(), changeid=introrev)
1114 1114
1115 1115 def _parentfilectx(self, path, fileid, filelog):
1116 1116 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1117 1117 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1118 1118 if '_changeid' in vars(self) or '_changectx' in vars(self):
1119 1119 # If self is associated with a changeset (probably explicitly
1120 1120 # fed), ensure the created filectx is associated with a
1121 1121 # changeset that is an ancestor of self.changectx.
1122 1122 # This lets us later use _adjustlinkrev to get a correct link.
1123 1123 fctx._descendantrev = self.rev()
1124 1124 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1125 1125 elif '_descendantrev' in vars(self):
1126 1126 # Otherwise propagate _descendantrev if we have one associated.
1127 1127 fctx._descendantrev = self._descendantrev
1128 1128 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1129 1129 return fctx
1130 1130
1131 1131 def parents(self):
1132 1132 _path = self._path
1133 1133 fl = self._filelog
1134 1134 parents = self._filelog.parents(self._filenode)
1135 1135 pl = [(_path, node, fl) for node in parents if node != nullid]
1136 1136
1137 1137 r = fl.renamed(self._filenode)
1138 1138 if r:
1139 1139 # - In the simple rename case, both parents are nullid, so pl is empty.
1140 1140 # - In case of a merge, only one of the parents is nullid and should
1141 1141 # be replaced with the rename information. This parent is -always-
1142 1142 # the first one.
1143 1143 #
1144 1144 # As nullid parents have always been filtered out by the previous list
1145 1145 # comprehension, inserting at index 0 will always result in replacing
1146 1146 # the first nullid parent with the rename information.
1147 1147 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1148 1148
1149 1149 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1150 1150
1151 1151 def p1(self):
1152 1152 return self.parents()[0]
1153 1153
1154 1154 def p2(self):
1155 1155 p = self.parents()
1156 1156 if len(p) == 2:
1157 1157 return p[1]
1158 1158 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1159 1159
1160 1160 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1161 1161 """Returns a list of annotateline objects for each line in the file
1162 1162
1163 1163 - line.fctx is the filectx of the node where that line was last changed
1164 1164 - line.lineno is the line number at the first appearance in the managed
1165 1165 file
1166 1166 - line.text is the data on that line (including newline character)
1167 1167 """
1168 1168 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1169 1169
1170 1170 def parents(f):
1171 1171 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1172 1172 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1173 1173 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1174 1174 # isn't an ancestor of the srcrev.
1175 1175 f._changeid
1176 1176 pl = f.parents()
1177 1177
1178 1178 # Don't return renamed parents if we aren't following.
1179 1179 if not follow:
1180 1180 pl = [p for p in pl if p.path() == f.path()]
1181 1181
1182 1182 # renamed filectx won't have a filelog yet, so set it
1183 1183 # from the cache to save time
1184 1184 for p in pl:
1185 1185 if not '_filelog' in p.__dict__:
1186 1186 p._filelog = getlog(p.path())
1187 1187
1188 1188 return pl
1189 1189
1190 1190 # use linkrev to find the first changeset where self appeared
1191 1191 base = self.introfilectx()
1192 1192 if getattr(base, '_ancestrycontext', None) is None:
1193 1193 # it is safe to use an unfiltered repository here because we are
1194 1194 # walking ancestors only.
1195 1195 cl = self._repo.unfiltered().changelog
1196 1196 if base.rev() is None:
1197 1197 # wctx is not inclusive, but works because _ancestrycontext
1198 1198 # is used to test filelog revisions
1199 1199 ac = cl.ancestors(
1200 1200 [p.rev() for p in base.parents()], inclusive=True
1201 1201 )
1202 1202 else:
1203 1203 ac = cl.ancestors([base.rev()], inclusive=True)
1204 1204 base._ancestrycontext = ac
1205 1205
1206 1206 return dagop.annotate(
1207 1207 base, parents, skiprevs=skiprevs, diffopts=diffopts
1208 1208 )
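# Hedged usage example (assumes a `repo` object and a tracked file b'foo'):
#
#   fctx = repo[b'tip'][b'foo']
#   for line in fctx.annotate(follow=True):
#       print(line.fctx.rev(), line.lineno, line.text)
#
# Each annotateline pairs the line's bytes (newline included) with the
# filectx of the changeset that last changed it.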
1209 1209
1210 1210 def ancestors(self, followfirst=False):
1211 1211 visit = {}
1212 1212 c = self
1213 1213 if followfirst:
1214 1214 cut = 1
1215 1215 else:
1216 1216 cut = None
1217 1217
1218 1218 while True:
1219 1219 for parent in c.parents()[:cut]:
1220 1220 visit[(parent.linkrev(), parent.filenode())] = parent
1221 1221 if not visit:
1222 1222 break
1223 1223 c = visit.pop(max(visit))
1224 1224 yield c
1225 1225
1226 1226 def decodeddata(self):
1227 1227 """Returns `data()` after running repository decoding filters.
1228 1228
1229 1229 This is often equivalent to how the data would be expressed on disk.
1230 1230 """
1231 1231 return self._repo.wwritedata(self.path(), self.data())
1232 1232
1233 1233
1234 1234 class filectx(basefilectx):
1235 1235 """A filecontext object makes access to data related to a particular
1236 1236 filerevision convenient."""
1237 1237
1238 1238 def __init__(
1239 1239 self,
1240 1240 repo,
1241 1241 path,
1242 1242 changeid=None,
1243 1243 fileid=None,
1244 1244 filelog=None,
1245 1245 changectx=None,
1246 1246 ):
1247 1247 """changeid must be a revision number, if specified.
1248 1248 fileid can be a file revision or node."""
1249 1249 self._repo = repo
1250 1250 self._path = path
1251 1251
1252 1252 assert (
1253 1253 changeid is not None or fileid is not None or changectx is not None
1254 1254 ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
1255 1255 changeid,
1256 1256 fileid,
1257 1257 changectx,
1258 1258 )
1259 1259
1260 1260 if filelog is not None:
1261 1261 self._filelog = filelog
1262 1262
1263 1263 if changeid is not None:
1264 1264 self._changeid = changeid
1265 1265 if changectx is not None:
1266 1266 self._changectx = changectx
1267 1267 if fileid is not None:
1268 1268 self._fileid = fileid
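# Construction sketch mirroring the assertion above (hedged; `repo` and
# `somefilenode` are placeholders, not values from this module):
#
#   fctx = filectx(repo, b'foo', changeid=5)           # rev 5 of b'foo'
#   fctx = filectx(repo, b'foo', fileid=somefilenode)  # by filelog node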
1269 1269
1270 1270 @propertycache
1271 1271 def _changectx(self):
1272 1272 try:
1273 1273 return self._repo[self._changeid]
1274 1274 except error.FilteredRepoLookupError:
1275 1275 # Linkrev may point to any revision in the repository. When the
1276 1276 # repository is filtered this may lead to `filectx` trying to build
1277 1277 # a `changectx` for a filtered revision. In such a case we fall back
1278 1278 # to creating a `changectx` on the unfiltered version of the
1279 1279 # repository. This fallback should not be an issue because
1280 1280 # `changectx` objects from `filectx` are not used in complex
1281 1281 # operations that care about filtering.
1282 1282 #
1283 1283 # This fallback is a cheap and dirty fix that prevents several
1284 1284 # crashes. It does not ensure the behavior is correct. However, the
1285 1285 # behavior was not correct before filtering either, and "incorrect
1286 1286 # behavior" is seen as better than "crash".
1287 1287 #
1288 1288 # Linkrevs have several serious troubles with filtering that are
1289 1289 # complicated to solve. Proper handling of the issue here should be
1290 1290 # considered when solutions to the linkrev issue are on the table.
1291 1291 return self._repo.unfiltered()[self._changeid]
1292 1292
1293 1293 def filectx(self, fileid, changeid=None):
1294 1294 """opens an arbitrary revision of the file without
1295 1295 opening a new filelog"""
1296 1296 return filectx(
1297 1297 self._repo,
1298 1298 self._path,
1299 1299 fileid=fileid,
1300 1300 filelog=self._filelog,
1301 1301 changeid=changeid,
1302 1302 )
1303 1303
1304 1304 def rawdata(self):
1305 1305 return self._filelog.rawdata(self._filenode)
1306 1306
1307 1307 def rawflags(self):
1308 1308 """low-level revlog flags"""
1309 1309 return self._filelog.flags(self._filerev)
1310 1310
1311 1311 def data(self):
1312 1312 try:
1313 1313 return self._filelog.read(self._filenode)
1314 1314 except error.CensoredNodeError:
1315 1315 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1316 1316 return b""
1317 1317 raise error.Abort(
1318 1318 _(b"censored node: %s") % short(self._filenode),
1319 1319 hint=_(b"set censor.policy to ignore errors"),
1320 1320 )
1321 1321
1322 1322 def size(self):
1323 1323 return self._filelog.size(self._filerev)
1324 1324
1325 1325 @propertycache
1326 1326 def _copied(self):
1327 1327 """check if file was actually renamed in this changeset revision
1328 1328
1329 1329 If a rename is logged in the file revision, we report the copy for the
1330 1330 changeset only if the file revision's linkrev points back to the changeset
1331 1331 in question or both changeset parents contain different file revisions.
1332 1332 """
1333 1333
1334 1334 renamed = self._filelog.renamed(self._filenode)
1335 1335 if not renamed:
1336 1336 return None
1337 1337
1338 1338 if self.rev() == self.linkrev():
1339 1339 return renamed
1340 1340
1341 1341 name = self.path()
1342 1342 fnode = self._filenode
1343 1343 for p in self._changectx.parents():
1344 1344 try:
1345 1345 if fnode == p.filenode(name):
1346 1346 return None
1347 1347 except error.LookupError:
1348 1348 pass
1349 1349 return renamed
1350 1350
1351 1351 def children(self):
1352 1352 # hard for renames
1353 1353 c = self._filelog.children(self._filenode)
1354 1354 return [
1355 1355 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1356 1356 for x in c
1357 1357 ]
1358 1358
1359 1359
1360 1360 class committablectx(basectx):
1361 1361 """A committablectx object provides common functionality for a context that
1362 1362 wants the ability to commit, e.g. workingctx or memctx."""
1363 1363
1364 1364 def __init__(
1365 1365 self,
1366 1366 repo,
1367 1367 text=b"",
1368 1368 user=None,
1369 1369 date=None,
1370 1370 extra=None,
1371 1371 changes=None,
1372 1372 branch=None,
1373 1373 ):
1374 1374 super(committablectx, self).__init__(repo)
1375 1375 self._rev = None
1376 1376 self._node = None
1377 1377 self._text = text
1378 1378 if date:
1379 1379 self._date = dateutil.parsedate(date)
1380 1380 if user:
1381 1381 self._user = user
1382 1382 if changes:
1383 1383 self._status = changes
1384 1384
1385 1385 self._extra = {}
1386 1386 if extra:
1387 1387 self._extra = extra.copy()
1388 1388 if branch is not None:
1389 1389 self._extra[b'branch'] = encoding.fromlocal(branch)
1390 1390 if not self._extra.get(b'branch'):
1391 1391 self._extra[b'branch'] = b'default'
1392 1392
1393 1393 def __bytes__(self):
1394 1394 return bytes(self._parents[0]) + b"+"
1395 1395
1396 1396 __str__ = encoding.strmethod(__bytes__)
1397 1397
1398 1398 def __nonzero__(self):
1399 1399 return True
1400 1400
1401 1401 __bool__ = __nonzero__
1402 1402
1403 1403 @propertycache
1404 1404 def _status(self):
1405 1405 return self._repo.status()
1406 1406
1407 1407 @propertycache
1408 1408 def _user(self):
1409 1409 return self._repo.ui.username()
1410 1410
1411 1411 @propertycache
1412 1412 def _date(self):
1413 1413 ui = self._repo.ui
1414 1414 date = ui.configdate(b'devel', b'default-date')
1415 1415 if date is None:
1416 1416 date = dateutil.makedate()
1417 1417 return date
1418 1418
1419 1419 def subrev(self, subpath):
1420 1420 return None
1421 1421
1422 1422 def manifestnode(self):
1423 1423 return None
1424 1424
1425 1425 def user(self):
1426 1426 return self._user or self._repo.ui.username()
1427 1427
1428 1428 def date(self):
1429 1429 return self._date
1430 1430
1431 1431 def description(self):
1432 1432 return self._text
1433 1433
1434 1434 def files(self):
1435 1435 return sorted(
1436 1436 self._status.modified + self._status.added + self._status.removed
1437 1437 )
1438 1438
1439 1439 def modified(self):
1440 1440 return self._status.modified
1441 1441
1442 1442 def added(self):
1443 1443 return self._status.added
1444 1444
1445 1445 def removed(self):
1446 1446 return self._status.removed
1447 1447
1448 1448 def deleted(self):
1449 1449 return self._status.deleted
1450 1450
1451 1451 filesmodified = modified
1452 1452 filesadded = added
1453 1453 filesremoved = removed
1454 1454
1455 1455 def branch(self):
1456 1456 return encoding.tolocal(self._extra[b'branch'])
1457 1457
1458 1458 def closesbranch(self):
1459 1459 return b'close' in self._extra
1460 1460
1461 1461 def extra(self):
1462 1462 return self._extra
1463 1463
1464 1464 def isinmemory(self):
1465 1465 return False
1466 1466
1467 1467 def tags(self):
1468 1468 return []
1469 1469
1470 1470 def bookmarks(self):
1471 1471 b = []
1472 1472 for p in self.parents():
1473 1473 b.extend(p.bookmarks())
1474 1474 return b
1475 1475
1476 1476 def phase(self):
1477 1477 phase = phases.newcommitphase(self._repo.ui)
1478 1478 for p in self.parents():
1479 1479 phase = max(phase, p.phase())
1480 1480 return phase
1481 1481
1482 1482 def hidden(self):
1483 1483 return False
1484 1484
1485 1485 def children(self):
1486 1486 return []
1487 1487
1488 1488 def flags(self, path):
1489 1489 if '_manifest' in self.__dict__:
1490 1490 try:
1491 1491 return self._manifest.flags(path)
1492 1492 except KeyError:
1493 1493 return b''
1494 1494
1495 1495 try:
1496 1496 return self._flagfunc(path)
1497 1497 except OSError:
1498 1498 return b''
1499 1499
1500 1500 def ancestor(self, c2):
1501 1501 """return the "best" ancestor context of self and c2"""
1502 1502 return self._parents[0].ancestor(c2) # punt on two parents for now
1503 1503
1504 1504 def ancestors(self):
1505 1505 for p in self._parents:
1506 1506 yield p
1507 1507 for a in self._repo.changelog.ancestors(
1508 1508 [p.rev() for p in self._parents]
1509 1509 ):
1510 1510 yield self._repo[a]
1511 1511
1512 1512 def markcommitted(self, node):
1513 1513 """Perform post-commit cleanup necessary after committing this ctx
1514 1514
1515 1515 Specifically, this updates the backing stores this working context
1516 1516 wraps to reflect the fact that the changes represented by this
1517 1517 workingctx have been committed. For example, it marks
1518 1518 modified and added files as normal in the dirstate.
1519 1519
1520 1520 """
1521 1521
1522 1522 def dirty(self, missing=False, merge=True, branch=True):
1523 1523 return False
1524 1524
1525 1525
1526 1526 class workingctx(committablectx):
1527 1527 """A workingctx object makes access to data related to
1528 1528 the current working directory convenient.
1529 1529 date - any valid date string or (unixtime, offset), or None.
1530 1530 user - username string, or None.
1531 1531 extra - a dictionary of extra values, or None.
1532 1532 changes - a list of file lists as returned by localrepo.status()
1533 1533 or None to use the repository status.
1534 1534 """
1535 1535
1536 1536 def __init__(
1537 1537 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1538 1538 ):
1539 1539 branch = None
1540 1540 if not extra or b'branch' not in extra:
1541 1541 try:
1542 1542 branch = repo.dirstate.branch()
1543 1543 except UnicodeDecodeError:
1544 1544 raise error.Abort(_(b'branch name not in UTF-8!'))
1545 1545 super(workingctx, self).__init__(
1546 1546 repo, text, user, date, extra, changes, branch=branch
1547 1547 )
1548 1548
1549 1549 def __iter__(self):
1550 1550 d = self._repo.dirstate
1551 1551 for f in d:
1552 1552 if d[f] != b'r':
1553 1553 yield f
1554 1554
1555 1555 def __contains__(self, key):
1556 1556 return self._repo.dirstate[key] not in b"?r"
1557 1557
1558 1558 def hex(self):
1559 1559 return wdirhex
1560 1560
1561 1561 @propertycache
1562 1562 def _parents(self):
1563 1563 p = self._repo.dirstate.parents()
1564 1564 if p[1] == nullid:
1565 1565 p = p[:-1]
1566 1566 # use unfiltered repo to delay/avoid loading obsmarkers
1567 1567 unfi = self._repo.unfiltered()
1568 1568 return [
1569 1569 changectx(
1570 1570 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1571 1571 )
1572 1572 for n in p
1573 1573 ]
1574 1574
1575 1575 def setparents(self, p1node, p2node=nullid):
1576 1576 dirstate = self._repo.dirstate
1577 1577 with dirstate.parentchange():
1578 1578 copies = dirstate.setparents(p1node, p2node)
1579 1579 pctx = self._repo[p1node]
1580 1580 if copies:
1581 1581 # Adjust copy records; the dirstate cannot do it, as it
1582 1582 # requires access to the parents' manifests. Preserve them
1583 1583 # only for entries added to the first parent.
1584 1584 for f in copies:
1585 1585 if f not in pctx and copies[f] in pctx:
1586 1586 dirstate.copy(copies[f], f)
1587 1587 if p2node == nullid:
1588 1588 for f, s in sorted(dirstate.copies().items()):
1589 1589 if f not in pctx and s not in pctx:
1590 1590 dirstate.copy(None, f)
1591 1591
1592 1592 def _fileinfo(self, path):
1593 1593 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1594 1594 self._manifest
1595 1595 return super(workingctx, self)._fileinfo(path)
1596 1596
1597 1597 def _buildflagfunc(self):
1598 1598 # Create a fallback function for getting file flags when the
1599 1599 # filesystem doesn't support them
1600 1600
1601 1601 copiesget = self._repo.dirstate.copies().get
1602 1602 parents = self.parents()
1603 1603 if len(parents) < 2:
1604 1604 # when we have one parent, it's easy: copy from parent
1605 1605 man = parents[0].manifest()
1606 1606
1607 1607 def func(f):
1608 1608 f = copiesget(f, f)
1609 1609 return man.flags(f)
1610 1610
1611 1611 else:
1612 1612 # merges are tricky: we try to reconstruct the unstored
1613 1613 # result from the merge (issue1802)
1614 1614 p1, p2 = parents
1615 1615 pa = p1.ancestor(p2)
1616 1616 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1617 1617
1618 1618 def func(f):
1619 1619 f = copiesget(f, f) # may be wrong for merges with copies
1620 1620 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1621 1621 if fl1 == fl2:
1622 1622 return fl1
1623 1623 if fl1 == fla:
1624 1624 return fl2
1625 1625 if fl2 == fla:
1626 1626 return fl1
1627 1627 return b'' # punt for conflicts
1628 1628
1629 1629 return func
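# Truth table for the 3-way flag reconciliation above (derived from the
# branches of func(); fla is the ancestor's flag):
#
#   fl1   fl2   fla   result
#   'x'   'x'   any   'x'   (parents agree)
#   ''    'x'   ''    'x'   (only p2 changed it)
#   'x'   ''    'x'   ''    (only p2 changed it)
#   'l'   'x'   ''    ''    (both changed it: punt)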
1630 1630
1631 1631 @propertycache
1632 1632 def _flagfunc(self):
1633 1633 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1634 1634
1635 1635 def flags(self, path):
1636 1636 try:
1637 1637 return self._flagfunc(path)
1638 1638 except OSError:
1639 1639 return b''
1640 1640
1641 1641 def filectx(self, path, filelog=None):
1642 1642 """get a file context from the working directory"""
1643 1643 return workingfilectx(
1644 1644 self._repo, path, workingctx=self, filelog=filelog
1645 1645 )
1646 1646
1647 1647 def dirty(self, missing=False, merge=True, branch=True):
1648 1648 """check whether a working directory is modified"""
1649 1649 # check subrepos first
1650 1650 for s in sorted(self.substate):
1651 1651 if self.sub(s).dirty(missing=missing):
1652 1652 return True
1653 1653 # check current working dir
1654 1654 return (
1655 1655 (merge and self.p2())
1656 1656 or (branch and self.branch() != self.p1().branch())
1657 1657 or self.modified()
1658 1658 or self.added()
1659 1659 or self.removed()
1660 1660 or (missing and self.deleted())
1661 1661 )
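# Hedged usage sketch: callers typically guard destructive operations on
# the working copy, e.g. (assuming a `repo` object):
#
#   if repo[None].dirty(missing=True):
#       raise error.Abort(b'uncommitted changes')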
1662 1662
1663 1663 def add(self, list, prefix=b""):
1664 1664 with self._repo.wlock():
1665 1665 ui, ds = self._repo.ui, self._repo.dirstate
1666 1666 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1667 1667 rejected = []
1668 1668 lstat = self._repo.wvfs.lstat
1669 1669 for f in list:
1670 1670 # ds.pathto() returns an absolute file when this is invoked from
1671 1671 # the keyword extension. That gets flagged as non-portable on
1672 1672 # Windows, since it contains the drive letter and colon.
1673 1673 scmutil.checkportable(ui, os.path.join(prefix, f))
1674 1674 try:
1675 1675 st = lstat(f)
1676 1676 except OSError:
1677 1677 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1678 1678 rejected.append(f)
1679 1679 continue
1680 1680 limit = ui.configbytes(b'ui', b'large-file-limit')
1681 1681 if limit != 0 and st.st_size > limit:
1682 1682 ui.warn(
1683 1683 _(
1684 1684 b"%s: up to %d MB of RAM may be required "
1685 1685 b"to manage this file\n"
1686 1686 b"(use 'hg revert %s' to cancel the "
1687 1687 b"pending addition)\n"
1688 1688 )
1689 1689 % (f, 3 * st.st_size // 1000000, uipath(f))
1690 1690 )
1691 1691 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1692 1692 ui.warn(
1693 1693 _(
1694 1694 b"%s not added: only files and symlinks "
1695 1695 b"supported currently\n"
1696 1696 )
1697 1697 % uipath(f)
1698 1698 )
1699 1699 rejected.append(f)
1700 1700 elif ds[f] in b'amn':
1701 1701 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1702 1702 elif ds[f] == b'r':
1703 1703 ds.normallookup(f)
1704 1704 else:
1705 1705 ds.add(f)
1706 1706 return rejected
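# Worked example of the warning arithmetic above (config value assumed):
# with `ui.large-file-limit` set to 10000000 bytes, adding a 25000000-byte
# file warns that up to 3 * 25000000 // 1000000 == 75 MB of RAM may be
# required to manage it.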
1707 1707
1708 1708 def forget(self, files, prefix=b""):
1709 1709 with self._repo.wlock():
1710 1710 ds = self._repo.dirstate
1711 1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1712 1712 rejected = []
1713 1713 for f in files:
1714 1714 if f not in ds:
1715 1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1716 1716 rejected.append(f)
1717 1717 elif ds[f] != b'a':
1718 1718 ds.remove(f)
1719 1719 else:
1720 1720 ds.drop(f)
1721 1721 return rejected
1722 1722
1723 1723 def copy(self, source, dest):
1724 1724 try:
1725 1725 st = self._repo.wvfs.lstat(dest)
1726 1726 except OSError as err:
1727 1727 if err.errno != errno.ENOENT:
1728 1728 raise
1729 1729 self._repo.ui.warn(
1730 1730 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1731 1731 )
1732 1732 return
1733 1733 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1734 1734 self._repo.ui.warn(
1735 1735 _(b"copy failed: %s is not a file or a symbolic link\n")
1736 1736 % self._repo.dirstate.pathto(dest)
1737 1737 )
1738 1738 else:
1739 1739 with self._repo.wlock():
1740 1740 ds = self._repo.dirstate
1741 1741 if ds[dest] in b'?':
1742 1742 ds.add(dest)
1743 1743 elif ds[dest] in b'r':
1744 1744 ds.normallookup(dest)
1745 1745 ds.copy(source, dest)
1746 1746
1747 1747 def match(
1748 1748 self,
1749 1749 pats=None,
1750 1750 include=None,
1751 1751 exclude=None,
1752 1752 default=b'glob',
1753 1753 listsubrepos=False,
1754 1754 badfn=None,
1755 1755 cwd=None,
1756 1756 ):
1757 1757 r = self._repo
1758 1758 if not cwd:
1759 1759 cwd = r.getcwd()
1760 1760
1761 1761 # Only a case-insensitive filesystem needs magic to translate user input
1762 1762 # to the actual case in the filesystem.
1763 1763 icasefs = not util.fscasesensitive(r.root)
1764 1764 return matchmod.match(
1765 1765 r.root,
1766 1766 cwd,
1767 1767 pats,
1768 1768 include,
1769 1769 exclude,
1770 1770 default,
1771 1771 auditor=r.auditor,
1772 1772 ctx=self,
1773 1773 listsubrepos=listsubrepos,
1774 1774 badfn=badfn,
1775 1775 icasefs=icasefs,
1776 1776 )
1777 1777
1778 1778 def _filtersuspectsymlink(self, files):
1779 1779 if not files or self._repo.dirstate._checklink:
1780 1780 return files
1781 1781
1782 1782 # Symlink placeholders may get non-symlink-like contents
1783 1783 # via user error or dereferencing by NFS or Samba servers,
1784 1784 # so we filter out any placeholders that don't look like a
1785 1785 # symlink
1786 1786 sane = []
1787 1787 for f in files:
1788 1788 if self.flags(f) == b'l':
1789 1789 d = self[f].data()
1790 1790 if (
1791 1791 d == b''
1792 1792 or len(d) >= 1024
1793 1793 or b'\n' in d
1794 1794 or stringutil.binary(d)
1795 1795 ):
1796 1796 self._repo.ui.debug(
1797 1797 b'ignoring suspect symlink placeholder "%s"\n' % f
1798 1798 )
1799 1799 continue
1800 1800 sane.append(f)
1801 1801 return sane
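# Hedged illustration of the placeholder heuristic above: on a filesystem
# without symlink support, a tracked symlink is checked out as a plain
# file containing the link target, so a sane placeholder is short,
# single-line, non-binary text:
#
#   b'target/path'             # plausible placeholder, kept
#   b'#!/bin/sh\necho hi\n'    # contains b'\n', filtered out as suspect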
1802 1802
1803 1803 def _checklookup(self, files):
1804 1804 # check for any possibly clean files
1805 1805 if not files:
1806 1806 return [], [], []
1807 1807
1808 1808 modified = []
1809 1809 deleted = []
1810 1810 fixup = []
1811 1811 pctx = self._parents[0]
1812 1812 # do a full compare of any files that might have changed
1813 1813 for f in sorted(files):
1814 1814 try:
1815 1815 # This will return True for a file that got replaced by a
1816 1816 # directory in the interim, but fixing that is pretty hard.
1817 1817 if (
1818 1818 f not in pctx
1819 1819 or self.flags(f) != pctx.flags(f)
1820 1820 or pctx[f].cmp(self[f])
1821 1821 ):
1822 1822 modified.append(f)
1823 1823 else:
1824 1824 fixup.append(f)
1825 1825 except (IOError, OSError):
1826 1826 # Did a file become inaccessible in between? Mark it as deleted,
1827 1827 # matching dirstate behavior (issue5584).
1828 1828 # The dirstate has more complex behavior around whether a
1829 1829 # missing file matches a directory, etc, but we don't need to
1830 1830 # bother with that: if f has made it to this point, we're sure
1831 1831 # it's in the dirstate.
1832 1832 deleted.append(f)
1833 1833
1834 1834 return modified, deleted, fixup
1835 1835
1836 1836 def _poststatusfixup(self, status, fixup):
1837 1837 """update dirstate for files that are actually clean"""
1838 1838 poststatus = self._repo.postdsstatus()
1839 1839 if fixup or poststatus:
1840 1840 try:
1841 1841 oldid = self._repo.dirstate.identity()
1842 1842
1843 1843 # updating the dirstate is optional
1844 1844 # so we don't wait on the lock
1845 1845 # wlock can invalidate the dirstate, so cache normal _after_
1846 1846 # taking the lock
1847 1847 with self._repo.wlock(False):
1848 1848 if self._repo.dirstate.identity() == oldid:
1849 1849 if fixup:
1850 1850 normal = self._repo.dirstate.normal
1851 1851 for f in fixup:
1852 1852 normal(f)
1853 1853 # write changes out explicitly, because nesting
1854 1854 # wlock at runtime may prevent 'wlock.release()'
1855 1855 # after this block from doing so for subsequent
1856 1856 # changing files
1857 1857 tr = self._repo.currenttransaction()
1858 1858 self._repo.dirstate.write(tr)
1859 1859
1860 1860 if poststatus:
1861 1861 for ps in poststatus:
1862 1862 ps(self, status)
1863 1863 else:
1864 1864 # in this case, writing changes out breaks
1865 1865 # consistency, because .hg/dirstate was
1866 1866 # already changed simultaneously after last
1867 1867 # caching (see also issue5584 for detail)
1868 1868 self._repo.ui.debug(
1869 1869 b'skip updating dirstate: identity mismatch\n'
1870 1870 )
1871 1871 except error.LockError:
1872 1872 pass
1873 1873 finally:
1874 1874 # Even if the wlock couldn't be grabbed, clear out the list.
1875 1875 self._repo.clearpostdsstatus()
1876 1876
1877 1877 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1878 1878 '''Gets the status from the dirstate -- internal use only.'''
1879 1879 subrepos = []
1880 1880 if b'.hgsub' in self:
1881 1881 subrepos = sorted(self.substate)
1882 1882 cmp, s = self._repo.dirstate.status(
1883 1883 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1884 1884 )
1885 1885
1886 1886 # check for any possibly clean files
1887 1887 fixup = []
1888 1888 if cmp:
1889 1889 modified2, deleted2, fixup = self._checklookup(cmp)
1890 1890 s.modified.extend(modified2)
1891 1891 s.deleted.extend(deleted2)
1892 1892
1893 1893 if fixup and clean:
1894 1894 s.clean.extend(fixup)
1895 1895
1896 1896 self._poststatusfixup(s, fixup)
1897 1897
1898 1898 if match.always():
1899 1899 # cache for performance
1900 1900 if s.unknown or s.ignored or s.clean:
1901 1901 # "_status" is cached with list*=False in the normal route
1902 1902 self._status = scmutil.status(
1903 1903 s.modified, s.added, s.removed, s.deleted, [], [], []
1904 1904 )
1905 1905 else:
1906 1906 self._status = s
1907 1907
1908 1908 return s
1909 1909
1910 1910 @propertycache
1911 1911 def _copies(self):
1912 1912 p1copies = {}
1913 1913 p2copies = {}
1914 1914 parents = self._repo.dirstate.parents()
1915 1915 p1manifest = self._repo[parents[0]].manifest()
1916 1916 p2manifest = self._repo[parents[1]].manifest()
1917 1917 changedset = set(self.added()) | set(self.modified())
1918 1918 narrowmatch = self._repo.narrowmatch()
1919 1919 for dst, src in self._repo.dirstate.copies().items():
1920 1920 if dst not in changedset or not narrowmatch(dst):
1921 1921 continue
1922 1922 if src in p1manifest:
1923 1923 p1copies[dst] = src
1924 1924 elif src in p2manifest:
1925 1925 p2copies[dst] = src
1926 1926 return p1copies, p2copies
1927 1927
1928 1928 @propertycache
1929 1929 def _manifest(self):
1930 1930 """generate a manifest corresponding to the values in self._status
1931 1931
1932 1932 This reuses the file nodeids from the parent, but we use special node
1933 1933 identifiers for added and modified files. This is used by manifest
1934 1934 merging to see that files are different and by the update logic to avoid
1935 1935 deleting newly added files.
1936 1936 """
1937 1937 return self._buildstatusmanifest(self._status)
1938 1938
1939 1939 def _buildstatusmanifest(self, status):
1940 1940 """Builds a manifest that includes the given status results."""
1941 1941 parents = self.parents()
1942 1942
1943 1943 man = parents[0].manifest().copy()
1944 1944
1945 1945 ff = self._flagfunc
1946 1946 for i, l in (
1947 1947 (addednodeid, status.added),
1948 1948 (modifiednodeid, status.modified),
1949 1949 ):
1950 1950 for f in l:
1951 1951 man[f] = i
1952 1952 try:
1953 1953 man.setflag(f, ff(f))
1954 1954 except OSError:
1955 1955 pass
1956 1956
1957 1957 for f in status.deleted + status.removed:
1958 1958 if f in man:
1959 1959 del man[f]
1960 1960
1961 1961 return man
1962 1962
1963 1963 def _buildstatus(
1964 1964 self, other, s, match, listignored, listclean, listunknown
1965 1965 ):
1966 1966 """build a status with respect to another context
1967 1967
1968 1968 This includes logic for maintaining the fast path of status when
1969 1969 comparing the working directory against its parent, which is to skip
1970 1970 building a new manifest when self (the working directory) is compared
1971 1971 against its parent (repo['.']).
1972 1972 """
1973 1973 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1974 1974 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1975 1975 # might have accidentally ended up with the entire contents of the file
1976 1976 # they are supposed to be linking to.
1977 1977 s.modified[:] = self._filtersuspectsymlink(s.modified)
1978 1978 if other != self._repo[b'.']:
1979 1979 s = super(workingctx, self)._buildstatus(
1980 1980 other, s, match, listignored, listclean, listunknown
1981 1981 )
1982 1982 return s
1983 1983
1984 1984 def _matchstatus(self, other, match):
1985 1985 """override the match method with a filter for directory patterns
1986 1986
1987 1987 We use inheritance to customize the match.bad method only in cases of
1988 1988 workingctx since it belongs only to the working directory when
1989 1989 comparing against the parent changeset.
1990 1990
1991 1991 If we aren't comparing against the working directory's parent, then we
1992 1992 just use the default match object sent to us.
1993 1993 """
1994 1994 if other != self._repo[b'.']:
1995 1995
1996 1996 def bad(f, msg):
1997 1997 # 'f' may be a directory pattern from 'match.files()',
1998 1998 # so 'f not in other' is not enough
1999 1999 if f not in other and not other.hasdir(f):
2000 2000 self._repo.ui.warn(
2001 2001 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2002 2002 )
2003 2003
2004 2004 match.bad = bad
2005 2005 return match
2006 2006
2007 2007 def walk(self, match):
2008 2008 '''Generates matching file names.'''
2009 2009 return sorted(
2010 2010 self._repo.dirstate.walk(
2011 2011 self._repo.narrowmatch(match),
2012 2012 subrepos=sorted(self.substate),
2013 2013 unknown=True,
2014 2014 ignored=False,
2015 2015 )
2016 2016 )
2017 2017
2018 2018 def matches(self, match):
2019 2019 match = self._repo.narrowmatch(match)
2020 2020 ds = self._repo.dirstate
2021 2021 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2022 2022
2023 2023 def markcommitted(self, node):
2024 2024 with self._repo.dirstate.parentchange():
2025 2025 for f in self.modified() + self.added():
2026 2026 self._repo.dirstate.normal(f)
2027 2027 for f in self.removed():
2028 2028 self._repo.dirstate.drop(f)
2029 2029 self._repo.dirstate.setparents(node)
2030 2030 self._repo._quick_access_changeid_invalidate()
2031 2031
2032 2032 # write changes out explicitly, because nesting wlock at
2033 2033 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2034 2034 # from immediately doing so for subsequent changing files
2035 2035 self._repo.dirstate.write(self._repo.currenttransaction())
2036 2036
2037 2037 sparse.aftercommit(self._repo, node)
2038 2038
2039 2039 def mergestate(self, clean=False):
2040 2040 if clean:
2041 2041 return mergestatemod.mergestate.clean(self._repo)
2042 2042 return mergestatemod.mergestate.read(self._repo)
2043 2043
2044 2044
2045 2045 class committablefilectx(basefilectx):
2046 2046 """A committablefilectx provides common functionality for a file context
2047 2047 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
2048 2048
2049 2049 def __init__(self, repo, path, filelog=None, ctx=None):
2050 2050 self._repo = repo
2051 2051 self._path = path
2052 2052 self._changeid = None
2053 2053 self._filerev = self._filenode = None
2054 2054
2055 2055 if filelog is not None:
2056 2056 self._filelog = filelog
2057 2057 if ctx:
2058 2058 self._changectx = ctx
2059 2059
2060 2060 def __nonzero__(self):
2061 2061 return True
2062 2062
2063 2063 __bool__ = __nonzero__
2064 2064
2065 2065 def linkrev(self):
2066 2066 # linked to self._changectx no matter if file is modified or not
2067 2067 return self.rev()
2068 2068
2069 2069 def renamed(self):
2070 2070 path = self.copysource()
2071 2071 if not path:
2072 2072 return None
2073 2073 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2074 2074
2075 2075 def parents(self):
2076 2076 '''return parent filectxs, following copies if necessary'''
2077 2077
2078 2078 def filenode(ctx, path):
2079 2079 return ctx._manifest.get(path, nullid)
2080 2080
2081 2081 path = self._path
2082 2082 fl = self._filelog
2083 2083 pcl = self._changectx._parents
2084 2084 renamed = self.renamed()
2085 2085
2086 2086 if renamed:
2087 2087 pl = [renamed + (None,)]
2088 2088 else:
2089 2089 pl = [(path, filenode(pcl[0], path), fl)]
2090 2090
2091 2091 for pc in pcl[1:]:
2092 2092 pl.append((path, filenode(pc, path), fl))
2093 2093
2094 2094 return [
2095 2095 self._parentfilectx(p, fileid=n, filelog=l)
2096 2096 for p, n, l in pl
2097 2097 if n != nullid
2098 2098 ]
2099 2099
2100 2100 def children(self):
2101 2101 return []
2102 2102
2103 2103
2104 2104 class workingfilectx(committablefilectx):
2105 2105 """A workingfilectx object makes access to data related to a particular
2106 2106 file in the working directory convenient."""
2107 2107
2108 2108 def __init__(self, repo, path, filelog=None, workingctx=None):
2109 2109 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2110 2110
2111 2111 @propertycache
2112 2112 def _changectx(self):
2113 2113 return workingctx(self._repo)
2114 2114
2115 2115 def data(self):
2116 2116 return self._repo.wread(self._path)
2117 2117
2118 2118 def copysource(self):
2119 2119 return self._repo.dirstate.copied(self._path)
2120 2120
2121 2121 def size(self):
2122 2122 return self._repo.wvfs.lstat(self._path).st_size
2123 2123
2124 2124 def lstat(self):
2125 2125 return self._repo.wvfs.lstat(self._path)
2126 2126
2127 2127 def date(self):
2128 2128 t, tz = self._changectx.date()
2129 2129 try:
2130 2130 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2131 2131 except OSError as err:
2132 2132 if err.errno != errno.ENOENT:
2133 2133 raise
2134 2134 return (t, tz)
2135 2135
2136 2136 def exists(self):
2137 2137 return self._repo.wvfs.exists(self._path)
2138 2138
2139 2139 def lexists(self):
2140 2140 return self._repo.wvfs.lexists(self._path)
2141 2141
2142 2142 def audit(self):
2143 2143 return self._repo.wvfs.audit(self._path)
2144 2144
2145 2145 def cmp(self, fctx):
2146 2146 """compare with other file context
2147 2147
2148 2148 returns True if different than fctx.
2149 2149 """
2150 2150 # fctx should be a filectx (not a workingfilectx)
2151 2151 # invert comparison to reuse the same code path
2152 2152 return fctx.cmp(self)
2153 2153
2154 2154 def remove(self, ignoremissing=False):
2155 2155 """wraps unlink for a repo's working directory"""
2156 2156 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2157 2157 self._repo.wvfs.unlinkpath(
2158 2158 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2159 2159 )
2160 2160
2161 2161 def write(self, data, flags, backgroundclose=False, **kwargs):
2162 2162 """wraps repo.wwrite"""
2163 2163 return self._repo.wwrite(
2164 2164 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2165 2165 )
2166 2166
2167 2167 def markcopied(self, src):
2168 2168 """marks this file a copy of `src`"""
2169 2169 self._repo.dirstate.copy(src, self._path)
2170 2170
2171 2171 def clearunknown(self):
2172 2172 """Removes conflicting items in the working directory so that
2173 2173 ``write()`` can be called successfully.
2174 2174 """
2175 2175 wvfs = self._repo.wvfs
2176 2176 f = self._path
2177 2177 wvfs.audit(f)
2178 2178 if self._repo.ui.configbool(
2179 2179 b'experimental', b'merge.checkpathconflicts'
2180 2180 ):
2181 2181 # remove files under the directory, as they should already have
2182 2182 # been warned about and backed up
2183 2183 if wvfs.isdir(f) and not wvfs.islink(f):
2184 2184 wvfs.rmtree(f, forcibly=True)
2185 2185 for p in reversed(list(pathutil.finddirs(f))):
2186 2186 if wvfs.isfileorlink(p):
2187 2187 wvfs.unlink(p)
2188 2188 break
2189 2189 else:
2190 2190 # don't remove files if path conflicts are not processed
2191 2191 if wvfs.isdir(f) and not wvfs.islink(f):
2192 2192 wvfs.removedirs(f)
2193 2193
2194 2194 def setflags(self, l, x):
2195 2195 self._repo.wvfs.setflags(self._path, l, x)
2196 2196
2197 2197
2198 2198 class overlayworkingctx(committablectx):
2199 2199 """Wraps another mutable context with a write-back cache that can be
2200 2200 converted into a commit context.
2201 2201
2202 2202 self._cache[path] maps to a dict with keys: {
2203 2203 'exists': bool?
2204 2204 'date': date?
2205 2205 'data': str?
2206 2206 'flags': str?
2207 2207 'copied': str? (path or None)
2208 2208 }
2209 2209 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
2210 2210 is False, the file was deleted.
2211 2211 """
2212 2212
2213 2213 def __init__(self, repo):
2214 2214 super(overlayworkingctx, self).__init__(repo)
2215 2215 self.clean()
2216 2216
2217 2217 def setbase(self, wrappedctx):
2218 2218 self._wrappedctx = wrappedctx
2219 2219 self._parents = [wrappedctx]
2220 2220 # Drop old manifest cache as it is now out of date.
2221 2221 # This is necessary when, e.g., rebasing several nodes with one
2222 2222 # ``overlayworkingctx`` (e.g. with --collapse).
2223 2223 util.clearcachedproperty(self, b'_manifest')
2224 2224
2225 2225 def setparents(self, p1node, p2node=nullid):
2226 2226 assert p1node == self._wrappedctx.node()
2227 2227 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2228 2228
2229 2229 def data(self, path):
2230 2230 if self.isdirty(path):
2231 2231 if self._cache[path][b'exists']:
2232 2232 if self._cache[path][b'data'] is not None:
2233 2233 return self._cache[path][b'data']
2234 2234 else:
2235 2235 # Must fallback here, too, because we only set flags.
2236 2236 return self._wrappedctx[path].data()
2237 2237 else:
2238 2238 raise error.ProgrammingError(
2239 2239 b"No such file or directory: %s" % path
2240 2240 )
2241 2241 else:
2242 2242 return self._wrappedctx[path].data()
2243 2243
2244 2244 @propertycache
2245 2245 def _manifest(self):
2246 2246 parents = self.parents()
2247 2247 man = parents[0].manifest().copy()
2248 2248
2249 2249 flag = self._flagfunc
2250 2250 for path in self.added():
2251 2251 man[path] = addednodeid
2252 2252 man.setflag(path, flag(path))
2253 2253 for path in self.modified():
2254 2254 man[path] = modifiednodeid
2255 2255 man.setflag(path, flag(path))
2256 2256 for path in self.removed():
2257 2257 del man[path]
2258 2258 return man
2259 2259
2260 2260 @propertycache
2261 2261 def _flagfunc(self):
2262 2262 def f(path):
2263 2263 return self._cache[path][b'flags']
2264 2264
2265 2265 return f
2266 2266
2267 2267 def files(self):
2268 2268 return sorted(self.added() + self.modified() + self.removed())
2269 2269
2270 2270 def modified(self):
2271 2271 return [
2272 2272 f
2273 2273 for f in self._cache.keys()
2274 2274 if self._cache[f][b'exists'] and self._existsinparent(f)
2275 2275 ]
2276 2276
2277 2277 def added(self):
2278 2278 return [
2279 2279 f
2280 2280 for f in self._cache.keys()
2281 2281 if self._cache[f][b'exists'] and not self._existsinparent(f)
2282 2282 ]
2283 2283
2284 2284 def removed(self):
2285 2285 return [
2286 2286 f
2287 2287 for f in self._cache.keys()
2288 2288 if not self._cache[f][b'exists'] and self._existsinparent(f)
2289 2289 ]
2290 2290
2291 2291 def p1copies(self):
2292 2292 copies = {}
2293 2293 narrowmatch = self._repo.narrowmatch()
2294 2294 for f in self._cache.keys():
2295 2295 if not narrowmatch(f):
2296 2296 continue
2297 2297 copies.pop(f, None) # delete if it exists
2298 2298 source = self._cache[f][b'copied']
2299 2299 if source:
2300 2300 copies[f] = source
2301 2301 return copies
2302 2302
2303 2303 def p2copies(self):
2304 2304 copies = {}
2305 2305 narrowmatch = self._repo.narrowmatch()
2306 2306 for f in self._cache.keys():
2307 2307 if not narrowmatch(f):
2308 2308 continue
2309 2309 copies.pop(f, None) # delete if it exists
2310 2310 source = self._cache[f][b'copied']
2311 2311 if source:
2312 2312 copies[f] = source
2313 2313 return copies
2314 2314
2315 2315 def isinmemory(self):
2316 2316 return True
2317 2317
2318 2318 def filedate(self, path):
2319 2319 if self.isdirty(path):
2320 2320 return self._cache[path][b'date']
2321 2321 else:
2322 2322 return self._wrappedctx[path].date()
2323 2323
2324 2324 def markcopied(self, path, origin):
2325 2325 self._markdirty(
2326 2326 path,
2327 2327 exists=True,
2328 2328 date=self.filedate(path),
2329 2329 flags=self.flags(path),
2330 2330 copied=origin,
2331 2331 )
2332 2332
2333 2333 def copydata(self, path):
2334 2334 if self.isdirty(path):
2335 2335 return self._cache[path][b'copied']
2336 2336 else:
2337 2337 return None
2338 2338
2339 2339 def flags(self, path):
2340 2340 if self.isdirty(path):
2341 2341 if self._cache[path][b'exists']:
2342 2342 return self._cache[path][b'flags']
2343 2343 else:
2344 2344 raise error.ProgrammingError(
2345 2345 b"No such file or directory: %s" % path
2346 2346 )
2347 2347 else:
2348 2348 return self._wrappedctx[path].flags()
2349 2349
2350 2350 def __contains__(self, key):
2351 2351 if key in self._cache:
2352 2352 return self._cache[key][b'exists']
2353 2353 return key in self.p1()
2354 2354
2355 2355 def _existsinparent(self, path):
2356 2356 try:
2357 2357 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2358 2358 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2359 2359 # with an ``exists()`` function.
2360 2360 self._wrappedctx[path]
2361 2361 return True
2362 2362 except error.ManifestLookupError:
2363 2363 return False
2364 2364
2365 2365 def _auditconflicts(self, path):
2366 2366 """Replicates conflict checks done by wvfs.write().
2367 2367
2368 2368 Since we never write to the filesystem and never call `applyupdates` in
2369 2369 IMM, we'll never check that a path is actually writable -- e.g., because
2370 2370 it adds `a/foo`, but `a` is actually a file in the other commit.
2371 2371 """
2372 2372
2373 2373 def fail(path, component):
2374 2374 # p1() is the base and we're receiving "writes" for p2()'s
2375 2375 # files.
2376 2376 if b'l' in self.p1()[component].flags():
2377 2377 raise error.Abort(
2378 2378 b"error: %s conflicts with symlink %s "
2379 2379 b"in %d." % (path, component, self.p1().rev())
2380 2380 )
2381 2381 else:
2382 2382 raise error.Abort(
2383 2383 b"error: '%s' conflicts with file '%s' in "
2384 2384 b"%d." % (path, component, self.p1().rev())
2385 2385 )
2386 2386
2387 2387 # Test that each new directory to be created to write this path from p2
2388 2388 # is not a file in p1.
2389 2389 components = path.split(b'/')
2390 2390 for i in pycompat.xrange(len(components)):
2391 2391 component = b"/".join(components[0:i])
2392 2392 if component in self:
2393 2393 fail(path, component)
2394 2394
2395 2395 # Test the other direction -- that this path from p2 isn't a directory
2396 2396 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2397 2397 match = self.match([path], default=b'path')
2398 2398 mfiles = list(self.p1().manifest().walk(match))
2399 2399 if len(mfiles) > 0:
2400 2400 if len(mfiles) == 1 and mfiles[0] == path:
2401 2401 return
2402 2402 # omit the files which are deleted in current IMM wctx
2403 2403 mfiles = [m for m in mfiles if m in self]
2404 2404 if not mfiles:
2405 2405 return
2406 2406 raise error.Abort(
2407 2407 b"error: file '%s' cannot be written because "
2408 2408 b" '%s/' is a directory in %s (containing %d "
2409 2409 b"entries: %s)"
2410 2410 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2411 2411 )
2412 2412
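# An illustrative walk-through of the checks above, with hypothetical
# paths: if p1 tracks a file b'a', writing b'a/foo' into the overlay
# splits the path into components [b'a', b'foo']; the loop finds that
# component b'a' exists in this context and calls fail(b'a/foo', b'a'),
# aborting just as wvfs.write() would refuse to create a directory over
# a file on disk.
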
2413 2413 def write(self, path, data, flags=b'', **kwargs):
2414 2414 if data is None:
2415 2415 raise error.ProgrammingError(b"data must be non-None")
2416 2416 self._auditconflicts(path)
2417 2417 self._markdirty(
2418 2418 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2419 2419 )
2420 2420
2421 2421 def setflags(self, path, l, x):
2422 2422 flag = b''
2423 2423 if l:
2424 2424 flag = b'l'
2425 2425 elif x:
2426 2426 flag = b'x'
2427 2427 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2428 2428
2429 2429 def remove(self, path):
2430 2430 self._markdirty(path, exists=False)
2431 2431
2432 2432 def exists(self, path):
2433 2433 """exists behaves like `lexists`, but needs to follow symlinks and
2434 2434 return False if they are broken.
2435 2435 """
2436 2436 if self.isdirty(path):
2437 2437 # If this path exists and is a symlink, "follow" it by calling
2438 2438 # exists on the destination path.
2439 2439 if (
2440 2440 self._cache[path][b'exists']
2441 2441 and b'l' in self._cache[path][b'flags']
2442 2442 ):
2443 2443 return self.exists(self._cache[path][b'data'].strip())
2444 2444 else:
2445 2445 return self._cache[path][b'exists']
2446 2446
2447 2447 return self._existsinparent(path)
2448 2448
2449 2449 def lexists(self, path):
2450 2450 """lexists returns True if the path exists"""
2451 2451 if self.isdirty(path):
2452 2452 return self._cache[path][b'exists']
2453 2453
2454 2454 return self._existsinparent(path)
2455 2455
2456 2456 def size(self, path):
2457 2457 if self.isdirty(path):
2458 2458 if self._cache[path][b'exists']:
2459 2459 return len(self._cache[path][b'data'])
2460 2460 else:
2461 2461 raise error.ProgrammingError(
2462 2462 b"No such file or directory: %s" % path
2463 2463 )
2464 2464 return self._wrappedctx[path].size()
2465 2465
2466 2466 def tomemctx(
2467 2467 self,
2468 2468 text,
2469 2469 branch=None,
2470 2470 extra=None,
2471 2471 date=None,
2472 2472 parents=None,
2473 2473 user=None,
2474 2474 editor=None,
2475 2475 ):
2476 2476 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2477 2477 committed.
2478 2478
2479 2479 ``text`` is the commit message.
2480 2480 ``parents`` (optional) are rev numbers.
2481 2481 """
2482 2482 # Default parents to the wrapped context if not passed.
2483 2483 if parents is None:
2484 2484 parents = self.parents()
2485 2485 if len(parents) == 1:
2486 2486 parents = (parents[0], None)
2487 2487
2488 2488 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2489 2489 if parents[1] is None:
2490 2490 parents = (self._repo[parents[0]], None)
2491 2491 else:
2492 2492 parents = (self._repo[parents[0]], self._repo[parents[1]])
2493 2493
2494 2494 files = self.files()
2495 2495
2496 2496 def getfile(repo, memctx, path):
2497 2497 if self._cache[path][b'exists']:
2498 2498 return memfilectx(
2499 2499 repo,
2500 2500 memctx,
2501 2501 path,
2502 2502 self._cache[path][b'data'],
2503 2503 b'l' in self._cache[path][b'flags'],
2504 2504 b'x' in self._cache[path][b'flags'],
2505 2505 self._cache[path][b'copied'],
2506 2506 )
2507 2507 else:
2508 2508 # Returning None, but including the path in `files`, is
2509 2509 # necessary for memctx to register a deletion.
2510 2510 return None
2511 2511
2512 2512 if branch is None:
2513 2513 branch = self._wrappedctx.branch()
2514 2514
2515 2515 return memctx(
2516 2516 self._repo,
2517 2517 parents,
2518 2518 text,
2519 2519 files,
2520 2520 getfile,
2521 2521 date=date,
2522 2522 extra=extra,
2523 2523 user=user,
2524 2524 branch=branch,
2525 2525 editor=editor,
2526 2526 )
2527 2527
2528 2528 def tomemctx_for_amend(self, precursor):
2529 2529 extra = precursor.extra().copy()
2530 2530 extra[b'amend_source'] = precursor.hex()
2531 2531 return self.tomemctx(
2532 2532 text=precursor.description(),
2533 2533 branch=precursor.branch(),
2534 2534 extra=extra,
2535 2535 date=precursor.date(),
2536 2536 user=precursor.user(),
2537 2537 )
2538 2538
2539 2539 def isdirty(self, path):
2540 2540 return path in self._cache
2541 2541
2542 2542 def clean(self):
2543 2543 self._mergestate = None
2544 2544 self._cache = {}
2545 2545
2546 2546 def _compact(self):
2547 2547 """Removes keys from the cache that are actually clean, by comparing
2548 2548 them with the underlying context.
2549 2549
2550 2550 This can occur during the merge process, e.g. by passing --tool :local
2551 2551 to resolve a conflict.
2552 2552 """
2553 2553 keys = []
2554 2554 # This won't be perfect, but can help performance significantly when
2555 2555 # using things like remotefilelog.
2556 2556 scmutil.prefetchfiles(
2557 2557 self.repo(),
2558 2558 [
2559 2559 (
2560 2560 self.p1().rev(),
2561 2561 scmutil.matchfiles(self.repo(), self._cache.keys()),
2562 2562 )
2563 2563 ],
2564 2564 )
2565 2565
2566 2566 for path in self._cache.keys():
2567 2567 cache = self._cache[path]
2568 2568 try:
2569 2569 underlying = self._wrappedctx[path]
2570 2570 if (
2571 2571 underlying.data() == cache[b'data']
2572 2572 and underlying.flags() == cache[b'flags']
2573 2573 ):
2574 2574 keys.append(path)
2575 2575 except error.ManifestLookupError:
2576 2576 # Path not in the underlying manifest (created).
2577 2577 continue
2578 2578
2579 2579 for path in keys:
2580 2580 del self._cache[path]
2581 2581 return keys
2582 2582
2583 2583 def _markdirty(
2584 2584 self, path, exists, data=None, date=None, flags=b'', copied=None
2585 2585 ):
2586 2586 # data not provided, let's see if we already have some; if not, let's
2587 2587 # grab it from our underlying context, so that we always have data if
2588 2588 # the file is marked as existing.
2589 2589 if exists and data is None:
2590 2590 oldentry = self._cache.get(path) or {}
2591 2591 data = oldentry.get(b'data')
2592 2592 if data is None:
2593 2593 data = self._wrappedctx[path].data()
2594 2594
2595 2595 self._cache[path] = {
2596 2596 b'exists': exists,
2597 2597 b'data': data,
2598 2598 b'date': date,
2599 2599 b'flags': flags,
2600 2600 b'copied': copied,
2601 2601 }
2602 2602 util.clearcachedproperty(self, b'_manifest')
2603 2603
2604 2604 def filectx(self, path, filelog=None):
2605 2605 return overlayworkingfilectx(
2606 2606 self._repo, path, parent=self, filelog=filelog
2607 2607 )
2608 2608
2609 2609 def mergestate(self, clean=False):
2610 2610 if clean or self._mergestate is None:
2611 2611 self._mergestate = mergestatemod.memmergestate(self._repo)
2612 2612 return self._mergestate
2613 2613
2614 2614
2615 2615 class overlayworkingfilectx(committablefilectx):
2616 2616 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2617 2617 cache, which can be flushed through later by calling ``flush()``."""
2618 2618
2619 2619 def __init__(self, repo, path, filelog=None, parent=None):
2620 2620 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2621 2621 self._repo = repo
2622 2622 self._parent = parent
2623 2623 self._path = path
2624 2624
2625 2625 def cmp(self, fctx):
2626 2626 return self.data() != fctx.data()
2627 2627
2628 2628 def changectx(self):
2629 2629 return self._parent
2630 2630
2631 2631 def data(self):
2632 2632 return self._parent.data(self._path)
2633 2633
2634 2634 def date(self):
2635 2635 return self._parent.filedate(self._path)
2636 2636
2637 2637 def exists(self):
2638 2638 return self.lexists()
2639 2639
2640 2640 def lexists(self):
2641 2641 return self._parent.exists(self._path)
2642 2642
2643 2643 def copysource(self):
2644 2644 return self._parent.copydata(self._path)
2645 2645
2646 2646 def size(self):
2647 2647 return self._parent.size(self._path)
2648 2648
2649 2649 def markcopied(self, origin):
2650 2650 self._parent.markcopied(self._path, origin)
2651 2651
2652 2652 def audit(self):
2653 2653 pass
2654 2654
2655 2655 def flags(self):
2656 2656 return self._parent.flags(self._path)
2657 2657
2658 2658 def setflags(self, islink, isexec):
2659 2659 return self._parent.setflags(self._path, islink, isexec)
2660 2660
2661 2661 def write(self, data, flags, backgroundclose=False, **kwargs):
2662 2662 return self._parent.write(self._path, data, flags, **kwargs)
2663 2663
2664 2664 def remove(self, ignoremissing=False):
2665 2665 return self._parent.remove(self._path)
2666 2666
2667 2667 def clearunknown(self):
2668 2668 pass
2669 2669
2670 2670
2671 2671 class workingcommitctx(workingctx):
2672 2672 """A workingcommitctx object makes access to data related to
2673 2673 the revision being committed convenient.
2674 2674
2675 2675 This hides changes in the working directory, if they aren't
2676 2676 committed in this context.
2677 2677 """
2678 2678
2679 2679 def __init__(
2680 2680 self, repo, changes, text=b"", user=None, date=None, extra=None
2681 2681 ):
2682 2682 super(workingcommitctx, self).__init__(
2683 2683 repo, text, user, date, extra, changes
2684 2684 )
2685 2685
2686 2686 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2687 2687 """Return matched files only in ``self._status``
2688 2688
2689 2689 Uncommitted files appear "clean" via this context, even if
2690 2690 they aren't actually so in the working directory.
2691 2691 """
2692 2692 if clean:
2693 2693 clean = [f for f in self._manifest if f not in self._changedset]
2694 2694 else:
2695 2695 clean = []
2696 2696 return scmutil.status(
2697 2697 [f for f in self._status.modified if match(f)],
2698 2698 [f for f in self._status.added if match(f)],
2699 2699 [f for f in self._status.removed if match(f)],
2700 2700 [],
2701 2701 [],
2702 2702 [],
2703 2703 clean,
2704 2704 )
2705 2705
2706 2706 @propertycache
2707 2707 def _changedset(self):
2708 2708 """Return the set of files changed in this context"""
2709 2709 changed = set(self._status.modified)
2710 2710 changed.update(self._status.added)
2711 2711 changed.update(self._status.removed)
2712 2712 return changed
2713 2713
2714 2714
2715 2715 def makecachingfilectxfn(func):
2716 2716 """Create a filectxfn that caches based on the path.
2717 2717
2718 2718 We can't use util.cachefunc because it uses all arguments as the cache
2719 2719 key and this creates a cycle since the arguments include the repo and
2720 2720 memctx.
2721 2721 """
2722 2722 cache = {}
2723 2723
2724 2724 def getfilectx(repo, memctx, path):
2725 2725 if path not in cache:
2726 2726 cache[path] = func(repo, memctx, path)
2727 2727 return cache[path]
2728 2728
2729 2729 return getfilectx
2730 2730
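# A minimal usage sketch, assuming a hypothetical expensivefilectxfn
# with the usual (repo, memctx, path) signature:
#
#   cached = makecachingfilectxfn(expensivefilectxfn)
#   fctx1 = cached(repo, memctx, b'path/to/file')  # computed and cached
#   fctx2 = cached(repo, memctx, b'path/to/file')  # served from cache
#
# The cache is keyed on path only, so a wrapped function should not be
# shared across different memctx instances.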
2731 2731
2732 2732 def memfilefromctx(ctx):
2733 2733 """Given a context return a memfilectx for ctx[path]
2734 2734
2735 2735 This is a convenience method for building a memctx based on another
2736 2736 context.
2737 2737 """
2738 2738
2739 2739 def getfilectx(repo, memctx, path):
2740 2740 fctx = ctx[path]
2741 2741 copysource = fctx.copysource()
2742 2742 return memfilectx(
2743 2743 repo,
2744 2744 memctx,
2745 2745 path,
2746 2746 fctx.data(),
2747 2747 islink=fctx.islink(),
2748 2748 isexec=fctx.isexec(),
2749 2749 copysource=copysource,
2750 2750 )
2751 2751
2752 2752 return getfilectx
2753 2753
2754 2754
2755 2755 def memfilefrompatch(patchstore):
2756 2756 """Given a patch (e.g. patchstore object) return a memfilectx
2757 2757
2758 2758 This is a convenience method for building a memctx based on a patchstore.
2759 2759 """
2760 2760
2761 2761 def getfilectx(repo, memctx, path):
2762 2762 data, mode, copysource = patchstore.getfile(path)
2763 2763 if data is None:
2764 2764 return None
2765 2765 islink, isexec = mode
2766 2766 return memfilectx(
2767 2767 repo,
2768 2768 memctx,
2769 2769 path,
2770 2770 data,
2771 2771 islink=islink,
2772 2772 isexec=isexec,
2773 2773 copysource=copysource,
2774 2774 )
2775 2775
2776 2776 return getfilectx
2777 2777
2778 2778
2779 2779 class memctx(committablectx):
2780 2780 """Use memctx to perform in-memory commits via localrepo.commitctx().
2781 2781
2782 2782 Revision information is supplied at initialization time, while
2783 2783 related files data is made available through a callback
2784 2784 mechanism. 'repo' is the current localrepo, 'parents' is a
2785 2785 sequence of two parent revision identifiers (pass None for every
2786 2786 missing parent), 'text' is the commit message and 'files' lists
2787 2787 names of files touched by the revision (normalized and relative to
2788 2788 repository root).
2789 2789
2790 2790 filectxfn(repo, memctx, path) is a callable receiving the
2791 2791 repository, the current memctx object and the normalized path of
2792 2792 requested file, relative to repository root. It is fired by the
2793 2793 commit function for every file in 'files', but calls order is
2794 2794 undefined. If the file is available in the revision being
2795 2795 committed (updated or added), filectxfn returns a memfilectx
2796 2796 object. If the file was removed, filectxfn returns None for recent
2797 2797 Mercurial. Moved files are represented by marking the source file
2798 2798 removed and the new file added with copy information (see
2799 2799 memfilectx).
2800 2800
2801 2801 user receives the committer name and defaults to current
2802 2802 repository username, date is the commit date in any format
2803 2803 supported by dateutil.parsedate() and defaults to current date, extra
2804 2804 is a dictionary of metadata or is left empty.
2805 2805 """
2806 2806
2807 2807 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2808 2808 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2809 2809 # this field to determine what to do in filectxfn.
2810 2810 _returnnoneformissingfiles = True
2811 2811
2812 2812 def __init__(
2813 2813 self,
2814 2814 repo,
2815 2815 parents,
2816 2816 text,
2817 2817 files,
2818 2818 filectxfn,
2819 2819 user=None,
2820 2820 date=None,
2821 2821 extra=None,
2822 2822 branch=None,
2823 2823 editor=None,
2824 2824 ):
2825 2825 super(memctx, self).__init__(
2826 2826 repo, text, user, date, extra, branch=branch
2827 2827 )
2828 2828 self._rev = None
2829 2829 self._node = None
2830 2830 parents = [(p or nullid) for p in parents]
2831 2831 p1, p2 = parents
2832 2832 self._parents = [self._repo[p] for p in (p1, p2)]
2833 2833 files = sorted(set(files))
2834 2834 self._files = files
2835 2835 self.substate = {}
2836 2836
2837 2837 if isinstance(filectxfn, patch.filestore):
2838 2838 filectxfn = memfilefrompatch(filectxfn)
2839 2839 elif not callable(filectxfn):
2840 2840 # if store is not callable, wrap it in a function
2841 2841 filectxfn = memfilefromctx(filectxfn)
2842 2842
2843 2843 # memoizing increases performance for e.g. vcs convert scenarios.
2844 2844 self._filectxfn = makecachingfilectxfn(filectxfn)
2845 2845
2846 2846 if editor:
2847 2847 self._text = editor(self._repo, self, [])
2848 2848 self._repo.savecommitmessage(self._text)
2849 2849
2850 2850 def filectx(self, path, filelog=None):
2851 2851 """get a file context from the working directory
2852 2852
2853 2853 Returns None if file doesn't exist and should be removed."""
2854 2854 return self._filectxfn(self._repo, self, path)
2855 2855
2856 2856 def commit(self):
2857 2857 """commit context to the repo"""
2858 2858 return self._repo.commitctx(self)
2859 2859
2860 2860 @propertycache
2861 2861 def _manifest(self):
2862 2862 """generate a manifest based on the return values of filectxfn"""
2863 2863
2864 2864 # keep this simple for now; just worry about p1
2865 2865 pctx = self._parents[0]
2866 2866 man = pctx.manifest().copy()
2867 2867
2868 2868 for f in self._status.modified:
2869 2869 man[f] = modifiednodeid
2870 2870
2871 2871 for f in self._status.added:
2872 2872 man[f] = addednodeid
2873 2873
2874 2874 for f in self._status.removed:
2875 2875 if f in man:
2876 2876 del man[f]
2877 2877
2878 2878 return man
2879 2879
2880 2880 @propertycache
2881 2881 def _status(self):
2882 2882 """Calculate exact status from ``files`` specified at construction"""
2883 2883 man1 = self.p1().manifest()
2884 2884 p2 = self._parents[1]
2885 2885 # "1 < len(self._parents)" can't be used for checking
2886 2886 # existence of the 2nd parent, because "memctx._parents" is
2887 2887 # explicitly initialized by a list whose length is 2.
2888 if p2.node() != nullid:
2888 if p2.rev() != nullrev:
2889 2889 man2 = p2.manifest()
2890 2890 managing = lambda f: f in man1 or f in man2
2891 2891 else:
2892 2892 managing = lambda f: f in man1
2893 2893
2894 2894 modified, added, removed = [], [], []
2895 2895 for f in self._files:
2896 2896 if not managing(f):
2897 2897 added.append(f)
2898 2898 elif self[f]:
2899 2899 modified.append(f)
2900 2900 else:
2901 2901 removed.append(f)
2902 2902
2903 2903 return scmutil.status(modified, added, removed, [], [], [], [])
2904 2904
2905 2905 def parents(self):
2906 if self._parents[1].node() == nullid:
2906 if self._parents[1].rev() == nullrev:
2907 2907 return [self._parents[0]]
2908 2908 return self._parents
2909 2909
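# A minimal sketch of committing one in-memory file with memctx
# (hypothetical message, path and user):
#
#   def getfilectx(repo, memctx, path):
#       return memfilectx(repo, memctx, path, b'new contents\n')
#
#   ctx = memctx(repo, (repo[b'.'].node(), None), b'example commit',
#                [b'example.txt'], getfilectx, user=b'user@example.com')
#   newnode = ctx.commit()
#
# Returning None from getfilectx instead would record b'example.txt' as
# deleted, per the class docstring above.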
2910 2910
2911 2911 class memfilectx(committablefilectx):
2912 2912 """memfilectx represents an in-memory file to commit.
2913 2913
2914 2914 See memctx and committablefilectx for more details.
2915 2915 """
2916 2916
2917 2917 def __init__(
2918 2918 self,
2919 2919 repo,
2920 2920 changectx,
2921 2921 path,
2922 2922 data,
2923 2923 islink=False,
2924 2924 isexec=False,
2925 2925 copysource=None,
2926 2926 ):
2927 2927 """
2928 2928 path is the normalized file path relative to repository root.
2929 2929 data is the file content as a string.
2930 2930 islink is True if the file is a symbolic link.
2931 2931 isexec is True if the file is executable.
2932 2932 copied is the source file path if current file was copied in the
2933 2933 revision being committed, or None."""
2934 2934 super(memfilectx, self).__init__(repo, path, None, changectx)
2935 2935 self._data = data
2936 2936 if islink:
2937 2937 self._flags = b'l'
2938 2938 elif isexec:
2939 2939 self._flags = b'x'
2940 2940 else:
2941 2941 self._flags = b''
2942 2942 self._copysource = copysource
2943 2943
2944 2944 def copysource(self):
2945 2945 return self._copysource
2946 2946
2947 2947 def cmp(self, fctx):
2948 2948 return self.data() != fctx.data()
2949 2949
2950 2950 def data(self):
2951 2951 return self._data
2952 2952
2953 2953 def remove(self, ignoremissing=False):
2954 2954 """wraps unlink for a repo's working directory"""
2955 2955 # need to figure out what to do here
2956 2956 del self._changectx[self._path]
2957 2957
2958 2958 def write(self, data, flags, **kwargs):
2959 2959 """wraps repo.wwrite"""
2960 2960 self._data = data
2961 2961
2962 2962
2963 2963 class metadataonlyctx(committablectx):
2964 2964 """Like memctx but it's reusing the manifest of different commit.
2965 2965 Intended to be used by lightweight operations that are creating
2966 2966 metadata-only changes.
2967 2967
2968 2968 Revision information is supplied at initialization time. 'repo' is the
2969 2969 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2970 2970 'parents' is a sequence of two parent revision identifiers (pass None for
2971 2971 every missing parent), 'text' is the commit message.
2972 2972
2973 2973 user receives the committer name and defaults to current repository
2974 2974 username, date is the commit date in any format supported by
2975 2975 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2976 2976 metadata or is left empty.
2977 2977 """
2978 2978
2979 2979 def __init__(
2980 2980 self,
2981 2981 repo,
2982 2982 originalctx,
2983 2983 parents=None,
2984 2984 text=None,
2985 2985 user=None,
2986 2986 date=None,
2987 2987 extra=None,
2988 2988 editor=None,
2989 2989 ):
2990 2990 if text is None:
2991 2991 text = originalctx.description()
2992 2992 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2993 2993 self._rev = None
2994 2994 self._node = None
2995 2995 self._originalctx = originalctx
2996 2996 self._manifestnode = originalctx.manifestnode()
2997 2997 if parents is None:
2998 2998 parents = originalctx.parents()
2999 2999 else:
3000 3000 parents = [repo[p] for p in parents if p is not None]
3001 3001 parents = parents[:]
3002 3002 while len(parents) < 2:
3003 3003 parents.append(repo[nullrev])
3004 3004 p1, p2 = self._parents = parents
3005 3005
3006 3006 # sanity check to ensure that the reused manifest parents are
3007 3007 # manifests of our commit parents
3008 3008 mp1, mp2 = self.manifestctx().parents
3009 3009 if p1 != nullid and p1.manifestnode() != mp1:
3010 3010 raise RuntimeError(
3011 3011 r"can't reuse the manifest: its p1 "
3012 3012 r"doesn't match the new ctx p1"
3013 3013 )
3014 3014 if p2 != nullid and p2.manifestnode() != mp2:
3015 3015 raise RuntimeError(
3016 3016 r"can't reuse the manifest: "
3017 3017 r"its p2 doesn't match the new ctx p2"
3018 3018 )
3019 3019
3020 3020 self._files = originalctx.files()
3021 3021 self.substate = {}
3022 3022
3023 3023 if editor:
3024 3024 self._text = editor(self._repo, self, [])
3025 3025 self._repo.savecommitmessage(self._text)
3026 3026
3027 3027 def manifestnode(self):
3028 3028 return self._manifestnode
3029 3029
3030 3030 @property
3031 3031 def _manifestctx(self):
3032 3032 return self._repo.manifestlog[self._manifestnode]
3033 3033
3034 3034 def filectx(self, path, filelog=None):
3035 3035 return self._originalctx.filectx(path, filelog=filelog)
3036 3036
3037 3037 def commit(self):
3038 3038 """commit context to the repo"""
3039 3039 return self._repo.commitctx(self)
3040 3040
3041 3041 @property
3042 3042 def _manifest(self):
3043 3043 return self._originalctx.manifest()
3044 3044
3045 3045 @propertycache
3046 3046 def _status(self):
3047 3047 """Calculate exact status from ``files`` specified in the ``origctx``
3048 3048 and parents manifests.
3049 3049 """
3050 3050 man1 = self.p1().manifest()
3051 3051 p2 = self._parents[1]
3052 3052 # "1 < len(self._parents)" can't be used for checking
3053 3053 # existence of the 2nd parent, because "metadataonlyctx._parents" is
3054 3054 # explicitly initialized by a list whose length is 2.
3055 if p2.node() != nullid:
3055 if p2.rev() != nullrev:
3056 3056 man2 = p2.manifest()
3057 3057 managing = lambda f: f in man1 or f in man2
3058 3058 else:
3059 3059 managing = lambda f: f in man1
3060 3060
3061 3061 modified, added, removed = [], [], []
3062 3062 for f in self._files:
3063 3063 if not managing(f):
3064 3064 added.append(f)
3065 3065 elif f in self:
3066 3066 modified.append(f)
3067 3067 else:
3068 3068 removed.append(f)
3069 3069
3070 3070 return scmutil.status(modified, added, removed, [], [], [], [])
3071 3071
3072 3072
3073 3073 class arbitraryfilectx(object):
3074 3074 """Allows you to use filectx-like functions on a file in an arbitrary
3075 3075 location on disk, possibly not in the working directory.
3076 3076 """
3077 3077
3078 3078 def __init__(self, path, repo=None):
3079 3079 # Repo is optional because contrib/simplemerge uses this class.
3080 3080 self._repo = repo
3081 3081 self._path = path
3082 3082
3083 3083 def cmp(self, fctx):
3084 3084 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3085 3085 # path if either side is a symlink.
3086 3086 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3087 3087 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3088 3088 # Add a fast-path for merge if both sides are disk-backed.
3089 3089 # Note that filecmp uses the opposite return values (True if same)
3090 3090 # from our cmp functions (True if different).
3091 3091 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3092 3092 return self.data() != fctx.data()
3093 3093
3094 3094 def path(self):
3095 3095 return self._path
3096 3096
3097 3097 def flags(self):
3098 3098 return b''
3099 3099
3100 3100 def data(self):
3101 3101 return util.readfile(self._path)
3102 3102
3103 3103 def decodeddata(self):
3104 3104 with open(self._path, b"rb") as f:
3105 3105 return f.read()
3106 3106
3107 3107 def remove(self):
3108 3108 util.unlink(self._path)
3109 3109
3110 3110 def write(self, data, flags, **kwargs):
3111 3111 assert not flags
3112 3112 with open(self._path, b"wb") as f:
3113 3113 f.write(data)
@@ -1,1308 +1,1308 b''
1 1 # coding: utf8
2 2 # copies.py - copy detection for Mercurial
3 3 #
4 4 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import collections
12 12 import os
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 nullid,
17 17 nullrev,
18 18 )
19 19
20 20 from . import (
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 util,
26 26 )
27 27
28 28
29 29 from .utils import stringutil
30 30
31 31 from .revlogutils import (
32 32 flagutil,
33 33 sidedata as sidedatamod,
34 34 )
35 35
36 36 rustmod = policy.importrust("copy_tracing")
37 37
38 38
39 39 def _filter(src, dst, t):
40 40 """filters out invalid copies after chaining"""
41 41
42 42 # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
43 43 # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
44 44 # in the following table (not including trivial cases). For example, case 6
45 45 # is where a file existed in 'src' and remained under that name in 'mid' and
46 46 # then was renamed between 'mid' and 'dst'.
47 47 #
48 48 # case src mid dst result
49 49 # 1 x y - -
50 50 # 2 x y y x->y
51 51 # 3 x y x -
52 52 # 4 x y z x->z
53 53 # 5 - x y -
54 54 # 6 x x y x->y
55 55 #
56 56 # _chain() takes care of chaining the copies in 'a' and 'b', but it
57 57 # cannot tell the difference between cases 1 and 2, between 3 and 4, or
58 58 # between 5 and 6, so it includes all cases in its result.
59 59 # Cases 1, 3, and 5 are then removed by _filter().
60 60
61 61 for k, v in list(t.items()):
62 62 if k == v: # case 3
63 63 del t[k]
64 64 elif v not in src: # case 5
65 65 # remove copies from files that didn't exist
66 66 del t[k]
67 67 elif k not in dst: # case 1
68 68 # remove copies to files that were then removed
69 69 del t[k]
70 70
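# An illustrative run of _filter() with assumed inputs: given a source
# manifest containing b'x', a destination manifest containing b'y', and
# a chained mapping t = {b'y': b'x', b'z': b'x', b'q': b'w'}:
#   - b'q' is dropped (case 5: b'w' never existed in src)
#   - b'z' is dropped (case 1: b'z' is gone from dst)
#   - b'y': b'x' survives as a genuine copy (case 2 or 4)
# leaving t == {b'y': b'x'} after the in-place mutation.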
71 71
72 72 def _chain(prefix, suffix):
73 73 """chain two sets of copies 'prefix' and 'suffix'"""
74 74 result = prefix.copy()
75 75 for key, value in pycompat.iteritems(suffix):
76 76 result[key] = prefix.get(value, value)
77 77 return result
78 78
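# A worked example with assumed values: with prefix = {b'b': b'a'}
# (a renamed to b) and suffix = {b'c': b'b'} (b then renamed to c),
# _chain(prefix, suffix) == {b'b': b'a', b'c': b'a'}. The intermediate
# name b'b' is resolved back to b'a'; _filter() later discards the stale
# b'b' entry if b'b' no longer exists in the destination.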
79 79
80 80 def _tracefile(fctx, am, basemf):
81 81 """return file context that is the ancestor of fctx present in ancestor
82 82 manifest am
83 83
84 84 Note: we used to try and stop after a given limit; however, checking if that
85 85 limit was reached turned out to be very expensive. We are better off
86 86 disabling that feature."""
87 87
88 88 for f in fctx.ancestors():
89 89 path = f.path()
90 90 if am.get(path, None) == f.filenode():
91 91 return path
92 92 if basemf and basemf.get(path, None) == f.filenode():
93 93 return path
94 94
95 95
96 96 def _dirstatecopies(repo, match=None):
97 97 ds = repo.dirstate
98 98 c = ds.copies().copy()
99 99 for k in list(c):
100 100 if ds[k] not in b'anm' or (match and not match(k)):
101 101 del c[k]
102 102 return c
103 103
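# A usage sketch with assumed filenames: after `hg copy a b` and before
# committing, _dirstatecopies(repo) returns {b'b': b'a'}, since b'b' is
# in dirstate state 'a' (added) with a recorded copy source and thus
# passes the `ds[k] in b'anm'` filter above.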
104 104
105 105 def _computeforwardmissing(a, b, match=None):
106 106 """Computes which files are in b but not a.
107 107 This is its own function so extensions can easily wrap this call to see what
108 108 files _forwardcopies is about to process.
109 109 """
110 110 ma = a.manifest()
111 111 mb = b.manifest()
112 112 return mb.filesnotin(ma, match=match)
113 113
114 114
115 115 def usechangesetcentricalgo(repo):
116 116 """Checks if we should use changeset-centric copy algorithms"""
117 117 if repo.filecopiesmode == b'changeset-sidedata':
118 118 return True
119 119 readfrom = repo.ui.config(b'experimental', b'copies.read-from')
120 120 changesetsource = (b'changeset-only', b'compatibility')
121 121 return readfrom in changesetsource
122 122
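# An example hgrc snippet (illustrative) that makes this predicate
# return True by selecting a changeset-centric read source:
#
#   [experimental]
#   copies.read-from = changeset-only
#
# b'compatibility' selects it as well; other values fall back to
# filelog-based copy data unless the repo uses changeset-sidedata.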
123 123
124 124 def _committedforwardcopies(a, b, base, match):
125 125 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
126 126 # files might have to be traced back to the fctx parent of the last
127 127 # one-side-only changeset, but not further back than that
128 128 repo = a._repo
129 129
130 130 if usechangesetcentricalgo(repo):
131 131 return _changesetforwardcopies(a, b, match)
132 132
133 133 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
134 134 dbg = repo.ui.debug
135 135 if debug:
136 136 dbg(b'debug.copies: looking into rename from %s to %s\n' % (a, b))
137 137 am = a.manifest()
138 138 basemf = None if base is None else base.manifest()
139 139
140 140 # find where new files came from
141 141 # we currently don't try to find where old files went, too expensive
142 142 # this means we can miss a case like 'hg rm b; hg cp a b'
143 143 cm = {}
144 144
145 145 # Computing the forward missing is quite expensive on large manifests, since
146 146 # it compares the entire manifests. We can optimize it in the common use
147 147 # case of computing what copies are in a commit versus its parent (like
148 148 # during a rebase or histedit). Note, we exclude merge commits from this
149 149 # optimization, since the ctx.files() for a merge commit is not correct for
150 150 # this comparison.
151 151 forwardmissingmatch = match
152 if b.p1() == a and b.p2().node() == nullid:
152 if b.p1() == a and b.p2().rev() == nullrev:
153 153 filesmatcher = matchmod.exact(b.files())
154 154 forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
155 155 if repo.ui.configbool(b'devel', b'copy-tracing.trace-all-files'):
156 156 missing = list(b.walk(match))
157 157 # _computeforwardmissing(a, b, match=forwardmissingmatch)
158 158 if debug:
159 159 dbg(b'debug.copies: searching all files: %d\n' % len(missing))
160 160 else:
161 161 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
162 162 if debug:
163 163 dbg(
164 164 b'debug.copies: missing files to search: %d\n'
165 165 % len(missing)
166 166 )
167 167
168 168 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
169 169
170 170 for f in sorted(missing):
171 171 if debug:
172 172 dbg(b'debug.copies: tracing file: %s\n' % f)
173 173 fctx = b[f]
174 174 fctx._ancestrycontext = ancestrycontext
175 175
176 176 if debug:
177 177 start = util.timer()
178 178 opath = _tracefile(fctx, am, basemf)
179 179 if opath:
180 180 if debug:
181 181 dbg(b'debug.copies: rename of: %s\n' % opath)
182 182 cm[f] = opath
183 183 if debug:
184 184 dbg(
185 185 b'debug.copies: time: %f seconds\n'
186 186 % (util.timer() - start)
187 187 )
188 188 return cm
189 189
190 190
191 191 def _revinfo_getter(repo, match):
192 192 """returns a function that returns the following data given a <rev>"
193 193
194 194 * p1: revision number of first parent
195 195 * p2: revision number of first parent
196 196 * changes: a ChangingFiles object
197 197 """
198 198 cl = repo.changelog
199 199 parents = cl.parentrevs
200 200 flags = cl.flags
201 201
202 202 HASCOPIESINFO = flagutil.REVIDX_HASCOPIESINFO
203 203
204 204 changelogrevision = cl.changelogrevision
205 205
206 206 if rustmod is not None:
207 207
208 208 def revinfo(rev):
209 209 p1, p2 = parents(rev)
210 210 if flags(rev) & HASCOPIESINFO:
211 211 raw = changelogrevision(rev)._sidedata.get(sidedatamod.SD_FILES)
212 212 else:
213 213 raw = None
214 214 return (p1, p2, raw)
215 215
216 216 else:
217 217
218 218 def revinfo(rev):
219 219 p1, p2 = parents(rev)
220 220 if flags(rev) & HASCOPIESINFO:
221 221 changes = changelogrevision(rev).changes
222 222 else:
223 223 changes = None
224 224 return (p1, p2, changes)
225 225
226 226 return revinfo
227 227
228 228
229 229 def cached_is_ancestor(is_ancestor):
230 230 """return a cached version of is_ancestor"""
231 231 cache = {}
232 232
233 233 def _is_ancestor(anc, desc):
234 234 if anc > desc:
235 235 return False
236 236 elif anc == desc:
237 237 return True
238 238 key = (anc, desc)
239 239 ret = cache.get(key)
240 240 if ret is None:
241 241 ret = cache[key] = is_ancestor(anc, desc)
242 242 return ret
243 243
244 244 return _is_ancestor
245 245
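# A minimal usage sketch. The early `anc > desc` test is safe because a
# revision number is always greater than those of its ancestors:
#
#   isanc = cached_is_ancestor(repo.changelog.isancestorrev)
#   isanc(2, 10)  # computed once
#   isanc(2, 10)  # answered from the cache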
246 246
247 247 def _changesetforwardcopies(a, b, match):
248 248 if a.rev() in (nullrev, b.rev()):
249 249 return {}
250 250
251 251 repo = a.repo().unfiltered()
252 252 children = {}
253 253
254 254 cl = repo.changelog
255 255 isancestor = cl.isancestorrev
256 256
257 257 # To track a rename from "A" to "B", we need to gather all parent → children
258 258 # edges that are contained in `::B` but not in `::A`.
259 259 #
260 260 #
261 261 # To do so, we need to gather all revisions exclusive¹ to "B" (ie¹: `::b -
262 262 # ::a`) and also all the "root points", ie the parents of the exclusive set
263 263 # that belong to ::a. These are exactly all the revisions needed to express
264 264 # the parent → children edges we need to combine.
265 265 #
266 266 # [1] actually, we need to gather all the edges within `(::a)::b`, ie:
267 267 # excluding paths that lead to roots that are not ancestors of `a`. We
268 268 # keep this out of the explanation because it is hard enough without this special case.
269 269
270 270 parents = cl._uncheckedparentrevs
271 271 graph_roots = (nullrev, nullrev)
272 272
273 273 ancestors = cl.ancestors([a.rev()], inclusive=True)
274 274 revs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
275 275 roots = set()
276 276 has_graph_roots = False
277 277 multi_thread = repo.ui.configbool(b'devel', b'copy-tracing.multi-thread')
278 278
279 279 # iterate over `only(B, A)`
280 280 for r in revs:
281 281 ps = parents(r)
282 282 if ps == graph_roots:
283 283 has_graph_roots = True
284 284 else:
285 285 p1, p2 = ps
286 286
287 287 # find all the "root points" (see larger comment above)
288 288 if p1 != nullrev and p1 in ancestors:
289 289 roots.add(p1)
290 290 if p2 != nullrev and p2 in ancestors:
291 291 roots.add(p2)
292 292 if not roots:
293 293 # no common revision to track copies from
294 294 return {}
295 295 if has_graph_roots:
296 296 # this deals with the special case mentioned in the [1] footnote. We
297 297 # must filter out revisions that lead to non-common graph roots.
298 298 roots = list(roots)
299 299 m = min(roots)
300 300 h = [b.rev()]
301 301 roots_to_head = cl.reachableroots(m, h, roots, includepath=True)
302 302 roots_to_head = set(roots_to_head)
303 303 revs = [r for r in revs if r in roots_to_head]
304 304
305 305 if repo.filecopiesmode == b'changeset-sidedata':
306 306 # When using side-data, we will process the edges "from" the children.
307 307 # We iterate over the children, gathering previously collected data for
308 308 # the parents. To know when the parents' data is no longer necessary, we
309 309 # keep a counter of how many children each revision has.
310 310 #
311 311 # An interesting property of `children_count` is that it only contains
312 312 # revisions that will be relevant for an edge of the graph. So if a
313 313 # child has a parent not in `children_count`, that edge should not be
314 314 # processed.
315 315 children_count = dict((r, 0) for r in roots)
316 316 for r in revs:
317 317 for p in cl.parentrevs(r):
318 318 if p == nullrev:
319 319 continue
320 320 children_count[r] = 0
321 321 if p in children_count:
322 322 children_count[p] += 1
323 323 revinfo = _revinfo_getter(repo, match)
324 324 return _combine_changeset_copies(
325 325 revs,
326 326 children_count,
327 327 b.rev(),
328 328 revinfo,
329 329 match,
330 330 isancestor,
331 331 multi_thread,
332 332 )
333 333 else:
334 334 # When not using side-data, we will process the edges "from" the parent,
335 335 # so we need a full mapping of the parent -> children relation.
336 336 children = dict((r, []) for r in roots)
337 337 for r in revs:
338 338 for p in cl.parentrevs(r):
339 339 if p == nullrev:
340 340 continue
341 341 children[r] = []
342 342 if p in children:
343 343 children[p].append(r)
344 344 x = revs.pop()
345 345 assert x == b.rev()
346 346 revs.extend(roots)
347 347 revs.sort()
348 348
349 349 revinfo = _revinfo_getter_extra(repo)
350 350 return _combine_changeset_copies_extra(
351 351 revs, children, b.rev(), revinfo, match, isancestor
352 352 )
353 353
354 354
355 355 def _combine_changeset_copies(
356 356 revs, children_count, targetrev, revinfo, match, isancestor, multi_thread
357 357 ):
358 358 """combine the copies information for each item of iterrevs
359 359
360 360 revs: sorted iterable of revision to visit
361 361 children_count: a {parent: <number-of-relevant-children>} mapping.
362 362 targetrev: the final copies destination revision (not in iterrevs)
363 363 revinfo(rev): a function that return (p1, p2, p1copies, p2copies, removed)
364 364 match: a matcher
365 365
366 366 It returns the aggregated copies information for `targetrev`.
367 367 """
368 368
369 369 alwaysmatch = match.always()
370 370
371 371 if rustmod is not None:
372 372 final_copies = rustmod.combine_changeset_copies(
373 373 list(revs), children_count, targetrev, revinfo, multi_thread
374 374 )
375 375 else:
376 376 isancestor = cached_is_ancestor(isancestor)
377 377
378 378 all_copies = {}
379 379 # iterate over all the "children" side of copy tracing "edge"
380 380 for current_rev in revs:
381 381 p1, p2, changes = revinfo(current_rev)
382 382 current_copies = None
383 383 # iterate over all parents to chain the existing data with the
384 384 # data from the parent → child edge.
385 385 for parent, parent_rev in ((1, p1), (2, p2)):
386 386 if parent_rev == nullrev:
387 387 continue
388 388 remaining_children = children_count.get(parent_rev)
389 389 if remaining_children is None:
390 390 continue
391 391 remaining_children -= 1
392 392 children_count[parent_rev] = remaining_children
393 393 if remaining_children:
394 394 copies = all_copies.get(parent_rev, None)
395 395 else:
396 396 copies = all_copies.pop(parent_rev, None)
397 397
398 398 if copies is None:
399 399 # this is a root
400 400 newcopies = copies = {}
401 401 elif remaining_children:
402 402 newcopies = copies.copy()
403 403 else:
404 404 newcopies = copies
405 405 # chain the data in the edge with the existing data
406 406 if changes is not None:
407 407 childcopies = {}
408 408 if parent == 1:
409 409 childcopies = changes.copied_from_p1
410 410 elif parent == 2:
411 411 childcopies = changes.copied_from_p2
412 412
413 413 if childcopies:
414 414 newcopies = copies.copy()
415 415 for dest, source in pycompat.iteritems(childcopies):
416 416 prev = copies.get(source)
417 417 if prev is not None and prev[1] is not None:
418 418 source = prev[1]
419 419 newcopies[dest] = (current_rev, source)
420 420 assert newcopies is not copies
421 421 if changes.removed:
422 422 for f in changes.removed:
423 423 if f in newcopies:
424 424 if newcopies is copies:
425 425 # copy on write to avoid affecting potential other
426 426 # branches. When there are no other branches, this
427 427 # could be avoided.
428 428 newcopies = copies.copy()
429 429 newcopies[f] = (current_rev, None)
430 430 # check potential need to combine the data from another parent (for
431 431 # that child). See comment below for details.
432 432 if current_copies is None:
433 433 current_copies = newcopies
434 434 else:
435 435 # we are the second parent to work on c, we need to merge our
436 436 # work with the other.
437 437 #
438 438 # In case of conflict, parent 1 takes precedence over parent 2.
439 439 # This is an arbitrary choice made anew when implementing
440 440 # changeset-based copies. It was made without regard to
441 441 # potential filelog-related behavior.
442 442 assert parent == 2
443 443 current_copies = _merge_copies_dict(
444 444 newcopies,
445 445 current_copies,
446 446 isancestor,
447 447 changes,
448 448 current_rev,
449 449 )
450 450 all_copies[current_rev] = current_copies
451 451
452 452 # filter out internal details and return a {dest: source} mapping
453 453 final_copies = {}
454 454 for dest, (tt, source) in all_copies[targetrev].items():
455 455 if source is not None:
456 456 final_copies[dest] = source
457 457 if not alwaysmatch:
458 458 for filename in list(final_copies.keys()):
459 459 if not match(filename):
460 460 del final_copies[filename]
461 461 return final_copies
462 462
463 463
464 464 # constant to decide which side to pick with _merge_copies_dict
465 465 PICK_MINOR = 0
466 466 PICK_MAJOR = 1
467 467 PICK_EITHER = 2
468 468
469 469
470 470 def _merge_copies_dict(minor, major, isancestor, changes, current_merge):
471 471 """merge two copies-mapping together, minor and major
472 472
473 473 In case of conflict, value from "major" will be picked.
474 474
475 475 - `isancestor(low_rev, high_rev)`: callable returning True if `low_rev` is an
476 476 ancestor of `high_rev`,
477 477 
478 478 - `ismerged(path)`: callable returning True if `path` has been merged in the
479 479 current revision,
480 480
481 481 return the resulting dict (in practice, the "minor" object, updated)
482 482 """
483 483 for dest, value in major.items():
484 484 other = minor.get(dest)
485 485 if other is None:
486 486 minor[dest] = value
487 487 else:
488 488 pick, overwrite = _compare_values(
489 489 changes, isancestor, dest, other, value
490 490 )
491 491 if overwrite:
492 492 if pick == PICK_MAJOR:
493 493 minor[dest] = (current_merge, value[1])
494 494 else:
495 495 minor[dest] = (current_merge, other[1])
496 496 elif pick == PICK_MAJOR:
497 497 minor[dest] = value
498 498 return minor
499 499
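# A worked example with hypothetical revision numbers: with
# minor = {b'f': (10, b'old')} and major = {b'f': (12, b'new')}, where
# isancestor(10, 12) is True and changes is None, _compare_values()
# returns (PICK_MAJOR, False), so the merged result is
# {b'f': (12, b'new')}: the descendant "major" entry simply wins.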
500 500
501 501 def _compare_values(changes, isancestor, dest, minor, major):
502 502 """compare two value within a _merge_copies_dict loop iteration
503 503
504 504 return (pick, overwrite).
505 505
506 506 - pick is one of PICK_MINOR, PICK_MAJOR or PICK_EITHER
507 507 - overwrite is True if pick is the result of resolving an ambiguity.
508 508 """
509 509 major_tt, major_value = major
510 510 minor_tt, minor_value = minor
511 511
512 512 if major_tt == minor_tt:
513 513 # if it comes from the same revision it must be the same value
514 514 assert major_value == minor_value
515 515 return PICK_EITHER, False
516 516 elif (
517 517 changes is not None
518 518 and minor_value is not None
519 519 and major_value is None
520 520 and dest in changes.salvaged
521 521 ):
522 522 # In this case, a deletion was reverted; the "alive" value overwrites
523 523 # the deleted one.
524 524 return PICK_MINOR, True
525 525 elif (
526 526 changes is not None
527 527 and major_value is not None
528 528 and minor_value is None
529 529 and dest in changes.salvaged
530 530 ):
531 531 # In this case, a deletion was reverted; the "alive" value overwrites
532 532 # the deleted one.
533 533 return PICK_MAJOR, True
534 534 elif isancestor(minor_tt, major_tt):
535 535 if changes is not None and dest in changes.merged:
536 536 # change to dest happened on the branch without copy-source change,
537 537 # so both sources are valid and "major" wins.
538 538 return PICK_MAJOR, True
539 539 else:
540 540 return PICK_MAJOR, False
541 541 elif isancestor(major_tt, minor_tt):
542 542 if changes is not None and dest in changes.merged:
543 543 # change to dest happened on the branch without copy-source change,
544 544 # so both sources are valid and "major" wins.
545 545 return PICK_MAJOR, True
546 546 else:
547 547 return PICK_MINOR, False
548 548 elif minor_value is None:
549 549 # in case of conflict, the "alive" side wins.
550 550 return PICK_MAJOR, True
551 551 elif major_value is None:
552 552 # in case of conflict, the "alive" side wins.
553 553 return PICK_MINOR, True
554 554 else:
555 555 # in case of conflict where both side are alive, major wins.
556 556 return PICK_MAJOR, True
557 557
558 558
559 559 def _revinfo_getter_extra(repo):
560 560 """return a function that return multiple data given a <rev>"i
561 561
562 562 * p1: revision number of first parent
563 563 * p2: revision number of first parent
564 564 * p1copies: mapping of copies from p1
565 565 * p2copies: mapping of copies from p2
566 566 * removed: a list of removed files
567 567 * ismerged: a callback to know if file was merged in that revision
568 568 """
569 569 cl = repo.changelog
570 570 parents = cl.parentrevs
571 571
572 572 def get_ismerged(rev):
573 573 ctx = repo[rev]
574 574
575 575 def ismerged(path):
576 576 if path not in ctx.files():
577 577 return False
578 578 fctx = ctx[path]
579 579 parents = fctx._filelog.parents(fctx._filenode)
580 580 nb_parents = 0
581 581 for n in parents:
582 582 if n != nullid:
583 583 nb_parents += 1
584 584 return nb_parents >= 2
585 585
586 586 return ismerged
587 587
588 588 def revinfo(rev):
589 589 p1, p2 = parents(rev)
590 590 ctx = repo[rev]
591 591 p1copies, p2copies = ctx._copies
592 592 removed = ctx.filesremoved()
593 593 return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
594 594
595 595 return revinfo
596 596
597 597
598 598 def _combine_changeset_copies_extra(
599 599 revs, children, targetrev, revinfo, match, isancestor
600 600 ):
601 601 """version of `_combine_changeset_copies` that works with the Google
602 602 specific "extra" based storage for copy information"""
603 603 all_copies = {}
604 604 alwaysmatch = match.always()
605 605 for r in revs:
606 606 copies = all_copies.pop(r, None)
607 607 if copies is None:
608 608 # this is a root
609 609 copies = {}
610 610 for i, c in enumerate(children[r]):
611 611 p1, p2, p1copies, p2copies, removed, ismerged = revinfo(c)
612 612 if r == p1:
613 613 parent = 1
614 614 childcopies = p1copies
615 615 else:
616 616 assert r == p2
617 617 parent = 2
618 618 childcopies = p2copies
619 619 if not alwaysmatch:
620 620 childcopies = {
621 621 dst: src for dst, src in childcopies.items() if match(dst)
622 622 }
623 623 newcopies = copies
624 624 if childcopies:
625 625 newcopies = copies.copy()
626 626 for dest, source in pycompat.iteritems(childcopies):
627 627 prev = copies.get(source)
628 628 if prev is not None and prev[1] is not None:
629 629 source = prev[1]
630 630 newcopies[dest] = (c, source)
631 631 assert newcopies is not copies
632 632 for f in removed:
633 633 if f in newcopies:
634 634 if newcopies is copies:
635 635 # copy on write to avoid affecting potential other
636 636 # branches. When there are no other branches, this
637 637 # could be avoided.
638 638 newcopies = copies.copy()
639 639 newcopies[f] = (c, None)
640 640 othercopies = all_copies.get(c)
641 641 if othercopies is None:
642 642 all_copies[c] = newcopies
643 643 else:
644 644 # we are the second parent to work on c, we need to merge our
645 645 # work with the other.
646 646 #
647 647 # In case of conflict, parent 1 takes precedence over parent 2.
648 648 # This is an arbitrary choice made anew when implementing
649 649 # changeset-based copies. It was made without regard to
650 650 # potential filelog-related behavior.
651 651 if parent == 1:
652 652 _merge_copies_dict_extra(
653 653 othercopies, newcopies, isancestor, ismerged
654 654 )
655 655 else:
656 656 _merge_copies_dict_extra(
657 657 newcopies, othercopies, isancestor, ismerged
658 658 )
659 659 all_copies[c] = newcopies
660 660
661 661 final_copies = {}
662 662 for dest, (tt, source) in all_copies[targetrev].items():
663 663 if source is not None:
664 664 final_copies[dest] = source
665 665 return final_copies
666 666
667 667
668 668 def _merge_copies_dict_extra(minor, major, isancestor, ismerged):
669 669 """version of `_merge_copies_dict` that works with the Google
670 670 specific "extra" based storage for copy information"""
671 671 for dest, value in major.items():
672 672 other = minor.get(dest)
673 673 if other is None:
674 674 minor[dest] = value
675 675 else:
676 676 new_tt = value[0]
677 677 other_tt = other[0]
678 678 if value[1] == other[1]:
679 679 continue
680 680 # content from "major" wins, unless it is older
681 681 # than the branch point or there is a merge
682 682 if (
683 683 new_tt == other_tt
684 684 or not isancestor(new_tt, other_tt)
685 685 or ismerged(dest)
686 686 ):
687 687 minor[dest] = value
688 688
689 689
690 690 def _forwardcopies(a, b, base=None, match=None):
691 691 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
692 692
693 693 if base is None:
694 694 base = a
695 695 match = a.repo().narrowmatch(match)
696 696 # check for working copy
697 697 if b.rev() is None:
698 698 cm = _committedforwardcopies(a, b.p1(), base, match)
699 699 # combine copies from dirstate if necessary
700 700 copies = _chain(cm, _dirstatecopies(b._repo, match))
701 701 else:
702 702 copies = _committedforwardcopies(a, b, base, match)
703 703 return copies
704 704
705 705
706 706 def _backwardrenames(a, b, match):
707 707 """find renames from a to b"""
708 708 if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
709 709 return {}
710 710
711 711 # We don't want to pass in "match" here, since that would filter
712 712 # the destination by it. Since we're reversing the copies, we want
713 713 # to filter the source instead.
714 714 copies = _forwardcopies(b, a)
715 715 return _reverse_renames(copies, a, match)
716 716
717 717
718 718 def _reverse_renames(copies, dst, match):
719 719 """given copies to context 'dst', finds renames from that context"""
720 720 # Even though we're not taking copies into account, 1:n rename situations
721 721 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
722 722 # arbitrarily pick one of the renames.
723 723 r = {}
724 724 for k, v in sorted(pycompat.iteritems(copies)):
725 725 if match and not match(v):
726 726 continue
727 727 # remove copies
728 728 if v in dst:
729 729 continue
730 730 r[v] = k
731 731 return r
732 732
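# An illustrative reversal with assumed filenames: given
# copies = {b'b': b'a'} toward context dst, if b'a' still exists in dst
# it was a copy rather than a rename and the entry is skipped; if b'a'
# is gone, the mapping is flipped and _reverse_renames(copies, dst,
# None) returns {b'a': b'b'}.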
733 733
734 734 def pathcopies(x, y, match=None):
735 735 """find {dst@y: src@x} copy mapping for directed compare"""
736 736 repo = x._repo
737 737 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
738 738 if debug:
739 739 repo.ui.debug(
740 740 b'debug.copies: searching copies from %s to %s\n' % (x, y)
741 741 )
742 742 if x == y or not x or not y:
743 743 return {}
744 744 if y.rev() is None and x == y.p1():
745 745 if debug:
746 746 repo.ui.debug(b'debug.copies: search mode: dirstate\n')
747 747 # short-circuit to avoid issues with merge states
748 748 return _dirstatecopies(repo, match)
749 749 a = y.ancestor(x)
750 750 if a == x:
751 751 if debug:
752 752 repo.ui.debug(b'debug.copies: search mode: forward\n')
753 753 copies = _forwardcopies(x, y, match=match)
754 754 elif a == y:
755 755 if debug:
756 756 repo.ui.debug(b'debug.copies: search mode: backward\n')
757 757 copies = _backwardrenames(x, y, match=match)
758 758 else:
759 759 if debug:
760 760 repo.ui.debug(b'debug.copies: search mode: combined\n')
761 761 base = None
762 762 if a.rev() != nullrev:
763 763 base = x
764 764 x_copies = _forwardcopies(a, x)
765 765 y_copies = _forwardcopies(a, y, base, match=match)
766 766 same_keys = set(x_copies) & set(y_copies)
767 767 for k in same_keys:
768 768 if x_copies.get(k) == y_copies.get(k):
769 769 del x_copies[k]
770 770 del y_copies[k]
771 771 x_backward_renames = _reverse_renames(x_copies, x, match)
772 772 copies = _chain(
773 773 x_backward_renames,
774 774 y_copies,
775 775 )
776 776 _filter(x, y, copies)
777 777 return copies
778 778
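# A usage sketch with hypothetical revisions:
#
#   copies = pathcopies(repo[b'1.0'], repo[b'tip'])
#
# returns a {dst: src} mapping of files in b'tip' that were copied or
# renamed since b'1.0', using the forward, backward or combined search
# mode above depending on how the two revisions are related.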
779 779
780 780 def mergecopies(repo, c1, c2, base):
781 781 """
782 782 Finds moves and copies between contexts c1 and c2 that are relevant for
783 783 merging. 'base' will be used as the merge base.
784 784 
785 785 Copytracing is used in commands like rebase, merge, unshelve, etc. to merge
786 786 files that were moved/copied in one merge parent and modified in another.
787 787 For example:
788 788
789 789 o ---> 4 another commit
790 790 |
791 791 | o ---> 3 commit that modifies a.txt
792 792 | /
793 793 o / ---> 2 commit that moves a.txt to b.txt
794 794 |/
795 795 o ---> 1 merge base
796 796
797 797 If we try to rebase revision 3 on revision 4, since there is no a.txt in
798 798 revision 4, and if the user has copytrace disabled, we print the following
799 799 message:
800 800
801 801 ```other changed <file> which local deleted```
802 802
803 803 Returns a tuple where:
804 804
805 805 "branch_copies" an instance of branch_copies.
806 806
807 807 "diverge" is a mapping of source name -> list of destination names
808 808 for divergent renames.
809 809
810 810 This function calls different copytracing algorithms based on config.
811 811 """
812 812 # avoid silly behavior for update from empty dir
813 813 if not c1 or not c2 or c1 == c2:
814 814 return branch_copies(), branch_copies(), {}
815 815
816 816 narrowmatch = c1.repo().narrowmatch()
817 817
818 818 # avoid silly behavior for parent -> working dir
819 819 if c2.node() is None and c1.node() == repo.dirstate.p1():
820 820 return (
821 821 branch_copies(_dirstatecopies(repo, narrowmatch)),
822 822 branch_copies(),
823 823 {},
824 824 )
825 825
826 826 copytracing = repo.ui.config(b'experimental', b'copytrace')
827 827 if stringutil.parsebool(copytracing) is False:
828 828 # stringutil.parsebool() returns None when it is unable to parse the
829 829 # value; in such cases we fall back to treating copytracing as enabled
830 830 return branch_copies(), branch_copies(), {}
831 831
832 832 if usechangesetcentricalgo(repo):
833 833 # The heuristics don't make sense when we need changeset-centric algos
834 834 return _fullcopytracing(repo, c1, c2, base)
835 835
836 836 # Copy trace disabling is explicitly below the node == p1 logic above
837 837 # because the logic above is required for a simple copy to be kept across a
838 838 # rebase.
839 839 if copytracing == b'heuristics':
840 840 # Do full copytracing if only non-public revisions are involved as
841 841 # that will be fast enough and will also cover the copies which could
842 842 # be missed by heuristics
843 843 if _isfullcopytraceable(repo, c1, base):
844 844 return _fullcopytracing(repo, c1, c2, base)
845 845 return _heuristicscopytracing(repo, c1, c2, base)
846 846 else:
847 847 return _fullcopytracing(repo, c1, c2, base)
848 848
849 849
850 850 def _isfullcopytraceable(repo, c1, base):
851 851 """Checks that if base, source and destination are all no-public branches,
852 852 if yes let's use the full copytrace algorithm for increased capabilities
853 853 since it will be fast enough.
854 854
855 855 `experimental.copytrace.sourcecommitlimit` can be used to set a limit for
856 856 number of changesets from c1 to base such that if number of changesets are
857 857 more than the limit, full copytracing algorithm won't be used.
858 858 """
859 859 if c1.rev() is None:
860 860 c1 = c1.p1()
861 861 if c1.mutable() and base.mutable():
862 862 sourcecommitlimit = repo.ui.configint(
863 863 b'experimental', b'copytrace.sourcecommitlimit'
864 864 )
865 865 commits = len(repo.revs(b'%d::%d', base.rev(), c1.rev()))
866 866 return commits < sourcecommitlimit
867 867 return False
868 868
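# An example hgrc snippet (illustrative values) enabling the heuristics
# mode that consults this predicate:
#
#   [experimental]
#   copytrace = heuristics
#   copytrace.sourcecommitlimit = 50
#
# With a draft-only base and c1 and fewer than 50 changesets between
# them, full copytracing is used despite the heuristics setting.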
869 869
870 870 def _checksinglesidecopies(
871 871 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
872 872 ):
873 873 if src not in m2:
874 874 # deleted on side 2
875 875 if src not in m1:
876 876 # renamed on side 1, deleted on side 2
877 877 renamedelete[src] = dsts1
878 878 elif src not in mb:
879 879 # Work around the "short-circuit to avoid issues with merge states"
880 880 # thing in pathcopies(): pathcopies(x, y) can return a copy where the
881 881 # destination doesn't exist in y.
882 882 pass
883 883 elif mb[src] != m2[src] and not _related(c2[src], base[src]):
884 884 return
885 885 elif mb[src] != m2[src] or mb.flags(src) != m2.flags(src):
886 886 # modified on side 2
887 887 for dst in dsts1:
888 888 copy[dst] = src
889 889
890 890
891 891 class branch_copies(object):
892 892 """Information about copies made on one side of a merge/graft.
893 893
894 894 "copy" is a mapping from destination name -> source name,
895 895 where source is in c1 and destination is in c2 or vice-versa.
896 896
897 897 "movewithdir" is a mapping from source name -> destination name,
898 898 where a file at source, present in one context but not the other,
899 899 needs to be moved to destination by the merge process, because the
900 900 other context moved the directory it is in.
901 901
902 902 "renamedelete" is a mapping of source name -> list of destination
903 903 names for files deleted in c1 that were renamed in c2 or vice-versa.
904 904
905 905 "dirmove" is a mapping of detected source dir -> destination dir renames.
906 906 This is needed for handling changes to new files previously grafted into
907 907 renamed directories.
908 908 """
909 909
910 910 def __init__(
911 911 self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
912 912 ):
913 913 self.copy = {} if copy is None else copy
914 914 self.renamedelete = {} if renamedelete is None else renamedelete
915 915 self.dirmove = {} if dirmove is None else dirmove
916 916 self.movewithdir = {} if movewithdir is None else movewithdir
917 917
918 918 def __repr__(self):
919 919 return '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>' % (
920 920 self.copy,
921 921 self.renamedelete,
922 922 self.dirmove,
923 923 self.movewithdir,
924 924 )
925 925
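To make the four mappings concrete, here is a hedged sketch (not part of the original module) of what a branch_copies instance might hold after a merge where one side renamed dir1/ to dir2/ and renamed a.txt to b.txt; all paths are illustrative::

    bc = branch_copies(
        copy={b'b.txt': b'a.txt'},            # dst -> src
        renamedelete={b'a.txt': [b'b.txt']},  # deleted on one side, renamed on the other
        dirmove={b'dir1/': b'dir2/'},         # whole-directory rename
        movewithdir={b'dir1/new.txt': b'dir2/new.txt'},  # new file follows the dir move
    )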
926 926
927 927 def _fullcopytracing(repo, c1, c2, base):
928 928 """The full copytracing algorithm which finds all the new files that were
929 929 added from merge base up to the top commit and for each file it checks if
930 930 this file was copied from another file.
931 931
932 932 This is pretty slow when a lot of changesets are involved but will track all
933 933 the copies.
934 934 """
935 935 m1 = c1.manifest()
936 936 m2 = c2.manifest()
937 937 mb = base.manifest()
938 938
939 939 copies1 = pathcopies(base, c1)
940 940 copies2 = pathcopies(base, c2)
941 941
942 942 if not (copies1 or copies2):
943 943 return branch_copies(), branch_copies(), {}
944 944
945 945 inversecopies1 = {}
946 946 inversecopies2 = {}
947 947 for dst, src in copies1.items():
948 948 inversecopies1.setdefault(src, []).append(dst)
949 949 for dst, src in copies2.items():
950 950 inversecopies2.setdefault(src, []).append(dst)
951 951
952 952 copy1 = {}
953 953 copy2 = {}
954 954 diverge = {}
955 955 renamedelete1 = {}
956 956 renamedelete2 = {}
957 957 allsources = set(inversecopies1) | set(inversecopies2)
958 958 for src in allsources:
959 959 dsts1 = inversecopies1.get(src)
960 960 dsts2 = inversecopies2.get(src)
961 961 if dsts1 and dsts2:
962 962 # copied/renamed on both sides
963 963 if src not in m1 and src not in m2:
964 964 # renamed on both sides
965 965 dsts1 = set(dsts1)
966 966 dsts2 = set(dsts2)
967 967 # If there's some overlap in the rename destinations, we
968 968 # consider it not divergent. For example, if side 1 copies 'a'
969 969 # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
970 970 # and 'd' and deletes 'a'.
971 971 if dsts1 & dsts2:
972 972 for dst in dsts1 & dsts2:
973 973 copy1[dst] = src
974 974 copy2[dst] = src
975 975 else:
976 976 diverge[src] = sorted(dsts1 | dsts2)
977 977 elif src in m1 and src in m2:
978 978 # copied on both sides
979 979 dsts1 = set(dsts1)
980 980 dsts2 = set(dsts2)
981 981 for dst in dsts1 & dsts2:
982 982 copy1[dst] = src
983 983 copy2[dst] = src
984 984 # TODO: Handle cases where it was renamed on one side and copied
985 985 # on the other side
986 986 elif dsts1:
987 987 # copied/renamed only on side 1
988 988 _checksinglesidecopies(
989 989 src, dsts1, m1, m2, mb, c2, base, copy1, renamedelete1
990 990 )
991 991 elif dsts2:
992 992 # copied/renamed only on side 2
993 993 _checksinglesidecopies(
994 994 src, dsts2, m2, m1, mb, c1, base, copy2, renamedelete2
995 995 )
996 996
997 997 # find interesting file sets from manifests
998 998 cache = []
999 999
1000 1000 def _get_addedfiles(idx):
1001 1001 if not cache:
1002 1002 addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
1003 1003 addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
1004 1004 u1 = sorted(addedinm1 - addedinm2)
1005 1005 u2 = sorted(addedinm2 - addedinm1)
1006 1006 cache.extend((u1, u2))
1007 1007 return cache[idx]
1008 1008
1009 1009 u1fn = lambda: _get_addedfiles(0)
1010 1010 u2fn = lambda: _get_addedfiles(1)
1011 1011 if repo.ui.debugflag:
1012 1012 u1 = u1fn()
1013 1013 u2 = u2fn()
1014 1014
1015 1015 header = b" unmatched files in %s"
1016 1016 if u1:
1017 1017 repo.ui.debug(
1018 1018 b"%s:\n %s\n" % (header % b'local', b"\n ".join(u1))
1019 1019 )
1020 1020 if u2:
1021 1021 repo.ui.debug(
1022 1022 b"%s:\n %s\n" % (header % b'other', b"\n ".join(u2))
1023 1023 )
1024 1024
1025 1025 renamedeleteset = set()
1026 1026 divergeset = set()
1027 1027 for dsts in diverge.values():
1028 1028 divergeset.update(dsts)
1029 1029 for dsts in renamedelete1.values():
1030 1030 renamedeleteset.update(dsts)
1031 1031 for dsts in renamedelete2.values():
1032 1032 renamedeleteset.update(dsts)
1033 1033
1034 1034 repo.ui.debug(
1035 1035 b" all copies found (* = to merge, ! = divergent, "
1036 1036 b"% = renamed and deleted):\n"
1037 1037 )
1038 1038 for side, copies in ((b"local", copies1), (b"remote", copies2)):
1039 1039 if not copies:
1040 1040 continue
1041 1041 repo.ui.debug(b" on %s side:\n" % side)
1042 1042 for f in sorted(copies):
1043 1043 note = b""
1044 1044 if f in copy1 or f in copy2:
1045 1045 note += b"*"
1046 1046 if f in divergeset:
1047 1047 note += b"!"
1048 1048 if f in renamedeleteset:
1049 1049 note += b"%"
1050 1050 repo.ui.debug(
1051 1051 b" src: '%s' -> dst: '%s' %s\n" % (copies[f], f, note)
1052 1052 )
1053 1053 del renamedeleteset
1054 1054 del divergeset
1055 1055
1056 1056 repo.ui.debug(b" checking for directory renames\n")
1057 1057
1058 1058 dirmove1, movewithdir2 = _dir_renames(repo, c1, copy1, copies1, u2fn)
1059 1059 dirmove2, movewithdir1 = _dir_renames(repo, c2, copy2, copies2, u1fn)
1060 1060
1061 1061 branch_copies1 = branch_copies(copy1, renamedelete1, dirmove1, movewithdir1)
1062 1062 branch_copies2 = branch_copies(copy2, renamedelete2, dirmove2, movewithdir2)
1063 1063
1064 1064 return branch_copies1, branch_copies2, diverge
1065 1065
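A hedged worked example of the divergence rule above: if side 1 copies 'a' to {'b', 'c'} and side 2 copies 'a' to {'c', 'd'}, with 'a' deleted on both sides, the overlap {'c'} makes the rename converge on 'c' instead of being recorded as divergent::

    dsts1, dsts2 = {b'b', b'c'}, {b'c', b'd'}
    copy1, copy2, diverge = {}, {}, {}
    if dsts1 & dsts2:
        for dst in dsts1 & dsts2:
            copy1[dst] = copy2[dst] = b'a'
    else:
        diverge[b'a'] = sorted(dsts1 | dsts2)
    assert copy1 == {b'c': b'a'} and not diverge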
1066 1066
1067 1067 def _dir_renames(repo, ctx, copy, fullcopy, addedfilesfn):
1068 1068 """Finds moved directories and files that should move with them.
1069 1069
1070 1070 ctx: the context for one of the sides
1071 1071 copy: files copied on the same side (as ctx)
1072 1072 fullcopy: files copied on the same side (as ctx), including those that
1073 1073 merge.manifestmerge() won't care about
1074 1074 addedfilesfn: function returning added files on the other side (compared to
1075 1075 ctx)
1076 1076 """
1077 1077 # generate a directory move map
1078 1078 invalid = set()
1079 1079 dirmove = {}
1080 1080
1081 1081 # examine each file copy for a potential directory move, which is
1082 1082 # when all the files in a directory are moved to a new directory
1083 1083 for dst, src in pycompat.iteritems(fullcopy):
1084 1084 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
1085 1085 if dsrc in invalid:
1086 1086 # already seen to be uninteresting
1087 1087 continue
1088 1088 elif ctx.hasdir(dsrc) and ctx.hasdir(ddst):
1089 1089 # directory wasn't entirely moved locally
1090 1090 invalid.add(dsrc)
1091 1091 elif dsrc in dirmove and dirmove[dsrc] != ddst:
1092 1092 # files from the same directory moved to two different places
1093 1093 invalid.add(dsrc)
1094 1094 else:
1095 1095 # looks good so far
1096 1096 dirmove[dsrc] = ddst
1097 1097
1098 1098 for i in invalid:
1099 1099 if i in dirmove:
1100 1100 del dirmove[i]
1101 1101 del invalid
1102 1102
1103 1103 if not dirmove:
1104 1104 return {}, {}
1105 1105
1106 1106 dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}
1107 1107
1108 1108 for d in dirmove:
1109 1109 repo.ui.debug(
1110 1110 b" discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
1111 1111 )
1112 1112
1113 1113 # Sort the directories in reverse order, so we find children first
1114 1114 # For example, if dir1/ was renamed to dir2/, and dir1/subdir1/
1115 1115 # was renamed to dir2/subdir2/, we want to move dir1/subdir1/file
1116 1116 # to dir2/subdir2/file (not dir2/subdir1/file)
1117 1117 dirmove_children_first = sorted(dirmove, reverse=True)
1118 1118
1119 1119 movewithdir = {}
1120 1120 # check unaccounted nonoverlapping files against directory moves
1121 1121 for f in addedfilesfn():
1122 1122 if f not in fullcopy:
1123 1123 for d in dirmove_children_first:
1124 1124 if f.startswith(d):
1125 1125 # new file added in a directory that was moved, move it
1126 1126 df = dirmove[d] + f[len(d) :]
1127 1127 if df not in copy:
1128 1128 movewithdir[f] = df
1129 1129 repo.ui.debug(
1130 1130 b" pending file src: '%s' -> dst: '%s'\n"
1131 1131 % (f, df)
1132 1132 )
1133 1133 break
1134 1134
1135 1135 return dirmove, movewithdir
1136 1136
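A hedged toy run of the detection loop above, with the repository-specific hasdir() checks omitted, so only the "moved to two different places" rule is exercised::

    import posixpath

    fullcopy = {b'dir2/a': b'dir1/a', b'dir2/b': b'dir1/b'}  # dst -> src
    dirmove, invalid = {}, set()
    for dst, src in fullcopy.items():
        dsrc, ddst = posixpath.dirname(src), posixpath.dirname(dst)
        if dsrc in invalid:
            continue
        if dsrc in dirmove and dirmove[dsrc] != ddst:
            invalid.add(dsrc)  # files from one dir moved to two places
        else:
            dirmove[dsrc] = ddst  # looks good so far
    for d in invalid:
        dirmove.pop(d, None)
    assert dirmove == {b'dir1': b'dir2'}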
1137 1137
1138 1138 def _heuristicscopytracing(repo, c1, c2, base):
1139 1139 """Fast copytracing using filename heuristics
1140 1140
1141 1141 Assumes that moves or renames are of the following two types:
1142 1142
1143 1143 1) Inside a directory only (same directory name but different filenames)
1144 1144 2) Move from one directory to another
1145 1145 (same filenames but different directory names)
1146 1146
1147 1147 Works only when there are no merge commits in the "source branch".
1148 1148 The source branch is the set of commits from base up to c2, not including base.
1149 1149
1150 1150 If a merge is involved, it falls back to _fullcopytracing().
1151 1151
1152 1152 Can be used by setting the following config:
1153 1153
1154 1154 [experimental]
1155 1155 copytrace = heuristics
1156 1156
1157 1157 In some cases the copy/move candidates found by the heuristics can be very
1158 1158 numerous, which makes the algorithm slow. The number of possible
1159 1159 candidates to check can be limited with the config option
1160 1160 `experimental.copytrace.movecandidateslimit`, which defaults to 100.
1161 1161 """
1162 1162
1163 1163 if c1.rev() is None:
1164 1164 c1 = c1.p1()
1165 1165 if c2.rev() is None:
1166 1166 c2 = c2.p1()
1167 1167
1168 1168 changedfiles = set()
1169 1169 m1 = c1.manifest()
1170 1170 if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
1171 1171 # If base is not in c2 branch, we switch to fullcopytracing
1172 1172 repo.ui.debug(
1173 1173 b"switching to full copytracing as base is not "
1174 1174 b"an ancestor of c2\n"
1175 1175 )
1176 1176 return _fullcopytracing(repo, c1, c2, base)
1177 1177
1178 1178 ctx = c2
1179 1179 while ctx != base:
1180 1180 if len(ctx.parents()) == 2:
1181 1181 # To keep things simple let's not handle merges
1182 1182 repo.ui.debug(b"switching to full copytracing because of merges\n")
1183 1183 return _fullcopytracing(repo, c1, c2, base)
1184 1184 changedfiles.update(ctx.files())
1185 1185 ctx = ctx.p1()
1186 1186
1187 1187 copies2 = {}
1188 1188 cp = _forwardcopies(base, c2)
1189 1189 for dst, src in pycompat.iteritems(cp):
1190 1190 if src in m1:
1191 1191 copies2[dst] = src
1192 1192
1193 1193 # file is missing if it isn't present in the destination, but is present in
1194 1194 # the base and present in the source.
1195 1195 # Presence in the base is important to exclude added files, presence in the
1196 1196 # source is important to exclude removed files.
1197 1197 filt = lambda f: f not in m1 and f in base and f in c2
1198 1198 missingfiles = [f for f in changedfiles if filt(f)]
1199 1199
1200 1200 copies1 = {}
1201 1201 if missingfiles:
1202 1202 basenametofilename = collections.defaultdict(list)
1203 1203 dirnametofilename = collections.defaultdict(list)
1204 1204
1205 1205 for f in m1.filesnotin(base.manifest()):
1206 1206 basename = os.path.basename(f)
1207 1207 dirname = os.path.dirname(f)
1208 1208 basenametofilename[basename].append(f)
1209 1209 dirnametofilename[dirname].append(f)
1210 1210
1211 1211 for f in missingfiles:
1212 1212 basename = os.path.basename(f)
1213 1213 dirname = os.path.dirname(f)
1214 1214 samebasename = basenametofilename[basename]
1215 1215 samedirname = dirnametofilename[dirname]
1216 1216 movecandidates = samebasename + samedirname
1217 1217 # f is guaranteed to be present in c2, that's why
1218 1218 # c2.filectx(f) won't fail
1219 1219 f2 = c2.filectx(f)
1220 1220 # we can have a lot of candidates, which can slow down the heuristics;
1221 1221 # this config value limits the number of move candidates to check
1222 1222 maxcandidates = repo.ui.configint(
1223 1223 b'experimental', b'copytrace.movecandidateslimit'
1224 1224 )
1225 1225
1226 1226 if len(movecandidates) > maxcandidates:
1227 1227 repo.ui.status(
1228 1228 _(
1229 1229 b"skipping copytracing for '%s', more "
1230 1230 b"candidates than the limit: %d\n"
1231 1231 )
1232 1232 % (f, len(movecandidates))
1233 1233 )
1234 1234 continue
1235 1235
1236 1236 for candidate in movecandidates:
1237 1237 f1 = c1.filectx(candidate)
1238 1238 if _related(f1, f2):
1239 1239 # if there are a few related copies then we'll merge
1240 1240 # changes into all of them. This matches the behaviour
1241 1241 # of upstream copytracing
1242 1242 copies1[candidate] = f
1243 1243
1244 1244 return branch_copies(copies1), branch_copies(copies2), {}
1245 1245
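The candidate search above indexes files added on side 1 by basename and by dirname; a hedged toy run (paths illustrative)::

    import collections
    import os

    newfiles = [b'src/util.py', b'docs/util.py', b'src/io.py']
    bybase = collections.defaultdict(list)
    bydir = collections.defaultdict(list)
    for f in newfiles:
        bybase[os.path.basename(f)].append(f)
        bydir[os.path.dirname(f)].append(f)
    # a file missing at b'lib/util.py' is matched against files sharing
    # its basename or its directory:
    candidates = bybase[b'util.py'] + bydir[b'lib']
    assert candidates == [b'src/util.py', b'docs/util.py']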
1246 1246
1247 1247 def _related(f1, f2):
1248 1248 """return True if f1 and f2 filectx have a common ancestor
1249 1249
1250 1250 Walk back to common ancestor to see if the two files originate
1251 1251 from the same file. Since workingfilectx's rev() is None it messes
1252 1252 up the integer comparison logic, hence the pre-step check for
1253 1253 None (f1 and f2 can only be workingfilectx's initially).
1254 1254 """
1255 1255
1256 1256 if f1 == f2:
1257 1257 return True # a match
1258 1258
1259 1259 g1, g2 = f1.ancestors(), f2.ancestors()
1260 1260 try:
1261 1261 f1r, f2r = f1.linkrev(), f2.linkrev()
1262 1262
1263 1263 if f1r is None:
1264 1264 f1 = next(g1)
1265 1265 if f2r is None:
1266 1266 f2 = next(g2)
1267 1267
1268 1268 while True:
1269 1269 f1r, f2r = f1.linkrev(), f2.linkrev()
1270 1270 if f1r > f2r:
1271 1271 f1 = next(g1)
1272 1272 elif f2r > f1r:
1273 1273 f2 = next(g2)
1274 1274 else: # f1 and f2 point to files in the same linkrev
1275 1275 return f1 == f2 # true if they point to the same file
1276 1276 except StopIteration:
1277 1277 return False
1278 1278
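The loop above is a two-pointer walk over two ancestor chains ordered by decreasing linkrev; a hedged sketch on plain (linkrev, file_id) tuples standing in for filectx objects::

    def related(chain1, chain2):
        # chain1/chain2: iterables of (linkrev, file_id), newest first
        g1, g2 = iter(chain1), iter(chain2)
        try:
            f1, f2 = next(g1), next(g2)
            while True:
                if f1[0] > f2[0]:
                    f1 = next(g1)
                elif f2[0] > f1[0]:
                    f2 = next(g2)
                else:
                    return f1[1] == f2[1]  # same linkrev: same file?
        except StopIteration:
            return False  # one chain exhausted without a meeting point

    assert related([(5, b'x'), (3, b'a')], [(4, b'y'), (3, b'a')])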
1279 1279
1280 1280 def graftcopies(wctx, ctx, base):
1281 1281 """reproduce copies between base and ctx in the wctx
1282 1282
1283 1283 Unlike mergecopies(), this function will only consider copies between base
1284 1284 and ctx; it will ignore copies between base and wctx. Also unlike
1285 1285 mergecopies(), this function will apply copies to the working copy (instead
1286 1286 of just returning information about the copies). That makes it cheaper
1287 1287 (especially in the common case of base==ctx.p1()) and useful also when
1288 1288 experimental.copytrace=off.
1289 1289
1290 1290 merge.update() will have already marked most copies, but it will only
1291 1291 mark copies if it thinks the source files are related (see
1292 1292 merge._related()). It will also not mark copies if the file wasn't modified
1293 1293 on the local side. This function adds the copies that were "missed"
1294 1294 by merge.update().
1295 1295 """
1296 1296 new_copies = pathcopies(base, ctx)
1297 1297 parent = wctx.p1()
1298 1298 _filter(parent, wctx, new_copies)
1299 1299 # Extra filtering to drop copy information for files that existed before
1300 1300 # the graft. This is to handle the case of grafting a rename onto a commit
1301 1301 # that already has the rename. Otherwise the presence of copy information
1302 1302 # would result in the creation of an empty commit where we would prefer to
1303 1303 # not create one.
1304 1304 for dest, __ in list(new_copies.items()):
1305 1305 if dest in parent:
1306 1306 del new_copies[dest]
1307 1307 for dst, src in pycompat.iteritems(new_copies):
1308 1308 wctx[dst].markcopied(src)
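
A hedged sketch of how graftcopies() is typically driven (the changeset hash is hypothetical; repo is assumed to be in scope)::

    wctx = repo[None]                  # working context
    ctx = repo[b'1234abcd']            # changeset being grafted
    graftcopies(wctx, ctx, ctx.p1())   # common case: base == ctx.p1()
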
@@ -1,1251 +1,1252 b''
1 1 # logcmdutil.py - utility for log-like commands
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import itertools
11 11 import os
12 12 import posixpath
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 nullid,
17 nullrev,
17 18 wdirid,
18 19 wdirrev,
19 20 )
20 21
21 22 from .thirdparty import attr
22 23
23 24 from . import (
24 25 dagop,
25 26 error,
26 27 formatter,
27 28 graphmod,
28 29 match as matchmod,
29 30 mdiff,
30 31 merge,
31 32 patch,
32 33 pathutil,
33 34 pycompat,
34 35 revset,
35 36 revsetlang,
36 37 scmutil,
37 38 smartset,
38 39 templatekw,
39 40 templater,
40 41 util,
41 42 )
42 43 from .utils import (
43 44 dateutil,
44 45 stringutil,
45 46 )
46 47
47 48
48 49 if pycompat.TYPE_CHECKING:
49 50 from typing import (
50 51 Any,
51 52 Callable,
52 53 Dict,
53 54 List,
54 55 Optional,
55 56 Sequence,
56 57 Tuple,
57 58 )
58 59
59 60 for t in (Any, Callable, Dict, List, Optional, Tuple):
60 61 assert t
61 62
62 63
63 64 def getlimit(opts):
64 65 """get the log limit according to option -l/--limit"""
65 66 limit = opts.get(b'limit')
66 67 if limit:
67 68 try:
68 69 limit = int(limit)
69 70 except ValueError:
70 71 raise error.Abort(_(b'limit must be a positive integer'))
71 72 if limit <= 0:
72 73 raise error.Abort(_(b'limit must be positive'))
73 74 else:
74 75 limit = None
75 76 return limit
76 77
77 78
78 79 def diff_parent(ctx):
79 80 """get the context object to use as parent when diffing
80 81
81 82
82 83 If diff.merge is enabled, an overlayworkingctx of the auto-merged parents will be returned.
83 84 """
84 85 repo = ctx.repo()
85 if repo.ui.configbool(b"diff", b"merge") and ctx.p2().node() != nullid:
86 if repo.ui.configbool(b"diff", b"merge") and ctx.p2().rev() != nullrev:
86 87 # avoid cycle context -> subrepo -> cmdutil -> logcmdutil
87 88 from . import context
88 89
89 90 wctx = context.overlayworkingctx(repo)
90 91 wctx.setbase(ctx.p1())
91 92 with repo.ui.configoverride(
92 93 {
93 94 (
94 95 b"ui",
95 96 b"forcemerge",
96 97 ): b"internal:merge3-lie-about-conflicts",
97 98 },
98 99 b"merge-diff",
99 100 ):
100 101 repo.ui.pushbuffer()
101 102 merge.merge(ctx.p2(), wc=wctx)
102 103 repo.ui.popbuffer()
103 104 return wctx
104 105 else:
105 106 return ctx.p1()
106 107
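The merge-diff branch above is taken only when the diff.merge option is set; a hedged hgrc sketch::

    [diff]
    # render merge changesets against an auto-merge of their parents
    merge = yes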
107 108
108 109 def diffordiffstat(
109 110 ui,
110 111 repo,
111 112 diffopts,
112 113 ctx1,
113 114 ctx2,
114 115 match,
115 116 changes=None,
116 117 stat=False,
117 118 fp=None,
118 119 graphwidth=0,
119 120 prefix=b'',
120 121 root=b'',
121 122 listsubrepos=False,
122 123 hunksfilterfn=None,
123 124 ):
124 125 '''show diff or diffstat.'''
125 126 if root:
126 127 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
127 128 else:
128 129 relroot = b''
129 130 copysourcematch = None
130 131
131 132 def compose(f, g):
132 133 return lambda x: f(g(x))
133 134
134 135 def pathfn(f):
135 136 return posixpath.join(prefix, f)
136 137
137 138 if relroot != b'':
138 139 # XXX relative roots currently don't work if the root is within a
139 140 # subrepo
140 141 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
141 142 uirelroot = uipathfn(pathfn(relroot))
142 143 relroot += b'/'
143 144 for matchroot in match.files():
144 145 if not matchroot.startswith(relroot):
145 146 ui.warn(
146 147 _(b'warning: %s not inside relative root %s\n')
147 148 % (uipathfn(pathfn(matchroot)), uirelroot)
148 149 )
149 150
150 151 relrootmatch = scmutil.match(ctx2, pats=[relroot], default=b'path')
151 152 match = matchmod.intersectmatchers(match, relrootmatch)
152 153 copysourcematch = relrootmatch
153 154
154 155 checkroot = repo.ui.configbool(
155 156 b'devel', b'all-warnings'
156 157 ) or repo.ui.configbool(b'devel', b'check-relroot')
157 158
158 159 def relrootpathfn(f):
159 160 if checkroot and not f.startswith(relroot):
160 161 raise AssertionError(
161 162 b"file %s doesn't start with relroot %s" % (f, relroot)
162 163 )
163 164 return f[len(relroot) :]
164 165
165 166 pathfn = compose(relrootpathfn, pathfn)
166 167
167 168 if stat:
168 169 diffopts = diffopts.copy(context=0, noprefix=False)
169 170 width = 80
170 171 if not ui.plain():
171 172 width = ui.termwidth() - graphwidth
172 173 # If an explicit --root was given, don't respect ui.relative-paths
173 174 if not relroot:
174 175 pathfn = compose(scmutil.getuipathfn(repo), pathfn)
175 176
176 177 chunks = ctx2.diff(
177 178 ctx1,
178 179 match,
179 180 changes,
180 181 opts=diffopts,
181 182 pathfn=pathfn,
182 183 copysourcematch=copysourcematch,
183 184 hunksfilterfn=hunksfilterfn,
184 185 )
185 186
186 187 if fp is not None or ui.canwritewithoutlabels():
187 188 out = fp or ui
188 189 if stat:
189 190 chunks = [patch.diffstat(util.iterlines(chunks), width=width)]
190 191 for chunk in util.filechunkiter(util.chunkbuffer(chunks)):
191 192 out.write(chunk)
192 193 else:
193 194 if stat:
194 195 chunks = patch.diffstatui(util.iterlines(chunks), width=width)
195 196 else:
196 197 chunks = patch.difflabel(
197 198 lambda chunks, **kwargs: chunks, chunks, opts=diffopts
198 199 )
199 200 if ui.canbatchlabeledwrites():
200 201
201 202 def gen():
202 203 for chunk, label in chunks:
203 204 yield ui.label(chunk, label=label)
204 205
205 206 for chunk in util.filechunkiter(util.chunkbuffer(gen())):
206 207 ui.write(chunk)
207 208 else:
208 209 for chunk, label in chunks:
209 210 ui.write(chunk, label=label)
210 211
211 212 node2 = ctx2.node()
212 213 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
213 214 tempnode2 = node2
214 215 try:
215 216 if node2 is not None:
216 217 tempnode2 = ctx2.substate[subpath][1]
217 218 except KeyError:
218 219 # A subrepo that existed in node1 was deleted between node1 and
219 220 # node2 (inclusive). Thus, ctx2's substate won't contain that
220 221 # subpath. The best we can do is to ignore it.
221 222 tempnode2 = None
222 223 submatch = matchmod.subdirmatcher(subpath, match)
223 224 subprefix = repo.wvfs.reljoin(prefix, subpath)
224 225 if listsubrepos or match.exact(subpath) or any(submatch.files()):
225 226 sub.diff(
226 227 ui,
227 228 diffopts,
228 229 tempnode2,
229 230 submatch,
230 231 changes=changes,
231 232 stat=stat,
232 233 fp=fp,
233 234 prefix=subprefix,
234 235 )
235 236
236 237
237 238 class changesetdiffer(object):
238 239 """Generate diff of changeset with pre-configured filtering functions"""
239 240
240 241 def _makefilematcher(self, ctx):
241 242 return scmutil.matchall(ctx.repo())
242 243
243 244 def _makehunksfilter(self, ctx):
244 245 return None
245 246
246 247 def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
247 248 diffordiffstat(
248 249 ui,
249 250 ctx.repo(),
250 251 diffopts,
251 252 diff_parent(ctx),
252 253 ctx,
253 254 match=self._makefilematcher(ctx),
254 255 stat=stat,
255 256 graphwidth=graphwidth,
256 257 hunksfilterfn=self._makehunksfilter(ctx),
257 258 )
258 259
259 260
260 261 def changesetlabels(ctx):
261 262 labels = [b'log.changeset', b'changeset.%s' % ctx.phasestr()]
262 263 if ctx.obsolete():
263 264 labels.append(b'changeset.obsolete')
264 265 if ctx.isunstable():
265 266 labels.append(b'changeset.unstable')
266 267 for instability in ctx.instabilities():
267 268 labels.append(b'instability.%s' % instability)
268 269 return b' '.join(labels)
269 270
270 271
271 272 class changesetprinter(object):
272 273 '''show changeset information when templating not requested.'''
273 274
274 275 def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
275 276 self.ui = ui
276 277 self.repo = repo
277 278 self.buffered = buffered
278 279 self._differ = differ or changesetdiffer()
279 280 self._diffopts = patch.diffallopts(ui, diffopts)
280 281 self._includestat = diffopts and diffopts.get(b'stat')
281 282 self._includediff = diffopts and diffopts.get(b'patch')
282 283 self.header = {}
283 284 self.hunk = {}
284 285 self.lastheader = None
285 286 self.footer = None
286 287 self._columns = templatekw.getlogcolumns()
287 288
288 289 def flush(self, ctx):
289 290 rev = ctx.rev()
290 291 if rev in self.header:
291 292 h = self.header[rev]
292 293 if h != self.lastheader:
293 294 self.lastheader = h
294 295 self.ui.write(h)
295 296 del self.header[rev]
296 297 if rev in self.hunk:
297 298 self.ui.write(self.hunk[rev])
298 299 del self.hunk[rev]
299 300
300 301 def close(self):
301 302 if self.footer:
302 303 self.ui.write(self.footer)
303 304
304 305 def show(self, ctx, copies=None, **props):
305 306 props = pycompat.byteskwargs(props)
306 307 if self.buffered:
307 308 self.ui.pushbuffer(labeled=True)
308 309 self._show(ctx, copies, props)
309 310 self.hunk[ctx.rev()] = self.ui.popbuffer()
310 311 else:
311 312 self._show(ctx, copies, props)
312 313
313 314 def _show(self, ctx, copies, props):
314 315 '''show a single changeset or file revision'''
315 316 changenode = ctx.node()
316 317 graphwidth = props.get(b'graphwidth', 0)
317 318
318 319 if self.ui.quiet:
319 320 self.ui.write(
320 321 b"%s\n" % scmutil.formatchangeid(ctx), label=b'log.node'
321 322 )
322 323 return
323 324
324 325 columns = self._columns
325 326 self.ui.write(
326 327 columns[b'changeset'] % scmutil.formatchangeid(ctx),
327 328 label=changesetlabels(ctx),
328 329 )
329 330
330 331 # branches are shown first before any other names due to backwards
331 332 # compatibility
332 333 branch = ctx.branch()
333 334 # don't show the default branch name
334 335 if branch != b'default':
335 336 self.ui.write(columns[b'branch'] % branch, label=b'log.branch')
336 337
337 338 for nsname, ns in pycompat.iteritems(self.repo.names):
338 339 # branches has special logic already handled above, so here we just
339 340 # skip it
340 341 if nsname == b'branches':
341 342 continue
342 343 # we will use the templatename as the color name since those two
343 344 # should be the same
344 345 for name in ns.names(self.repo, changenode):
345 346 self.ui.write(ns.logfmt % name, label=b'log.%s' % ns.colorname)
346 347 if self.ui.debugflag:
347 348 self.ui.write(
348 349 columns[b'phase'] % ctx.phasestr(), label=b'log.phase'
349 350 )
350 351 for pctx in scmutil.meaningfulparents(self.repo, ctx):
351 352 label = b'log.parent changeset.%s' % pctx.phasestr()
352 353 self.ui.write(
353 354 columns[b'parent'] % scmutil.formatchangeid(pctx), label=label
354 355 )
355 356
356 357 if self.ui.debugflag:
357 358 mnode = ctx.manifestnode()
358 359 if mnode is None:
359 360 mnode = wdirid
360 361 mrev = wdirrev
361 362 else:
362 363 mrev = self.repo.manifestlog.rev(mnode)
363 364 self.ui.write(
364 365 columns[b'manifest']
365 366 % scmutil.formatrevnode(self.ui, mrev, mnode),
366 367 label=b'ui.debug log.manifest',
367 368 )
368 369 self.ui.write(columns[b'user'] % ctx.user(), label=b'log.user')
369 370 self.ui.write(
370 371 columns[b'date'] % dateutil.datestr(ctx.date()), label=b'log.date'
371 372 )
372 373
373 374 if ctx.isunstable():
374 375 instabilities = ctx.instabilities()
375 376 self.ui.write(
376 377 columns[b'instability'] % b', '.join(instabilities),
377 378 label=b'log.instability',
378 379 )
379 380
380 381 elif ctx.obsolete():
381 382 self._showobsfate(ctx)
382 383
383 384 self._exthook(ctx)
384 385
385 386 if self.ui.debugflag:
386 387 files = ctx.p1().status(ctx)
387 388 for key, value in zip(
388 389 [b'files', b'files+', b'files-'],
389 390 [files.modified, files.added, files.removed],
390 391 ):
391 392 if value:
392 393 self.ui.write(
393 394 columns[key] % b" ".join(value),
394 395 label=b'ui.debug log.files',
395 396 )
396 397 elif ctx.files() and self.ui.verbose:
397 398 self.ui.write(
398 399 columns[b'files'] % b" ".join(ctx.files()),
399 400 label=b'ui.note log.files',
400 401 )
401 402 if copies and self.ui.verbose:
402 403 copies = [b'%s (%s)' % c for c in copies]
403 404 self.ui.write(
404 405 columns[b'copies'] % b' '.join(copies),
405 406 label=b'ui.note log.copies',
406 407 )
407 408
408 409 extra = ctx.extra()
409 410 if extra and self.ui.debugflag:
410 411 for key, value in sorted(extra.items()):
411 412 self.ui.write(
412 413 columns[b'extra'] % (key, stringutil.escapestr(value)),
413 414 label=b'ui.debug log.extra',
414 415 )
415 416
416 417 description = ctx.description().strip()
417 418 if description:
418 419 if self.ui.verbose:
419 420 self.ui.write(
420 421 _(b"description:\n"), label=b'ui.note log.description'
421 422 )
422 423 self.ui.write(description, label=b'ui.note log.description')
423 424 self.ui.write(b"\n\n")
424 425 else:
425 426 self.ui.write(
426 427 columns[b'summary'] % description.splitlines()[0],
427 428 label=b'log.summary',
428 429 )
429 430 self.ui.write(b"\n")
430 431
431 432 self._showpatch(ctx, graphwidth)
432 433
433 434 def _showobsfate(self, ctx):
434 435 # TODO: do not depend on templater
435 436 tres = formatter.templateresources(self.repo.ui, self.repo)
436 437 t = formatter.maketemplater(
437 438 self.repo.ui,
438 439 b'{join(obsfate, "\n")}',
439 440 defaults=templatekw.keywords,
440 441 resources=tres,
441 442 )
442 443 obsfate = t.renderdefault({b'ctx': ctx}).splitlines()
443 444
444 445 if obsfate:
445 446 for obsfateline in obsfate:
446 447 self.ui.write(
447 448 self._columns[b'obsolete'] % obsfateline,
448 449 label=b'log.obsfate',
449 450 )
450 451
451 452 def _exthook(self, ctx):
452 453 """empty method used by extension as a hook point"""
453 454
454 455 def _showpatch(self, ctx, graphwidth=0):
455 456 if self._includestat:
456 457 self._differ.showdiff(
457 458 self.ui, ctx, self._diffopts, graphwidth, stat=True
458 459 )
459 460 if self._includestat and self._includediff:
460 461 self.ui.write(b"\n")
461 462 if self._includediff:
462 463 self._differ.showdiff(
463 464 self.ui, ctx, self._diffopts, graphwidth, stat=False
464 465 )
465 466 if self._includestat or self._includediff:
466 467 self.ui.write(b"\n")
467 468
468 469
469 470 class changesetformatter(changesetprinter):
470 471 """Format changeset information by generic formatter"""
471 472
472 473 def __init__(
473 474 self, ui, repo, fm, differ=None, diffopts=None, buffered=False
474 475 ):
475 476 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
476 477 self._diffopts = patch.difffeatureopts(ui, diffopts, git=True)
477 478 self._fm = fm
478 479
479 480 def close(self):
480 481 self._fm.end()
481 482
482 483 def _show(self, ctx, copies, props):
483 484 '''show a single changeset or file revision'''
484 485 fm = self._fm
485 486 fm.startitem()
486 487 fm.context(ctx=ctx)
487 488 fm.data(rev=scmutil.intrev(ctx), node=fm.hexfunc(scmutil.binnode(ctx)))
488 489
489 490 datahint = fm.datahint()
490 491 if self.ui.quiet and not datahint:
491 492 return
492 493
493 494 fm.data(
494 495 branch=ctx.branch(),
495 496 phase=ctx.phasestr(),
496 497 user=ctx.user(),
497 498 date=fm.formatdate(ctx.date()),
498 499 desc=ctx.description(),
499 500 bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'),
500 501 tags=fm.formatlist(ctx.tags(), name=b'tag'),
501 502 parents=fm.formatlist(
502 503 [fm.hexfunc(c.node()) for c in ctx.parents()], name=b'node'
503 504 ),
504 505 )
505 506
506 507 if self.ui.debugflag or b'manifest' in datahint:
507 508 fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
508 509 if self.ui.debugflag or b'extra' in datahint:
509 510 fm.data(extra=fm.formatdict(ctx.extra()))
510 511
511 512 if (
512 513 self.ui.debugflag
513 514 or b'modified' in datahint
514 515 or b'added' in datahint
515 516 or b'removed' in datahint
516 517 ):
517 518 files = ctx.p1().status(ctx)
518 519 fm.data(
519 520 modified=fm.formatlist(files.modified, name=b'file'),
520 521 added=fm.formatlist(files.added, name=b'file'),
521 522 removed=fm.formatlist(files.removed, name=b'file'),
522 523 )
523 524
524 525 verbose = not self.ui.debugflag and self.ui.verbose
525 526 if verbose or b'files' in datahint:
526 527 fm.data(files=fm.formatlist(ctx.files(), name=b'file'))
527 528 if verbose and copies or b'copies' in datahint:
528 529 fm.data(
529 530 copies=fm.formatdict(copies or {}, key=b'name', value=b'source')
530 531 )
531 532
532 533 if self._includestat or b'diffstat' in datahint:
533 534 self.ui.pushbuffer()
534 535 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True)
535 536 fm.data(diffstat=self.ui.popbuffer())
536 537 if self._includediff or b'diff' in datahint:
537 538 self.ui.pushbuffer()
538 539 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False)
539 540 fm.data(diff=self.ui.popbuffer())
540 541
541 542
542 543 class changesettemplater(changesetprinter):
543 544 """format changeset information.
544 545
545 546 Note: there are a variety of convenience functions to build a
546 547 changesettemplater for common cases. See functions such as:
547 548 maketemplater, changesetdisplayer, buildcommittemplate, or other
548 549 functions that use changesest_templater.
549 550 """
550 551
551 552 # Arguments before "buffered" used to be positional. Consider not
552 553 # adding/removing arguments before "buffered" to not break callers.
553 554 def __init__(
554 555 self, ui, repo, tmplspec, differ=None, diffopts=None, buffered=False
555 556 ):
556 557 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
557 558 # tres is shared with _graphnodeformatter()
558 559 self._tresources = tres = formatter.templateresources(ui, repo)
559 560 self.t = formatter.loadtemplater(
560 561 ui,
561 562 tmplspec,
562 563 defaults=templatekw.keywords,
563 564 resources=tres,
564 565 cache=templatekw.defaulttempl,
565 566 )
566 567 self._counter = itertools.count()
567 568
568 569 self._tref = tmplspec.ref
569 570 self._parts = {
570 571 b'header': b'',
571 572 b'footer': b'',
572 573 tmplspec.ref: tmplspec.ref,
573 574 b'docheader': b'',
574 575 b'docfooter': b'',
575 576 b'separator': b'',
576 577 }
577 578 if tmplspec.mapfile:
578 579 # find correct templates for current mode, for backward
579 580 # compatibility with 'log -v/-q/--debug' using a mapfile
580 581 tmplmodes = [
581 582 (True, b''),
582 583 (self.ui.verbose, b'_verbose'),
583 584 (self.ui.quiet, b'_quiet'),
584 585 (self.ui.debugflag, b'_debug'),
585 586 ]
586 587 for mode, postfix in tmplmodes:
587 588 for t in self._parts:
588 589 cur = t + postfix
589 590 if mode and cur in self.t:
590 591 self._parts[t] = cur
591 592 else:
592 593 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
593 594 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
594 595 self._parts.update(m)
595 596
596 597 if self._parts[b'docheader']:
597 598 self.ui.write(self.t.render(self._parts[b'docheader'], {}))
598 599
599 600 def close(self):
600 601 if self._parts[b'docfooter']:
601 602 if not self.footer:
602 603 self.footer = b""
603 604 self.footer += self.t.render(self._parts[b'docfooter'], {})
604 605 return super(changesettemplater, self).close()
605 606
606 607 def _show(self, ctx, copies, props):
607 608 '''show a single changeset or file revision'''
608 609 props = props.copy()
609 610 props[b'ctx'] = ctx
610 611 props[b'index'] = index = next(self._counter)
611 612 props[b'revcache'] = {b'copies': copies}
612 613 graphwidth = props.get(b'graphwidth', 0)
613 614
614 615 # write separator, which wouldn't work well with the header part below
615 616 # since there's inherently a conflict between header (across items) and
616 617 # separator (per item)
617 618 if self._parts[b'separator'] and index > 0:
618 619 self.ui.write(self.t.render(self._parts[b'separator'], {}))
619 620
620 621 # write header
621 622 if self._parts[b'header']:
622 623 h = self.t.render(self._parts[b'header'], props)
623 624 if self.buffered:
624 625 self.header[ctx.rev()] = h
625 626 else:
626 627 if self.lastheader != h:
627 628 self.lastheader = h
628 629 self.ui.write(h)
629 630
630 631 # write changeset metadata, then patch if requested
631 632 key = self._parts[self._tref]
632 633 self.ui.write(self.t.render(key, props))
633 634 self._exthook(ctx)
634 635 self._showpatch(ctx, graphwidth)
635 636
636 637 if self._parts[b'footer']:
637 638 if not self.footer:
638 639 self.footer = self.t.render(self._parts[b'footer'], props)
639 640
640 641
641 642 def templatespec(tmpl, mapfile):
642 643 assert not (tmpl and mapfile)
643 644 if mapfile:
644 645 return formatter.mapfile_templatespec(b'changeset', mapfile)
645 646 else:
646 647 return formatter.literal_templatespec(tmpl)
647 648
648 649
649 650 def _lookuptemplate(ui, tmpl, style):
650 651 """Find the template matching the given template spec or style
651 652
652 653 See formatter.lookuptemplate() for details.
653 654 """
654 655
655 656 # ui settings
656 657 if not tmpl and not style: # template are stronger than style
657 658 tmpl = ui.config(b'command-templates', b'log')
658 659 if tmpl:
659 660 return formatter.literal_templatespec(templater.unquotestring(tmpl))
660 661 else:
661 662 style = util.expandpath(ui.config(b'ui', b'style'))
662 663
663 664 if not tmpl and style:
664 665 mapfile = style
665 666 fp = None
666 667 if not os.path.split(mapfile)[0]:
667 668 (mapname, fp) = templater.try_open_template(
668 669 b'map-cmdline.' + mapfile
669 670 ) or templater.try_open_template(mapfile)
670 671 if mapname:
671 672 mapfile = mapname
672 673 return formatter.mapfile_templatespec(b'changeset', mapfile, fp)
673 674
674 675 return formatter.lookuptemplate(ui, b'changeset', tmpl)
675 676
676 677
677 678 def maketemplater(ui, repo, tmpl, buffered=False):
678 679 """Create a changesettemplater from a literal template 'tmpl'
679 680 byte-string."""
680 681 spec = formatter.literal_templatespec(tmpl)
681 682 return changesettemplater(ui, repo, spec, buffered=buffered)
682 683
683 684
684 685 def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
685 686 """show one changeset using template or regular display.
686 687
687 688 Display format will be the first non-empty hit of:
688 689 1. option 'template'
689 690 2. option 'style'
690 691 3. [command-templates] setting 'log'
691 692 4. [ui] setting 'style'
692 693 If all of these values are either unset or the empty string,
693 694 regular display via changesetprinter() is done.
694 695 """
695 696 postargs = (differ, opts, buffered)
696 697 spec = _lookuptemplate(ui, opts.get(b'template'), opts.get(b'style'))
697 698
698 699 # machine-readable formats have slightly different keyword set than
699 700 # plain templates, which are handled by changesetformatter.
700 701 # note that {b'pickle', b'debug'} can also be added to the list if needed.
701 702 if spec.ref in {b'cbor', b'json'}:
702 703 fm = ui.formatter(b'log', opts)
703 704 return changesetformatter(ui, repo, fm, *postargs)
704 705
705 706 if not spec.ref and not spec.tmpl and not spec.mapfile:
706 707 return changesetprinter(ui, repo, *postargs)
707 708
708 709 return changesettemplater(ui, repo, spec, *postargs)
709 710
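A hedged hgrc sketch of the two configuration-level entries in the precedence list above (the template and style values are illustrative)::

    [command-templates]
    # checked before ui.style; quotes are stripped via templater.unquotestring()
    log = "{rev}: {desc|firstline}\n"

    [ui]
    # consulted only when no template is given anywhere
    style = compact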
710 711
711 712 @attr.s
712 713 class walkopts(object):
713 714 """Options to configure a set of revisions and file matcher factory
714 715 to scan revision/file history
715 716 """
716 717
717 718 # raw command-line parameters, which a matcher will be built from
718 719 pats = attr.ib() # type: List[bytes]
719 720 opts = attr.ib() # type: Dict[bytes, Any]
720 721
721 722 # a list of revset expressions to be traversed; if follow, it specifies
722 723 # the start revisions
723 724 revspec = attr.ib() # type: List[bytes]
724 725
725 726 # miscellaneous queries to filter revisions (see "hg help log" for details)
726 727 bookmarks = attr.ib(default=attr.Factory(list)) # type: List[bytes]
727 728 branches = attr.ib(default=attr.Factory(list)) # type: List[bytes]
728 729 date = attr.ib(default=None) # type: Optional[bytes]
729 730 keywords = attr.ib(default=attr.Factory(list)) # type: List[bytes]
730 731 no_merges = attr.ib(default=False) # type: bool
731 732 only_merges = attr.ib(default=False) # type: bool
732 733 prune_ancestors = attr.ib(default=attr.Factory(list)) # type: List[bytes]
733 734 users = attr.ib(default=attr.Factory(list)) # type: List[bytes]
734 735
735 736 # miscellaneous matcher arguments
736 737 include_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes]
737 738 exclude_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes]
738 739
739 740 # 0: no follow, 1: follow first, 2: follow both parents
740 741 follow = attr.ib(default=0) # type: int
741 742
742 743 # do not attempt filelog-based traversal, which may be fast but cannot
743 744 # include revisions where files were removed
744 745 force_changelog_traversal = attr.ib(default=False) # type: bool
745 746
746 747 # filter revisions by file patterns, which should be disabled only if
747 748 # you want to include revisions where files were unmodified
748 749 filter_revisions_by_pats = attr.ib(default=True) # type: bool
749 750
750 751 # sort revisions prior to traversal: 'desc', 'topo', or None
751 752 sort_revisions = attr.ib(default=None) # type: Optional[bytes]
752 753
753 754 # limit number of changes displayed; None means unlimited
754 755 limit = attr.ib(default=None) # type: Optional[int]
755 756
756 757
757 758 def parseopts(ui, pats, opts):
758 759 # type: (Any, Sequence[bytes], Dict[bytes, Any]) -> walkopts
759 760 """Parse log command options into walkopts
760 761
761 762 The returned walkopts will be passed in to getrevs() or makewalker().
762 763 """
763 764 if opts.get(b'follow_first'):
764 765 follow = 1
765 766 elif opts.get(b'follow'):
766 767 follow = 2
767 768 else:
768 769 follow = 0
769 770
770 771 if opts.get(b'graph'):
771 772 if ui.configbool(b'experimental', b'log.topo'):
772 773 sort_revisions = b'topo'
773 774 else:
774 775 sort_revisions = b'desc'
775 776 else:
776 777 sort_revisions = None
777 778
778 779 return walkopts(
779 780 pats=pats,
780 781 opts=opts,
781 782 revspec=opts.get(b'rev', []),
782 783 bookmarks=opts.get(b'bookmark', []),
783 784 # branch and only_branch are really aliases and must be handled at
784 785 # the same time
785 786 branches=opts.get(b'branch', []) + opts.get(b'only_branch', []),
786 787 date=opts.get(b'date'),
787 788 keywords=opts.get(b'keyword', []),
788 789 no_merges=bool(opts.get(b'no_merges')),
789 790 only_merges=bool(opts.get(b'only_merges')),
790 791 prune_ancestors=opts.get(b'prune', []),
791 792 users=opts.get(b'user', []),
792 793 include_pats=opts.get(b'include', []),
793 794 exclude_pats=opts.get(b'exclude', []),
794 795 follow=follow,
795 796 force_changelog_traversal=bool(opts.get(b'removed')),
796 797 sort_revisions=sort_revisions,
797 798 limit=getlimit(opts),
798 799 )
799 800
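A hedged example of the translation performed above (opts keys as read by parseopts; `ui` is assumed to be in scope, and the values are illustrative)::

    opts = {b'follow': True, b'rev': [b'tip~10::tip'], b'user': [b'alice']}
    wopts = parseopts(ui, [b'src/x.py'], opts)
    # wopts.follow == 2 (follow both parents), since b'follow_first' is unset
    # wopts.revspec == [b'tip~10::tip'], wopts.users == [b'alice']
    # wopts.sort_revisions is None and wopts.limit is None (no --graph, no --limit)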
800 801
801 802 def _makematcher(repo, revs, wopts):
802 803 """Build matcher and expanded patterns from log options
803 804
804 805 If --follow, revs are the revisions to follow from.
805 806
806 807 Returns (match, pats, slowpath) where
807 808 - match: a matcher built from the given pats and -I/-X opts
808 809 - pats: patterns used (globs are expanded on Windows)
809 810 - slowpath: True if patterns aren't as simple as scanning filelogs
810 811 """
811 812 # pats/include/exclude are passed to match.match() directly in
812 813 # _matchfiles() revset, but a log-like command should build its matcher
813 814 # with scmutil.match(). The difference is input pats are globbed on
814 815 # platforms without shell expansion (windows).
815 816 wctx = repo[None]
816 817 match, pats = scmutil.matchandpats(wctx, wopts.pats, wopts.opts)
817 818 slowpath = match.anypats() or (
818 819 not match.always() and wopts.force_changelog_traversal
819 820 )
820 821 if not slowpath:
821 822 if wopts.follow and wopts.revspec:
822 823 # There may be the case that a path doesn't exist in some (but
823 824 # not all) of the specified start revisions, but let's consider
824 825 # the path valid. Missing files will be warned about by the matcher.
825 826 startctxs = [repo[r] for r in revs]
826 827 for f in match.files():
827 828 found = False
828 829 for c in startctxs:
829 830 if f in c:
830 831 found = True
831 832 elif c.hasdir(f):
832 833 # If a directory exists in any of the start revisions,
833 834 # take the slow path.
834 835 found = slowpath = True
835 836 if not found:
836 837 raise error.Abort(
837 838 _(
838 839 b'cannot follow file not in any of the specified '
839 840 b'revisions: "%s"'
840 841 )
841 842 % f
842 843 )
843 844 elif wopts.follow:
844 845 for f in match.files():
845 846 if f not in wctx:
846 847 # If the file exists, it may be a directory, so let it
847 848 # take the slow path.
848 849 if os.path.exists(repo.wjoin(f)):
849 850 slowpath = True
850 851 continue
851 852 else:
852 853 raise error.Abort(
853 854 _(
854 855 b'cannot follow file not in parent '
855 856 b'revision: "%s"'
856 857 )
857 858 % f
858 859 )
859 860 filelog = repo.file(f)
860 861 if not filelog:
861 862 # A file exists in wdir but not in history, which means
862 863 # the file isn't committed yet.
863 864 raise error.Abort(
864 865 _(b'cannot follow nonexistent file: "%s"') % f
865 866 )
866 867 else:
867 868 for f in match.files():
868 869 filelog = repo.file(f)
869 870 if not filelog:
870 871 # A zero count may be a directory or deleted file, so
871 872 # try to find matching entries on the slow path.
872 873 slowpath = True
873 874
874 875 # We decided to fall back to the slowpath because at least one
875 876 # of the paths was not a file. Check to see if at least one of them
876 877 # existed in history - in that case, we'll continue down the
877 878 # slowpath; otherwise, we can turn off the slowpath
878 879 if slowpath:
879 880 for path in match.files():
880 881 if not path or path in repo.store:
881 882 break
882 883 else:
883 884 slowpath = False
884 885
885 886 return match, pats, slowpath
886 887
887 888
888 889 def _fileancestors(repo, revs, match, followfirst):
889 890 fctxs = []
890 891 for r in revs:
891 892 ctx = repo[r]
892 893 fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
893 894
894 895 # When displaying a revision with --patch --follow FILE, we have
895 896 # to know which file of the revision must be diffed. With
896 897 # --follow, we want the names of the ancestors of FILE in the
897 898 # revision, stored in "fcache". "fcache" is populated as a side effect
898 899 # of the graph traversal.
899 900 fcache = {}
900 901
901 902 def filematcher(ctx):
902 903 return scmutil.matchfiles(repo, fcache.get(scmutil.intrev(ctx), []))
903 904
904 905 def revgen():
905 906 for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
906 907 fcache[rev] = [c.path() for c in cs]
907 908 yield rev
908 909
909 910 return smartset.generatorset(revgen(), iterasc=False), filematcher
910 911
911 912
912 913 def _makenofollowfilematcher(repo, pats, opts):
913 914 '''hook for extensions to override the filematcher for non-follow cases'''
914 915 return None
915 916
916 917
917 918 _opt2logrevset = {
918 919 b'no_merges': (b'not merge()', None),
919 920 b'only_merges': (b'merge()', None),
920 921 b'_matchfiles': (None, b'_matchfiles(%ps)'),
921 922 b'date': (b'date(%s)', None),
922 923 b'branch': (b'branch(%s)', b'%lr'),
923 924 b'_patslog': (b'filelog(%s)', b'%lr'),
924 925 b'keyword': (b'keyword(%s)', b'%lr'),
925 926 b'prune': (b'ancestors(%s)', b'not %lr'),
926 927 b'user': (b'user(%s)', b'%lr'),
927 928 }
928 929
929 930
930 931 def _makerevset(repo, wopts, slowpath):
931 932 """Return a revset string built from log options and file patterns"""
932 933 opts = {
933 934 b'branch': [b'literal:' + repo.lookupbranch(b) for b in wopts.branches],
934 935 b'date': wopts.date,
935 936 b'keyword': wopts.keywords,
936 937 b'no_merges': wopts.no_merges,
937 938 b'only_merges': wopts.only_merges,
938 939 b'prune': wopts.prune_ancestors,
939 940 b'user': [b'literal:' + v for v in wopts.users],
940 941 }
941 942
942 943 if wopts.filter_revisions_by_pats and slowpath:
943 944 # pats/include/exclude cannot be represented as separate
944 945 # revset expressions as their filtering logic applies at file
945 946 # level. For instance "-I a -X b" matches a revision touching
946 947 # "a" and "b" while "file(a) and not file(b)" does
947 948 # not. Besides, filesets are evaluated against the working
948 949 # directory.
949 950 matchargs = [b'r:', b'd:relpath']
950 951 for p in wopts.pats:
951 952 matchargs.append(b'p:' + p)
952 953 for p in wopts.include_pats:
953 954 matchargs.append(b'i:' + p)
954 955 for p in wopts.exclude_pats:
955 956 matchargs.append(b'x:' + p)
956 957 opts[b'_matchfiles'] = matchargs
957 958 elif wopts.filter_revisions_by_pats and not wopts.follow:
958 959 opts[b'_patslog'] = list(wopts.pats)
959 960
960 961 expr = []
961 962 for op, val in sorted(pycompat.iteritems(opts)):
962 963 if not val:
963 964 continue
964 965 revop, listop = _opt2logrevset[op]
965 966 if revop and b'%' not in revop:
966 967 expr.append(revop)
967 968 elif not listop:
968 969 expr.append(revsetlang.formatspec(revop, val))
969 970 else:
970 971 if revop:
971 972 val = [revsetlang.formatspec(revop, v) for v in val]
972 973 expr.append(revsetlang.formatspec(listop, val))
973 974
974 975 if wopts.bookmarks:
975 976 expr.append(
976 977 revsetlang.formatspec(
977 978 b'%lr',
978 979 [scmutil.format_bookmark_revspec(v) for v in wopts.bookmarks],
979 980 )
980 981 )
981 982
982 983 if expr:
983 984 expr = b'(' + b' and '.join(expr) + b')'
984 985 else:
985 986 expr = None
986 987 return expr
987 988
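A hedged illustration of the expression assembly above: with --no-merges, --user alice and --keyword bug, the per-option fragments from _opt2logrevset combine roughly as::

    # no_merges -> b'not merge()' (no % placeholder, appended verbatim)
    # user      -> b'user("literal:alice")' via %s and the %lr list form
    # keyword   -> b'keyword("bug")'
    expr = b'(keyword("bug") and not merge() and user("literal:alice"))'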
988 989
989 990 def _initialrevs(repo, wopts):
990 991 """Return the initial set of revisions to be filtered or followed"""
991 992 if wopts.revspec:
992 993 revs = scmutil.revrange(repo, wopts.revspec)
993 994 elif wopts.follow and repo.dirstate.p1() == nullid:
994 995 revs = smartset.baseset()
995 996 elif wopts.follow:
996 997 revs = repo.revs(b'.')
997 998 else:
998 999 revs = smartset.spanset(repo)
999 1000 revs.reverse()
1000 1001 return revs
1001 1002
1002 1003
1003 1004 def makewalker(repo, wopts):
1004 1005 # type: (Any, walkopts) -> Tuple[smartset.abstractsmartset, Optional[Callable[[Any], matchmod.basematcher]]]
1005 1006 """Build (revs, makefilematcher) to scan revision/file history
1006 1007
1007 1008 - revs is the smartset to be traversed.
1008 1009 - makefilematcher is a function to map ctx to a matcher for that revision
1009 1010 """
1010 1011 revs = _initialrevs(repo, wopts)
1011 1012 if not revs:
1012 1013 return smartset.baseset(), None
1013 1014 # TODO: might want to merge slowpath with wopts.force_changelog_traversal
1014 1015 match, pats, slowpath = _makematcher(repo, revs, wopts)
1015 1016 wopts = attr.evolve(wopts, pats=pats)
1016 1017
1017 1018 filematcher = None
1018 1019 if wopts.follow:
1019 1020 if slowpath or match.always():
1020 1021 revs = dagop.revancestors(repo, revs, followfirst=wopts.follow == 1)
1021 1022 else:
1022 1023 assert not wopts.force_changelog_traversal
1023 1024 revs, filematcher = _fileancestors(
1024 1025 repo, revs, match, followfirst=wopts.follow == 1
1025 1026 )
1026 1027 revs.reverse()
1027 1028 if filematcher is None:
1028 1029 filematcher = _makenofollowfilematcher(repo, wopts.pats, wopts.opts)
1029 1030 if filematcher is None:
1030 1031
1031 1032 def filematcher(ctx):
1032 1033 return match
1033 1034
1034 1035 expr = _makerevset(repo, wopts, slowpath)
1035 1036 if wopts.sort_revisions:
1036 1037 assert wopts.sort_revisions in {b'topo', b'desc'}
1037 1038 if wopts.sort_revisions == b'topo':
1038 1039 if not revs.istopo():
1039 1040 revs = dagop.toposort(revs, repo.changelog.parentrevs)
1040 1041 # TODO: try to iterate the set lazily
1041 1042 revs = revset.baseset(list(revs), istopo=True)
1042 1043 elif not (revs.isdescending() or revs.istopo()):
1043 1044 # User-specified revs might be unsorted
1044 1045 revs.sort(reverse=True)
1045 1046 if expr:
1046 1047 matcher = revset.match(None, expr)
1047 1048 revs = matcher(repo, revs)
1048 1049 if wopts.limit is not None:
1049 1050 revs = revs.slice(0, wopts.limit)
1050 1051
1051 1052 return revs, filematcher
1052 1053
1053 1054
1054 1055 def getrevs(repo, wopts):
1055 1056 # type: (Any, walkopts) -> Tuple[smartset.abstractsmartset, Optional[changesetdiffer]]
1056 1057 """Return (revs, differ) where revs is a smartset
1057 1058
1058 1059 differ is a changesetdiffer with pre-configured file matcher.
1059 1060 """
1060 1061 revs, filematcher = makewalker(repo, wopts)
1061 1062 if not revs:
1062 1063 return revs, None
1063 1064 differ = changesetdiffer()
1064 1065 differ._makefilematcher = filematcher
1065 1066 return revs, differ
1066 1067
1067 1068
1068 1069 def _parselinerangeopt(repo, opts):
1069 1070 """Parse --line-range log option and return a list of tuples (filename,
1070 1071 (fromline, toline)).
1071 1072 """
1072 1073 linerangebyfname = []
1073 1074 for pat in opts.get(b'line_range', []):
1074 1075 try:
1075 1076 pat, linerange = pat.rsplit(b',', 1)
1076 1077 except ValueError:
1077 1078 raise error.Abort(_(b'malformatted line-range pattern %s') % pat)
1078 1079 try:
1079 1080 fromline, toline = map(int, linerange.split(b':'))
1080 1081 except ValueError:
1081 1082 raise error.Abort(_(b"invalid line range for %s") % pat)
1082 1083 msg = _(b"line range pattern '%s' must match exactly one file") % pat
1083 1084 fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
1084 1085 linerangebyfname.append(
1085 1086 (fname, util.processlinerange(fromline, toline))
1086 1087 )
1087 1088 return linerangebyfname
1088 1089
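The option format parsed above is FILE,FROMLINE:TOLINE; a hedged worked example of the two splits::

    pat = b'foo.py,10:25'                                # e.g. --line-range foo.py,10:25
    pat, linerange = pat.rsplit(b',', 1)                 # -> (b'foo.py', b'10:25')
    fromline, toline = map(int, linerange.split(b':'))   # -> (10, 25)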
1089 1090
1090 1091 def getlinerangerevs(repo, userrevs, opts):
1091 1092 """Return (revs, differ).
1092 1093
1093 1094 "revs" are revisions obtained by processing "line-range" log options and
1094 1095 walking block ancestors of each specified file/line-range.
1095 1096
1096 1097 "differ" is a changesetdiffer with pre-configured file matcher and hunks
1097 1098 filter.
1098 1099 """
1099 1100 wctx = repo[None]
1100 1101
1101 1102 # Two-levels map of "rev -> file ctx -> [line range]".
1102 1103 linerangesbyrev = {}
1103 1104 for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
1104 1105 if fname not in wctx:
1105 1106 raise error.Abort(
1106 1107 _(b'cannot follow file not in parent revision: "%s"') % fname
1107 1108 )
1108 1109 fctx = wctx.filectx(fname)
1109 1110 for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
1110 1111 rev = fctx.introrev()
1111 1112 if rev is None:
1112 1113 rev = wdirrev
1113 1114 if rev not in userrevs:
1114 1115 continue
1115 1116 linerangesbyrev.setdefault(rev, {}).setdefault(
1116 1117 fctx.path(), []
1117 1118 ).append(linerange)
1118 1119
1119 1120 def nofilterhunksfn(fctx, hunks):
1120 1121 return hunks
1121 1122
1122 1123 def hunksfilter(ctx):
1123 1124 fctxlineranges = linerangesbyrev.get(scmutil.intrev(ctx))
1124 1125 if fctxlineranges is None:
1125 1126 return nofilterhunksfn
1126 1127
1127 1128 def filterfn(fctx, hunks):
1128 1129 lineranges = fctxlineranges.get(fctx.path())
1129 1130 if lineranges is not None:
1130 1131 for hr, lines in hunks:
1131 1132 if hr is None: # binary
1132 1133 yield hr, lines
1133 1134 continue
1134 1135 if any(mdiff.hunkinrange(hr[2:], lr) for lr in lineranges):
1135 1136 yield hr, lines
1136 1137 else:
1137 1138 for hunk in hunks:
1138 1139 yield hunk
1139 1140
1140 1141 return filterfn
1141 1142
1142 1143 def filematcher(ctx):
1143 1144 files = list(linerangesbyrev.get(scmutil.intrev(ctx), []))
1144 1145 return scmutil.matchfiles(repo, files)
1145 1146
1146 1147 revs = sorted(linerangesbyrev, reverse=True)
1147 1148
1148 1149 differ = changesetdiffer()
1149 1150 differ._makefilematcher = filematcher
1150 1151 differ._makehunksfilter = hunksfilter
1151 1152 return smartset.baseset(revs), differ
1152 1153
1153 1154
1154 1155 def _graphnodeformatter(ui, displayer):
1155 1156 spec = ui.config(b'command-templates', b'graphnode')
1156 1157 if not spec:
1157 1158 return templatekw.getgraphnode # fast path for "{graphnode}"
1158 1159
1159 1160 spec = templater.unquotestring(spec)
1160 1161 if isinstance(displayer, changesettemplater):
1161 1162 # reuse cache of slow templates
1162 1163 tres = displayer._tresources
1163 1164 else:
1164 1165 tres = formatter.templateresources(ui)
1165 1166 templ = formatter.maketemplater(
1166 1167 ui, spec, defaults=templatekw.keywords, resources=tres
1167 1168 )
1168 1169
1169 1170 def formatnode(repo, ctx, cache):
1170 1171 props = {b'ctx': ctx, b'repo': repo}
1171 1172 return templ.renderdefault(props)
1172 1173
1173 1174 return formatnode
1174 1175
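The graph node character read above can itself be a template; a hedged hgrc sketch (assuming the standard `obsolete` and `graphnode` template keywords are available)::

    [command-templates]
    # draw obsolete changesets as 'x', everything else as the default symbol
    graphnode = "{if(obsolete, 'x', graphnode)}"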
1175 1176
1176 1177 def displaygraph(ui, repo, dag, displayer, edgefn, getcopies=None, props=None):
1177 1178 props = props or {}
1178 1179 formatnode = _graphnodeformatter(ui, displayer)
1179 1180 state = graphmod.asciistate()
1180 1181 styles = state.styles
1181 1182
1182 1183 # only set graph styling if HGPLAIN is not set.
1183 1184 if ui.plain(b'graph'):
1184 1185 # set all edge styles to |, the default pre-3.8 behaviour
1185 1186 styles.update(dict.fromkeys(styles, b'|'))
1186 1187 else:
1187 1188 edgetypes = {
1188 1189 b'parent': graphmod.PARENT,
1189 1190 b'grandparent': graphmod.GRANDPARENT,
1190 1191 b'missing': graphmod.MISSINGPARENT,
1191 1192 }
1192 1193 for name, key in edgetypes.items():
1193 1194 # experimental config: experimental.graphstyle.*
1194 1195 styles[key] = ui.config(
1195 1196 b'experimental', b'graphstyle.%s' % name, styles[key]
1196 1197 )
1197 1198 if not styles[key]:
1198 1199 styles[key] = None
1199 1200
1200 1201 # experimental config: experimental.graphshorten
1201 1202 state.graphshorten = ui.configbool(b'experimental', b'graphshorten')
1202 1203
1203 1204 formatnode_cache = {}
1204 1205 for rev, type, ctx, parents in dag:
1205 1206 char = formatnode(repo, ctx, formatnode_cache)
1206 1207 copies = getcopies(ctx) if getcopies else None
1207 1208 edges = edgefn(type, char, state, rev, parents)
1208 1209 firstedge = next(edges)
1209 1210 width = firstedge[2]
1210 1211 displayer.show(
1211 1212 ctx, copies=copies, graphwidth=width, **pycompat.strkwargs(props)
1212 1213 )
1213 1214 lines = displayer.hunk.pop(rev).split(b'\n')
1214 1215 if not lines[-1]:
1215 1216 del lines[-1]
1216 1217 displayer.flush(ctx)
1217 1218 for type, char, width, coldata in itertools.chain([firstedge], edges):
1218 1219 graphmod.ascii(ui, state, type, char, lines, coldata)
1219 1220 lines = []
1220 1221 displayer.close()
1221 1222
1222 1223
1223 1224 def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
1224 1225 revdag = graphmod.dagwalker(repo, revs)
1225 1226 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
1226 1227
1227 1228
1228 1229 def displayrevs(ui, repo, revs, displayer, getcopies):
1229 1230 for rev in revs:
1230 1231 ctx = repo[rev]
1231 1232 copies = getcopies(ctx) if getcopies else None
1232 1233 displayer.show(ctx, copies=copies)
1233 1234 displayer.flush(ctx)
1234 1235 displayer.close()
1235 1236
1236 1237
1237 1238 def checkunsupportedgraphflags(pats, opts):
1238 1239 for op in [b"newest_first"]:
1239 1240 if op in opts and opts[op]:
1240 1241 raise error.Abort(
1241 1242 _(b"-G/--graph option is incompatible with --%s")
1242 1243 % op.replace(b"_", b"-")
1243 1244 )
1244 1245
1245 1246
1246 1247 def graphrevs(repo, nodes, opts):
1247 1248 limit = getlimit(opts)
1248 1249 nodes.reverse()
1249 1250 if limit is not None:
1250 1251 nodes = nodes[:limit]
1251 1252 return graphmod.nodes(repo, nodes)
@@ -1,840 +1,841 b''
1 1 from __future__ import absolute_import
2 2
3 3 import collections
4 4 import errno
5 5 import shutil
6 6 import struct
7 7
8 8 from .i18n import _
9 9 from .node import (
10 10 bin,
11 11 hex,
12 12 nullhex,
13 13 nullid,
14 nullrev,
14 15 )
15 16 from . import (
16 17 error,
17 18 filemerge,
18 19 pycompat,
19 20 util,
20 21 )
21 22 from .utils import hashutil
22 23
23 24 _pack = struct.pack
24 25 _unpack = struct.unpack
25 26
26 27
27 28 def _droponode(data):
28 29 # used for compatibility for v1
29 30 bits = data.split(b'\0')
30 31 bits = bits[:-2] + bits[-1:]
31 32 return b'\0'.join(bits)
32 33
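``_droponode`` strips the second-to-last NUL-separated field (the "other file node", which the v1 format lacks) from a v2 record body. A quick sketch with made-up field values (the real "F" record carries more fields, but the mechanics are the same)::

    # field values are illustrative only
    v2 = b'\0'.join([b'u', b'localkey', b'afile', b'anode', b'onode', b'flags'])
    bits = v2.split(b'\0')
    v1 = b'\0'.join(bits[:-2] + bits[-1:])  # drop the "other node" field
    assert v1 == b'\0'.join([b'u', b'localkey', b'afile', b'anode', b'flags'])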
33 34
34 35 def _filectxorabsent(hexnode, ctx, f):
35 36 if hexnode == nullhex:
36 37 return filemerge.absentfilectx(ctx, f)
37 38 else:
38 39 return ctx[f]
39 40
40 41
41 42 # Merge state record types. See ``mergestate`` docs for more.
42 43
43 44 #####
44 45 # merge records which record metadata about the current merge;
45 46 # they exist only once in a mergestate
46 47 #####
47 48 RECORD_LOCAL = b'L'
48 49 RECORD_OTHER = b'O'
49 50 # record merge labels
50 51 RECORD_LABELS = b'l'
51 52
52 53 #####
53 54 # record extra information about files, with one entry containing info about one
54 55 # file. Hence, multiple of them can exists
55 56 #####
56 57 RECORD_FILE_VALUES = b'f'
57 58
58 59 #####
59 60 # merge records which represents state of individual merges of files/folders
60 61 # These are top level records for each entry containing merge related info.
61 62 # Each record of these has info about one file. Hence multiple of them can
62 63 # exists
63 64 #####
64 65 RECORD_MERGED = b'F'
65 66 RECORD_CHANGEDELETE_CONFLICT = b'C'
66 67 # the path was dir on one side of merge and file on another
67 68 RECORD_PATH_CONFLICT = b'P'
68 69
69 70 #####
70 71 # possible states which a merge entry can have. These are stored inside top-level
71 72 # merge records mentioned just above.
72 73 #####
73 74 MERGE_RECORD_UNRESOLVED = b'u'
74 75 MERGE_RECORD_RESOLVED = b'r'
75 76 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
76 77 MERGE_RECORD_RESOLVED_PATH = b'pr'
77 78 # represents that the file was automatically merged in favor
78 79 # of the other version. This info is used on commit.
79 80 # This is now deprecated and commit-related information is now
80 81 # stored in RECORD_FILE_VALUES
81 82 MERGE_RECORD_MERGED_OTHER = b'o'
82 83
83 84 #####
84 85 # top level record which stores other unknown records. Multiple of these can
85 86 # exist
86 87 #####
87 88 RECORD_OVERRIDE = b't'
88 89
89 90 #####
90 91 # legacy records which are no longer used but kept to prevent breaking BC
91 92 #####
92 93 # This record was released in 5.4 and usage was removed in 5.5
93 94 LEGACY_RECORD_RESOLVED_OTHER = b'R'
94 95 # This record was released in 3.7 and usage was removed in 5.6
95 96 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
96 97 # This record was released in 3.7 and usage was removed in 5.6
97 98 LEGACY_MERGE_DRIVER_STATE = b'm'
98 99 # This record was released in 3.7 and usage was removed in 5.6
99 100 LEGACY_MERGE_DRIVER_MERGE = b'D'
100 101
101 102
102 103 ACTION_FORGET = b'f'
103 104 ACTION_REMOVE = b'r'
104 105 ACTION_ADD = b'a'
105 106 ACTION_GET = b'g'
106 107 ACTION_PATH_CONFLICT = b'p'
107 108 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
108 109 ACTION_ADD_MODIFIED = b'am'
109 110 ACTION_CREATED = b'c'
110 111 ACTION_DELETED_CHANGED = b'dc'
111 112 ACTION_CHANGED_DELETED = b'cd'
112 113 ACTION_MERGE = b'm'
113 114 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
114 115 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
115 116 ACTION_KEEP = b'k'
116 117 # the file was absent on local side before merge and we should
117 118 # keep it absent (absent means file not present, it can be a result
118 119 # of file deletion, rename etc.)
119 120 ACTION_KEEP_ABSENT = b'ka'
120 121 # the file is absent on the ancestor and remote side of the merge
121 122 # hence this file is new and we should keep it
122 123 ACTION_KEEP_NEW = b'kn'
123 124 ACTION_EXEC = b'e'
124 125 ACTION_CREATED_MERGE = b'cm'
125 126
126 127 # actions which are no op
127 128 NO_OP_ACTIONS = (
128 129 ACTION_KEEP,
129 130 ACTION_KEEP_ABSENT,
130 131 ACTION_KEEP_NEW,
131 132 )
132 133
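As the comments above note, the case of a record type encodes whether a reader may ignore it: uppercase records are mandatory, lowercase ones advisory. ``_read`` below applies exactly this rule; a one-line sketch of the check::

    def is_mandatory(rtype):
        # readers that do not understand an uppercase record must abort
        return not rtype.islower()

    assert is_mandatory(b'F') and not is_mandatory(b'l')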
133 134
134 135 class _mergestate_base(object):
135 136 """track 3-way merge state of individual files
136 137
137 138 The merge state is stored on disk when needed. Two files are used: one with
138 139 an old format (version 1), and one with a new format (version 2). Version 2
139 140 stores a superset of the data in version 1, including new kinds of records
140 141 in the future. For more about the new format, see the documentation for
141 142 `_readrecordsv2`.
142 143
143 144 Each record can contain arbitrary content, and has an associated type. This
144 145 `type` should be a letter. If `type` is uppercase, the record is mandatory:
145 146 versions of Mercurial that don't support it should abort. If `type` is
146 147 lowercase, the record can be safely ignored.
147 148
148 149 Currently known records:
149 150
150 151 L: the node of the "local" part of the merge (hexified version)
151 152 O: the node of the "other" part of the merge (hexified version)
152 153 F: a file to be merged entry
153 154 C: a change/delete or delete/change conflict
154 155 P: a path conflict (file vs directory)
155 156 f: a (filename, dictionary) tuple of optional values for a given file
156 157 l: the labels for the parts of the merge.
157 158
158 159 Merge record states (stored in self._state, indexed by filename):
159 160 u: unresolved conflict
160 161 r: resolved conflict
161 162 pu: unresolved path conflict (file conflicts with directory)
162 163 pr: resolved path conflict
163 164 o: file was merged in favor of other parent of merge (DEPRECATED)
164 165
165 166 The resolve command transitions between 'u' and 'r' for conflicts and
166 167 'pu' and 'pr' for path conflicts.
167 168 """
168 169
169 170 def __init__(self, repo):
170 171 """Initialize the merge state.
171 172
172 173 Do not use this directly! Instead call read() or clean()."""
173 174 self._repo = repo
174 175 self._state = {}
175 176 self._stateextras = collections.defaultdict(dict)
176 177 self._local = None
177 178 self._other = None
178 179 self._labels = None
179 180 # contains a mapping of form:
180 181 # {filename : (merge_return_value, action_to_be_performed)}
181 182 # these are the results of re-running the merge process
182 183 # this dict is used to perform actions on dirstate caused by re-running
183 184 # the merge
184 185 self._results = {}
185 186 self._dirty = False
186 187
187 188 def reset(self):
188 189 pass
189 190
190 191 def start(self, node, other, labels=None):
191 192 self._local = node
192 193 self._other = other
193 194 self._labels = labels
194 195
195 196 @util.propertycache
196 197 def local(self):
197 198 if self._local is None:
198 199 msg = b"local accessed but self._local isn't set"
199 200 raise error.ProgrammingError(msg)
200 201 return self._local
201 202
202 203 @util.propertycache
203 204 def localctx(self):
204 205 return self._repo[self.local]
205 206
206 207 @util.propertycache
207 208 def other(self):
208 209 if self._other is None:
209 210 msg = b"other accessed but self._other isn't set"
210 211 raise error.ProgrammingError(msg)
211 212 return self._other
212 213
213 214 @util.propertycache
214 215 def otherctx(self):
215 216 return self._repo[self.other]
216 217
217 218 def active(self):
218 219 """Whether mergestate is active.
219 220
220 221 Returns True if there appears to be mergestate. This is a rough proxy
221 222 for "is a merge in progress."
222 223 """
223 224 return bool(self._local) or bool(self._state)
224 225
225 226 def commit(self):
226 227 """Write current state on disk (if necessary)"""
227 228
228 229 @staticmethod
229 230 def getlocalkey(path):
230 231 """hash the path of a local file context for storage in the .hg/merge
231 232 directory."""
232 233
233 234 return hex(hashutil.sha1(path).digest())
234 235
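``getlocalkey`` names the backup file under ``.hg/merge`` after the SHA-1 of the tracked path. Since ``hashutil.sha1`` is a thin wrapper around the stdlib hash, an equivalent sketch (the path is a made-up example)::

    import hashlib

    def local_key(path):
        # 40-char hex digest used as the filename under .hg/merge/
        return hashlib.sha1(path).hexdigest()

    print(local_key(b'src/module.py'))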
235 236 def _make_backup(self, fctx, localkey):
236 237 raise NotImplementedError()
237 238
238 239 def _restore_backup(self, fctx, localkey, flags):
239 240 raise NotImplementedError()
240 241
241 242 def add(self, fcl, fco, fca, fd):
242 243 """add a new (potentially?) conflicting file the merge state
243 244 fcl: file context for local,
244 245 fco: file context for remote,
245 246 fca: file context for ancestors,
246 247 fd: file path of the resulting merge.
247 248
248 249 note: also write the local version to the `.hg/merge` directory.
249 250 """
250 251 if fcl.isabsent():
251 252 localkey = nullhex
252 253 else:
253 254 localkey = mergestate.getlocalkey(fcl.path())
254 255 self._make_backup(fcl, localkey)
255 256 self._state[fd] = [
256 257 MERGE_RECORD_UNRESOLVED,
257 258 localkey,
258 259 fcl.path(),
259 260 fca.path(),
260 261 hex(fca.filenode()),
261 262 fco.path(),
262 263 hex(fco.filenode()),
263 264 fcl.flags(),
264 265 ]
265 266 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
266 267 self._dirty = True
267 268
268 269 def addpathconflict(self, path, frename, forigin):
269 270 """add a new conflicting path to the merge state
270 271 path: the path that conflicts
271 272 frename: the filename the conflicting file was renamed to
272 273 forigin: origin of the file ('l' or 'r' for local/remote)
273 274 """
274 275 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
275 276 self._dirty = True
276 277
277 278 def addcommitinfo(self, path, data):
278 279 """stores information which is required at commit
279 280 into _stateextras"""
280 281 self._stateextras[path].update(data)
281 282 self._dirty = True
282 283
283 284 def __contains__(self, dfile):
284 285 return dfile in self._state
285 286
286 287 def __getitem__(self, dfile):
287 288 return self._state[dfile][0]
288 289
289 290 def __iter__(self):
290 291 return iter(sorted(self._state))
291 292
292 293 def files(self):
293 294 return self._state.keys()
294 295
295 296 def mark(self, dfile, state):
296 297 self._state[dfile][0] = state
297 298 self._dirty = True
298 299
299 300 def unresolved(self):
300 301 """Obtain the paths of unresolved files."""
301 302
302 303 for f, entry in pycompat.iteritems(self._state):
303 304 if entry[0] in (
304 305 MERGE_RECORD_UNRESOLVED,
305 306 MERGE_RECORD_UNRESOLVED_PATH,
306 307 ):
307 308 yield f
308 309
309 310 def allextras(self):
310 311 """ return all extras information stored with the mergestate """
311 312 return self._stateextras
312 313
313 314 def extras(self, filename):
314 315 """ return extras stored with the mergestate for the given filename """
315 316 return self._stateextras[filename]
316 317
317 318 def _resolve(self, preresolve, dfile, wctx):
318 319 """rerun merge process for file path `dfile`.
319 320 Returns whether the merge was completed and the return value of merge
320 321 obtained from filemerge._filemerge().
321 322 """
322 323 if self[dfile] in (
323 324 MERGE_RECORD_RESOLVED,
324 325 LEGACY_RECORD_DRIVER_RESOLVED,
325 326 ):
326 327 return True, 0
327 328 stateentry = self._state[dfile]
328 329 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
329 330 octx = self._repo[self._other]
330 331 extras = self.extras(dfile)
331 332 anccommitnode = extras.get(b'ancestorlinknode')
332 333 if anccommitnode:
333 334 actx = self._repo[anccommitnode]
334 335 else:
335 336 actx = None
336 337 fcd = _filectxorabsent(localkey, wctx, dfile)
337 338 fco = _filectxorabsent(onode, octx, ofile)
338 339 # TODO: move this to filectxorabsent
339 340 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
340 341 # "premerge" x flags
341 342 flo = fco.flags()
342 343 fla = fca.flags()
343 344 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
344 if fca.node() == nullid and flags != flo:
345 if fca.rev() == nullrev and flags != flo:
345 346 if preresolve:
346 347 self._repo.ui.warn(
347 348 _(
348 349 b'warning: cannot merge flags for %s '
349 350 b'without common ancestor - keeping local flags\n'
350 351 )
351 352 % afile
352 353 )
353 354 elif flags == fla:
354 355 flags = flo
355 356 if preresolve:
356 357 # restore local
357 358 if localkey != nullhex:
358 359 self._restore_backup(wctx[dfile], localkey, flags)
359 360 else:
360 361 wctx[dfile].remove(ignoremissing=True)
361 362 complete, merge_ret, deleted = filemerge.premerge(
362 363 self._repo,
363 364 wctx,
364 365 self._local,
365 366 lfile,
366 367 fcd,
367 368 fco,
368 369 fca,
369 370 labels=self._labels,
370 371 )
371 372 else:
372 373 complete, merge_ret, deleted = filemerge.filemerge(
373 374 self._repo,
374 375 wctx,
375 376 self._local,
376 377 lfile,
377 378 fcd,
378 379 fco,
379 380 fca,
380 381 labels=self._labels,
381 382 )
382 383 if merge_ret is None:
383 384 # If the return value of merge is None, then there are no real conflicts
384 385 del self._state[dfile]
385 386 self._dirty = True
386 387 elif not merge_ret:
387 388 self.mark(dfile, MERGE_RECORD_RESOLVED)
388 389
389 390 if complete:
390 391 action = None
391 392 if deleted:
392 393 if fcd.isabsent():
393 394 # dc: local picked. Need to drop if present, which may
394 395 # happen on re-resolves.
395 396 action = ACTION_FORGET
396 397 else:
397 398 # cd: remote picked (or otherwise deleted)
398 399 action = ACTION_REMOVE
399 400 else:
400 401 if fcd.isabsent(): # dc: remote picked
401 402 action = ACTION_GET
402 403 elif fco.isabsent(): # cd: local picked
403 404 if dfile in self.localctx:
404 405 action = ACTION_ADD_MODIFIED
405 406 else:
406 407 action = ACTION_ADD
407 408 # else: regular merges (no action necessary)
408 409 self._results[dfile] = merge_ret, action
409 410
410 411 return complete, merge_ret
411 412
412 413 def preresolve(self, dfile, wctx):
413 414 """run premerge process for dfile
414 415
415 416 Returns whether the merge is complete, and the exit code."""
416 417 return self._resolve(True, dfile, wctx)
417 418
418 419 def resolve(self, dfile, wctx):
419 420 """run merge process (assuming premerge was run) for dfile
420 421
421 422 Returns the exit code of the merge."""
422 423 return self._resolve(False, dfile, wctx)[1]
423 424
424 425 def counts(self):
425 426 """return counts for updated, merged and removed files in this
426 427 session"""
427 428 updated, merged, removed = 0, 0, 0
428 429 for r, action in pycompat.itervalues(self._results):
429 430 if r is None:
430 431 updated += 1
431 432 elif r == 0:
432 433 if action == ACTION_REMOVE:
433 434 removed += 1
434 435 else:
435 436 merged += 1
436 437 return updated, merged, removed
437 438
438 439 def unresolvedcount(self):
439 440 """get unresolved count for this merge (persistent)"""
440 441 return len(list(self.unresolved()))
441 442
442 443 def actions(self):
443 444 """return lists of actions to perform on the dirstate"""
444 445 actions = {
445 446 ACTION_REMOVE: [],
446 447 ACTION_FORGET: [],
447 448 ACTION_ADD: [],
448 449 ACTION_ADD_MODIFIED: [],
449 450 ACTION_GET: [],
450 451 }
451 452 for f, (r, action) in pycompat.iteritems(self._results):
452 453 if action is not None:
453 454 actions[action].append((f, None, b"merge result"))
454 455 return actions
455 456
456 457
457 458 class mergestate(_mergestate_base):
458 459
459 460 statepathv1 = b'merge/state'
460 461 statepathv2 = b'merge/state2'
461 462
462 463 @staticmethod
463 464 def clean(repo):
464 465 """Initialize a brand new merge state, removing any existing state on
465 466 disk."""
466 467 ms = mergestate(repo)
467 468 ms.reset()
468 469 return ms
469 470
470 471 @staticmethod
471 472 def read(repo):
472 473 """Initialize the merge state, reading it from disk."""
473 474 ms = mergestate(repo)
474 475 ms._read()
475 476 return ms
476 477
477 478 def _read(self):
478 479 """Analyse each record content to restore a serialized state from disk
479 480
480 481 This function processes "record" entries produced by the de-serialization
481 482 of the on-disk file.
482 483 """
483 484 unsupported = set()
484 485 records = self._readrecords()
485 486 for rtype, record in records:
486 487 if rtype == RECORD_LOCAL:
487 488 self._local = bin(record)
488 489 elif rtype == RECORD_OTHER:
489 490 self._other = bin(record)
490 491 elif rtype == LEGACY_MERGE_DRIVER_STATE:
491 492 pass
492 493 elif rtype in (
493 494 RECORD_MERGED,
494 495 RECORD_CHANGEDELETE_CONFLICT,
495 496 RECORD_PATH_CONFLICT,
496 497 LEGACY_MERGE_DRIVER_MERGE,
497 498 LEGACY_RECORD_RESOLVED_OTHER,
498 499 ):
499 500 bits = record.split(b'\0')
500 501 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
501 502 # and we now store related information in _stateextras, so
502 503 # let's write to _stateextras directly
503 504 if bits[1] == MERGE_RECORD_MERGED_OTHER:
504 505 self._stateextras[bits[0]][b'filenode-source'] = b'other'
505 506 else:
506 507 self._state[bits[0]] = bits[1:]
507 508 elif rtype == RECORD_FILE_VALUES:
508 509 filename, rawextras = record.split(b'\0', 1)
509 510 extraparts = rawextras.split(b'\0')
510 511 extras = {}
511 512 i = 0
512 513 while i < len(extraparts):
513 514 extras[extraparts[i]] = extraparts[i + 1]
514 515 i += 2
515 516
516 517 self._stateextras[filename] = extras
517 518 elif rtype == RECORD_LABELS:
518 519 labels = record.split(b'\0', 2)
519 520 self._labels = [l for l in labels if len(l) > 0]
520 521 elif not rtype.islower():
521 522 unsupported.add(rtype)
522 523
523 524 if unsupported:
524 525 raise error.UnsupportedMergeRecords(unsupported)
525 526
526 527 def _readrecords(self):
527 528 """Read merge state from disk and return a list of record (TYPE, data)
528 529
529 530 We read data from both v1 and v2 files and decide which one to use.
530 531
531 532 V1 has been used by versions prior to 2.9.1 and contains less data than
532 533 v2. We read both versions and check that no data in v2 contradicts
533 534 v1. If there is no contradiction we can safely assume that both v1
534 535 and v2 were written at the same time and use the extra data in v2. If
535 536 there is a contradiction we ignore the v2 content, as we assume an old
536 537 version of Mercurial has overwritten the mergestate file and left an
537 538 old v2 file around.
538 539
539 540 returns a list of records [(TYPE, data), ...]"""
540 541 v1records = self._readrecordsv1()
541 542 v2records = self._readrecordsv2()
542 543 if self._v1v2match(v1records, v2records):
543 544 return v2records
544 545 else:
545 546 # v1 file is newer than v2 file, use it
546 547 # we have to infer the "other" changeset of the merge
547 548 # we cannot do better than that with v1 of the format
548 549 mctx = self._repo[None].parents()[-1]
549 550 v1records.append((RECORD_OTHER, mctx.hex()))
550 551 # add placeholder "other" file node information
551 552 # nobody is using it yet so we do not need to fetch the data
552 553 # if mctx was wrong `mctx[bits[-2]]` may fail.
553 554 for idx, r in enumerate(v1records):
554 555 if r[0] == RECORD_MERGED:
555 556 bits = r[1].split(b'\0')
556 557 bits.insert(-2, b'')
557 558 v1records[idx] = (r[0], b'\0'.join(bits))
558 559 return v1records
559 560
560 561 def _v1v2match(self, v1records, v2records):
561 562 oldv2 = set() # old format version of v2 record
562 563 for rec in v2records:
563 564 if rec[0] == RECORD_LOCAL:
564 565 oldv2.add(rec)
565 566 elif rec[0] == RECORD_MERGED:
566 567 # drop the onode data (not contained in v1)
567 568 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
568 569 for rec in v1records:
569 570 if rec not in oldv2:
570 571 return False
571 572 else:
572 573 return True
573 574
574 575 def _readrecordsv1(self):
575 576 """read on disk merge state for version 1 file
576 577
577 578 returns a list of records [(TYPE, data), ...]
578 579
579 580 Note: the "F" data from this file are one entry short
580 581 (no "other file node" entry)
581 582 """
582 583 records = []
583 584 try:
584 585 f = self._repo.vfs(self.statepathv1)
585 586 for i, l in enumerate(f):
586 587 if i == 0:
587 588 records.append((RECORD_LOCAL, l[:-1]))
588 589 else:
589 590 records.append((RECORD_MERGED, l[:-1]))
590 591 f.close()
591 592 except IOError as err:
592 593 if err.errno != errno.ENOENT:
593 594 raise
594 595 return records
595 596
596 597 def _readrecordsv2(self):
597 598 """read on disk merge state for version 2 file
598 599
599 600 This format is a list of arbitrary records of the form:
600 601
601 602 [type][length][content]
602 603
603 604 `type` is a single character, `length` is a 4 byte integer, and
604 605 `content` is an arbitrary byte sequence of length `length`.
605 606
606 607 Mercurial versions prior to 3.7 have a bug where if there are
607 608 unsupported mandatory merge records, attempting to clear out the merge
608 609 state with hg update --clean or similar aborts. The 't' record type
609 610 works around that by writing out what those versions treat as an
610 611 advisory record, but later versions interpret as special: the first
611 612 character is the 'real' record type and everything onwards is the data.
612 613
613 614 Returns list of records [(TYPE, data), ...]."""
614 615 records = []
615 616 try:
616 617 f = self._repo.vfs(self.statepathv2)
617 618 data = f.read()
618 619 off = 0
619 620 end = len(data)
620 621 while off < end:
621 622 rtype = data[off : off + 1]
622 623 off += 1
623 624 length = _unpack(b'>I', data[off : (off + 4)])[0]
624 625 off += 4
625 626 record = data[off : (off + length)]
626 627 off += length
627 628 if rtype == RECORD_OVERRIDE:
628 629 rtype, record = record[0:1], record[1:]
629 630 records.append((rtype, record))
630 631 f.close()
631 632 except IOError as err:
632 633 if err.errno != errno.ENOENT:
633 634 raise
634 635 return records
635 636
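The v2 framing described in the docstring is easy to exercise by hand; a round-trip sketch using the same ``>sI`` layout (the payload is made up)::

    import struct

    payload = b'example record payload'
    blob = struct.pack(b'>sI%ds' % len(payload), b'L', len(payload), payload)

    # parse it back: 1-byte type, 4-byte big-endian length, then the content
    rtype = blob[0:1]
    (length,) = struct.unpack(b'>I', blob[1:5])
    assert (rtype, blob[5:5 + length]) == (b'L', payload)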
636 637 def commit(self):
637 638 if self._dirty:
638 639 records = self._makerecords()
639 640 self._writerecords(records)
640 641 self._dirty = False
641 642
642 643 def _makerecords(self):
643 644 records = []
644 645 records.append((RECORD_LOCAL, hex(self._local)))
645 646 records.append((RECORD_OTHER, hex(self._other)))
646 647 # Write out state items. In all cases, the value of the state map entry
647 648 # is written as the contents of the record. The record type depends on
648 649 # the type of state that is stored, and capital-letter records are used
649 650 # to prevent older versions of Mercurial that do not support the feature
650 651 # from loading them.
651 652 for filename, v in pycompat.iteritems(self._state):
652 653 if v[0] in (
653 654 MERGE_RECORD_UNRESOLVED_PATH,
654 655 MERGE_RECORD_RESOLVED_PATH,
655 656 ):
656 657 # Path conflicts. These are stored in 'P' records. The current
657 658 # resolution state ('pu' or 'pr') is stored within the record.
658 659 records.append(
659 660 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
660 661 )
661 662 elif v[1] == nullhex or v[6] == nullhex:
662 663 # Change/Delete or Delete/Change conflicts. These are stored in
663 664 # 'C' records. v[1] is the local file, and is nullhex when the
664 665 # file is deleted locally ('dc'). v[6] is the remote file, and
665 666 # is nullhex when the file is deleted remotely ('cd').
666 667 records.append(
667 668 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
668 669 )
669 670 else:
670 671 # Normal files. These are stored in 'F' records.
671 672 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
672 673 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
673 674 rawextras = b'\0'.join(
674 675 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
675 676 )
676 677 records.append(
677 678 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
678 679 )
679 680 if self._labels is not None:
680 681 labels = b'\0'.join(self._labels)
681 682 records.append((RECORD_LABELS, labels))
682 683 return records
683 684
684 685 def _writerecords(self, records):
685 686 """Write current state on disk (both v1 and v2)"""
686 687 self._writerecordsv1(records)
687 688 self._writerecordsv2(records)
688 689
689 690 def _writerecordsv1(self, records):
690 691 """Write current state on disk in a version 1 file"""
691 692 f = self._repo.vfs(self.statepathv1, b'wb')
692 693 irecords = iter(records)
693 694 lrecords = next(irecords)
694 695 assert lrecords[0] == RECORD_LOCAL
695 696 f.write(hex(self._local) + b'\n')
696 697 for rtype, data in irecords:
697 698 if rtype == RECORD_MERGED:
698 699 f.write(b'%s\n' % _droponode(data))
699 700 f.close()
700 701
701 702 def _writerecordsv2(self, records):
702 703 """Write current state on disk in a version 2 file
703 704
704 705 See the docstring for _readrecordsv2 for why we use 't'."""
705 706 # these are the records that all version 2 clients can read
706 707 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
707 708 f = self._repo.vfs(self.statepathv2, b'wb')
708 709 for key, data in records:
709 710 assert len(key) == 1
710 711 if key not in allowlist:
711 712 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
712 713 format = b'>sI%is' % len(data)
713 714 f.write(_pack(format, key, len(data), data))
714 715 f.close()
715 716
716 717 def _make_backup(self, fctx, localkey):
717 718 self._repo.vfs.write(b'merge/' + localkey, fctx.data())
718 719
719 720 def _restore_backup(self, fctx, localkey, flags):
720 721 with self._repo.vfs(b'merge/' + localkey) as f:
721 722 fctx.write(f.read(), flags)
722 723
723 724 def reset(self):
724 725 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
725 726
726 727
727 728 class memmergestate(_mergestate_base):
728 729 def __init__(self, repo):
729 730 super(memmergestate, self).__init__(repo)
730 731 self._backups = {}
731 732
732 733 def _make_backup(self, fctx, localkey):
733 734 self._backups[localkey] = fctx.data()
734 735
735 736 def _restore_backup(self, fctx, localkey, flags):
736 737 fctx.write(self._backups[localkey], flags)
737 738
738 739
739 740 def recordupdates(repo, actions, branchmerge, getfiledata):
740 741 """record merge actions to the dirstate"""
741 742 # remove (must come first)
742 743 for f, args, msg in actions.get(ACTION_REMOVE, []):
743 744 if branchmerge:
744 745 repo.dirstate.remove(f)
745 746 else:
746 747 repo.dirstate.drop(f)
747 748
748 749 # forget (must come first)
749 750 for f, args, msg in actions.get(ACTION_FORGET, []):
750 751 repo.dirstate.drop(f)
751 752
752 753 # resolve path conflicts
753 754 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
754 755 (f0, origf0) = args
755 756 repo.dirstate.add(f)
756 757 repo.dirstate.copy(origf0, f)
757 758 if f0 == origf0:
758 759 repo.dirstate.remove(f0)
759 760 else:
760 761 repo.dirstate.drop(f0)
761 762
762 763 # re-add
763 764 for f, args, msg in actions.get(ACTION_ADD, []):
764 765 repo.dirstate.add(f)
765 766
766 767 # re-add/mark as modified
767 768 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
768 769 if branchmerge:
769 770 repo.dirstate.normallookup(f)
770 771 else:
771 772 repo.dirstate.add(f)
772 773
773 774 # exec change
774 775 for f, args, msg in actions.get(ACTION_EXEC, []):
775 776 repo.dirstate.normallookup(f)
776 777
777 778 # keep
778 779 for f, args, msg in actions.get(ACTION_KEEP, []):
779 780 pass
780 781
781 782 # keep deleted
782 783 for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
783 784 pass
784 785
785 786 # keep new
786 787 for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
787 788 pass
788 789
789 790 # get
790 791 for f, args, msg in actions.get(ACTION_GET, []):
791 792 if branchmerge:
792 793 repo.dirstate.otherparent(f)
793 794 else:
794 795 parentfiledata = getfiledata[f] if getfiledata else None
795 796 repo.dirstate.normal(f, parentfiledata=parentfiledata)
796 797
797 798 # merge
798 799 for f, args, msg in actions.get(ACTION_MERGE, []):
799 800 f1, f2, fa, move, anc = args
800 801 if branchmerge:
801 802 # We've done a branch merge, mark this file as merged
802 803 # so that we properly record the merger later
803 804 repo.dirstate.merge(f)
804 805 if f1 != f2: # copy/rename
805 806 if move:
806 807 repo.dirstate.remove(f1)
807 808 if f1 != f:
808 809 repo.dirstate.copy(f1, f)
809 810 else:
810 811 repo.dirstate.copy(f2, f)
811 812 else:
812 813 # We've update-merged a locally modified file, so
813 814 # we set the dirstate to emulate a normal checkout
814 815 # of that file some time in the past. Thus our
815 816 # merge will appear as a normal local file
816 817 # modification.
817 818 if f2 == f: # file not locally copied/moved
818 819 repo.dirstate.normallookup(f)
819 820 if move:
820 821 repo.dirstate.drop(f1)
821 822
822 823 # directory rename, move local
823 824 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
824 825 f0, flag = args
825 826 if branchmerge:
826 827 repo.dirstate.add(f)
827 828 repo.dirstate.remove(f0)
828 829 repo.dirstate.copy(f0, f)
829 830 else:
830 831 repo.dirstate.normal(f)
831 832 repo.dirstate.drop(f0)
832 833
833 834 # directory rename, get
834 835 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
835 836 f0, flag = args
836 837 if branchmerge:
837 838 repo.dirstate.add(f)
838 839 repo.dirstate.copy(f0, f)
839 840 else:
840 841 repo.dirstate.normal(f)
@@ -1,1188 +1,1188 b''
1 1 # shelve.py - save/restore working directory state
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """save and restore changes to the working directory
9 9
10 10 The "hg shelve" command saves changes made to the working directory
11 11 and reverts those changes, resetting the working directory to a clean
12 12 state.
13 13
14 14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 15 shelve". Changes can be restored even after updating to a different
16 16 parent, in which case Mercurial's merge machinery will resolve any
17 17 conflicts if necessary.
18 18
19 19 You can have more than one shelved change outstanding at a time; each
20 20 shelved change has a distinct name. For details, see the help for "hg
21 21 shelve".
22 22 """
23 23 from __future__ import absolute_import
24 24
25 25 import collections
26 26 import errno
27 27 import itertools
28 28 import stat
29 29
30 30 from .i18n import _
31 31 from .node import (
32 32 bin,
33 33 hex,
34 34 nullid,
35 35 nullrev,
36 36 )
37 37 from . import (
38 38 bookmarks,
39 39 bundle2,
40 40 changegroup,
41 41 cmdutil,
42 42 discovery,
43 43 error,
44 44 exchange,
45 45 hg,
46 46 lock as lockmod,
47 47 mdiff,
48 48 merge,
49 49 mergestate as mergestatemod,
50 50 patch,
51 51 phases,
52 52 pycompat,
53 53 repair,
54 54 scmutil,
55 55 templatefilters,
56 56 util,
57 57 vfs as vfsmod,
58 58 )
59 59 from .utils import (
60 60 dateutil,
61 61 stringutil,
62 62 )
63 63
64 64 backupdir = b'shelve-backup'
65 65 shelvedir = b'shelved'
66 66 shelvefileextensions = [b'hg', b'patch', b'shelve']
67 67
68 68 # we never need the user, so we use a
69 69 # generic user for all shelve operations
70 70 shelveuser = b'shelve@localhost'
71 71
72 72
73 73 class ShelfDir(object):
74 74 def __init__(self, repo, for_backups=False):
75 75 if for_backups:
76 76 self.vfs = vfsmod.vfs(repo.vfs.join(backupdir))
77 77 else:
78 78 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
79 79
80 80 def get(self, name):
81 81 return Shelf(self.vfs, name)
82 82
83 83 def listshelves(self):
84 84 """return all shelves in repo as list of (time, name)"""
85 85 try:
86 86 names = self.vfs.listdir()
87 87 except OSError as err:
88 88 if err.errno != errno.ENOENT:
89 89 raise
90 90 return []
91 91 info = []
92 92 seen = set()
93 93 for filename in names:
94 94 name = filename.rsplit(b'.', 1)[0]
95 95 if name in seen:
96 96 continue
97 97 seen.add(name)
98 98 shelf = self.get(name)
99 99 if not shelf.exists():
100 100 continue
101 101 mtime = shelf.mtime()
102 102 info.append((mtime, name))
103 103 return sorted(info, reverse=True)
104 104
105 105
106 106 class Shelf(object):
107 107 """Represents a shelf, including possibly multiple files storing it.
108 108
109 109 Old shelves will have a .patch and a .hg file. Newer shelves will
110 110 also have a .shelve file. This class abstracts away some of the
111 111 differences and lets you work with the shelf as a whole.
112 112 """
113 113
114 114 def __init__(self, vfs, name):
115 115 self.vfs = vfs
116 116 self.name = name
117 117
118 118 def exists(self):
119 119 return self.vfs.exists(self.name + b'.patch') and self.vfs.exists(
120 120 self.name + b'.hg'
121 121 )
122 122
123 123 def mtime(self):
124 124 return self.vfs.stat(self.name + b'.patch')[stat.ST_MTIME]
125 125
126 126 def writeinfo(self, info):
127 127 scmutil.simplekeyvaluefile(self.vfs, self.name + b'.shelve').write(info)
128 128
129 129 def hasinfo(self):
130 130 return self.vfs.exists(self.name + b'.shelve')
131 131
132 132 def readinfo(self):
133 133 return scmutil.simplekeyvaluefile(
134 134 self.vfs, self.name + b'.shelve'
135 135 ).read()
136 136
137 137 def writebundle(self, repo, bases, node):
138 138 cgversion = changegroup.safeversion(repo)
139 139 if cgversion == b'01':
140 140 btype = b'HG10BZ'
141 141 compression = None
142 142 else:
143 143 btype = b'HG20'
144 144 compression = b'BZ'
145 145
146 146 repo = repo.unfiltered()
147 147
148 148 outgoing = discovery.outgoing(
149 149 repo, missingroots=bases, ancestorsof=[node]
150 150 )
151 151 cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve')
152 152
153 153 bundle_filename = self.vfs.join(self.name + b'.hg')
154 154 bundle2.writebundle(
155 155 repo.ui,
156 156 cg,
157 157 bundle_filename,
158 158 btype,
159 159 self.vfs,
160 160 compression=compression,
161 161 )
162 162
163 163 def applybundle(self, repo, tr):
164 164 filename = self.name + b'.hg'
165 165 fp = self.vfs(filename)
166 166 try:
167 167 targetphase = phases.internal
168 168 if not phases.supportinternal(repo):
169 169 targetphase = phases.secret
170 170 gen = exchange.readbundle(repo.ui, fp, filename, self.vfs)
171 171 pretip = repo[b'tip']
172 172 bundle2.applybundle(
173 173 repo,
174 174 gen,
175 175 tr,
176 176 source=b'unshelve',
177 177 url=b'bundle:' + self.vfs.join(filename),
178 178 targetphase=targetphase,
179 179 )
180 180 shelvectx = repo[b'tip']
181 181 if pretip == shelvectx:
182 182 shelverev = tr.changes[b'revduplicates'][-1]
183 183 shelvectx = repo[shelverev]
184 184 return shelvectx
185 185 finally:
186 186 fp.close()
187 187
188 188 def open_patch(self, mode=b'rb'):
189 189 return self.vfs(self.name + b'.patch', mode)
190 190
191 191 def _backupfilename(self, backupvfs, filename):
192 192 def gennames(base):
193 193 yield base
194 194 base, ext = base.rsplit(b'.', 1)
195 195 for i in itertools.count(1):
196 196 yield b'%s-%d.%s' % (base, i, ext)
197 197
198 198 for n in gennames(filename):
199 199 if not backupvfs.exists(n):
200 200 return backupvfs.join(n)
201 201
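``gennames`` probes ``name.ext``, then ``name-1.ext``, ``name-2.ext``, and so on until an unused backup filename is found. The same generator in isolation::

    import itertools

    def gen_names(base):
        yield base
        stem, ext = base.rsplit('.', 1)
        for i in itertools.count(1):
            yield '%s-%d.%s' % (stem, i, ext)

    names = gen_names('default.hg')
    assert [next(names) for _ in range(3)] == [
        'default.hg', 'default-1.hg', 'default-2.hg']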
202 202 def movetobackup(self, backupvfs):
203 203 if not backupvfs.isdir():
204 204 backupvfs.makedir()
205 205 for suffix in shelvefileextensions:
206 206 filename = self.name + b'.' + suffix
207 207 if self.vfs.exists(filename):
208 208 util.rename(
209 209 self.vfs.join(filename),
210 210 self._backupfilename(backupvfs, filename),
211 211 )
212 212
213 213 def delete(self):
214 214 for ext in shelvefileextensions:
215 215 self.vfs.tryunlink(self.name + b'.' + ext)
216 216
217 217
218 218 class shelvedstate(object):
219 219 """Handle persistence during unshelving operations.
220 220
221 221 Handles saving and restoring a shelved state. Supports multiple
222 222 versions of the on-disk shelved state and handles them appropriately.
223 223 """
224 224
225 225 _version = 2
226 226 _filename = b'shelvedstate'
227 227 _keep = b'keep'
228 228 _nokeep = b'nokeep'
229 229 # colon is essential to differentiate from a real bookmark name
230 230 _noactivebook = b':no-active-bookmark'
231 231 _interactive = b'interactive'
232 232
233 233 @classmethod
234 234 def _verifyandtransform(cls, d):
235 235 """Some basic shelvestate syntactic verification and transformation"""
236 236 try:
237 237 d[b'originalwctx'] = bin(d[b'originalwctx'])
238 238 d[b'pendingctx'] = bin(d[b'pendingctx'])
239 239 d[b'parents'] = [bin(h) for h in d[b'parents'].split(b' ')]
240 240 d[b'nodestoremove'] = [
241 241 bin(h) for h in d[b'nodestoremove'].split(b' ')
242 242 ]
243 243 except (ValueError, TypeError, KeyError) as err:
244 244 raise error.CorruptedState(stringutil.forcebytestr(err))
245 245
246 246 @classmethod
247 247 def _getversion(cls, repo):
248 248 """Read version information from shelvestate file"""
249 249 fp = repo.vfs(cls._filename)
250 250 try:
251 251 version = int(fp.readline().strip())
252 252 except ValueError as err:
253 253 raise error.CorruptedState(stringutil.forcebytestr(err))
254 254 finally:
255 255 fp.close()
256 256 return version
257 257
258 258 @classmethod
259 259 def _readold(cls, repo):
260 260 """Read the old position-based version of a shelvestate file"""
261 261 # Order is important, because the old shelvestate file uses it
262 262 # to determine values of fields (e.g. name is on the second line,
263 263 # originalwctx is on the third and so forth). Please do not change.
264 264 keys = [
265 265 b'version',
266 266 b'name',
267 267 b'originalwctx',
268 268 b'pendingctx',
269 269 b'parents',
270 270 b'nodestoremove',
271 271 b'branchtorestore',
272 272 b'keep',
273 273 b'activebook',
274 274 ]
275 275 # this is executed only rarely, so it is not a big deal
276 276 # that we open this file twice
277 277 fp = repo.vfs(cls._filename)
278 278 d = {}
279 279 try:
280 280 for key in keys:
281 281 d[key] = fp.readline().strip()
282 282 finally:
283 283 fp.close()
284 284 return d
285 285
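For reference, the old position-based shelvedstate file read by ``_readold`` holds one value per line, in exactly the key order listed above. A hypothetical example (hashes abbreviated, all values illustrative)::

    1                      <- version
    default                <- name
    f3a4...                <- originalwctx (hex node)
    9c21...                <- pendingctx (hex node)
    f3a4... 0000...        <- parents, space-separated
    9c21...                <- nodestoremove, space-separated
    default                <- branchtorestore
    nokeep                 <- keep
    :no-active-bookmark    <- activebook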
286 286 @classmethod
287 287 def load(cls, repo):
288 288 version = cls._getversion(repo)
289 289 if version < cls._version:
290 290 d = cls._readold(repo)
291 291 elif version == cls._version:
292 292 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename).read(
293 293 firstlinenonkeyval=True
294 294 )
295 295 else:
296 296 raise error.Abort(
297 297 _(
298 298 b'this version of shelve is incompatible '
299 299 b'with the version used in this repo'
300 300 )
301 301 )
302 302
303 303 cls._verifyandtransform(d)
304 304 try:
305 305 obj = cls()
306 306 obj.name = d[b'name']
307 307 obj.wctx = repo[d[b'originalwctx']]
308 308 obj.pendingctx = repo[d[b'pendingctx']]
309 309 obj.parents = d[b'parents']
310 310 obj.nodestoremove = d[b'nodestoremove']
311 311 obj.branchtorestore = d.get(b'branchtorestore', b'')
312 312 obj.keep = d.get(b'keep') == cls._keep
313 313 obj.activebookmark = b''
314 314 if d.get(b'activebook', b'') != cls._noactivebook:
315 315 obj.activebookmark = d.get(b'activebook', b'')
316 316 obj.interactive = d.get(b'interactive') == cls._interactive
317 317 except (error.RepoLookupError, KeyError) as err:
318 318 raise error.CorruptedState(pycompat.bytestr(err))
319 319
320 320 return obj
321 321
322 322 @classmethod
323 323 def save(
324 324 cls,
325 325 repo,
326 326 name,
327 327 originalwctx,
328 328 pendingctx,
329 329 nodestoremove,
330 330 branchtorestore,
331 331 keep=False,
332 332 activebook=b'',
333 333 interactive=False,
334 334 ):
335 335 info = {
336 336 b"name": name,
337 337 b"originalwctx": hex(originalwctx.node()),
338 338 b"pendingctx": hex(pendingctx.node()),
339 339 b"parents": b' '.join([hex(p) for p in repo.dirstate.parents()]),
340 340 b"nodestoremove": b' '.join([hex(n) for n in nodestoremove]),
341 341 b"branchtorestore": branchtorestore,
342 342 b"keep": cls._keep if keep else cls._nokeep,
343 343 b"activebook": activebook or cls._noactivebook,
344 344 }
345 345 if interactive:
346 346 info[b'interactive'] = cls._interactive
347 347 scmutil.simplekeyvaluefile(repo.vfs, cls._filename).write(
348 348 info, firstline=(b"%d" % cls._version)
349 349 )
350 350
351 351 @classmethod
352 352 def clear(cls, repo):
353 353 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
354 354
355 355
356 356 def cleanupoldbackups(repo):
357 357 maxbackups = repo.ui.configint(b'shelve', b'maxbackups')
358 358 backup_dir = ShelfDir(repo, for_backups=True)
359 359 hgfiles = backup_dir.listshelves()
360 360 if maxbackups > 0 and maxbackups < len(hgfiles):
361 361 bordermtime = hgfiles[maxbackups - 1][0]
362 362 else:
363 363 bordermtime = None
364 364 for mtime, name in hgfiles[maxbackups:]:
365 365 if mtime == bordermtime:
366 366 # keep it, because the timestamp can't determine the exact order of backups
367 367 continue
368 368 backup_dir.get(name).delete()
369 369
370 370
371 371 def _backupactivebookmark(repo):
372 372 activebookmark = repo._activebookmark
373 373 if activebookmark:
374 374 bookmarks.deactivate(repo)
375 375 return activebookmark
376 376
377 377
378 378 def _restoreactivebookmark(repo, mark):
379 379 if mark:
380 380 bookmarks.activate(repo, mark)
381 381
382 382
383 383 def _aborttransaction(repo, tr):
384 384 """Abort current transaction for shelve/unshelve, but keep dirstate"""
385 385 dirstatebackupname = b'dirstate.shelve'
386 386 repo.dirstate.savebackup(tr, dirstatebackupname)
387 387 tr.abort()
388 388 repo.dirstate.restorebackup(None, dirstatebackupname)
389 389
390 390
391 391 def getshelvename(repo, parent, opts):
392 392 """Decide on the name this shelve is going to have"""
393 393
394 394 def gennames():
395 395 yield label
396 396 for i in itertools.count(1):
397 397 yield b'%s-%02d' % (label, i)
398 398
399 399 name = opts.get(b'name')
400 400 label = repo._activebookmark or parent.branch() or b'default'
401 401 # slashes aren't allowed in filenames, so we replace them
402 402 label = label.replace(b'/', b'_')
403 403 label = label.replace(b'\\', b'_')
404 404 # filenames must not start with '.', as the shelf file should not be hidden
405 405 if label.startswith(b'.'):
406 406 label = label.replace(b'.', b'_', 1)
407 407
408 408 if name:
409 409 if ShelfDir(repo).get(name).exists():
410 410 e = _(b"a shelved change named '%s' already exists") % name
411 411 raise error.Abort(e)
412 412
413 413 # ensure we are not creating a subdirectory or a hidden file
414 414 if b'/' in name or b'\\' in name:
415 415 raise error.Abort(
416 416 _(b'shelved change names can not contain slashes')
417 417 )
418 418 if name.startswith(b'.'):
419 419 raise error.Abort(_(b"shelved change names can not start with '.'"))
420 420
421 421 else:
422 422 shelf_dir = ShelfDir(repo)
423 423 for n in gennames():
424 424 if not shelf_dir.get(n).exists():
425 425 name = n
426 426 break
427 427
428 428 return name
429 429
430 430
431 431 def mutableancestors(ctx):
432 432 """return all mutable ancestors for ctx (included)
433 433
434 434 Much faster than the revset ancestors(ctx) & draft()"""
435 435 seen = {nullrev}
436 436 visit = collections.deque()
437 437 visit.append(ctx)
438 438 while visit:
439 439 ctx = visit.popleft()
440 440 yield ctx.node()
441 441 for parent in ctx.parents():
442 442 rev = parent.rev()
443 443 if rev not in seen:
444 444 seen.add(rev)
445 445 if parent.mutable():
446 446 visit.append(parent)
447 447
448 448
449 449 def getcommitfunc(extra, interactive, editor=False):
450 450 def commitfunc(ui, repo, message, match, opts):
451 451 hasmq = util.safehasattr(repo, b'mq')
452 452 if hasmq:
453 453 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
454 454
455 455 targetphase = phases.internal
456 456 if not phases.supportinternal(repo):
457 457 targetphase = phases.secret
458 458 overrides = {(b'phases', b'new-commit'): targetphase}
459 459 try:
460 460 editor_ = False
461 461 if editor:
462 462 editor_ = cmdutil.getcommiteditor(
463 463 editform=b'shelve.shelve', **pycompat.strkwargs(opts)
464 464 )
465 465 with repo.ui.configoverride(overrides):
466 466 return repo.commit(
467 467 message,
468 468 shelveuser,
469 469 opts.get(b'date'),
470 470 match,
471 471 editor=editor_,
472 472 extra=extra,
473 473 )
474 474 finally:
475 475 if hasmq:
476 476 repo.mq.checkapplied = saved
477 477
478 478 def interactivecommitfunc(ui, repo, *pats, **opts):
479 479 opts = pycompat.byteskwargs(opts)
480 480 match = scmutil.match(repo[b'.'], pats, {})
481 481 message = opts[b'message']
482 482 return commitfunc(ui, repo, message, match, opts)
483 483
484 484 return interactivecommitfunc if interactive else commitfunc
485 485
486 486
487 487 def _nothingtoshelvemessaging(ui, repo, pats, opts):
488 488 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
489 489 if stat.deleted:
490 490 ui.status(
491 491 _(b"nothing changed (%d missing files, see 'hg status')\n")
492 492 % len(stat.deleted)
493 493 )
494 494 else:
495 495 ui.status(_(b"nothing changed\n"))
496 496
497 497
498 498 def _shelvecreatedcommit(repo, node, name, match):
499 499 info = {b'node': hex(node)}
500 500 shelf = ShelfDir(repo).get(name)
501 501 shelf.writeinfo(info)
502 502 bases = list(mutableancestors(repo[node]))
503 503 shelf.writebundle(repo, bases, node)
504 504 with shelf.open_patch(b'wb') as fp:
505 505 cmdutil.exportfile(
506 506 repo, [node], fp, opts=mdiff.diffopts(git=True), match=match
507 507 )
508 508
509 509
510 510 def _includeunknownfiles(repo, pats, opts, extra):
511 511 s = repo.status(match=scmutil.match(repo[None], pats, opts), unknown=True)
512 512 if s.unknown:
513 513 extra[b'shelve_unknown'] = b'\0'.join(s.unknown)
514 514 repo[None].add(s.unknown)
515 515
516 516
517 517 def _finishshelve(repo, tr):
518 518 if phases.supportinternal(repo):
519 519 tr.close()
520 520 else:
521 521 _aborttransaction(repo, tr)
522 522
523 523
524 524 def createcmd(ui, repo, pats, opts):
525 525 """subcommand that creates a new shelve"""
526 526 with repo.wlock():
527 527 cmdutil.checkunfinished(repo)
528 528 return _docreatecmd(ui, repo, pats, opts)
529 529
530 530
531 531 def _docreatecmd(ui, repo, pats, opts):
532 532 wctx = repo[None]
533 533 parents = wctx.parents()
534 534 parent = parents[0]
535 535 origbranch = wctx.branch()
536 536
537 if parent.node() != nullid:
537 if parent.rev() != nullrev:
538 538 desc = b"changes to: %s" % parent.description().split(b'\n', 1)[0]
539 539 else:
540 540 desc = b'(changes in empty repository)'
541 541
542 542 if not opts.get(b'message'):
543 543 opts[b'message'] = desc
544 544
545 545 lock = tr = activebookmark = None
546 546 try:
547 547 lock = repo.lock()
548 548
549 549 # use an uncommitted transaction to generate the bundle to avoid
550 550 # pull races. Ensure we don't print the abort message to stderr.
551 551 tr = repo.transaction(b'shelve', report=lambda x: None)
552 552
553 553 interactive = opts.get(b'interactive', False)
554 554 includeunknown = opts.get(b'unknown', False) and not opts.get(
555 555 b'addremove', False
556 556 )
557 557
558 558 name = getshelvename(repo, parent, opts)
559 559 activebookmark = _backupactivebookmark(repo)
560 560 extra = {b'internal': b'shelve'}
561 561 if includeunknown:
562 562 _includeunknownfiles(repo, pats, opts, extra)
563 563
564 564 if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
565 565 # In a non-bare shelve we don't store the newly created branch
566 566 # in the bundled commit
567 567 repo.dirstate.setbranch(repo[b'.'].branch())
568 568
569 569 commitfunc = getcommitfunc(extra, interactive, editor=True)
570 570 if not interactive:
571 571 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
572 572 else:
573 573 node = cmdutil.dorecord(
574 574 ui,
575 575 repo,
576 576 commitfunc,
577 577 None,
578 578 False,
579 579 cmdutil.recordfilter,
580 580 *pats,
581 581 **pycompat.strkwargs(opts)
582 582 )
583 583 if not node:
584 584 _nothingtoshelvemessaging(ui, repo, pats, opts)
585 585 return 1
586 586
587 587 # Create a matcher so that prefetch doesn't attempt to fetch
588 588 # the entire repository pointlessly, and as an optimisation
589 589 # for movedirstate, if needed.
590 590 match = scmutil.matchfiles(repo, repo[node].files())
591 591 _shelvecreatedcommit(repo, node, name, match)
592 592
593 593 ui.status(_(b'shelved as %s\n') % name)
594 594 if opts[b'keep']:
595 595 with repo.dirstate.parentchange():
596 596 scmutil.movedirstate(repo, parent, match)
597 597 else:
598 598 hg.update(repo, parent.node())
599 599 ms = mergestatemod.mergestate.read(repo)
600 600 if not ms.unresolvedcount():
601 601 ms.reset()
602 602
603 603 if origbranch != repo[b'.'].branch() and not _isbareshelve(pats, opts):
604 604 repo.dirstate.setbranch(origbranch)
605 605
606 606 _finishshelve(repo, tr)
607 607 finally:
608 608 _restoreactivebookmark(repo, activebookmark)
609 609 lockmod.release(tr, lock)
610 610
611 611
612 612 def _isbareshelve(pats, opts):
613 613 return (
614 614 not pats
615 615 and not opts.get(b'interactive', False)
616 616 and not opts.get(b'include', False)
617 617 and not opts.get(b'exclude', False)
618 618 )
619 619
620 620
621 621 def _iswctxonnewbranch(repo):
622 622 return repo[None].branch() != repo[b'.'].branch()
623 623
624 624
625 625 def cleanupcmd(ui, repo):
626 626 """subcommand that deletes all shelves"""
627 627
628 628 with repo.wlock():
629 629 shelf_dir = ShelfDir(repo)
630 630 backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
631 631 for _mtime, name in shelf_dir.listshelves():
632 632 shelf_dir.get(name).movetobackup(backupvfs)
633 633 cleanupoldbackups(repo)
634 634
635 635
636 636 def deletecmd(ui, repo, pats):
637 637 """subcommand that deletes a specific shelve"""
638 638 if not pats:
639 639 raise error.InputError(_(b'no shelved changes specified!'))
640 640 with repo.wlock():
641 641 backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
642 642 for name in pats:
643 643 shelf = ShelfDir(repo).get(name)
644 644 if not shelf.exists():
645 645 raise error.InputError(
646 646 _(b"shelved change '%s' not found") % name
647 647 )
648 648 shelf.movetobackup(backupvfs)
649 649 cleanupoldbackups(repo)
650 650
651 651
652 652 def listcmd(ui, repo, pats, opts):
653 653 """subcommand that displays the list of shelves"""
654 654 pats = set(pats)
655 655 width = 80
656 656 if not ui.plain():
657 657 width = ui.termwidth()
658 658 namelabel = b'shelve.newest'
659 659 ui.pager(b'shelve')
660 660 shelf_dir = ShelfDir(repo)
661 661 for mtime, name in shelf_dir.listshelves():
662 662 if pats and name not in pats:
663 663 continue
664 664 ui.write(name, label=namelabel)
665 665 namelabel = b'shelve.name'
666 666 if ui.quiet:
667 667 ui.write(b'\n')
668 668 continue
669 669 ui.write(b' ' * (16 - len(name)))
670 670 used = 16
671 671 date = dateutil.makedate(mtime)
672 672 age = b'(%s)' % templatefilters.age(date, abbrev=True)
673 673 ui.write(age, label=b'shelve.age')
674 674 ui.write(b' ' * (12 - len(age)))
675 675 used += 12
676 676 with shelf_dir.get(name).open_patch() as fp:
677 677 while True:
678 678 line = fp.readline()
679 679 if not line:
680 680 break
681 681 if not line.startswith(b'#'):
682 682 desc = line.rstrip()
683 683 if ui.formatted():
684 684 desc = stringutil.ellipsis(desc, width - used)
685 685 ui.write(desc)
686 686 break
687 687 ui.write(b'\n')
688 688 if not (opts[b'patch'] or opts[b'stat']):
689 689 continue
690 690 difflines = fp.readlines()
691 691 if opts[b'patch']:
692 692 for chunk, label in patch.difflabel(iter, difflines):
693 693 ui.write(chunk, label=label)
694 694 if opts[b'stat']:
695 695 for chunk, label in patch.diffstatui(difflines, width=width):
696 696 ui.write(chunk, label=label)
697 697
698 698
699 699 def patchcmds(ui, repo, pats, opts):
700 700 """subcommand that displays shelves"""
701 701 shelf_dir = ShelfDir(repo)
702 702 if len(pats) == 0:
703 703 shelves = shelf_dir.listshelves()
704 704 if not shelves:
705 705 raise error.Abort(_(b"there are no shelves to show"))
706 706 mtime, name = shelves[0]
707 707 pats = [name]
708 708
709 709 for shelfname in pats:
710 710 if not shelf_dir.get(shelfname).exists():
711 711 raise error.Abort(_(b"cannot find shelf %s") % shelfname)
712 712
713 713 listcmd(ui, repo, pats, opts)
714 714
715 715
716 716 def checkparents(repo, state):
717 717 """check parent while resuming an unshelve"""
718 718 if state.parents != repo.dirstate.parents():
719 719 raise error.Abort(
720 720 _(b'working directory parents do not match unshelve state')
721 721 )
722 722
723 723
724 724 def _loadshelvedstate(ui, repo, opts):
725 725 try:
726 726 state = shelvedstate.load(repo)
727 727 if opts.get(b'keep') is None:
728 728 opts[b'keep'] = state.keep
729 729 except IOError as err:
730 730 if err.errno != errno.ENOENT:
731 731 raise
732 732 cmdutil.wrongtooltocontinue(repo, _(b'unshelve'))
733 733 except error.CorruptedState as err:
734 734 ui.debug(pycompat.bytestr(err) + b'\n')
735 735 if opts.get(b'continue'):
736 736 msg = _(b'corrupted shelved state file')
737 737 hint = _(
738 738 b'please run hg unshelve --abort to abort unshelve '
739 739 b'operation'
740 740 )
741 741 raise error.Abort(msg, hint=hint)
742 742 elif opts.get(b'abort'):
743 743 shelvedstate.clear(repo)
744 744 raise error.Abort(
745 745 _(
746 746 b'could not read shelved state file, your '
747 747 b'working copy may be in an unexpected state\n'
748 748 b'please update to some commit\n'
749 749 )
750 750 )
751 751 return state
752 752
753 753
754 754 def unshelveabort(ui, repo, state):
755 755 """subcommand that abort an in-progress unshelve"""
756 756 with repo.lock():
757 757 try:
758 758 checkparents(repo, state)
759 759
760 760 merge.clean_update(state.pendingctx)
761 761 if state.activebookmark and state.activebookmark in repo._bookmarks:
762 762 bookmarks.activate(repo, state.activebookmark)
763 763 mergefiles(ui, repo, state.wctx, state.pendingctx)
764 764 if not phases.supportinternal(repo):
765 765 repair.strip(
766 766 ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
767 767 )
768 768 finally:
769 769 shelvedstate.clear(repo)
770 770 ui.warn(_(b"unshelve of '%s' aborted\n") % state.name)
771 771
772 772
773 773 def hgabortunshelve(ui, repo):
774 774 """logic to abort unshelve using 'hg abort"""
775 775 with repo.wlock():
776 776 state = _loadshelvedstate(ui, repo, {b'abort': True})
777 777 return unshelveabort(ui, repo, state)
778 778
779 779
780 780 def mergefiles(ui, repo, wctx, shelvectx):
781 781 """updates to wctx and merges the changes from shelvectx into the
782 782 dirstate."""
783 783 with ui.configoverride({(b'ui', b'quiet'): True}):
784 784 hg.update(repo, wctx.node())
785 785 ui.pushbuffer(True)
786 786 cmdutil.revert(ui, repo, shelvectx)
787 787 ui.popbuffer()
788 788
789 789
790 790 def restorebranch(ui, repo, branchtorestore):
791 791 if branchtorestore and branchtorestore != repo.dirstate.branch():
792 792 repo.dirstate.setbranch(branchtorestore)
793 793 ui.status(
794 794 _(b'marked working directory as branch %s\n') % branchtorestore
795 795 )
796 796
797 797
798 798 def unshelvecleanup(ui, repo, name, opts):
799 799 """remove related files after an unshelve"""
800 800 if not opts.get(b'keep'):
801 801 backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
802 802 ShelfDir(repo).get(name).movetobackup(backupvfs)
803 803 cleanupoldbackups(repo)
804 804
805 805
806 806 def unshelvecontinue(ui, repo, state, opts):
807 807 """subcommand to continue an in-progress unshelve"""
808 808 # We're finishing off a merge. First parent is our original
809 809 # parent, second is the temporary "fake" commit we're unshelving.
810 810 interactive = state.interactive
811 811 basename = state.name
812 812 with repo.lock():
813 813 checkparents(repo, state)
814 814 ms = mergestatemod.mergestate.read(repo)
815 815 if ms.unresolvedcount():
816 816 raise error.Abort(
817 817 _(b"unresolved conflicts, can't continue"),
818 818 hint=_(b"see 'hg resolve', then 'hg unshelve --continue'"),
819 819 )
820 820
821 821 shelvectx = repo[state.parents[1]]
822 822 pendingctx = state.pendingctx
823 823
824 824 with repo.dirstate.parentchange():
825 825 repo.setparents(state.pendingctx.node(), nullid)
826 826 repo.dirstate.write(repo.currenttransaction())
827 827
828 828 targetphase = phases.internal
829 829 if not phases.supportinternal(repo):
830 830 targetphase = phases.secret
831 831 overrides = {(b'phases', b'new-commit'): targetphase}
832 832 with repo.ui.configoverride(overrides, b'unshelve'):
833 833 with repo.dirstate.parentchange():
834 834 repo.setparents(state.parents[0], nullid)
835 835 newnode, ispartialunshelve = _createunshelvectx(
836 836 ui, repo, shelvectx, basename, interactive, opts
837 837 )
838 838
839 839 if newnode is None:
840 840 shelvectx = state.pendingctx
841 841 msg = _(
842 842 b'note: unshelved changes already existed '
843 843 b'in the working copy\n'
844 844 )
845 845 ui.status(msg)
846 846 else:
847 847 # only strip the shelvectx if we produced one
848 848 state.nodestoremove.append(newnode)
849 849 shelvectx = repo[newnode]
850 850
851 851 merge.update(pendingctx)
852 852 mergefiles(ui, repo, state.wctx, shelvectx)
853 853 restorebranch(ui, repo, state.branchtorestore)
854 854
855 855 if not phases.supportinternal(repo):
856 856 repair.strip(
857 857 ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
858 858 )
859 859 shelvedstate.clear(repo)
860 860 if not ispartialunshelve:
861 861 unshelvecleanup(ui, repo, state.name, opts)
862 862 _restoreactivebookmark(repo, state.activebookmark)
863 863 ui.status(_(b"unshelve of '%s' complete\n") % state.name)
864 864
865 865
866 866 def hgcontinueunshelve(ui, repo):
867 867 """logic to resume an unshelve using 'hg continue'"""
868 868 with repo.wlock():
869 869 state = _loadshelvedstate(ui, repo, {b'continue': True})
870 870 return unshelvecontinue(ui, repo, state, {b'keep': state.keep})
871 871
872 872
873 873 def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
874 874 """Temporarily commit working copy changes before moving the unshelve commit"""
875 875 # Store pending changes in a commit and remember the added files, in case
876 876 # a shelve contains unknown files that are part of the pending change
877 877 s = repo.status()
878 878 addedbefore = frozenset(s.added)
879 879 if not (s.modified or s.added or s.removed):
880 880 return tmpwctx, addedbefore
881 881 ui.status(
882 882 _(
883 883 b"temporarily committing pending changes "
884 884 b"(restore with 'hg unshelve --abort')\n"
885 885 )
886 886 )
887 887 extra = {b'internal': b'shelve'}
888 888 commitfunc = getcommitfunc(extra=extra, interactive=False, editor=False)
889 889 tempopts = {}
890 890 tempopts[b'message'] = b"pending changes temporary commit"
891 891 tempopts[b'date'] = opts.get(b'date')
892 892 with ui.configoverride({(b'ui', b'quiet'): True}):
893 893 node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
894 894 tmpwctx = repo[node]
895 895 return tmpwctx, addedbefore
896 896
897 897
898 898 def _unshelverestorecommit(ui, repo, tr, basename):
899 899 """Recreate commit in the repository during the unshelve"""
900 900 repo = repo.unfiltered()
901 901 node = None
902 902 shelf = ShelfDir(repo).get(basename)
903 903 if shelf.hasinfo():
904 904 node = shelf.readinfo()[b'node']
905 905 if node is None or node not in repo:
906 906 with ui.configoverride({(b'ui', b'quiet'): True}):
907 907 shelvectx = shelf.applybundle(repo, tr)
908 908 # We might not strip the unbundled changeset, so we should keep track of
909 909 # the unshelve node in case we need to reuse it (e.g. unshelve --keep)
910 910 if node is None:
911 911 info = {b'node': hex(shelvectx.node())}
912 912 shelf.writeinfo(info)
913 913 else:
914 914 shelvectx = repo[node]
915 915
916 916 return repo, shelvectx
917 917
918 918
919 919 def _createunshelvectx(ui, repo, shelvectx, basename, interactive, opts):
920 920 """Handles the creation of the unshelve commit and updates the shelve
921 921 if it was partially unshelved.
922 922
923 923 If interactive is:
924 924
925 925 * False: Commits all the changes in the working directory.
926 926 * True: Prompts the user to select changes to unshelve and commits them.
927 927 Updates the shelve with the remaining changes.
928 928
929 929 Returns the node of the new commit formed and a bool indicating whether
930 930 the shelve was partially unshelved. Creates a commit ctx to unshelve
931 931 interactively or non-interactively.
932 932
933 933 In interactive mode the user might want to unshelve only some of the
934 934 stored changes, so we create two commits: one with the changes being
935 935 unshelved now, while the rest is reshelved for later.
936 936
937 937 Here, we return both the newnode created (possibly interactively) and a
938 938 bool indicating whether the unshelve was partial or complete.
939 939 """
940 940 opts[b'message'] = shelvectx.description()
941 941 opts[b'interactive-unshelve'] = True
942 942 pats = []
943 943 if not interactive:
944 944 newnode = repo.commit(
945 945 text=shelvectx.description(),
946 946 extra=shelvectx.extra(),
947 947 user=shelvectx.user(),
948 948 date=shelvectx.date(),
949 949 )
950 950 return newnode, False
951 951
952 952 commitfunc = getcommitfunc(shelvectx.extra(), interactive=True, editor=True)
953 953 newnode = cmdutil.dorecord(
954 954 ui,
955 955 repo,
956 956 commitfunc,
957 957 None,
958 958 False,
959 959 cmdutil.recordfilter,
960 960 *pats,
961 961 **pycompat.strkwargs(opts)
962 962 )
963 963 snode = repo.commit(
964 964 text=shelvectx.description(),
965 965 extra=shelvectx.extra(),
966 966 user=shelvectx.user(),
967 967 )
968 968 if snode:
969 969 m = scmutil.matchfiles(repo, repo[snode].files())
970 970 _shelvecreatedcommit(repo, snode, basename, m)
971 971
972 972 return newnode, bool(snode)
973 973
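For orientation, a hedged sketch of how callers consume the returned pair; it mirrors the pattern in unshelvecontinue above, and `pendingctx` here stands in for whatever context the caller falls back to::

    newnode, ispartialunshelve = _createunshelvectx(
        ui, repo, shelvectx, basename, interactive, opts
    )
    if newnode is None:
        # nothing was committed: the requested changes already existed
        # in the working copy, so reuse the fallback context
        shelvectx = pendingctx
    else:
        shelvectx = repo[newnode]
    # a truthy ispartialunshelve tells the caller to keep the shelf
    # around instead of running the usual cleanup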
974 974
975 975 def _rebaserestoredcommit(
976 976 ui,
977 977 repo,
978 978 opts,
979 979 tr,
980 980 oldtiprev,
981 981 basename,
982 982 pctx,
983 983 tmpwctx,
984 984 shelvectx,
985 985 branchtorestore,
986 986 activebookmark,
987 987 ):
988 988 """Rebase the restored commit from its original location onto a destination"""
989 989 # If the shelve is not immediately on top of the commit
990 990 # we'll be merging with, rebase it to be on top.
991 991 interactive = opts.get(b'interactive')
992 992 if tmpwctx.node() == shelvectx.p1().node() and not interactive:
993 993 # We won't skip in interactive mode because the user might want to
994 994 # unshelve only certain changes.
995 995 return shelvectx, False
996 996
997 997 overrides = {
998 998 (b'ui', b'forcemerge'): opts.get(b'tool', b''),
999 999 (b'phases', b'new-commit'): phases.secret,
1000 1000 }
1001 1001 with repo.ui.configoverride(overrides, b'unshelve'):
1002 1002 ui.status(_(b'rebasing shelved changes\n'))
1003 1003 stats = merge.graft(
1004 1004 repo,
1005 1005 shelvectx,
1006 1006 labels=[b'working-copy', b'shelve'],
1007 1007 keepconflictparent=True,
1008 1008 )
1009 1009 if stats.unresolvedcount:
1010 1010 tr.close()
1011 1011
1012 1012 nodestoremove = [
1013 1013 repo.changelog.node(rev)
1014 1014 for rev in pycompat.xrange(oldtiprev, len(repo))
1015 1015 ]
1016 1016 shelvedstate.save(
1017 1017 repo,
1018 1018 basename,
1019 1019 pctx,
1020 1020 tmpwctx,
1021 1021 nodestoremove,
1022 1022 branchtorestore,
1023 1023 opts.get(b'keep'),
1024 1024 activebookmark,
1025 1025 interactive,
1026 1026 )
1027 1027 raise error.ConflictResolutionRequired(b'unshelve')
1028 1028
1029 1029 with repo.dirstate.parentchange():
1030 1030 repo.setparents(tmpwctx.node(), nullid)
1031 1031 newnode, ispartialunshelve = _createunshelvectx(
1032 1032 ui, repo, shelvectx, basename, interactive, opts
1033 1033 )
1034 1034
1035 1035 if newnode is None:
1036 1036 shelvectx = tmpwctx
1037 1037 msg = _(
1038 1038 b'note: unshelved changes already existed '
1039 1039 b'in the working copy\n'
1040 1040 )
1041 1041 ui.status(msg)
1042 1042 else:
1043 1043 shelvectx = repo[newnode]
1044 1044 merge.update(tmpwctx)
1045 1045
1046 1046 return shelvectx, ispartialunshelve
1047 1047
1048 1048
1049 1049 def _forgetunknownfiles(repo, shelvectx, addedbefore):
1050 1050 # Forget any files that were unknown before the shelve, unknown before
1051 1051 # unshelve started, but are now added.
1052 1052 shelveunknown = shelvectx.extra().get(b'shelve_unknown')
1053 1053 if not shelveunknown:
1054 1054 return
1055 1055 shelveunknown = frozenset(shelveunknown.split(b'\0'))
1056 1056 addedafter = frozenset(repo.status().added)
1057 1057 toforget = (addedafter & shelveunknown) - addedbefore
1058 1058 repo[None].forget(toforget)
1059 1059
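The set arithmetic above is terse; a tiny invented example of which files end up forgotten again::

    shelveunknown = frozenset([b'u1', b'u2'])  # unknown when shelved
    addedbefore = frozenset([b'u2', b'a1'])    # added before unshelve began
    addedafter = frozenset([b'u1', b'u2', b'a1', b'a2'])
    # only u1 reverts to unknown: it was unknown at shelve time, is
    # added now, and was not added before the unshelve started
    assert (addedafter & shelveunknown) - addedbefore == {b'u1'}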
1060 1060
1061 1061 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
1062 1062 _restoreactivebookmark(repo, activebookmark)
1063 1063 # The transaction aborting will strip all the commits for us,
1064 1064 # but it doesn't update the in-memory structures, so addchangegroup
1065 1065 # hooks still fire and try to operate on the missing commits.
1066 1066 # Clean up manually to prevent this.
1067 1067 repo.unfiltered().changelog.strip(oldtiprev, tr)
1068 1068 _aborttransaction(repo, tr)
1069 1069
1070 1070
1071 1071 def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
1072 1072 """Check potential problems which may result from working
1073 1073 copy having untracked changes."""
1074 1074 wcdeleted = set(repo.status().deleted)
1075 1075 shelvetouched = set(shelvectx.files())
1076 1076 intersection = wcdeleted.intersection(shelvetouched)
1077 1077 if intersection:
1078 1078 m = _(b"shelved change touches missing files")
1079 1079 hint = _(b"run hg status to see which files are missing")
1080 1080 raise error.Abort(m, hint=hint)
1081 1081
1082 1082
1083 1083 def unshelvecmd(ui, repo, *shelved, **opts):
1084 1084 opts = pycompat.byteskwargs(opts)
1085 1085 abortf = opts.get(b'abort')
1086 1086 continuef = opts.get(b'continue')
1087 1087 interactive = opts.get(b'interactive')
1088 1088 if not abortf and not continuef:
1089 1089 cmdutil.checkunfinished(repo)
1090 1090 shelved = list(shelved)
1091 1091 if opts.get(b"name"):
1092 1092 shelved.append(opts[b"name"])
1093 1093
1094 1094 if interactive and opts.get(b'keep'):
1095 1095 raise error.InputError(
1096 1096 _(b'--keep on --interactive is not yet supported')
1097 1097 )
1098 1098 if abortf or continuef:
1099 1099 if abortf and continuef:
1100 1100 raise error.InputError(_(b'cannot use both abort and continue'))
1101 1101 if shelved:
1102 1102 raise error.InputError(
1103 1103 _(
1104 1104 b'cannot combine abort/continue with '
1105 1105 b'naming a shelved change'
1106 1106 )
1107 1107 )
1108 1108 if abortf and opts.get(b'tool', False):
1109 1109 ui.warn(_(b'tool option will be ignored\n'))
1110 1110
1111 1111 state = _loadshelvedstate(ui, repo, opts)
1112 1112 if abortf:
1113 1113 return unshelveabort(ui, repo, state)
1114 1114 elif continuef and interactive:
1115 1115 raise error.InputError(
1116 1116 _(b'cannot use both continue and interactive')
1117 1117 )
1118 1118 elif continuef:
1119 1119 return unshelvecontinue(ui, repo, state, opts)
1120 1120 elif len(shelved) > 1:
1121 1121 raise error.InputError(_(b'can only unshelve one change at a time'))
1122 1122 elif not shelved:
1123 1123 shelved = ShelfDir(repo).listshelves()
1124 1124 if not shelved:
1125 1125 raise error.StateError(_(b'no shelved changes to apply!'))
1126 1126 basename = shelved[0][1]
1127 1127 ui.status(_(b"unshelving change '%s'\n") % basename)
1128 1128 else:
1129 1129 basename = shelved[0]
1130 1130
1131 1131 if not ShelfDir(repo).get(basename).exists():
1132 1132 raise error.InputError(_(b"shelved change '%s' not found") % basename)
1133 1133
1134 1134 return _dounshelve(ui, repo, basename, opts)
1135 1135
1136 1136
1137 1137 def _dounshelve(ui, repo, basename, opts):
1138 1138 repo = repo.unfiltered()
1139 1139 lock = tr = None
1140 1140 try:
1141 1141 lock = repo.lock()
1142 1142 tr = repo.transaction(b'unshelve', report=lambda x: None)
1143 1143 oldtiprev = len(repo)
1144 1144
1145 1145 pctx = repo[b'.']
1146 1146 tmpwctx = pctx
1147 1147 # The goal is to have a commit structure like so:
1148 1148 # ...-> pctx -> tmpwctx -> shelvectx
1149 1149 # where tmpwctx is an optional commit with the user's pending changes
1150 1150 # and shelvectx is the unshelved changes. Then we merge it all down
1151 1151 # to the original pctx.
1152 1152
1153 1153 activebookmark = _backupactivebookmark(repo)
1154 1154 tmpwctx, addedbefore = _commitworkingcopychanges(
1155 1155 ui, repo, opts, tmpwctx
1156 1156 )
1157 1157 repo, shelvectx = _unshelverestorecommit(ui, repo, tr, basename)
1158 1158 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
1159 1159 branchtorestore = b''
1160 1160 if shelvectx.branch() != shelvectx.p1().branch():
1161 1161 branchtorestore = shelvectx.branch()
1162 1162
1163 1163 shelvectx, ispartialunshelve = _rebaserestoredcommit(
1164 1164 ui,
1165 1165 repo,
1166 1166 opts,
1167 1167 tr,
1168 1168 oldtiprev,
1169 1169 basename,
1170 1170 pctx,
1171 1171 tmpwctx,
1172 1172 shelvectx,
1173 1173 branchtorestore,
1174 1174 activebookmark,
1175 1175 )
1176 1176 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
1177 1177 with ui.configoverride(overrides, b'unshelve'):
1178 1178 mergefiles(ui, repo, pctx, shelvectx)
1179 1179 restorebranch(ui, repo, branchtorestore)
1180 1180 shelvedstate.clear(repo)
1181 1181 _finishunshelve(repo, oldtiprev, tr, activebookmark)
1182 1182 _forgetunknownfiles(repo, shelvectx, addedbefore)
1183 1183 if not ispartialunshelve:
1184 1184 unshelvecleanup(ui, repo, basename, opts)
1185 1185 finally:
1186 1186 if tr:
1187 1187 tr.release()
1188 1188 lockmod.release(lock)
@@ -1,566 +1,566 b''
1 1 # Copyright (C) 2004, 2005 Canonical Ltd
2 2 #
3 3 # This program is free software; you can redistribute it and/or modify
4 4 # it under the terms of the GNU General Public License as published by
5 5 # the Free Software Foundation; either version 2 of the License, or
6 6 # (at your option) any later version.
7 7 #
8 8 # This program is distributed in the hope that it will be useful,
9 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 11 # GNU General Public License for more details.
12 12 #
13 13 # You should have received a copy of the GNU General Public License
14 14 # along with this program; if not, see <http://www.gnu.org/licenses/>.
15 15
16 16 # mbp: "you know that thing where cvs gives you conflict markers?"
17 17 # s: "i hate that."
18 18
19 19 from __future__ import absolute_import
20 20
21 21 from .i18n import _
22 from .node import nullid
22 from .node import nullrev
23 23 from . import (
24 24 error,
25 25 mdiff,
26 26 pycompat,
27 27 util,
28 28 )
29 29 from .utils import stringutil
30 30
31 31
32 32 class CantReprocessAndShowBase(Exception):
33 33 pass
34 34
35 35
36 36 def intersect(ra, rb):
37 37 """Given two ranges return the range where they intersect or None.
38 38
39 39 >>> intersect((0, 10), (0, 6))
40 40 (0, 6)
41 41 >>> intersect((0, 10), (5, 15))
42 42 (5, 10)
43 43 >>> intersect((0, 10), (10, 15))
44 44 >>> intersect((0, 9), (10, 15))
45 45 >>> intersect((0, 9), (7, 15))
46 46 (7, 9)
47 47 """
48 48 assert ra[0] <= ra[1]
49 49 assert rb[0] <= rb[1]
50 50
51 51 sa = max(ra[0], rb[0])
52 52 sb = min(ra[1], rb[1])
53 53 if sa < sb:
54 54 return sa, sb
55 55 else:
56 56 return None
57 57
58 58
59 59 def compare_range(a, astart, aend, b, bstart, bend):
60 60 """Compare a[astart:aend] == b[bstart:bend], without slicing."""
61 61 if (aend - astart) != (bend - bstart):
62 62 return False
63 63 for ia, ib in zip(
64 64 pycompat.xrange(astart, aend), pycompat.xrange(bstart, bend)
65 65 ):
66 66 if a[ia] != b[ib]:
67 67 return False
68 68 else:
69 69 return True
70 70
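A quick usage sketch with invented inputs, showing the slice semantics without building the slices::

    a = [b'x\n', b'y\n', b'z\n', b'w\n']
    b = [b'z\n', b'w\n']
    assert compare_range(a, 2, 4, b, 0, 2)      # same as a[2:4] == b[0:2]
    assert not compare_range(a, 0, 2, b, 0, 2)  # a[0:2] != b[0:2]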
71 71
72 72 class Merge3Text(object):
73 73 """3-way merge of texts.
74 74
75 75 Given strings BASE, OTHER, THIS, tries to produce a combined text
76 76 incorporating the changes from both BASE->OTHER and BASE->THIS."""
77 77
78 78 def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
79 79 self.basetext = basetext
80 80 self.atext = atext
81 81 self.btext = btext
82 82 if base is None:
83 83 base = mdiff.splitnewlines(basetext)
84 84 if a is None:
85 85 a = mdiff.splitnewlines(atext)
86 86 if b is None:
87 87 b = mdiff.splitnewlines(btext)
88 88 self.base = base
89 89 self.a = a
90 90 self.b = b
91 91
92 92 def merge_lines(
93 93 self,
94 94 name_a=None,
95 95 name_b=None,
96 96 name_base=None,
97 97 start_marker=b'<<<<<<<',
98 98 mid_marker=b'=======',
99 99 end_marker=b'>>>>>>>',
100 100 base_marker=None,
101 101 localorother=None,
102 102 minimize=False,
103 103 ):
104 104 """Return merge in cvs-like form."""
105 105 self.conflicts = False
106 106 newline = b'\n'
107 107 if len(self.a) > 0:
108 108 if self.a[0].endswith(b'\r\n'):
109 109 newline = b'\r\n'
110 110 elif self.a[0].endswith(b'\r'):
111 111 newline = b'\r'
112 112 if name_a and start_marker:
113 113 start_marker = start_marker + b' ' + name_a
114 114 if name_b and end_marker:
115 115 end_marker = end_marker + b' ' + name_b
116 116 if name_base and base_marker:
117 117 base_marker = base_marker + b' ' + name_base
118 118 merge_regions = self.merge_regions()
119 119 if minimize:
120 120 merge_regions = self.minimize(merge_regions)
121 121 for t in merge_regions:
122 122 what = t[0]
123 123 if what == b'unchanged':
124 124 for i in range(t[1], t[2]):
125 125 yield self.base[i]
126 126 elif what == b'a' or what == b'same':
127 127 for i in range(t[1], t[2]):
128 128 yield self.a[i]
129 129 elif what == b'b':
130 130 for i in range(t[1], t[2]):
131 131 yield self.b[i]
132 132 elif what == b'conflict':
133 133 if localorother == b'local':
134 134 for i in range(t[3], t[4]):
135 135 yield self.a[i]
136 136 elif localorother == b'other':
137 137 for i in range(t[5], t[6]):
138 138 yield self.b[i]
139 139 else:
140 140 self.conflicts = True
141 141 if start_marker is not None:
142 142 yield start_marker + newline
143 143 for i in range(t[3], t[4]):
144 144 yield self.a[i]
145 145 if base_marker is not None:
146 146 yield base_marker + newline
147 147 for i in range(t[1], t[2]):
148 148 yield self.base[i]
149 149 if mid_marker is not None:
150 150 yield mid_marker + newline
151 151 for i in range(t[5], t[6]):
152 152 yield self.b[i]
153 153 if end_marker is not None:
154 154 yield end_marker + newline
155 155 else:
156 156 raise ValueError(what)
157 157
158 158 def merge_groups(self):
159 159 """Yield sequence of line groups. Each one is a tuple:
160 160
161 161 'unchanged', lines
162 162 Lines unchanged from base
163 163
164 164 'a', lines
165 165 Lines taken from a
166 166
167 167 'same', lines
168 168 Lines taken from a (and equal to b)
169 169
170 170 'b', lines
171 171 Lines taken from b
172 172
173 173 'conflict', base_lines, a_lines, b_lines
174 174 Lines from base were changed to either a or b and conflict.
175 175 """
176 176 for t in self.merge_regions():
177 177 what = t[0]
178 178 if what == b'unchanged':
179 179 yield what, self.base[t[1] : t[2]]
180 180 elif what == b'a' or what == b'same':
181 181 yield what, self.a[t[1] : t[2]]
182 182 elif what == b'b':
183 183 yield what, self.b[t[1] : t[2]]
184 184 elif what == b'conflict':
185 185 yield (
186 186 what,
187 187 self.base[t[1] : t[2]],
188 188 self.a[t[3] : t[4]],
189 189 self.b[t[5] : t[6]],
190 190 )
191 191 else:
192 192 raise ValueError(what)
193 193
194 194 def merge_regions(self):
195 195 """Return sequences of matching and conflicting regions.
196 196
197 197 This returns tuples, where the first value says what kind we
198 198 have:
199 199
200 200 'unchanged', start, end
201 201 Take a region of base[start:end]
202 202
203 203 'same', astart, aend
204 204 b and a are different from base but give the same result
205 205
206 206 'a', start, end
207 207 Non-clashing insertion from a[start:end]
208 208
209 209 'conflict', zstart, zend, astart, aend, bstart, bend
210 210 Conflict between a and b, with z as common ancestor
211 211
212 212 Method is as follows:
213 213
214 214 The two sequences align only on regions which match the base
215 215 and both descendants. These are found by doing a two-way diff
216 216 of each one against the base, and then finding the
217 217 intersections between those regions. These "sync regions"
218 218 are by definition unchanged in both and easily dealt with.
219 219
220 220 The regions in between can be in any of three cases:
221 221 changed identically on both sides, changed on only one side, or conflicting.
222 222 """
223 223
224 224 # section a[0:ia] has been disposed of, etc
225 225 iz = ia = ib = 0
226 226
227 227 for region in self.find_sync_regions():
228 228 zmatch, zend, amatch, aend, bmatch, bend = region
229 229 # print 'match base [%d:%d]' % (zmatch, zend)
230 230
231 231 matchlen = zend - zmatch
232 232 assert matchlen >= 0
233 233 assert matchlen == (aend - amatch)
234 234 assert matchlen == (bend - bmatch)
235 235
236 236 len_a = amatch - ia
237 237 len_b = bmatch - ib
238 238 len_base = zmatch - iz
239 239 assert len_a >= 0
240 240 assert len_b >= 0
241 241 assert len_base >= 0
242 242
243 243 # print 'unmatched a=%d, b=%d' % (len_a, len_b)
244 244
245 245 if len_a or len_b:
246 246 # try to avoid actually slicing the lists
247 247 equal_a = compare_range(
248 248 self.a, ia, amatch, self.base, iz, zmatch
249 249 )
250 250 equal_b = compare_range(
251 251 self.b, ib, bmatch, self.base, iz, zmatch
252 252 )
253 253 same = compare_range(self.a, ia, amatch, self.b, ib, bmatch)
254 254
255 255 if same:
256 256 yield b'same', ia, amatch
257 257 elif equal_a and not equal_b:
258 258 yield b'b', ib, bmatch
259 259 elif equal_b and not equal_a:
260 260 yield b'a', ia, amatch
261 261 elif not equal_a and not equal_b:
262 262 yield b'conflict', iz, zmatch, ia, amatch, ib, bmatch
263 263 else:
264 264 raise AssertionError(b"can't handle a=b=base but unmatched")
265 265
266 266 ia = amatch
267 267 ib = bmatch
268 268 iz = zmatch
269 269
270 270 # if the same part of the base was deleted on both sides
271 271 # that's OK, we can just skip it.
272 272
273 273 if matchlen > 0:
274 274 assert ia == amatch
275 275 assert ib == bmatch
276 276 assert iz == zmatch
277 277
278 278 yield b'unchanged', zmatch, zend
279 279 iz = zend
280 280 ia = aend
281 281 ib = bend
282 282
283 283 def minimize(self, merge_regions):
284 284 """Trim conflict regions of lines where A and B sides match.
285 285
286 286 Lines where both A and B have made the same changes at the beginning
287 287 or the end of each merge region are eliminated from the conflict
288 288 region and are instead considered the same.
289 289 """
290 290 for region in merge_regions:
291 291 if region[0] != b"conflict":
292 292 yield region
293 293 continue
294 294 # pytype thinks this tuple contains only 3 things, but
295 295 # that's clearly not true because this code successfully
296 296 # executes. It might be wise to rework merge_regions to be
297 297 # some kind of attrs type.
298 298 (
299 299 issue,
300 300 z1,
301 301 z2,
302 302 a1,
303 303 a2,
304 304 b1,
305 305 b2,
306 306 ) = region # pytype: disable=bad-unpacking
307 307 alen = a2 - a1
308 308 blen = b2 - b1
309 309
310 310 # find matches at the front
311 311 ii = 0
312 312 while (
313 313 ii < alen and ii < blen and self.a[a1 + ii] == self.b[b1 + ii]
314 314 ):
315 315 ii += 1
316 316 startmatches = ii
317 317
318 318 # find matches at the end
319 319 ii = 0
320 320 while (
321 321 ii < alen
322 322 and ii < blen
323 323 and self.a[a2 - ii - 1] == self.b[b2 - ii - 1]
324 324 ):
325 325 ii += 1
326 326 endmatches = ii
327 327
328 328 if startmatches > 0:
329 329 yield b'same', a1, a1 + startmatches
330 330
331 331 yield (
332 332 b'conflict',
333 333 z1,
334 334 z2,
335 335 a1 + startmatches,
336 336 a2 - endmatches,
337 337 b1 + startmatches,
338 338 b2 - endmatches,
339 339 )
340 340
341 341 if endmatches > 0:
342 342 yield b'same', a2 - endmatches, a2
343 343
344 344 def find_sync_regions(self):
345 345 """Return a list of sync regions, where both descendants match the base.
346 346
347 347 Generates a list of (base1, base2, a1, a2, b1, b2). There is
348 348 always a zero-length sync region at the end of all the files.
349 349 """
350 350
351 351 ia = ib = 0
352 352 amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
353 353 bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
354 354 len_a = len(amatches)
355 355 len_b = len(bmatches)
356 356
357 357 sl = []
358 358
359 359 while ia < len_a and ib < len_b:
360 360 abase, amatch, alen = amatches[ia]
361 361 bbase, bmatch, blen = bmatches[ib]
362 362
363 363 # there is an unconflicted block at i; how long does it
364 364 # extend? until whichever one ends earlier.
365 365 i = intersect((abase, abase + alen), (bbase, bbase + blen))
366 366 if i:
367 367 intbase = i[0]
368 368 intend = i[1]
369 369 intlen = intend - intbase
370 370
371 371 # found a match of base[i[0], i[1]]; this may be less than
372 372 # the region that matches in either one
373 373 assert intlen <= alen
374 374 assert intlen <= blen
375 375 assert abase <= intbase
376 376 assert bbase <= intbase
377 377
378 378 asub = amatch + (intbase - abase)
379 379 bsub = bmatch + (intbase - bbase)
380 380 aend = asub + intlen
381 381 bend = bsub + intlen
382 382
383 383 assert self.base[intbase:intend] == self.a[asub:aend], (
384 384 self.base[intbase:intend],
385 385 self.a[asub:aend],
386 386 )
387 387
388 388 assert self.base[intbase:intend] == self.b[bsub:bend]
389 389
390 390 sl.append((intbase, intend, asub, aend, bsub, bend))
391 391
392 392 # advance whichever one ends first in the base text
393 393 if (abase + alen) < (bbase + blen):
394 394 ia += 1
395 395 else:
396 396 ib += 1
397 397
398 398 intbase = len(self.base)
399 399 abase = len(self.a)
400 400 bbase = len(self.b)
401 401 sl.append((intbase, intbase, abase, abase, bbase, bbase))
402 402
403 403 return sl
404 404
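Since Merge3Text does the heavy lifting here, a self-contained sketch of driving it directly may help; the inputs are invented, and the expected results follow from the region logic above::

    from mercurial.simplemerge import Merge3Text

    # Non-overlapping edits (one per side, with untouched context in
    # between) merge cleanly:
    m3 = Merge3Text(
        b'1\n2\n3\n4\n5\n',        # base
        b'1\n2 local\n3\n4\n5\n',  # a: changes line 2
        b'1\n2\n3\n4 other\n5\n',  # b: changes line 4
    )
    merged = b''.join(m3.merge_lines(name_a=b'local', name_b=b'other'))
    assert merged == b'1\n2 local\n3\n4 other\n5\n'
    assert not m3.conflicts

    # Overlapping edits conflict and come back wrapped in markers:
    m3 = Merge3Text(b'x\n', b'x local\n', b'x other\n')
    merged = b''.join(m3.merge_lines(name_a=b'local', name_b=b'other'))
    assert m3.conflicts
    # merged now reads:
    #   <<<<<<< local
    #   x local
    #   =======
    #   x other
    #   >>>>>>> other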
405 405
406 406 def _verifytext(text, path, ui, opts):
407 407 """verifies that text is non-binary (unless opts['text'] is set,
408 408 in which case we just warn)"""
409 409 if stringutil.binary(text):
410 410 msg = _(b"%s looks like a binary file.") % path
411 411 if not opts.get('quiet'):
412 412 ui.warn(_(b'warning: %s\n') % msg)
413 413 if not opts.get('text'):
414 414 raise error.Abort(msg)
415 415 return text
416 416
417 417
418 418 def _picklabels(defaults, overrides):
419 419 if len(overrides) > 3:
420 420 raise error.Abort(_(b"can only specify three labels."))
421 421 result = defaults[:]
422 422 for i, override in enumerate(overrides):
423 423 result[i] = override
424 424 return result
425 425
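A quick invented example of the override semantics::

    assert _picklabels([b'local', b'other', None], [b'mine']) == [
        b'mine',
        b'other',
        None,
    ]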
426 426
427 427 def is_not_null(ctx):
428 428 if not util.safehasattr(ctx, "node"):
429 429 return False
430 return ctx.node() != nullid
430 return ctx.rev() != nullrev
431 431
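A rough equivalence sketch for the two forms of the check (a hypothetical snippet, assuming an open localrepo object `repo`); the rev comparison is a plain integer test, while the node comparison materializes the 20-byte hash::

    from mercurial.node import nullid, nullrev

    ctx = repo[nullrev]  # the null changeset
    assert ctx.rev() == nullrev and ctx.node() == nullid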
432 432
433 433 def _mergediff(m3, name_a, name_b, name_base):
434 434 lines = []
435 435 conflicts = False
436 436 for group in m3.merge_groups():
437 437 if group[0] == b'conflict':
438 438 base_lines, a_lines, b_lines = group[1:]
439 439 base_text = b''.join(base_lines)
440 440 b_blocks = list(
441 441 mdiff.allblocks(
442 442 base_text,
443 443 b''.join(b_lines),
444 444 lines1=base_lines,
445 445 lines2=b_lines,
446 446 )
447 447 )
448 448 a_blocks = list(
449 449 mdiff.allblocks(
450 450 base_text,
451 451 b''.join(a_lines),
452 452 lines1=base_lines,
453 453 lines2=a_lines,
454 454 )
455 455 )
456 456
457 457 def matching_lines(blocks):
458 458 return sum(
459 459 block[1] - block[0]
460 460 for block, kind in blocks
461 461 if kind == b'='
462 462 )
463 463
464 464 def diff_lines(blocks, lines1, lines2):
465 465 for block, kind in blocks:
466 466 if kind == b'=':
467 467 for line in lines1[block[0] : block[1]]:
468 468 yield b' ' + line
469 469 else:
470 470 for line in lines1[block[0] : block[1]]:
471 471 yield b'-' + line
472 472 for line in lines2[block[2] : block[3]]:
473 473 yield b'+' + line
474 474
475 475 lines.append(b"<<<<<<<\n")
476 476 if matching_lines(a_blocks) < matching_lines(b_blocks):
477 477 lines.append(b"======= %s\n" % name_a)
478 478 lines.extend(a_lines)
479 479 lines.append(b"------- %s\n" % name_base)
480 480 lines.append(b"+++++++ %s\n" % name_b)
481 481 lines.extend(diff_lines(b_blocks, base_lines, b_lines))
482 482 else:
483 483 lines.append(b"------- %s\n" % name_base)
484 484 lines.append(b"+++++++ %s\n" % name_a)
485 485 lines.extend(diff_lines(a_blocks, base_lines, a_lines))
486 486 lines.append(b"======= %s\n" % name_b)
487 487 lines.extend(b_lines)
488 488 lines.append(b">>>>>>>\n")
489 489 conflicts = True
490 490 else:
491 491 lines.extend(group[1])
492 492 return lines, conflicts
493 493
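A hedged sketch (invented one-line edits) of the layout _mergediff produces; here both sides are equally close to base, so the else branch renders the local side as a diff against base::

    m3 = Merge3Text(b'a\nb\n', b'a changed\nb\n', b'a\nb changed\n')
    lines, conflicts = _mergediff(m3, b'local', b'other', b'base')
    assert conflicts
    # b''.join(lines) renders as:
    #   <<<<<<<
    #   ------- base
    #   +++++++ local
    #   -a
    #   +a changed
    #    b
    #   ======= other
    #   a
    #   b changed
    #   >>>>>>>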
494 494
495 495 def simplemerge(ui, localctx, basectx, otherctx, **opts):
496 496 """Performs the simplemerge algorithm.
497 497
498 498 The merged result is written into `localctx`.
499 499 """
500 500
501 501 def readctx(ctx):
502 502 # Merges were always run in the working copy before, which means
503 503 # they used decoded data, if the user defined any repository
504 504 # filters.
505 505 #
506 506 # Maintain that behavior today for BC, though perhaps in the future
507 507 # it'd be worth considering whether merging encoded data (what the
508 508 # repository usually sees) might be more useful.
509 509 return _verifytext(ctx.decodeddata(), ctx.path(), ui, opts)
510 510
511 511 mode = opts.get('mode', b'merge')
512 512 name_a, name_b, name_base = None, None, None
513 513 if mode != b'union':
514 514 name_a, name_b, name_base = _picklabels(
515 515 [localctx.path(), otherctx.path(), None], opts.get('label', [])
516 516 )
517 517
518 518 try:
519 519 localtext = readctx(localctx)
520 520 basetext = readctx(basectx)
521 521 othertext = readctx(otherctx)
522 522 except error.Abort:
523 523 return 1
524 524
525 525 m3 = Merge3Text(basetext, localtext, othertext)
526 526 extrakwargs = {
527 527 b"localorother": opts.get("localorother", None),
528 528 b'minimize': True,
529 529 }
530 530 if mode == b'union':
531 531 extrakwargs[b'start_marker'] = None
532 532 extrakwargs[b'mid_marker'] = None
533 533 extrakwargs[b'end_marker'] = None
534 534 elif name_base is not None:
535 535 extrakwargs[b'base_marker'] = b'|||||||'
536 536 extrakwargs[b'name_base'] = name_base
537 537 extrakwargs[b'minimize'] = False
538 538
539 539 if mode == b'mergediff':
540 540 lines, conflicts = _mergediff(m3, name_a, name_b, name_base)
541 541 else:
542 542 lines = list(
543 543 m3.merge_lines(
544 544 name_a=name_a, name_b=name_b, **pycompat.strkwargs(extrakwargs)
545 545 )
546 546 )
547 547 conflicts = m3.conflicts
548 548
549 549 # merge flags if necessary
550 550 flags = localctx.flags()
551 551 localflags = set(pycompat.iterbytestr(flags))
552 552 otherflags = set(pycompat.iterbytestr(otherctx.flags()))
553 553 if is_not_null(basectx) and localflags != otherflags:
554 554 baseflags = set(pycompat.iterbytestr(basectx.flags()))
555 555 commonflags = localflags & otherflags
556 556 addedflags = (localflags ^ otherflags) - baseflags
557 557 flags = b''.join(sorted(commonflags | addedflags))
558 558
559 559 mergedtext = b''.join(lines)
560 560 if opts.get('print'):
561 561 ui.fout.write(mergedtext)
562 562 else:
563 563 localctx.write(mergedtext, flags)
564 564
565 565 if conflicts and mode != b'union':
566 566 return 1