##// END OF EJS Templates
filemerge: stop returning always-`True` value...
Martin von Zweigbergk -
r49337:608a35db default
parent child Browse files
Show More
@@ -1,1855 +1,1855 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial.pycompat import open
18 18
19 19 from mercurial.hgweb import webcommands
20 20
21 21 from mercurial import (
22 22 archival,
23 23 cmdutil,
24 24 copies as copiesmod,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 exthelper,
29 29 filemerge,
30 30 hg,
31 31 logcmdutil,
32 32 match as matchmod,
33 33 merge,
34 34 mergestate as mergestatemod,
35 35 pathutil,
36 36 pycompat,
37 37 scmutil,
38 38 smartset,
39 39 subrepo,
40 40 url as urlmod,
41 41 util,
42 42 )
43 43
44 44 from mercurial.upgrade_utils import (
45 45 actions as upgrade_actions,
46 46 )
47 47
48 48 from . import (
49 49 lfcommands,
50 50 lfutil,
51 51 storefactory,
52 52 )
53 53
# Helper used to register all of the command/function wrappers below.
eh = exthelper.exthelper()

# Context manager that makes repo.status() largefile-aware while active.
lfstatus = lfutil.lfstatus

# Custom merge-action code: mark a largefile as removed in the dirstate
# without deleting the file itself (see overridecalculateupdates).
MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
59 59
60 60 # -- Utility functions: commonly/repeatedly needed functionality ---------------
61 61
62 62
def composelargefilematcher(match, manifest):
    """Return a copy of ``match`` narrowed down to largefiles only.

    A file counts as a largefile when its standin is present in
    ``manifest``.  The returned matcher never claims to match everything.
    """
    narrowed = copy.copy(match)

    def islfile(f):
        return lfutil.standin(f) in manifest

    narrowed._files = [f for f in narrowed._files if islfile(f)]
    narrowed._fileset = set(narrowed._files)
    narrowed.always = lambda: False
    basematchfn = narrowed.matchfn
    narrowed.matchfn = lambda f: islfile(f) and basematchfn(f)
    return narrowed
74 74
75 75
def composenormalfilematcher(match, manifest, exclude=None):
    """Return a copy of ``match`` that skips largefiles and standins.

    ``exclude`` is an optional iterable of additional file names to
    filter out of the resulting matcher.
    """
    excluded = set() if exclude is None else set(exclude)

    narrowed = copy.copy(match)

    def isnormal(f):
        # Not a standin, not a largefile, and not explicitly excluded.
        if f in excluded:
            return False
        return not lfutil.isstandin(f) and lfutil.standin(f) not in manifest

    narrowed._files = [f for f in narrowed._files if isnormal(f)]
    narrowed._fileset = set(narrowed._files)
    narrowed.always = lambda: False
    basematchfn = narrowed.matchfn
    narrowed.matchfn = lambda f: isnormal(f) and basematchfn(f)
    return narrowed
91 91
92 92
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    """Add files matched by ``matcher`` as largefiles.

    A matched file becomes a largefile when --large was given, when its
    size reaches the configured minimum, or when it matches the
    configured ``largefiles.patterns``.  Returns ``(added, bad)``, the
    largefile names that were added and those that could not be.
    """
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    # Optional pattern-based matcher from largefiles.patterns config.
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    # badmatch suppresses "no such file" warnings during the walk.
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Write an empty-hash standin; the real hash is filled in
                # at commit time.
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                lfdirstate.set_tracked(f)
            lfdirstate.write(repo.currenttransaction())
            # Map any standins the repo refused back to largefile names.
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

        added = [f for f in lfnames if f not in bad]
    return added, bad
163 163
164 164
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    """Remove largefiles matched by ``matcher``.

    With --after, only forget files already deleted from disk; otherwise
    clean files are removed too.  ``isaddremove`` signals a call from
    addremove, which unlinks the standins itself.  Returns non-zero when
    warnings were issued for files that could not be removed.
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    # Restrict each status bucket to actual largefiles.
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # Emit one warning per file; return 1 if anything was warned about.
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        # From here on, operate on the standins instead of the largefiles.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfdirstate.set_untracked(lfutil.splitstandin(f))

        lfdirstate.write(repo.currenttransaction())

    return result
235 235
236 236
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    """Map a standin path back to its largefile name for hgweb display."""
    standin = lfutil.splitstandin(path)
    return standin or path
242 242
243 243
244 244 # -- Wrappers: modify existing commands --------------------------------
245 245
246 246
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    """Reject the contradictory --normal/--large combination, then delegate."""
    wantsnormal = opts.get('normal')
    wantslarge = opts.get('large')
    if wantsnormal and wantslarge:
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
267 267
268 268
@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Add largefiles first, then let ``orig`` add the remaining normal files.

    Returns the combined list of files that could not be added.
    """
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    # Exclude the largefiles we just added so orig only sees normal files.
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    failed = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
    failed.extend(lbad)
    return failed
283 283
284 284
@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    """Remove normal files via ``orig``, then remove matching largefiles.

    Returns non-zero if either pass reported a problem.
    """
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    largeresult = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    return largeresult or normalresult
307 307
308 308
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    """Run subrepo status with largefile-aware status reporting enabled."""
    with lfstatus(repo._repo):
        return orig(repo, rev2, **opts)
313 313
314 314
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run 'hg status' with largefile-aware status reporting enabled."""
    with lfstatus(repo):
        return orig(ui, repo, *pats, **opts)
319 319
320 320
@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    """Check subrepo dirtiness with largefile-aware status enabled."""
    with lfstatus(repo._repo):
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
325 325
326 326
@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    """Wrap 'hg log' so patterns naming largefiles also hit their standins.

    Revision selection (case 1 below) uses the standin-augmented matcher;
    diff-file selection (case 2) keeps the unmodified matcher.
    """

    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Rewrite a single pattern so it addresses the standin,
            # preserving any 'kind:' prefix; fileset patterns pass through.
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                # No cwd prefix to worry about: just add .hglf/ if missing.
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            # Accept either the largefile name or its standin form.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        # Case (2): build the diff-file matcher from the *unwrapped*
        # matchandpats captured above.
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
446 446
447 447
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Run normal verify, then optionally verify largefiles as well."""
    wantlarge = opts.pop('large', False)
    allrevs = opts.pop('lfa', False)
    checkcontents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if wantlarge or allrevs or checkcontents:
        result = result or lfcommands.verifylfiles(
            ui, repo, allrevs, checkcontents
        )
    return result
480 480
481 481
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """With --large, run debugstate against the largefiles dirstate."""
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return

    class fakerepo(object):
        # Minimal stand-in exposing only the largefiles dirstate.
        dirstate = lfutil.openlfdirstate(ui, repo)

    orig(ui, fakerepo, *pats, **opts)
496 496
497 497
498 498 # Before starting the manifest merge, merge.updates will call
499 499 # _checkunknownfile to check if there are any files in the merged-in
500 500 # changeset that collide with unknown files in the working copy.
501 501 #
502 502 # The largefiles are seen as unknown, so this prevents us from merging
503 503 # in a file 'foo' if we already have a largefile with the same name.
504 504 #
505 505 # The overridden function filters the unknown files by removing any
506 506 # largefiles. This makes the merge proceed and we can then handle this
507 507 # case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    """Treat tracked largefiles as known so they do not block merges."""
    normalized = repo.dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
513 513
514 514
515 515 # The manifest merge handles conflicts on the manifest level. We want
516 516 # to handle changes in largefile-ness of files at this level too.
517 517 #
518 518 # The strategy is to run the original calculateupdates and then process
519 519 # the action list it outputs. There are two cases we need to deal with:
520 520 #
521 521 # 1. Normal file in p1, largefile in p2. Here the largefile is
522 522 # detected via its standin file, which will enter the working copy
523 523 # with a "get" action. It is not "merge" since the standin is all
524 524 # Mercurial is concerned with at this level -- the link to the
525 525 # existing normal file is not relevant here.
526 526 #
527 527 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
528 528 # since the largefile will be present in the working copy and
529 529 # different from the normal file in p2. Mercurial therefore
530 530 # triggers a merge action.
531 531 #
532 532 # In both cases, we prompt the user and emit new actions to either
533 533 # remove the standin (if the normal file was kept) or to remove the
534 534 # normal file and get the standin (if the largefile was kept). The
535 535 # default prompt answer is to use the largefile version since it was
536 536 # presumably changed on purpose.
537 537 #
538 538 # Finally, the merge.applyupdates function will then take care of
539 539 # writing the files into the working copy and lfcommands.updatelfiles
540 540 # will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Post-process merge actions to handle largefile/normal-file flips.

    See the block comment above for the two cases.  Returns the (possibly
    modified) merge result from the original calculateupdates.
    """
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        return mresult

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        # Look up the pending action for both the largefile and its standin.
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
        if sm in (b'g', b'dc') and lm != b'r':
            if sm == b'dc':
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(lfile, b'r', None, b'replaced by standin')
                mresult.addfile(standin, b'g', sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, b'k', None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        b'k',
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        b'r',
                        None,
                        b'replaced by non-standin',
                    )
        elif lm in (b'g', b'dc') and sm != b'r':
            if lm == b'dc':
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        b'k',
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(standin, b'k', None, b'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, b'a', None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, b'g', largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    b'r',
                    None,
                    b'replaced by non-standin',
                )

    return mresult
645 645
646 646
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Record the custom 'lfmr' merge action before delegating to ``orig``.

    'lfmr' largefiles are dropped from the normal dirstate but kept
    tracked in the largefiles dirstate (see overridecalculateupdates).
    """
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        with lfdirstate.parentchange():
            for lfile, args, msg in actions[
                MERGE_ACTION_LARGEFILE_MARK_REMOVED
            ]:
                # this should be executed before 'orig', to execute 'remove'
                # before all other actions
                repo.dirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=False
                )
                # make sure lfile doesn't get synclfdirstate'd as normal
                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
        lfdirstate.write(repo.currenttransaction())

    return orig(repo, actions, branchmerge, getfiledata)
665 665
666 666
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Merge a largefile by choosing one side's hash instead of diffing.

    Non-standins and absent sides are delegated to ``origfn``.  If the
    hashes make the resolution unambiguous, or the user picks the other
    side at the prompt, the standin is overwritten with the other side's
    content.  Returns ``(rc, deleted)`` to match the wrapped filemerge
    contract (the always-True leading element was removed upstream, so a
    single two-element return is used here).
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    # Hashes stored in the standins for ancestor, local and other sides.
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    # Take the other side when it actually changed and either the local
    # side is unchanged (trivial resolution) or the user asks for it.
    if (
        ohash != ahash
        and ohash != dhash
        and (
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0, False
700 700
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Translate standin names back to largefile names in copy results."""
    rawcopies = orig(ctx1, ctx2, match=match)
    return {
        lfutil.splitstandin(dst) or dst: lfutil.splitstandin(src) or src
        for dst, src in pycompat.iteritems(rawcopies)
    }
710 710
711 711
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename handling largefiles alongside normal files.

    Runs ``orig`` once for normal files, then once more against the
    standins, and finally copies/renames the largefiles themselves and
    updates the largefiles dirstate.  Returns the combined result code.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # First pass: restrict matching to normal (non-large) files.
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            if e.message != _(b'no files to copy'):
                raise e
            else:
                # Only abort later if the largefile pass also finds nothing.
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # Absolute working-dir path of the standin for a cwd-relative path.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Second pass: match only the standins of largefiles.
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            # Record each (src, dest) pair so the actual largefiles can be
            # copied afterwards; refuse to clobber an existing destination
            # largefile unless --force was given.
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                # Translate standin paths back to largefile paths.
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.set_untracked(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                lfdirstate.set_tracked(destlfile)
        lfdirstate.write(repo.currenttransaction())
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result
878 878
879 879
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    """Revert largefiles through their standins (see comment above)."""
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        # Sync standins with the current largefile contents so revert
        # sees accurate standin state.
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Redirect the revert matcher from largefiles to standins.
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
965 965
966 966
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Pull, then cache largefiles for the revisions selected by --lfrev.

    With --all-largefiles, the b'pulled()' revset is appended so every newly
    pulled changeset has its largefiles downloaded.
    """
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    # Copy the list so appending b'pulled()' below never mutates the list
    # object that arrived through **opts (visible to the caller).
    lfrevs = list(opts.get('lfrev', []))
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # The attribute is only valid while pulling; pulledrevsetsymbol()
            # aborts when it is absent.
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
1009 1009
1010 1010
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Wrap 'hg push': forward any --lfrev revisions through opargs."""
    revs = kwargs.pop('lfrev', None)
    if revs:
        # exchangepushoperation() picks these up from the push operation.
        kwargs.setdefault('opargs', {})[b'lfrevs'] = logcmdutil.revrange(
            repo, revs
        )
    return orig(ui, repo, *args, **kwargs)
1030 1030
1031 1031
@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Attach the --lfrev revisions (if any) to the new push operation."""
    revs = kwargs.pop('lfrevs', None)
    op = orig(*args, **kwargs)
    op.lfrevs = revs
    return op
1039 1039
1040 1040
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

        hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

        hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is set by overridepull() only for the duration of the
    # pull, so this revset is meaningless outside of --lfrev handling.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
1066 1066
1067 1067
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Wrap 'hg clone': --all-largefiles requires a local destination."""
    destpath = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % destpath
        )

    return orig(ui, source, dest, **opts)
1090 1090
1091 1091
@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """Run hg.clone(), then optionally prefetch all largefile revisions."""
    result = orig(ui, opts, *args, **kwargs)

    if result is None:
        return None

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer. Therefore the largefiles can't be downloaded and the
    # hgrc can't be updated.
    if not repo:
        return result

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point. The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get(b'all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo)
        if missing != 0:
            return None

    return result
1116 1116
1117 1117
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Wrap 'hg rebase' to keep standins updated and disable in-memory mode.

    In-memory rebase is forced off because largefiles is not compatible with
    in-memory merge (see the ProgrammingError raised in mergeupdate()).
    """
    if not util.safehasattr(repo, b'_largefilesenabled'):
        # Not a largefiles-enabled repo: run the plain rebase.
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    # Auto-update standins at each rebased commit, and silence per-file
    # largefiles status output for the duration of the command.
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        # Pop in reverse order of the appends above.
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1134 1134
1135 1135
@eh.extsetup
def overriderebase(ui):
    """At extension setup, force rebase's _dorebase to run on-disk."""
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        # rebase extension not enabled: nothing to wrap
        return

    def _dorebase(orig, *args, **kwargs):
        kwargs['inmemory'] = False
        return orig(*args, **kwargs)

    extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1149 1149
1150 1150
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run 'hg archive' with largefiles status tracking enabled."""
    unfiltered = repo.unfiltered()
    with lfstatus(unfiltered):
        return orig(ui, unfiltered, dest, **opts)
1155 1155
1156 1156
@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    """Serve hgweb archive requests with largefiles status tracking on."""
    with lfstatus(web.repo):
        return orig(web)
1161 1161
1162 1162
@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Archive ``node``, substituting largefile contents for their standins.

    Falls back to the original archival.archive() unless lfstatus is set on
    the repo (or its unfiltered view).
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Filter through the matcher, optionally decode, then add to the
        # archive under the computed prefix.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # ``f`` is a standin: archive the real largefile under the
            # largefile's name instead.
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive. That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1258 1258
1259 1259
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive an hg subrepo, substituting largefile contents for standins.

    Mirrors overridearchive() for the subrepo case; falls back to the
    original method when largefiles is not enabled for the subrepo.
    """
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        #
        # Bug fix: filter on the ``name`` parameter, not the enclosing loop
        # variable ``f`` (they happen to coincide at the current call site,
        # but closing over ``f`` was fragile and inconsistent with
        # overridearchive()'s write()).
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # ``f`` is a standin: archive the real largefile instead.
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1322 1322
1323 1323
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """bailifchanged() that also aborts on uncommitted largefile changes."""
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        s = repo.status()
    if any((s.modified, s.added, s.removed, s.deleted)):
        raise error.Abort(_(b'uncommitted changes'))
1335 1335
1336 1336
@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    """Compute the post-commit status with largefiles tracking enabled."""
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1341 1341
1342 1342
@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Forget normal files via orig(), then handle largefiles' standins.

    Returns the combined (bad, forgot) lists, matching orig()'s contract.
    """
    # Normal files go through the original implementation.
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    # Only largefiles whose standin is tracked in the manifest can be
    # forgotten.
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1393 1393
1394 1394
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    seen = set()
    hashes = set()

    def dedup(fn, lfhash):
        # Record each (filename, hash) pair at most once.
        pair = (fn, lfhash)
        if pair in seen:
            return
        seen.add(pair)
        hashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if not hashes:
        return
    lfexists = storefactory.openstore(repo, other).exists(hashes)
    for fn, lfhash in seen:
        if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
            addfunc(fn, lfhash)
1419 1419
1420 1420
def outgoinghook(ui, repo, other, opts, missing):
    """Hook for 'hg outgoing --large': list largefiles needing upload.

    'missing' holds the outgoing changesets; largefiles already present on
    'other' are filtered out by _getoutgoings().
    """
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            # With --debug, remember every hash per filename so each hash can
            # be printed underneath its file below.
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b' %s\n' % lfhash)

        else:
            # Without --debug, only filenames are collected.
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')
1459 1459
1460 1460
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    """Pass-through wrapper: only registers the --large option.

    The actual handling of --large lives in outgoinghook().
    """
    return orig(*args, **kwargs)
1468 1468
1469 1469
def summaryremotehook(ui, repo, opts, changes):
    """Remote hook for 'hg summary --large': report largefiles to upload."""
    largeopt = opts.get(b'large', False)
    if changes is None:
        # Pre-flight query: report (incoming-needed, outgoing-needed).
        # Outgoing information is only required when --large was given.
        return (False, True) if largeopt else (False, False)
    if not largeopt:
        return
    url, branch, peer, outgoing = changes[1]
    if peer is None:
        # i18n: column positioning for "hg summary"
        ui.status(_(b'largefiles: (no remote repo)\n'))
        return

    toupload = set()
    lfhashes = set()

    def addfunc(fn, lfhash):
        toupload.add(fn)
        lfhashes.add(lfhash)

    _getoutgoings(repo, peer, outgoing.missing, addfunc)

    if not toupload:
        # i18n: column positioning for "hg summary"
        ui.status(_(b'largefiles: (no files to upload)\n'))
    else:
        # i18n: column positioning for "hg summary"
        ui.status(
            _(b'largefiles: %d entities for %d files to upload\n')
            % (len(lfhashes), len(toupload))
        )
1502 1502
1503 1503
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefiles status tracking enabled."""
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1510 1510
1511 1511
@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    """addremove that routes large files through the largefiles machinery.

    Removes missing largefiles and adds new ones via the largefiles helpers,
    then hands the remaining (normal) files to the original addremove.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s, mtime_boundary = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)
1561 1561
1562 1562
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Run purge with a status() that hides tracked largefiles.

    Temporarily replaces repo.status so that purge never sees largefiles as
    unknown/ignored (which would get them deleted).
    """
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        # Filter lfdirstate-tracked files out of the unknown/ignored lists.
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [
            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
        ]
        ignored = [
            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
        ]
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
1605 1605
1606 1606
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """Rollback, then restore standin files to match the new parent."""
    with repo.wlock():
        before = repo.dirstate.parents()
        # Standins tracked before the rollback; any that remain untracked
        # afterwards are orphans and are deleted at the end.
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate.get_entry(f).removed:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore standin content from the (new) parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

    return result
1638 1638
1639 1639
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with automated standin commit hooks installed."""
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        # Hooks stay installed for the whole transplant run.
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1651 1651
1652 1652
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """'hg cat' that emits largefile contents instead of standin hashes.

    Returns 0 if at least one file was written, 1 otherwise.
    """
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # Also match a standin when the corresponding largefile name matches.
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        # Suppress "no such file" complaints for names resolved via standins.
        if not f in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        # Always descend into the standin directory so standins are visited.
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # ``f`` is a standin: fetch (if needed) and stream the
                # largefile from the user cache.
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1723 1723
1724 1724
@eh.wrapfunction(merge, b'_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    """Wrap merge._update() to keep largefiles in sync with their standins.

    Standins are refreshed from the working copy before the update, and the
    largefiles whose standins changed are re-fetched afterwards.
    """
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s, mtime_boundary = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                # unchanged relative to the parent: treat as clean below
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.set_possibly_dirty(lfile)
        lfdirstate.write(repo.currenttransaction())

        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        with lfdirstate.parentchange():
            result = orig(repo, node, branchmerge, force, *args, **kwargs)

            newstandins = lfutil.getstandinsstate(repo)
            filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

            # to avoid leaving all largefiles as dirty and thus rehash them, mark
            # all the ones that didn't change as clean
            for lfile in oldclean.difference(filelist):
                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
            lfdirstate.write(repo.currenttransaction())

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

        return result
1811 1811
1812 1812
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After marktouched(), refresh largefiles whose standins were touched."""
    result = orig(repo, files, *args, **kwargs)

    # Translate touched standins back to largefile names.
    touched = [
        lf for lf in map(lfutil.splitstandin, files) if lf is not None
    ]
    if touched:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=touched,
            printmessage=False,
            normallookup=True,
        )

    return result
1832 1832
1833 1833
@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Carry the 'largefiles' requirement through 'hg debugupgraderepo'."""
    reqs = orig(repo)
    if b'largefiles' in repo.requirements:
        reqs.add(b'largefiles')
    return reqs
1841 1841
1842 1842
_lfscheme = b'largefile://'


@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None, **kwargs):
    """url.open() that serves 'largefile://<id>' URLs from the local store."""
    if not url_.startswith(_lfscheme):
        # Not ours: defer to the regular URL opener.
        return orig(ui, url_, data=data, **kwargs)
    if data:
        msg = b"cannot use data on a 'largefile://' url"
        raise error.ProgrammingError(msg)
    lfid = url_[len(_lfscheme) :]
    return storefactory.getlfile(ui, lfid)
@@ -1,1299 +1,1298 b''
1 1 # filemerge.py - file-level merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007, 2008 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import os
12 12 import re
13 13 import shutil
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 short,
19 19 )
20 20 from .pycompat import (
21 21 getattr,
22 22 open,
23 23 )
24 24
25 25 from . import (
26 26 encoding,
27 27 error,
28 28 formatter,
29 29 match,
30 30 pycompat,
31 31 registrar,
32 32 scmutil,
33 33 simplemerge,
34 34 tagmerge,
35 35 templatekw,
36 36 templater,
37 37 templateutil,
38 38 util,
39 39 )
40 40
41 41 from .utils import (
42 42 procutil,
43 43 stringutil,
44 44 )
45 45
46 46
47 47 def _toolstr(ui, tool, part, *args):
48 48 return ui.config(b"merge-tools", tool + b"." + part, *args)
49 49
50 50
51 51 def _toolbool(ui, tool, part, *args):
52 52 return ui.configbool(b"merge-tools", tool + b"." + part, *args)
53 53
54 54
55 55 def _toollist(ui, tool, part):
56 56 return ui.configlist(b"merge-tools", tool + b"." + part)
57 57
58 58
# Registry of all internal merge tools, keyed by name (e.g. b':merge').
internals = {}
# Merge tools to document.
internalsdoc = {}

# Decorator used below to register the b':name' internal tools.
internaltool = registrar.internalmerge()

# internal tool merge types
nomerge = internaltool.nomerge
mergeonly = internaltool.mergeonly  # just the full merge, no premerge
fullmerge = internaltool.fullmerge  # both premerge and merge

# IMPORTANT: keep the last line of this prompt very short ("What do you want to
# do?") because of issue6158, ideally to <40 English characters (to allow other
# languages that may take more columns to still have a chance to fit in an
# 80-column screen).
_localchangedotherdeletedmsg = _(
    b"file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n"
    b"You can use (c)hanged version, (d)elete, or leave (u)nresolved.\n"
    b"What do you want to do?"
    b"$$ &Changed $$ &Delete $$ &Unresolved"
)

_otherchangedlocaldeletedmsg = _(
    b"file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n"
    b"You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.\n"
    b"What do you want to do?"
    b"$$ &Changed $$ &Deleted $$ &Unresolved"
)
87 87
88 88
class absentfilectx(object):
    """Represents a file that's ostensibly in a context but is actually not
    present in it.

    This is here because it's very specific to the filemerge code for now --
    other code is likely going to break with the values this returns."""

    _customcmp = True

    def __init__(self, ctx, f):
        self._ctx = ctx
        self._f = f

    def __bytes__(self):
        return b'absent file %s@%s' % (self._f, self._ctx)

    def path(self):
        return self._f

    def changectx(self):
        return self._ctx

    # An absent file has no content, size, or flags.
    def size(self):
        return None

    def data(self):
        return None

    def flags(self):
        return b''

    def filenode(self):
        return self._ctx.repo().nullid

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # Only another absent file at the same path in the same context
        # compares equal.
        same = (
            fctx.isabsent()
            and fctx.changectx() == self.changectx()
            and fctx.path() == self.path()
        )
        return not same

    def isbinary(self):
        return False

    def isabsent(self):
        return True
139 139
140 140
def _findtool(ui, tool):
    """Resolve a tool name to something runnable.

    Internal tools resolve to their own name; "python:" specs are returned
    verbatim; everything else is located on disk via findexternaltool().
    Returns None-ish (whatever findexternaltool returns) when not found.
    """
    if tool in internals:
        return tool
    cmd = _toolstr(ui, tool, b"executable", tool)
    if cmd.startswith(b'python:'):
        return cmd
    return findexternaltool(ui, tool)
148 148
149 149
150 150 def _quotetoolpath(cmd):
151 151 if cmd.startswith(b'python:'):
152 152 return cmd
153 153 return procutil.shellquote(cmd)
154 154
155 155
def findexternaltool(ui, tool):
    """Locate an external merge tool's executable on disk.

    Windows registry keys (merge-tools.<tool>.regkey / regkeyalt) are
    consulted first; otherwise the configured executable name is searched
    for on the path. Returns the path or None.
    """
    for kn in (b"regkey", b"regkeyalt"):
        k = _toolstr(ui, tool, kn)
        if not k:
            continue
        p = util.lookupreg(k, _toolstr(ui, tool, b"regname"))
        if p:
            # regappend is an optional suffix (e.g. the binary name) to
            # append to the registry value.
            p = procutil.findexe(p + _toolstr(ui, tool, b"regappend", b""))
            if p:
                return p
    exe = _toolstr(ui, tool, b"executable", tool)
    return procutil.findexe(util.expandpath(exe))
168 168
169 169
def _picktool(repo, ui, path, binary, symlink, changedelete):
    """Pick a merge tool for ``path``.

    Returns a ``(tool, toolpath)`` pair.  Selection priority is:
    ui.forcemerge, then $HGMERGE, then merge-patterns, then merge-tools by
    priority, then the internal ':merge'/':prompt' fallbacks.  Tools that
    cannot handle the file's symlink/binary/change-delete nature are
    skipped (or replaced by ':prompt' for forced tools).
    """
    strictcheck = ui.configbool(b'merge', b'strict-capability-check')

    def hascapability(tool, capability, strict=False):
        # Internal tools advertise capabilities only under strict checking;
        # external tools use merge-tools.<tool>.<capability> config.
        if tool in internals:
            return strict and internals[tool].capabilities.get(capability)
        return _toolbool(ui, tool, capability)

    def supportscd(tool):
        # Only the internal nomerge tools can resolve change/delete conflicts.
        return tool in internals and internals[tool].mergetype == nomerge

    def check(tool, pat, symlink, binary, changedelete):
        # Return True if `tool` exists and can handle this file.
        tmsg = tool
        if pat:
            tmsg = _(b"%s (for pattern %s)") % (tool, pat)
        if not _findtool(ui, tool):
            if pat:  # explicitly requested tool deserves a warning
                ui.warn(_(b"couldn't find merge tool %s\n") % tmsg)
            else:  # configured but non-existing tools are more silent
                ui.note(_(b"couldn't find merge tool %s\n") % tmsg)
        elif symlink and not hascapability(tool, b"symlink", strictcheck):
            ui.warn(_(b"tool %s can't handle symlinks\n") % tmsg)
        elif binary and not hascapability(tool, b"binary", strictcheck):
            ui.warn(_(b"tool %s can't handle binary\n") % tmsg)
        elif changedelete and not supportscd(tool):
            # the nomerge tools are the only tools that support change/delete
            # conflicts
            pass
        elif not procutil.gui() and _toolbool(ui, tool, b"gui"):
            ui.warn(_(b"tool %s requires a GUI\n") % tmsg)
        else:
            return True
        return False

    # internal config: ui.forcemerge
    # forcemerge comes from command line arguments, highest priority
    force = ui.config(b'ui', b'forcemerge')
    if force:
        toolpath = _findtool(ui, force)
        if changedelete and not supportscd(toolpath):
            return b":prompt", None
        else:
            if toolpath:
                return (force, _quotetoolpath(toolpath))
            else:
                # mimic HGMERGE if given tool not found
                return (force, force)

    # HGMERGE takes next precedence
    hgmerge = encoding.environ.get(b"HGMERGE")
    if hgmerge:
        if changedelete and not supportscd(hgmerge):
            return b":prompt", None
        else:
            return (hgmerge, hgmerge)

    # then patterns

    # whether binary capability should be checked strictly
    binarycap = binary and strictcheck

    for pat, tool in ui.configitems(b"merge-patterns"):
        mf = match.match(repo.root, b'', [pat])
        if mf(path) and check(tool, pat, symlink, binarycap, changedelete):
            if binary and not hascapability(tool, b"binary", strict=True):
                ui.warn(
                    _(
                        b"warning: check merge-patterns configurations,"
                        b" if %r for binary file %r is unintentional\n"
                        b"(see 'hg help merge-tools'"
                        b" for binary files capability)\n"
                    )
                    % (pycompat.bytestr(tool), pycompat.bytestr(path))
                )
            toolpath = _findtool(ui, tool)
            return (tool, _quotetoolpath(toolpath))

    # then merge tools
    tools = {}
    disabled = set()
    for k, v in ui.configitems(b"merge-tools"):
        t = k.split(b'.')[0]
        if t not in tools:
            tools[t] = int(_toolstr(ui, t, b"priority"))
        if _toolbool(ui, t, b"disabled"):
            disabled.add(t)
    names = tools.keys()
    # Sort enabled tools by descending priority (negated for ascending sort).
    tools = sorted(
        [(-p, tool) for tool, p in tools.items() if tool not in disabled]
    )
    uimerge = ui.config(b"ui", b"merge")
    if uimerge:
        # external tools defined in uimerge won't be able to handle
        # change/delete conflicts
        if check(uimerge, path, symlink, binary, changedelete):
            if uimerge not in names and not changedelete:
                return (uimerge, uimerge)
            tools.insert(0, (None, uimerge))  # highest priority
    tools.append((None, b"hgmerge"))  # the old default, if found
    for p, t in tools:
        if check(t, None, symlink, binary, changedelete):
            toolpath = _findtool(ui, t)
            return (t, _quotetoolpath(toolpath))

    # internal merge or prompt as last resort
    if symlink or binary or changedelete:
        if not changedelete and len(tools):
            # any tool is rejected by capability for symlink or binary
            ui.warn(_(b"no tool found to merge %s\n") % path)
        return b":prompt", None
    return b":merge", None
281 281
282 282
283 283 def _eoltype(data):
284 284 """Guess the EOL type of a file"""
285 285 if b'\0' in data: # binary
286 286 return None
287 287 if b'\r\n' in data: # Windows
288 288 return b'\r\n'
289 289 if b'\r' in data: # Old Mac
290 290 return b'\r'
291 291 if b'\n' in data: # UNIX
292 292 return b'\n'
293 293 return None # unknown
294 294
295 295
def _matcheol(file, backup):
    """Convert EOL markers in a file to match origfile"""
    # Use the pre-merge backup's EOL style as the target; the merge tool may
    # have rewritten the file with different line endings.
    tostyle = _eoltype(backup.data())  # No repo.wread filters?
    if tostyle:
        data = util.readfile(file)
        style = _eoltype(data)
        if style:
            newdata = data.replace(style, tostyle)
            # Only rewrite when something actually changed.
            if newdata != data:
                util.writefile(file, newdata)
306 306
307 307
@internaltool(b'prompt', nomerge)
def _iprompt(repo, mynode, fcd, fco, fca, toolconf, labels=None):
    """Asks the user which of the local `p1()` or the other `p2()` version to
    keep as the merged version."""
    ui = repo.ui
    fd = fcd.path()
    uipathfn = scmutil.getuipathfn(repo)

    # Avoid prompting during an in-memory merge since it doesn't support merge
    # conflicts.
    if fcd.changectx().isinmemory():
        raise error.InMemoryMergeConflictsError(
            b'in-memory merge does not support file conflicts'
        )

    prompts = partextras(labels)
    prompts[b'fd'] = uipathfn(fd)
    try:
        if fco.isabsent():
            # change/delete: other side deleted the file.  Default (index 2)
            # is 'unresolved'.
            index = ui.promptchoice(_localchangedotherdeletedmsg % prompts, 2)
            choice = [b'local', b'other', b'unresolved'][index]
        elif fcd.isabsent():
            # change/delete: local side deleted the file.  Note the choice
            # list is ordered to match the prompt's answer order.
            index = ui.promptchoice(_otherchangedlocaldeletedmsg % prompts, 2)
            choice = [b'other', b'local', b'unresolved'][index]
        else:
            # IMPORTANT: keep the last line of this prompt ("What do you want to
            # do?") very short, see comment next to _localchangedotherdeletedmsg
            # at the top of the file for details.
            index = ui.promptchoice(
                _(
                    b"file '%(fd)s' needs to be resolved.\n"
                    b"You can keep (l)ocal%(l)s, take (o)ther%(o)s, or leave "
                    b"(u)nresolved.\n"
                    b"What do you want to do?"
                    b"$$ &Local $$ &Other $$ &Unresolved"
                )
                % prompts,
                2,
            )
            choice = [b'local', b'other', b'unresolved'][index]

        # Delegate to the corresponding nomerge tool.
        if choice == b'other':
            return _iother(repo, mynode, fcd, fco, fca, toolconf, labels)
        elif choice == b'local':
            return _ilocal(repo, mynode, fcd, fco, fca, toolconf, labels)
        elif choice == b'unresolved':
            return _ifail(repo, mynode, fcd, fco, fca, toolconf, labels)
    except error.ResponseExpected:
        # Non-interactive input ran dry; treat as unresolved.
        ui.write(b"\n")
        return _ifail(repo, mynode, fcd, fco, fca, toolconf, labels)
358 358
359 359
@internaltool(b'local', nomerge)
def _ilocal(repo, mynode, fcd, fco, fca, toolconf, labels=None):
    """Uses the local `p1()` version of files as the merged version."""
    # Return (exit status, deleted): keeping local is always a success, and
    # the file is "deleted" exactly when the local version is absent.
    return 0, fcd.isabsent()
364 364
365 365
@internaltool(b'other', nomerge)
def _iother(repo, mynode, fcd, fco, fca, toolconf, labels=None):
    """Uses the other `p2()` version of files as the merged version."""
    if fco.isabsent():
        # local changed, remote deleted -- 'deleted' picked
        _underlyingfctxifabsent(fcd).remove()
        deleted = True
    else:
        # Overwrite the working copy with the other side's content and flags.
        _underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
        deleted = False
    # (exit status, deleted) -- taking 'other' always succeeds.
    return 0, deleted
377 377
378 378
@internaltool(b'fail', nomerge)
def _ifail(repo, mynode, fcd, fco, fca, toolconf, labels=None):
    """
    Rather than attempting to merge files that were modified on both
    branches, it marks them as unresolved. The resolve command must be
    used to resolve these conflicts."""
    # for change/delete conflicts write out the changed version, then fail
    if fcd.isabsent():
        _underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
    # (exit status 1 = unresolved, deleted = False)
    return 1, False
389 389
390 390
391 391 def _underlyingfctxifabsent(filectx):
392 392 """Sometimes when resolving, our fcd is actually an absentfilectx, but
393 393 we want to write to it (to do the resolve). This helper returns the
394 394 underyling workingfilectx in that case.
395 395 """
396 396 if filectx.isabsent():
397 397 return filectx.changectx()[filectx.path()]
398 398 else:
399 399 return filectx
400 400
401 401
def _premerge(repo, fcd, fco, fca, toolconf, backup, labels=None):
    """Attempt a quiet simplemerge before running the real merge tool.

    Returns 0 when the premerge fully resolved the file, 1 when the caller
    should continue with the configured merge tool.  The 'keep*' premerge
    modes leave the (possibly conflicted) premerge output in place instead
    of restoring from backup.
    """
    tool, toolpath, binary, symlink, scriptfn = toolconf
    # Premerge can't help symlinks or change/delete conflicts.
    if symlink or fcd.isabsent() or fco.isabsent():
        return 1

    ui = repo.ui

    validkeep = [b'keep', b'keep-merge3', b'keep-mergediff']

    # do we attempt to simplemerge first?
    try:
        premerge = _toolbool(ui, tool, b"premerge", not binary)
    except error.ConfigError:
        # Not a boolean: accept only the 'keep*' string values.
        premerge = _toolstr(ui, tool, b"premerge", b"").lower()
        if premerge not in validkeep:
            _valid = b', '.join([b"'" + v + b"'" for v in validkeep])
            raise error.ConfigError(
                _(b"%s.premerge not valid ('%s' is neither boolean nor %s)")
                % (tool, premerge, _valid)
            )

    if premerge:
        mode = b'merge'
        if premerge in {b'keep-merge3', b'keep-mergediff'}:
            # These modes need a third ('base') label.
            if not labels:
                labels = _defaultconflictlabels
            if len(labels) < 3:
                labels.append(b'base')
            if premerge == b'keep-mergediff':
                mode = b'mergediff'
        r = simplemerge.simplemerge(
            ui, fcd, fca, fco, quiet=True, label=labels, mode=mode
        )
        if not r:
            ui.debug(b" premerge successful\n")
            return 0
        if premerge not in validkeep:
            # restore from backup and try again
            _restorebackup(fcd, backup)
    return 1  # continue merging
442 442
443 443
def _mergecheck(repo, mynode, fcd, fco, fca, toolconf):
    """Precheck for internal merge tools: reject symlinks and change/delete
    conflicts, which simplemerge-based tools cannot handle.

    Returns True when the internal tool may proceed.
    """
    tool, toolpath, binary, symlink, scriptfn = toolconf
    uipathfn = scmutil.getuipathfn(repo)
    if symlink:
        repo.ui.warn(
            _(b'warning: internal %s cannot merge symlinks for %s\n')
            % (tool, uipathfn(fcd.path()))
        )
        return False
    if fcd.isabsent() or fco.isabsent():
        repo.ui.warn(
            _(
                b'warning: internal %s cannot merge change/delete '
                b'conflict for %s\n'
            )
            % (tool, uipathfn(fcd.path()))
        )
        return False
    return True
463 463
464 464
def _merge(repo, mynode, fcd, fco, fca, toolconf, backup, labels, mode):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Markers will have two sections, one for each side
    of merge, unless mode equals 'union' which suppresses the markers."""
    ui = repo.ui

    # r is 0 on a clean merge, non-zero when conflicts remain.
    r = simplemerge.simplemerge(ui, fcd, fca, fco, label=labels, mode=mode)
    # (merge attempted, exit status, deleted)
    return True, r, False
475 475
476 476
@internaltool(
    b'union',
    fullmerge,
    _(
        b"warning: conflicts while merging %s! "
        b"(edit, then use 'hg resolve --mark')\n"
    ),
    precheck=_mergecheck,
)
def _iunion(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will use both left and right sides for conflict regions.
    No markers are inserted."""
    # 'union' mode keeps both sides of each conflict region, no markers.
    return _merge(
        repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'union'
    )
494 494
495 495
@internaltool(
    b'merge',
    fullmerge,
    _(
        b"warning: conflicts while merging %s! "
        b"(edit, then use 'hg resolve --mark')\n"
    ),
    precheck=_mergecheck,
)
def _imerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Markers will have two sections, one for each side
    of merge."""
    # Plain two-way conflict markers (local/other).
    return _merge(
        repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'merge'
    )
514 514
515 515
@internaltool(
    b'merge3',
    fullmerge,
    _(
        b"warning: conflicts while merging %s! "
        b"(edit, then use 'hg resolve --mark')\n"
    ),
    precheck=_mergecheck,
)
def _imerge3(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Marker will have three sections, one from each
    side of the merge and one for the base content."""
    # Ensure a third label exists for the base section before delegating.
    if not labels:
        labels = _defaultconflictlabels
    if len(labels) < 3:
        labels.append(b'base')
    return _imerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels)
536 536
537 537
@internaltool(
    b'merge3-lie-about-conflicts',
    fullmerge,
    b'',
    precheck=_mergecheck,
)
def _imerge3alwaysgood(*args, **kwargs):
    # Like merge3, but record conflicts as resolved with markers in place.
    #
    # This is used for `diff.merge` to show the differences between
    # the auto-merge state and the committed merge state. It may be
    # useful for other things.
    b1, junk, b2 = _imerge3(*args, **kwargs)
    # TODO is this right? I'm not sure what these return values mean,
    # but as far as I can tell this will indicate to callers tha the
    # merge succeeded.
    # NOTE(review): forcing the middle element (exit status) to False makes
    # a conflicted merge3 result look resolved -- intentional here.
    return b1, False, b2
555 555
556 556
@internaltool(
    b'mergediff',
    fullmerge,
    _(
        b"warning: conflicts while merging %s! "
        b"(edit, then use 'hg resolve --mark')\n"
    ),
    precheck=_mergecheck,
)
def _imerge_diff(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. The marker will have two sections, one with the
    content from one side of the merge, and one with a diff from the base
    content to the content on the other side. (experimental)"""
    # A base label is required to render the diff section of the marker.
    if not labels:
        labels = _defaultconflictlabels
    if len(labels) < 3:
        labels.append(b'base')
    return _merge(
        repo, mynode, fcd, fco, fca, toolconf, backup, labels, b'mergediff'
    )
580 580
581 581
def _imergeauto(
    repo,
    mynode,
    fcd,
    fco,
    fca,
    toolconf,
    backup,
    labels=None,
    localorother=None,
):
    """
    Generic driver for _imergelocal and _imergeother

    localorother selects which side wins conflict regions (b'local' or
    b'other'); callers must supply it.
    """
    assert localorother is not None
    r = simplemerge.simplemerge(
        repo.ui, fcd, fca, fco, label=labels, localorother=localorother
    )
    # (merge attempted, exit status); callers append the 'deleted' flag.
    return True, r
601 601
602 602
@internaltool(b'merge-local', mergeonly, precheck=_mergecheck)
def _imergelocal(*args, **kwargs):
    """
    Like :merge, but resolve all conflicts non-interactively in favor
    of the local `p1()` changes."""
    success, status = _imergeauto(localorother=b'local', *args, **kwargs)
    # Append deleted=False to match the standard internal-tool return shape.
    return success, status, False
610 610
611 611
@internaltool(b'merge-other', mergeonly, precheck=_mergecheck)
def _imergeother(*args, **kwargs):
    """
    Like :merge, but resolve all conflicts non-interactively in favor
    of the other `p2()` changes."""
    success, status = _imergeauto(localorother=b'other', *args, **kwargs)
    # Append deleted=False to match the standard internal-tool return shape.
    return success, status, False
619 619
620 620
@internaltool(
    b'tagmerge',
    mergeonly,
    _(
        b"automatic tag merging of %s failed! "
        b"(use 'hg resolve --tool :merge' or another merge "
        b"tool of your choice)\n"
    ),
)
def _itagmerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Uses the internal tag merge algorithm (experimental).
    """
    # tagmerge.merge understands .hgtags semantics instead of merging text.
    success, status = tagmerge.merge(repo, fcd, fco, fca)
    return success, status, False
636 636
637 637
@internaltool(b'dump', fullmerge, binary=True, symlink=True)
def _idump(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Creates three versions of the files to merge, containing the
    contents of local, other and base. These files can then be used to
    perform a merge manually. If the file to be merged is named
    ``a.txt``, these files will accordingly be named ``a.txt.local``,
    ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
    same directory as ``a.txt``.

    This implies premerge. Therefore, files aren't dumped, if premerge
    runs successfully. Use :forcedump to forcibly write files out.
    """
    a = _workingpath(repo, fcd)
    fd = fcd.path()

    # Deferred import to avoid a module cycle with context.
    from . import context

    if isinstance(fcd, context.overlayworkingfilectx):
        raise error.InMemoryMergeConflictsError(
            b'in-memory merge does not support the :dump tool.'
        )

    # .local is written raw; .other/.base go through wwrite so working-dir
    # filters (e.g. EOL) apply.
    util.writefile(a + b".local", fcd.decodeddata())
    repo.wwrite(fd + b".other", fco.data(), fco.flags())
    repo.wwrite(fd + b".base", fca.data(), fca.flags())
    # Always "fails" (status 1) so the file stays unresolved for manual merge.
    return False, 1, False
665 665
666 666
@internaltool(b'forcedump', mergeonly, binary=True, symlink=True)
def _forcedump(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """
    Creates three versions of the files as same as :dump, but omits premerge.
    """
    # mergeonly (vs :dump's fullmerge) is what skips the premerge step.
    return _idump(repo, mynode, fcd, fco, fca, toolconf, backup, labels=labels)
673 673
674 674
def _xmergeimm(repo, mynode, fcd, fco, fca, toolconf, backup, labels=None):
    """In-memory stand-in for _xmerge: external tools are unsupported."""
    # In-memory merge simply raises an exception on all external merge tools,
    # for now.
    #
    # It would be possible to run most tools with temporary files, but this
    # raises the question of what to do if the user only partially resolves the
    # file -- we can't leave a merge state. (Copy to somewhere in the .hg/
    # directory and tell the user how to get it is my best idea, but it's
    # clunky.)
    raise error.InMemoryMergeConflictsError(
        b'in-memory merge does not support external merge tools'
    )
687 687
688 688
def _describemerge(ui, repo, mynode, fcl, fcb, fco, env, toolpath, args):
    """Render the command-templates.pre-merge-tool-output template (if
    configured) describing the external merge tool invocation."""
    tmpl = ui.config(b'command-templates', b'pre-merge-tool-output')
    if not tmpl:
        # Nothing configured: stay silent.
        return

    mappingdict = templateutil.mappingdict
    # Template keywords: top-level ctx/node/path plus per-side sub-mappings.
    props = {
        b'ctx': fcl.changectx(),
        b'node': hex(mynode),
        b'path': fcl.path(),
        b'local': mappingdict(
            {
                b'ctx': fcl.changectx(),
                b'fctx': fcl,
                b'node': hex(mynode),
                b'name': _(b'local'),
                b'islink': b'l' in fcl.flags(),
                b'label': env[b'HG_MY_LABEL'],
            }
        ),
        b'base': mappingdict(
            {
                b'ctx': fcb.changectx(),
                b'fctx': fcb,
                b'name': _(b'base'),
                b'islink': b'l' in fcb.flags(),
                b'label': env[b'HG_BASE_LABEL'],
            }
        ),
        b'other': mappingdict(
            {
                b'ctx': fco.changectx(),
                b'fctx': fco,
                b'name': _(b'other'),
                b'islink': b'l' in fco.flags(),
                b'label': env[b'HG_OTHER_LABEL'],
            }
        ),
        b'toolpath': toolpath,
        b'toolargs': args,
    }

    # TODO: make all of this something that can be specified on a per-tool basis
    tmpl = templater.unquotestring(tmpl)

    # Not using cmdutil.rendertemplate here since it causes errors importing
    # things for us to import cmdutil.
    tres = formatter.templateresources(ui, repo)
    t = formatter.maketemplater(
        ui, tmpl, defaults=templatekw.keywords, resources=tres
    )
    ui.status(t.renderdefault(props))
741 741
742 742
def _xmerge(repo, mynode, fcd, fco, fca, toolconf, backup, labels):
    """Run an external merge tool (shell command or "python:" script).

    Returns (merge attempted, exit status, deleted).  Change/delete
    conflicts are rejected up front since external tools cannot handle an
    absent side.
    """
    tool, toolpath, binary, symlink, scriptfn = toolconf
    uipathfn = scmutil.getuipathfn(repo)
    if fcd.isabsent() or fco.isabsent():
        repo.ui.warn(
            _(b'warning: %s cannot merge change/delete conflict for %s\n')
            % (tool, uipathfn(fcd.path()))
        )
        return False, 1, None
    localpath = _workingpath(repo, fcd)
    args = _toolstr(repo.ui, tool, b"args")

    # Materialize base/other (and, for $output-style tools, a copy of local)
    # as temp files for the external tool's consumption.
    with _maketempfiles(
        repo, fco, fca, repo.wvfs.join(backup.path()), b"$output" in args
    ) as temppaths:
        basepath, otherpath, localoutputpath = temppaths
        outpath = b""
        mylabel, otherlabel = labels[:2]
        if len(labels) >= 3:
            baselabel = labels[2]
        else:
            baselabel = b'base'
        # Environment exported to the tool process.
        env = {
            b'HG_FILE': fcd.path(),
            b'HG_MY_NODE': short(mynode),
            b'HG_OTHER_NODE': short(fco.changectx().node()),
            b'HG_BASE_NODE': short(fca.changectx().node()),
            b'HG_MY_ISLINK': b'l' in fcd.flags(),
            b'HG_OTHER_ISLINK': b'l' in fco.flags(),
            b'HG_BASE_ISLINK': b'l' in fca.flags(),
            b'HG_MY_LABEL': mylabel,
            b'HG_OTHER_LABEL': otherlabel,
            b'HG_BASE_LABEL': baselabel,
        }
        ui = repo.ui

        if b"$output" in args:
            # read input from backup, write to original
            outpath = localpath
            localpath = localoutputpath
        # Substitute $local/$base/$other/$output/$label* in the args string.
        replace = {
            b'local': localpath,
            b'base': basepath,
            b'other': otherpath,
            b'output': outpath,
            b'labellocal': mylabel,
            b'labelother': otherlabel,
            b'labelbase': baselabel,
        }
        args = util.interpolate(
            br'\$',
            replace,
            args,
            lambda s: procutil.shellquote(util.localpath(s)),
        )
        if _toolbool(ui, tool, b"gui"):
            repo.ui.status(
                _(b'running merge tool %s for file %s\n')
                % (tool, uipathfn(fcd.path()))
            )
        if scriptfn is None:
            # Plain external command: run through the shell.
            cmd = toolpath + b' ' + args
            repo.ui.debug(b'launching merge tool: %s\n' % cmd)
            _describemerge(ui, repo, mynode, fcd, fca, fco, env, toolpath, args)
            r = ui.system(
                cmd, cwd=repo.root, environ=env, blockedtag=b'mergetool'
            )
        else:
            # "python:path:funcname" tool: load the module and run the
            # function through the hook machinery.
            repo.ui.debug(
                b'launching python merge script: %s:%s\n' % (toolpath, scriptfn)
            )
            r = 0
            try:
                # avoid cycle cmdutil->merge->filemerge->extensions->cmdutil
                from . import extensions

                mod = extensions.loadpath(toolpath, b'hgmerge.%s' % tool)
            except Exception:
                raise error.Abort(
                    _(b"loading python merge script failed: %s") % toolpath
                )
            mergefn = getattr(mod, scriptfn, None)
            if mergefn is None:
                raise error.Abort(
                    _(b"%s does not have function: %s") % (toolpath, scriptfn)
                )
            argslist = procutil.shellsplit(args)
            # avoid cycle cmdutil->merge->filemerge->hook->extensions->cmdutil
            from . import hook

            ret, raised = hook.pythonhook(
                ui, repo, b"merge", toolpath, mergefn, {b'args': argslist}, True
            )
            if raised:
                r = 1
        repo.ui.debug(b'merge tool returned: %d\n' % r)
        return True, r, False
840 840
841 841
def _formatlabel(ctx, template, label, pad):
    """Applies the given template to the ctx, prefixed by the label.

    Pad is the minimum width of the label prefix, so that multiple markers
    can have aligned templated parts.
    """
    if ctx.node() is None:
        # Working-directory ctx has no node; describe its first parent.
        ctx = ctx.p1()

    props = {b'ctx': ctx}
    templateresult = template.renderdefault(props)

    label = (b'%s:' % label).ljust(pad + 1)
    mark = b'%s %s' % (label, templateresult)

    if mark:
        mark = mark.splitlines()[0]  # split for safety

    # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
    return stringutil.ellipsis(mark, 80 - 8)
862 862
863 863
864 864 _defaultconflictlabels = [b'local', b'other']
865 865
866 866
def _formatlabels(repo, fcd, fco, fca, labels, tool=None):
    """Formats the given labels using the conflict marker template.

    Returns a list of formatted labels (local, other, and optionally base).
    """
    cd = fcd.changectx()
    co = fco.changectx()
    ca = fca.changectx()

    ui = repo.ui
    template = ui.config(b'command-templates', b'mergemarker')
    if tool is not None:
        # A per-tool mergemarkertemplate overrides the global template.
        template = _toolstr(ui, tool, b'mergemarkertemplate', template)
    template = templater.unquotestring(template)
    tres = formatter.templateresources(ui, repo)
    tmpl = formatter.maketemplater(
        ui, template, defaults=templatekw.keywords, resources=tres
    )

    # Pad all labels to the longest so templated parts line up.
    pad = max(len(l) for l in labels)

    newlabels = [
        _formatlabel(cd, tmpl, labels[0], pad),
        _formatlabel(co, tmpl, labels[1], pad),
    ]
    if len(labels) > 2:
        newlabels.append(_formatlabel(ca, tmpl, labels[2], pad))
    return newlabels
895 895
896 896
def partextras(labels):
    """Return a dictionary of extra labels for use in prompts to the user

    Intended use is in strings of the form "(l)ocal%(l)s".
    """
    if labels is None:
        # No labels configured: substitute empty strings.
        return {b"l": b"", b"o": b""}

    local_label = labels[0]
    other_label = labels[1]
    return {
        b"l": b" [%s]" % local_label,
        b"o": b" [%s]" % other_label,
    }
912 912
913 913
def _restorebackup(fcd, backup):
    """Restore ``fcd``'s content from its pre-merge backup (keeps fcd's
    current flags)."""
    # TODO: Add a workingfilectx.write(otherfilectx) path so we can use
    # util.copy here instead.
    fcd.write(backup.data(), fcd.flags())
918 918
919 919
920 920 def _makebackup(repo, ui, wctx, fcd):
921 921 """Makes and returns a filectx-like object for ``fcd``'s backup file.
922 922
923 923 In addition to preserving the user's pre-existing modifications to `fcd`
924 924 (if any), the backup is used to undo certain premerges, confirm whether a
925 925 merge changed anything, and determine what line endings the new file should
926 926 have.
927 927
928 928 Backups only need to be written once since their content doesn't change
929 929 afterwards.
930 930 """
931 931 if fcd.isabsent():
932 932 return None
933 933 # TODO: Break this import cycle somehow. (filectx -> ctx -> fileset ->
934 934 # merge -> filemerge). (I suspect the fileset import is the weakest link)
935 935 from . import context
936 936
937 937 backup = scmutil.backuppath(ui, repo, fcd.path())
938 938 inworkingdir = backup.startswith(repo.wvfs.base) and not backup.startswith(
939 939 repo.vfs.base
940 940 )
941 941 if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir:
942 942 # If the backup file is to be in the working directory, and we're
943 943 # merging in-memory, we must redirect the backup to the memory context
944 944 # so we don't disturb the working directory.
945 945 relpath = backup[len(repo.wvfs.base) + 1 :]
946 946 wctx[relpath].write(fcd.data(), fcd.flags())
947 947 return wctx[relpath]
948 948 else:
949 949 # Otherwise, write to wherever path the user specified the backups
950 950 # should go. We still need to switch based on whether the source is
951 951 # in-memory so we can use the fast path of ``util.copy`` if both are
952 952 # on disk.
953 953 if isinstance(fcd, context.overlayworkingfilectx):
954 954 util.writefile(backup, fcd.data())
955 955 else:
956 956 a = _workingpath(repo, fcd)
957 957 util.copyfile(a, backup)
958 958 # An arbitraryfilectx is returned, so we can run the same functions on
959 959 # the backup context regardless of where it lives.
960 960 return context.arbitraryfilectx(backup, repo=repo)
961 961
962 962
963 963 @contextlib.contextmanager
964 964 def _maketempfiles(repo, fco, fca, localpath, uselocalpath):
965 965 """Writes out `fco` and `fca` as temporary files, and (if uselocalpath)
966 966 copies `localpath` to another temporary file, so an external merge tool may
967 967 use them.
968 968 """
969 969 tmproot = None
970 970 tmprootprefix = repo.ui.config(b'experimental', b'mergetempdirprefix')
971 971 if tmprootprefix:
972 972 tmproot = pycompat.mkdtemp(prefix=tmprootprefix)
973 973
974 974 def maketempfrompath(prefix, path):
975 975 fullbase, ext = os.path.splitext(path)
976 976 pre = b"%s~%s" % (os.path.basename(fullbase), prefix)
977 977 if tmproot:
978 978 name = os.path.join(tmproot, pre)
979 979 if ext:
980 980 name += ext
981 981 f = open(name, "wb")
982 982 else:
983 983 fd, name = pycompat.mkstemp(prefix=pre + b'.', suffix=ext)
984 984 f = os.fdopen(fd, "wb")
985 985 return f, name
986 986
987 987 def tempfromcontext(prefix, ctx):
988 988 f, name = maketempfrompath(prefix, ctx.path())
989 989 data = ctx.decodeddata()
990 990 f.write(data)
991 991 f.close()
992 992 return name
993 993
994 994 b = tempfromcontext(b"base", fca)
995 995 c = tempfromcontext(b"other", fco)
996 996 d = localpath
997 997 if uselocalpath:
998 998 # We start off with this being the backup filename, so remove the .orig
999 999 # to make syntax-highlighting more likely.
1000 1000 if d.endswith(b'.orig'):
1001 1001 d, _ = os.path.splitext(d)
1002 1002 f, d = maketempfrompath(b"local", d)
1003 1003 with open(localpath, b'rb') as src:
1004 1004 f.write(src.read())
1005 1005 f.close()
1006 1006
1007 1007 try:
1008 1008 yield b, c, d
1009 1009 finally:
1010 1010 if tmproot:
1011 1011 shutil.rmtree(tmproot)
1012 1012 else:
1013 1013 util.unlink(b)
1014 1014 util.unlink(c)
1015 1015 # if not uselocalpath, d is the 'orig'/backup file which we
1016 1016 # shouldn't delete.
1017 1017 if d and uselocalpath:
1018 1018 util.unlink(d)
1019 1019
1020 1020
1021 1021 def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
1022 1022 """perform a 3-way merge in the working directory
1023 1023
1024 1024 mynode = parent node before merge
1025 1025 orig = original local filename before merge
1026 1026 fco = other file context
1027 1027 fca = ancestor file context
1028 1028 fcd = local file context for current/destination file
1029 1029
1030 1030 Returns whether the merge is complete, the return value of the merge, and
1031 1031 a boolean indicating whether the file was deleted from disk."""
1032 1032
1033 1033 if not fco.cmp(fcd): # files identical?
1034 return True, None, False
1034 return None, False
1035 1035
1036 1036 ui = repo.ui
1037 1037 fd = fcd.path()
1038 1038 uipathfn = scmutil.getuipathfn(repo)
1039 1039 fduipath = uipathfn(fd)
1040 1040 binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
1041 1041 symlink = b'l' in fcd.flags() + fco.flags()
1042 1042 changedelete = fcd.isabsent() or fco.isabsent()
1043 1043 tool, toolpath = _picktool(repo, ui, fd, binary, symlink, changedelete)
1044 1044 scriptfn = None
1045 1045 if tool in internals and tool.startswith(b'internal:'):
1046 1046 # normalize to new-style names (':merge' etc)
1047 1047 tool = tool[len(b'internal') :]
1048 1048 if toolpath and toolpath.startswith(b'python:'):
1049 1049 invalidsyntax = False
1050 1050 if toolpath.count(b':') >= 2:
1051 1051 script, scriptfn = toolpath[7:].rsplit(b':', 1)
1052 1052 if not scriptfn:
1053 1053 invalidsyntax = True
1054 1054 # missing :callable can lead to splitting on windows drive letter
1055 1055 if b'\\' in scriptfn or b'/' in scriptfn:
1056 1056 invalidsyntax = True
1057 1057 else:
1058 1058 invalidsyntax = True
1059 1059 if invalidsyntax:
1060 1060 raise error.Abort(_(b"invalid 'python:' syntax: %s") % toolpath)
1061 1061 toolpath = script
1062 1062 ui.debug(
1063 1063 b"picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n"
1064 1064 % (
1065 1065 tool,
1066 1066 fduipath,
1067 1067 pycompat.bytestr(binary),
1068 1068 pycompat.bytestr(symlink),
1069 1069 pycompat.bytestr(changedelete),
1070 1070 )
1071 1071 )
1072 1072
1073 1073 if tool in internals:
1074 1074 func = internals[tool]
1075 1075 mergetype = func.mergetype
1076 1076 onfailure = func.onfailure
1077 1077 precheck = func.precheck
1078 1078 isexternal = False
1079 1079 else:
1080 1080 if wctx.isinmemory():
1081 1081 func = _xmergeimm
1082 1082 else:
1083 1083 func = _xmerge
1084 1084 mergetype = fullmerge
1085 1085 onfailure = _(b"merging %s failed!\n")
1086 1086 precheck = None
1087 1087 isexternal = True
1088 1088
1089 1089 toolconf = tool, toolpath, binary, symlink, scriptfn
1090 1090
1091 1091 if mergetype == nomerge:
1092 r, deleted = func(repo, mynode, fcd, fco, fca, toolconf, labels)
1093 return True, r, deleted
1092 return func(repo, mynode, fcd, fco, fca, toolconf, labels)
1094 1093
1095 1094 if orig != fco.path():
1096 1095 ui.status(
1097 1096 _(b"merging %s and %s to %s\n")
1098 1097 % (uipathfn(orig), uipathfn(fco.path()), fduipath)
1099 1098 )
1100 1099 else:
1101 1100 ui.status(_(b"merging %s\n") % fduipath)
1102 1101
1103 1102 ui.debug(b"my %s other %s ancestor %s\n" % (fcd, fco, fca))
1104 1103
1105 1104 if precheck and not precheck(repo, mynode, fcd, fco, fca, toolconf):
1106 1105 if onfailure:
1107 1106 if wctx.isinmemory():
1108 1107 raise error.InMemoryMergeConflictsError(
1109 1108 b'in-memory merge does not support merge conflicts'
1110 1109 )
1111 1110 ui.warn(onfailure % fduipath)
1112 return True, 1, False
1111 return 1, False
1113 1112
1114 1113 backup = _makebackup(repo, ui, wctx, fcd)
1115 1114 r = 1
1116 1115 try:
1117 1116 internalmarkerstyle = ui.config(b'ui', b'mergemarkers')
1118 1117 if isexternal:
1119 1118 markerstyle = _toolstr(ui, tool, b'mergemarkers')
1120 1119 else:
1121 1120 markerstyle = internalmarkerstyle
1122 1121
1123 1122 if not labels:
1124 1123 labels = _defaultconflictlabels
1125 1124 formattedlabels = labels
1126 1125 if markerstyle != b'basic':
1127 1126 formattedlabels = _formatlabels(
1128 1127 repo, fcd, fco, fca, labels, tool=tool
1129 1128 )
1130 1129
1131 1130 if mergetype == fullmerge:
1132 1131 # conflict markers generated by premerge will use 'detailed'
1133 1132 # settings if either ui.mergemarkers or the tool's mergemarkers
1134 1133 # setting is 'detailed'. This way tools can have basic labels in
1135 1134 # space-constrained areas of the UI, but still get full information
1136 1135 # in conflict markers if premerge is 'keep' or 'keep-merge3'.
1137 1136 premergelabels = labels
1138 1137 labeltool = None
1139 1138 if markerstyle != b'basic':
1140 1139 # respect 'tool's mergemarkertemplate (which defaults to
1141 1140 # command-templates.mergemarker)
1142 1141 labeltool = tool
1143 1142 if internalmarkerstyle != b'basic' or markerstyle != b'basic':
1144 1143 premergelabels = _formatlabels(
1145 1144 repo, fcd, fco, fca, premergelabels, tool=labeltool
1146 1145 )
1147 1146
1148 1147 r = _premerge(
1149 1148 repo, fcd, fco, fca, toolconf, backup, labels=premergelabels
1150 1149 )
1151 1150 # we're done if premerge was successful (r is 0)
1152 1151 if not r:
1153 return not r, r, False
1152 return r, False
1154 1153
1155 1154 needcheck, r, deleted = func(
1156 1155 repo,
1157 1156 mynode,
1158 1157 fcd,
1159 1158 fco,
1160 1159 fca,
1161 1160 toolconf,
1162 1161 backup,
1163 1162 labels=formattedlabels,
1164 1163 )
1165 1164
1166 1165 if needcheck:
1167 1166 r = _check(repo, r, ui, tool, fcd, backup)
1168 1167
1169 1168 if r:
1170 1169 if onfailure:
1171 1170 if wctx.isinmemory():
1172 1171 raise error.InMemoryMergeConflictsError(
1173 1172 b'in-memory merge '
1174 1173 b'does not support '
1175 1174 b'merge conflicts'
1176 1175 )
1177 1176 ui.warn(onfailure % fduipath)
1178 1177 _onfilemergefailure(ui)
1179 1178
1180 return True, r, deleted
1179 return r, deleted
1181 1180 finally:
1182 1181 if not r and backup is not None:
1183 1182 backup.remove()
1184 1183
1185 1184
1186 1185 def _haltmerge():
1187 1186 msg = _(b'merge halted after failed merge (see hg resolve)')
1188 1187 raise error.InterventionRequired(msg)
1189 1188
1190 1189
1191 1190 def _onfilemergefailure(ui):
1192 1191 action = ui.config(b'merge', b'on-failure')
1193 1192 if action == b'prompt':
1194 1193 msg = _(b'continue merge operation (yn)?$$ &Yes $$ &No')
1195 1194 if ui.promptchoice(msg, 0) == 1:
1196 1195 _haltmerge()
1197 1196 if action == b'halt':
1198 1197 _haltmerge()
1199 1198 # default action is 'continue', in which case we neither prompt nor halt
1200 1199
1201 1200
1202 1201 def hasconflictmarkers(data):
1203 1202 # Detect lines starting with a string of 7 identical characters from the
1204 1203 # subset Mercurial uses for conflict markers, followed by either the end of
1205 1204 # line or a space and some text. Note that using [<>=+|-]{7} would detect
1206 1205 # `<><><><><` as a conflict marker, which we don't want.
1207 1206 return bool(
1208 1207 re.search(
1209 1208 br"^([<>=+|-])\1{6}( .*)$",
1210 1209 data,
1211 1210 re.MULTILINE,
1212 1211 )
1213 1212 )
1214 1213
1215 1214
1216 1215 def _check(repo, r, ui, tool, fcd, backup):
1217 1216 fd = fcd.path()
1218 1217 uipathfn = scmutil.getuipathfn(repo)
1219 1218
1220 1219 if not r and (
1221 1220 _toolbool(ui, tool, b"checkconflicts")
1222 1221 or b'conflicts' in _toollist(ui, tool, b"check")
1223 1222 ):
1224 1223 if hasconflictmarkers(fcd.data()):
1225 1224 r = 1
1226 1225
1227 1226 checked = False
1228 1227 if b'prompt' in _toollist(ui, tool, b"check"):
1229 1228 checked = True
1230 1229 if ui.promptchoice(
1231 1230 _(b"was merge of '%s' successful (yn)?$$ &Yes $$ &No")
1232 1231 % uipathfn(fd),
1233 1232 1,
1234 1233 ):
1235 1234 r = 1
1236 1235
1237 1236 if (
1238 1237 not r
1239 1238 and not checked
1240 1239 and (
1241 1240 _toolbool(ui, tool, b"checkchanged")
1242 1241 or b'changed' in _toollist(ui, tool, b"check")
1243 1242 )
1244 1243 ):
1245 1244 if backup is not None and not fcd.cmp(backup):
1246 1245 if ui.promptchoice(
1247 1246 _(
1248 1247 b" output file %s appears unchanged\n"
1249 1248 b"was merge successful (yn)?"
1250 1249 b"$$ &Yes $$ &No"
1251 1250 )
1252 1251 % uipathfn(fd),
1253 1252 1,
1254 1253 ):
1255 1254 r = 1
1256 1255
1257 1256 if backup is not None and _toolbool(ui, tool, b"fixeol"):
1258 1257 _matcheol(_workingpath(repo, fcd), backup)
1259 1258
1260 1259 return r
1261 1260
1262 1261
1263 1262 def _workingpath(repo, ctx):
1264 1263 return repo.wjoin(ctx.path())
1265 1264
1266 1265
1267 1266 def loadinternalmerge(ui, extname, registrarobj):
1268 1267 """Load internal merge tool from specified registrarobj"""
1269 1268 for name, func in pycompat.iteritems(registrarobj._table):
1270 1269 fullname = b':' + name
1271 1270 internals[fullname] = func
1272 1271 internals[b'internal:' + name] = func
1273 1272 internalsdoc[fullname] = func
1274 1273
1275 1274 capabilities = sorted([k for k, v in func.capabilities.items() if v])
1276 1275 if capabilities:
1277 1276 capdesc = b" (actual capabilities: %s)" % b', '.join(
1278 1277 capabilities
1279 1278 )
1280 1279 func.__doc__ = func.__doc__ + pycompat.sysstr(b"\n\n%s" % capdesc)
1281 1280
1282 1281 # to put i18n comments into hg.pot for automatically generated texts
1283 1282
1284 1283 # i18n: "binary" and "symlink" are keywords
1285 1284 # i18n: this text is added automatically
1286 1285 _(b" (actual capabilities: binary, symlink)")
1287 1286 # i18n: "binary" is keyword
1288 1287 # i18n: this text is added automatically
1289 1288 _(b" (actual capabilities: binary)")
1290 1289 # i18n: "symlink" is keyword
1291 1290 # i18n: this text is added automatically
1292 1291 _(b" (actual capabilities: symlink)")
1293 1292
1294 1293
1295 1294 # load built-in merge tools explicitly to setup internalsdoc
1296 1295 loadinternalmerge(None, None, internaltool)
1297 1296
1298 1297 # tell hggettext to extract docstrings from these functions:
1299 1298 i18nfunctions = internals.values()
@@ -1,845 +1,844 b''
1 1 from __future__ import absolute_import
2 2
3 3 import collections
4 4 import errno
5 5 import shutil
6 6 import struct
7 7
8 8 from .i18n import _
9 9 from .node import (
10 10 bin,
11 11 hex,
12 12 nullrev,
13 13 )
14 14 from . import (
15 15 error,
16 16 filemerge,
17 17 pycompat,
18 18 util,
19 19 )
20 20 from .utils import hashutil
21 21
22 22 _pack = struct.pack
23 23 _unpack = struct.unpack
24 24
25 25
26 26 def _droponode(data):
27 27 # used for compatibility for v1
28 28 bits = data.split(b'\0')
29 29 bits = bits[:-2] + bits[-1:]
30 30 return b'\0'.join(bits)
31 31
32 32
33 33 def _filectxorabsent(hexnode, ctx, f):
34 34 if hexnode == ctx.repo().nodeconstants.nullhex:
35 35 return filemerge.absentfilectx(ctx, f)
36 36 else:
37 37 return ctx[f]
38 38
39 39
40 40 # Merge state record types. See ``mergestate`` docs for more.
41 41
42 42 ####
43 43 # merge records which records metadata about a current merge
44 44 # exists only once in a mergestate
45 45 #####
46 46 RECORD_LOCAL = b'L'
47 47 RECORD_OTHER = b'O'
48 48 # record merge labels
49 49 RECORD_LABELS = b'l'
50 50
51 51 #####
52 52 # record extra information about files, with one entry containing info about one
53 53 # file. Hence, multiple of them can exists
54 54 #####
55 55 RECORD_FILE_VALUES = b'f'
56 56
57 57 #####
58 58 # merge records which represents state of individual merges of files/folders
59 59 # These are top level records for each entry containing merge related info.
60 60 # Each record of these has info about one file. Hence multiple of them can
61 61 # exists
62 62 #####
63 63 RECORD_MERGED = b'F'
64 64 RECORD_CHANGEDELETE_CONFLICT = b'C'
65 65 # the path was dir on one side of merge and file on another
66 66 RECORD_PATH_CONFLICT = b'P'
67 67
68 68 #####
69 69 # possible state which a merge entry can have. These are stored inside top-level
70 70 # merge records mentioned just above.
71 71 #####
72 72 MERGE_RECORD_UNRESOLVED = b'u'
73 73 MERGE_RECORD_RESOLVED = b'r'
74 74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 76 # represents that the file was automatically merged in favor
77 77 # of other version. This info is used on commit.
78 78 # This is now deprecated and commit related information is now
79 79 # stored in RECORD_FILE_VALUES
80 80 MERGE_RECORD_MERGED_OTHER = b'o'
81 81
82 82 #####
83 83 # top level record which stores other unknown records. Multiple of these can
84 84 # exist
85 85 #####
86 86 RECORD_OVERRIDE = b't'
87 87
88 88 #####
89 89 # legacy records which are no longer used but kept to prevent breaking BC
90 90 #####
91 91 # This record was released in 5.4 and usage was removed in 5.5
92 92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
93 93 # This record was released in 3.7 and usage was removed in 5.6
94 94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
95 95 # This record was released in 3.7 and usage was removed in 5.6
96 96 LEGACY_MERGE_DRIVER_STATE = b'm'
97 97 # This record was released in 3.7 and usage was removed in 5.6
98 98 LEGACY_MERGE_DRIVER_MERGE = b'D'
99 99
100 100
101 101 ACTION_FORGET = b'f'
102 102 ACTION_REMOVE = b'r'
103 103 ACTION_ADD = b'a'
104 104 ACTION_GET = b'g'
105 105 ACTION_PATH_CONFLICT = b'p'
106 106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
107 107 ACTION_ADD_MODIFIED = b'am'
108 108 ACTION_CREATED = b'c'
109 109 ACTION_DELETED_CHANGED = b'dc'
110 110 ACTION_CHANGED_DELETED = b'cd'
111 111 ACTION_MERGE = b'm'
112 112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
113 113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
114 114 ACTION_KEEP = b'k'
115 115 # the file was absent on local side before merge and we should
116 116 # keep it absent (absent means file not present, it can be a result
117 117 # of file deletion, rename etc.)
118 118 ACTION_KEEP_ABSENT = b'ka'
119 119 # the file is absent on the ancestor and remote side of the merge
120 120 # hence this file is new and we should keep it
121 121 ACTION_KEEP_NEW = b'kn'
122 122 ACTION_EXEC = b'e'
123 123 ACTION_CREATED_MERGE = b'cm'
124 124
125 125 # actions which are no op
126 126 NO_OP_ACTIONS = (
127 127 ACTION_KEEP,
128 128 ACTION_KEEP_ABSENT,
129 129 ACTION_KEEP_NEW,
130 130 )
131 131
132 132
133 133 class _mergestate_base(object):
134 134 """track 3-way merge state of individual files
135 135
136 136 The merge state is stored on disk when needed. Two files are used: one with
137 137 an old format (version 1), and one with a new format (version 2). Version 2
138 138 stores a superset of the data in version 1, including new kinds of records
139 139 in the future. For more about the new format, see the documentation for
140 140 `_readrecordsv2`.
141 141
142 142 Each record can contain arbitrary content, and has an associated type. This
143 143 `type` should be a letter. If `type` is uppercase, the record is mandatory:
144 144 versions of Mercurial that don't support it should abort. If `type` is
145 145 lowercase, the record can be safely ignored.
146 146
147 147 Currently known records:
148 148
149 149 L: the node of the "local" part of the merge (hexified version)
150 150 O: the node of the "other" part of the merge (hexified version)
151 151 F: a file to be merged entry
152 152 C: a change/delete or delete/change conflict
153 153 P: a path conflict (file vs directory)
154 154 f: a (filename, dictionary) tuple of optional values for a given file
155 155 l: the labels for the parts of the merge.
156 156
157 157 Merge record states (stored in self._state, indexed by filename):
158 158 u: unresolved conflict
159 159 r: resolved conflict
160 160 pu: unresolved path conflict (file conflicts with directory)
161 161 pr: resolved path conflict
162 162 o: file was merged in favor of other parent of merge (DEPRECATED)
163 163
164 164 The resolve command transitions between 'u' and 'r' for conflicts and
165 165 'pu' and 'pr' for path conflicts.
166 166 """
167 167
168 168 def __init__(self, repo):
169 169 """Initialize the merge state.
170 170
171 171 Do not use this directly! Instead call read() or clean()."""
172 172 self._repo = repo
173 173 self._state = {}
174 174 self._stateextras = collections.defaultdict(dict)
175 175 self._local = None
176 176 self._other = None
177 177 self._labels = None
178 178 # contains a mapping of form:
179 179 # {filename : (merge_return_value, action_to_be_performed}
180 180 # these are results of re-running merge process
181 181 # this dict is used to perform actions on dirstate caused by re-running
182 182 # the merge
183 183 self._results = {}
184 184 self._dirty = False
185 185
186 186 def reset(self):
187 187 pass
188 188
189 189 def start(self, node, other, labels=None):
190 190 self._local = node
191 191 self._other = other
192 192 self._labels = labels
193 193
194 194 @util.propertycache
195 195 def local(self):
196 196 if self._local is None:
197 197 msg = b"local accessed but self._local isn't set"
198 198 raise error.ProgrammingError(msg)
199 199 return self._local
200 200
201 201 @util.propertycache
202 202 def localctx(self):
203 203 return self._repo[self.local]
204 204
205 205 @util.propertycache
206 206 def other(self):
207 207 if self._other is None:
208 208 msg = b"other accessed but self._other isn't set"
209 209 raise error.ProgrammingError(msg)
210 210 return self._other
211 211
212 212 @util.propertycache
213 213 def otherctx(self):
214 214 return self._repo[self.other]
215 215
216 216 def active(self):
217 217 """Whether mergestate is active.
218 218
219 219 Returns True if there appears to be mergestate. This is a rough proxy
220 220 for "is a merge in progress."
221 221 """
222 222 return bool(self._local) or bool(self._state)
223 223
224 224 def commit(self):
225 225 """Write current state on disk (if necessary)"""
226 226
227 227 @staticmethod
228 228 def getlocalkey(path):
229 229 """hash the path of a local file context for storage in the .hg/merge
230 230 directory."""
231 231
232 232 return hex(hashutil.sha1(path).digest())
233 233
234 234 def _make_backup(self, fctx, localkey):
235 235 raise NotImplementedError()
236 236
237 237 def _restore_backup(self, fctx, localkey, flags):
238 238 raise NotImplementedError()
239 239
240 240 def add(self, fcl, fco, fca, fd):
241 241 """add a new (potentially?) conflicting file the merge state
242 242 fcl: file context for local,
243 243 fco: file context for remote,
244 244 fca: file context for ancestors,
245 245 fd: file path of the resulting merge.
246 246
247 247 note: also write the local version to the `.hg/merge` directory.
248 248 """
249 249 if fcl.isabsent():
250 250 localkey = self._repo.nodeconstants.nullhex
251 251 else:
252 252 localkey = mergestate.getlocalkey(fcl.path())
253 253 self._make_backup(fcl, localkey)
254 254 self._state[fd] = [
255 255 MERGE_RECORD_UNRESOLVED,
256 256 localkey,
257 257 fcl.path(),
258 258 fca.path(),
259 259 hex(fca.filenode()),
260 260 fco.path(),
261 261 hex(fco.filenode()),
262 262 fcl.flags(),
263 263 ]
264 264 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
265 265 self._dirty = True
266 266
267 267 def addpathconflict(self, path, frename, forigin):
268 268 """add a new conflicting path to the merge state
269 269 path: the path that conflicts
270 270 frename: the filename the conflicting file was renamed to
271 271 forigin: origin of the file ('l' or 'r' for local/remote)
272 272 """
273 273 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
274 274 self._dirty = True
275 275
276 276 def addcommitinfo(self, path, data):
277 277 """stores information which is required at commit
278 278 into _stateextras"""
279 279 self._stateextras[path].update(data)
280 280 self._dirty = True
281 281
282 282 def __contains__(self, dfile):
283 283 return dfile in self._state
284 284
285 285 def __getitem__(self, dfile):
286 286 return self._state[dfile][0]
287 287
288 288 def __iter__(self):
289 289 return iter(sorted(self._state))
290 290
291 291 def files(self):
292 292 return self._state.keys()
293 293
294 294 def mark(self, dfile, state):
295 295 self._state[dfile][0] = state
296 296 self._dirty = True
297 297
298 298 def unresolved(self):
299 299 """Obtain the paths of unresolved files."""
300 300
301 301 for f, entry in pycompat.iteritems(self._state):
302 302 if entry[0] in (
303 303 MERGE_RECORD_UNRESOLVED,
304 304 MERGE_RECORD_UNRESOLVED_PATH,
305 305 ):
306 306 yield f
307 307
308 308 def allextras(self):
309 309 """return all extras information stored with the mergestate"""
310 310 return self._stateextras
311 311
312 312 def extras(self, filename):
313 313 """return extras stored with the mergestate for the given filename"""
314 314 return self._stateextras[filename]
315 315
316 316 def resolve(self, dfile, wctx):
317 317 """run merge process for dfile
318 318
319 319 Returns the exit code of the merge."""
320 320 if self[dfile] in (
321 321 MERGE_RECORD_RESOLVED,
322 322 LEGACY_RECORD_DRIVER_RESOLVED,
323 323 ):
324 324 return 0
325 325 stateentry = self._state[dfile]
326 326 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
327 327 octx = self._repo[self._other]
328 328 extras = self.extras(dfile)
329 329 anccommitnode = extras.get(b'ancestorlinknode')
330 330 if anccommitnode:
331 331 actx = self._repo[anccommitnode]
332 332 else:
333 333 actx = None
334 334 fcd = _filectxorabsent(localkey, wctx, dfile)
335 335 fco = _filectxorabsent(onode, octx, ofile)
336 336 # TODO: move this to filectxorabsent
337 337 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
338 338 # "premerge" x flags
339 339 flo = fco.flags()
340 340 fla = fca.flags()
341 341 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
342 342 if fca.rev() == nullrev and flags != flo:
343 343 self._repo.ui.warn(
344 344 _(
345 345 b'warning: cannot merge flags for %s '
346 346 b'without common ancestor - keeping local flags\n'
347 347 )
348 348 % afile
349 349 )
350 350 elif flags == fla:
351 351 flags = flo
352 352 # restore local
353 353 if localkey != self._repo.nodeconstants.nullhex:
354 354 self._restore_backup(wctx[dfile], localkey, flags)
355 355 else:
356 356 wctx[dfile].remove(ignoremissing=True)
357 complete, merge_ret, deleted = filemerge.filemerge(
357 merge_ret, deleted = filemerge.filemerge(
358 358 self._repo,
359 359 wctx,
360 360 self._local,
361 361 lfile,
362 362 fcd,
363 363 fco,
364 364 fca,
365 365 labels=self._labels,
366 366 )
367 367 if merge_ret is None:
368 368 # If return value of merge is None, then there are no real conflict
369 369 del self._state[dfile]
370 370 self._dirty = True
371 371 elif not merge_ret:
372 372 self.mark(dfile, MERGE_RECORD_RESOLVED)
373 373
374 if complete:
375 action = None
376 if deleted:
377 if fcd.isabsent():
378 # dc: local picked. Need to drop if present, which may
379 # happen on re-resolves.
380 action = ACTION_FORGET
374 action = None
375 if deleted:
376 if fcd.isabsent():
377 # dc: local picked. Need to drop if present, which may
378 # happen on re-resolves.
379 action = ACTION_FORGET
380 else:
381 # cd: remote picked (or otherwise deleted)
382 action = ACTION_REMOVE
383 else:
384 if fcd.isabsent(): # dc: remote picked
385 action = ACTION_GET
386 elif fco.isabsent(): # cd: local picked
387 if dfile in self.localctx:
388 action = ACTION_ADD_MODIFIED
381 389 else:
382 # cd: remote picked (or otherwise deleted)
383 action = ACTION_REMOVE
384 else:
385 if fcd.isabsent(): # dc: remote picked
386 action = ACTION_GET
387 elif fco.isabsent(): # cd: local picked
388 if dfile in self.localctx:
389 action = ACTION_ADD_MODIFIED
390 else:
391 action = ACTION_ADD
392 # else: regular merges (no action necessary)
393 self._results[dfile] = merge_ret, action
390 action = ACTION_ADD
391 # else: regular merges (no action necessary)
392 self._results[dfile] = merge_ret, action
394 393
395 394 return merge_ret
396 395
397 396 def counts(self):
398 397 """return counts for updated, merged and removed files in this
399 398 session"""
400 399 updated, merged, removed = 0, 0, 0
401 400 for r, action in pycompat.itervalues(self._results):
402 401 if r is None:
403 402 updated += 1
404 403 elif r == 0:
405 404 if action == ACTION_REMOVE:
406 405 removed += 1
407 406 else:
408 407 merged += 1
409 408 return updated, merged, removed
410 409
411 410 def unresolvedcount(self):
412 411 """get unresolved count for this merge (persistent)"""
413 412 return len(list(self.unresolved()))
414 413
415 414 def actions(self):
416 415 """return lists of actions to perform on the dirstate"""
417 416 actions = {
418 417 ACTION_REMOVE: [],
419 418 ACTION_FORGET: [],
420 419 ACTION_ADD: [],
421 420 ACTION_ADD_MODIFIED: [],
422 421 ACTION_GET: [],
423 422 }
424 423 for f, (r, action) in pycompat.iteritems(self._results):
425 424 if action is not None:
426 425 actions[action].append((f, None, b"merge result"))
427 426 return actions
428 427
429 428
430 429 class mergestate(_mergestate_base):
431 430
432 431 statepathv1 = b'merge/state'
433 432 statepathv2 = b'merge/state2'
434 433
435 434 @staticmethod
436 435 def clean(repo):
437 436 """Initialize a brand new merge state, removing any existing state on
438 437 disk."""
439 438 ms = mergestate(repo)
440 439 ms.reset()
441 440 return ms
442 441
443 442 @staticmethod
444 443 def read(repo):
445 444 """Initialize the merge state, reading it from disk."""
446 445 ms = mergestate(repo)
447 446 ms._read()
448 447 return ms
449 448
450 449 def _read(self):
451 450 """Analyse each record content to restore a serialized state from disk
452 451
453 452 This function process "record" entry produced by the de-serialization
454 453 of on disk file.
455 454 """
456 455 unsupported = set()
457 456 records = self._readrecords()
458 457 for rtype, record in records:
459 458 if rtype == RECORD_LOCAL:
460 459 self._local = bin(record)
461 460 elif rtype == RECORD_OTHER:
462 461 self._other = bin(record)
463 462 elif rtype == LEGACY_MERGE_DRIVER_STATE:
464 463 pass
465 464 elif rtype in (
466 465 RECORD_MERGED,
467 466 RECORD_CHANGEDELETE_CONFLICT,
468 467 RECORD_PATH_CONFLICT,
469 468 LEGACY_MERGE_DRIVER_MERGE,
470 469 LEGACY_RECORD_RESOLVED_OTHER,
471 470 ):
472 471 bits = record.split(b'\0')
473 472 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
474 473 # and we now store related information in _stateextras, so
475 474 # lets write to _stateextras directly
476 475 if bits[1] == MERGE_RECORD_MERGED_OTHER:
477 476 self._stateextras[bits[0]][b'filenode-source'] = b'other'
478 477 else:
479 478 self._state[bits[0]] = bits[1:]
480 479 elif rtype == RECORD_FILE_VALUES:
481 480 filename, rawextras = record.split(b'\0', 1)
482 481 extraparts = rawextras.split(b'\0')
483 482 extras = {}
484 483 i = 0
485 484 while i < len(extraparts):
486 485 extras[extraparts[i]] = extraparts[i + 1]
487 486 i += 2
488 487
489 488 self._stateextras[filename] = extras
490 489 elif rtype == RECORD_LABELS:
491 490 labels = record.split(b'\0', 2)
492 491 self._labels = [l for l in labels if len(l) > 0]
493 492 elif not rtype.islower():
494 493 unsupported.add(rtype)
495 494
496 495 if unsupported:
497 496 raise error.UnsupportedMergeRecords(unsupported)
498 497
499 498 def _readrecords(self):
500 499 """Read merge state from disk and return a list of record (TYPE, data)
501 500
502 501 We read data from both v1 and v2 files and decide which one to use.
503 502
504 503 V1 has been used by version prior to 2.9.1 and contains less data than
505 504 v2. We read both versions and check if no data in v2 contradicts
506 505 v1. If there is not contradiction we can safely assume that both v1
507 506 and v2 were written at the same time and use the extract data in v2. If
508 507 there is contradiction we ignore v2 content as we assume an old version
509 508 of Mercurial has overwritten the mergestate file and left an old v2
510 509 file around.
511 510
512 511 returns list of record [(TYPE, data), ...]"""
513 512 v1records = self._readrecordsv1()
514 513 v2records = self._readrecordsv2()
515 514 if self._v1v2match(v1records, v2records):
516 515 return v2records
517 516 else:
518 517 # v1 file is newer than v2 file, use it
519 518 # we have to infer the "other" changeset of the merge
520 519 # we cannot do better than that with v1 of the format
521 520 mctx = self._repo[None].parents()[-1]
522 521 v1records.append((RECORD_OTHER, mctx.hex()))
523 522 # add placeholder "other" file node information
524 523 # nobody is using it yet so we do not need to fetch the data
525 524 # if mctx was wrong `mctx[bits[-2]]` may fail.
526 525 for idx, r in enumerate(v1records):
527 526 if r[0] == RECORD_MERGED:
528 527 bits = r[1].split(b'\0')
529 528 bits.insert(-2, b'')
530 529 v1records[idx] = (r[0], b'\0'.join(bits))
531 530 return v1records
532 531
533 532 def _v1v2match(self, v1records, v2records):
534 533 oldv2 = set() # old format version of v2 record
535 534 for rec in v2records:
536 535 if rec[0] == RECORD_LOCAL:
537 536 oldv2.add(rec)
538 537 elif rec[0] == RECORD_MERGED:
539 538 # drop the onode data (not contained in v1)
540 539 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
541 540 for rec in v1records:
542 541 if rec not in oldv2:
543 542 return False
544 543 else:
545 544 return True
546 545
547 546 def _readrecordsv1(self):
548 547 """read on disk merge state for version 1 file
549 548
550 549 returns list of record [(TYPE, data), ...]
551 550
552 551 Note: the "F" data from this file are one entry short
553 552 (no "other file node" entry)
554 553 """
555 554 records = []
556 555 try:
557 556 f = self._repo.vfs(self.statepathv1)
558 557 for i, l in enumerate(f):
559 558 if i == 0:
560 559 records.append((RECORD_LOCAL, l[:-1]))
561 560 else:
562 561 records.append((RECORD_MERGED, l[:-1]))
563 562 f.close()
564 563 except IOError as err:
565 564 if err.errno != errno.ENOENT:
566 565 raise
567 566 return records
568 567
569 568 def _readrecordsv2(self):
570 569 """read on disk merge state for version 2 file
571 570
572 571 This format is a list of arbitrary records of the form:
573 572
574 573 [type][length][content]
575 574
576 575 `type` is a single character, `length` is a 4 byte integer, and
577 576 `content` is an arbitrary byte sequence of length `length`.
578 577
579 578 Mercurial versions prior to 3.7 have a bug where if there are
580 579 unsupported mandatory merge records, attempting to clear out the merge
581 580 state with hg update --clean or similar aborts. The 't' record type
582 581 works around that by writing out what those versions treat as an
583 582 advisory record, but later versions interpret as special: the first
584 583 character is the 'real' record type and everything onwards is the data.
585 584
586 585 Returns list of records [(TYPE, data), ...]."""
587 586 records = []
588 587 try:
589 588 f = self._repo.vfs(self.statepathv2)
590 589 data = f.read()
591 590 off = 0
592 591 end = len(data)
593 592 while off < end:
594 593 rtype = data[off : off + 1]
595 594 off += 1
596 595 length = _unpack(b'>I', data[off : (off + 4)])[0]
597 596 off += 4
598 597 record = data[off : (off + length)]
599 598 off += length
600 599 if rtype == RECORD_OVERRIDE:
601 600 rtype, record = record[0:1], record[1:]
602 601 records.append((rtype, record))
603 602 f.close()
604 603 except IOError as err:
605 604 if err.errno != errno.ENOENT:
606 605 raise
607 606 return records
608 607
609 608 def commit(self):
610 609 if self._dirty:
611 610 records = self._makerecords()
612 611 self._writerecords(records)
613 612 self._dirty = False
614 613
    def _makerecords(self):
        """Serialize the in-memory merge state into (TYPE, data) records.

        The local and other nodes always come first (the v1 writer asserts
        that the first record is RECORD_LOCAL); per-file state, per-file
        extras, and labels follow.
        """
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif (
                v[1] == self._repo.nodeconstants.nullhex
                or v[6] == self._repo.nodeconstants.nullhex
            ):
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        # Per-file extras go into their own records as NUL-separated
        # alternating key/value fields; sorted by filename so the output
        # is deterministic regardless of dict iteration order.
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            # Labels (presumably the conflict-marker names — confirm with
            # the RECORD_LABELS readers) are NUL-joined into one record.
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records
659 658
660 659 def _writerecords(self, records):
661 660 """Write current state on disk (both v1 and v2)"""
662 661 self._writerecordsv1(records)
663 662 self._writerecordsv2(records)
664 663
665 664 def _writerecordsv1(self, records):
666 665 """Write current state on disk in a version 1 file"""
667 666 f = self._repo.vfs(self.statepathv1, b'wb')
668 667 irecords = iter(records)
669 668 lrecords = next(irecords)
670 669 assert lrecords[0] == RECORD_LOCAL
671 670 f.write(hex(self._local) + b'\n')
672 671 for rtype, data in irecords:
673 672 if rtype == RECORD_MERGED:
674 673 f.write(b'%s\n' % _droponode(data))
675 674 f.close()
676 675
677 676 def _writerecordsv2(self, records):
678 677 """Write current state on disk in a version 2 file
679 678
680 679 See the docstring for _readrecordsv2 for why we use 't'."""
681 680 # these are the records that all version 2 clients can read
682 681 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
683 682 f = self._repo.vfs(self.statepathv2, b'wb')
684 683 for key, data in records:
685 684 assert len(key) == 1
686 685 if key not in allowlist:
687 686 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
688 687 format = b'>sI%is' % len(data)
689 688 f.write(_pack(format, key, len(data), data))
690 689 f.close()
691 690
692 691 def _make_backup(self, fctx, localkey):
693 692 self._repo.vfs.write(b'merge/' + localkey, fctx.data())
694 693
695 694 def _restore_backup(self, fctx, localkey, flags):
696 695 with self._repo.vfs(b'merge/' + localkey) as f:
697 696 fctx.write(f.read(), flags)
698 697
699 698 def reset(self):
700 699 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
701 700
702 701
class memmergestate(_mergestate_base):
    """Merge state variant that keeps file backups in memory.

    The base class's hooks write backups under ``merge/`` in the repo vfs;
    this subclass overrides them to keep the raw bytes in ``self._backups``
    instead, so no on-disk backup files are created.
    """

    def __init__(self, repo):
        super(memmergestate, self).__init__(repo)
        # maps localkey -> raw file contents captured by _make_backup()
        self._backups = {}

    def _make_backup(self, fctx, localkey):
        # Keep a snapshot of the file's current contents in memory.
        self._backups[localkey] = fctx.data()

    def _restore_backup(self, fctx, localkey, flags):
        # Write the in-memory snapshot back through the file context.
        fctx.write(self._backups[localkey], flags)
713 712
714 713
def recordupdates(repo, actions, branchmerge, getfiledata):
    """record merge actions to the dirstate

    ``actions`` maps action constants (ACTION_REMOVE, ACTION_GET, ...) to
    lists of ``(filename, args, message)`` tuples; each section below
    handles one action type.  ``branchmerge`` selects between real-merge
    bookkeeping (p2 information is recorded) and plain-update bookkeeping.
    ``getfiledata``, when set, maps filenames to parentfiledata for files
    fetched by ACTION_GET (presumably cached stat data — confirm with the
    callers).  Section order matters: removals and forgets run first.
    """
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
        else:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

    # resolve path conflicts: the conflicting file f0 was renamed to f.
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0, origf0) = args
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
        else:
            repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.update_file(
                f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
            )
        else:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)

    # exec change: contents unchanged but the file must be re-stat'ed,
    # hence possibly_dirty.
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.update_file(
            f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
        )

    # keep: nothing changed for these files, nothing to record.
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # keep deleted: likewise a dirstate no-op.
    for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
        pass

    # keep new: likewise a dirstate no-op.
    for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
        pass

    # get: file content was taken from the other side.
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            # tracked in p1 can be True also but update_file should not care
            old_entry = repo.dirstate.get_entry(f)
            p1_tracked = old_entry.any_tracked and not old_entry.added
            repo.dirstate.update_file(
                f,
                p1_tracked=p1_tracked,
                wc_tracked=True,
                p2_info=True,
            )
        else:
            parentfiledata = getfiledata[f] if getfiledata else None
            repo.dirstate.update_file(
                f,
                p1_tracked=True,
                wc_tracked=True,
                parentfiledata=parentfiledata,
            )

    # merge: args carry the local/other/ancestor paths, whether the local
    # file moved, and the ancestor node.
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            p1_tracked = f1 == f
            repo.dirstate.update_file(
                f,
                p1_tracked=p1_tracked,
                wc_tracked=True,
                p2_info=True,
            )
            if f1 != f2:  # copy/rename
                if move:
                    repo.dirstate.update_file(
                        f1, p1_tracked=True, wc_tracked=False
                    )
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f:  # file not locally copied/moved
                repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
            if move:
                repo.dirstate.update_file(
                    f1, p1_tracked=False, wc_tracked=False
                )

    # directory rename, move local: f0 was moved to f by a directory rename.
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
            repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
            repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)

    # directory rename, get: f was obtained at its renamed location; the
    # source f0 still exists on the other side, so it is not untracked here.
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
General Comments 0
You need to be logged in to leave comments. Login now