##// END OF EJS Templates
merge-actions: make merge action a full featured object...
marmoute -
r49560:9bc86adf default
parent child Browse files
Show More
@@ -1,1866 +1,1866 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial.pycompat import open
18 18
19 19 from mercurial.hgweb import webcommands
20 20
21 21 from mercurial import (
22 22 archival,
23 23 cmdutil,
24 24 copies as copiesmod,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 exthelper,
29 29 filemerge,
30 30 hg,
31 31 logcmdutil,
32 32 match as matchmod,
33 33 merge,
34 34 mergestate as mergestatemod,
35 35 pathutil,
36 36 pycompat,
37 37 scmutil,
38 38 smartset,
39 39 subrepo,
40 40 url as urlmod,
41 41 util,
42 42 )
43 43
44 44 from mercurial.upgrade_utils import (
45 45 actions as upgrade_actions,
46 46 )
47 47
48 48 from . import (
49 49 lfcommands,
50 50 lfutil,
51 51 storefactory,
52 52 )
53 53
# Short aliases for the merge actions this module emits below.
ACTION_ADD = mergestatemod.ACTION_ADD
ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
ACTION_GET = mergestatemod.ACTION_GET
ACTION_KEEP = mergestatemod.ACTION_KEEP
ACTION_REMOVE = mergestatemod.ACTION_REMOVE

eh = exthelper.exthelper()

lfstatus = lfutil.lfstatus

# Custom merge action: mark a largefile as removed in the dirstate without
# deleting the working-copy file itself (consumed by mergerecordupdates).
# Note: this must be a MergeAction instance (not a bare bytes literal) and
# the action name is a bytes string, matching the rest of this module.
MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction(b'lfmr')
65 65
66 66 # -- Utility functions: commonly/repeatedly needed functionality ---------------
67 67
68 68
def composelargefilematcher(match, manifest):
    """Return a copy of ``match`` restricted to the largefiles it matched.

    A file counts as a largefile when its standin is present in
    ``manifest``.
    """
    matcher = copy.copy(match)

    def islargefile(f):
        return lfutil.standin(f) in manifest

    matcher._files = [f for f in matcher._files if islargefile(f)]
    matcher._fileset = set(matcher._files)
    matcher.always = lambda: False
    innermatchfn = matcher.matchfn
    matcher.matchfn = lambda f: islargefile(f) and innermatchfn(f)
    return matcher
80 80
81 81
def composenormalfilematcher(match, manifest, exclude=None):
    """Return a copy of ``match`` that refuses standins and largefiles.

    ``exclude`` optionally names additional files to filter out.
    """
    skipped = set()
    if exclude is not None:
        skipped.update(exclude)

    matcher = copy.copy(match)

    def isnormal(f):
        # Reject standins, files whose standin is tracked, and exclusions.
        if lfutil.isstandin(f):
            return False
        if lfutil.standin(f) in manifest:
            return False
        return f not in skipped

    matcher._files = [f for f in matcher._files if isnormal(f)]
    matcher._fileset = set(matcher._files)
    matcher.always = lambda: False
    innermatchfn = matcher.matchfn
    matcher.matchfn = lambda f: isnormal(f) and innermatchfn(f)
    return matcher
97 97
98 98
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    """Add matched working-copy files as largefiles.

    A file is treated as a largefile when ``--large`` was passed, when its
    size reaches the configured minimum, or when it matches the configured
    ``largefiles.patterns``.  Returns a ``(added, bad)`` pair of filename
    lists.
    """
    large = opts.get('large')
    # Minimum size (in MiB) above which files are auto-added as largefiles.
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    # badmatch with a no-op callback: suppress "file not found" complaints
    # during the walk; the real add below reports problems.
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # The standin is written with an empty hash here; the real
                # hash is presumably filled in at commit time — TODO confirm.
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                lfdirstate.set_tracked(f)
            lfdirstate.write(repo.currenttransaction())
            # Translate standins the add rejected back to largefile names.
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

    added = [f for f in lfnames if f not in bad]
    return added, bad
169 169
170 170
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    """Remove the largefiles selected by ``matcher``.

    With ``--after`` only files already deleted from the working copy are
    untracked; otherwise clean files are removed too and modified/added
    ones only produce warnings.  Returns a non-zero int when any file was
    warned about (mirrors the normal remove's exit-status convention).
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # Emit msg per file; return 1 if any file was warned about.
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        # From here on, operate on the standins rather than the largefiles.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfdirstate.set_untracked(lfutil.splitstandin(f))

        lfdirstate.write(repo.currenttransaction())

    return result
241 241
242 242
243 243 # For overriding mercurial.hgweb.webcommands so that largefiles will
244 244 # appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    """Map a standin path back to its largefile name for hgweb display."""
    stripped = lfutil.splitstandin(path)
    return stripped if stripped else path
248 248
249 249
250 250 # -- Wrappers: modify existing commands --------------------------------
251 251
252 252
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    """Reject the contradictory --normal/--large combination, then delegate."""
    if opts.get('large') and opts.get('normal'):
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
273 273
274 274
@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Add files, routing large ones through the largefiles machinery.

    ``--normal`` bypasses this override entirely.  Returns the list of
    files that could not be added (normal failures plus largefile ones).
    """
    if opts.get('normal'):
        # The --normal flag short circuits this override
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
    bad.extend(lbad)
    return bad
289 289
290 290
@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    """Remove normal files via ``orig``, then matched largefiles as well."""
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    lfresult = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    return lfresult or normalresult
313 313
314 314
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    """Run subrepo status with largefiles status reporting enabled."""
    subrepo_repo = repo._repo
    with lfstatus(subrepo_repo):
        return orig(repo, rev2, **opts)
319 319
320 320
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run the status command with largefiles status reporting enabled."""
    with lfstatus(repo):
        result = orig(ui, repo, *pats, **opts)
    return result
325 325
326 326
@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    """Check subrepo dirtiness with largefiles status reporting enabled."""
    subrepo_repo = repo._repo
    with lfstatus(subrepo_repo):
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
331 331
332 332
@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    """Run log so that named largefiles are resolved via their standins."""

    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Rewrite one pattern so its path part points at the standin;
            # fileset patterns ('set:') are left untouched.
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            # Accept a file if either it or its largefile name matches.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        # Case (2): build the diff-file matcher from the unmodified
        # matchandpats so standin rewriting does not leak into diffs.
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
452 452
453 453
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Run the stock verify, then optionally verify largefiles as well."""
    large = opts.pop('large', False)
    allrevs = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or allrevs or contents:
        result = result or lfcommands.verifylfiles(ui, repo, allrevs, contents)
    return result
486 486
487 487
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """With --large, run debugstate against the largefiles dirstate."""
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return

    class fakerepo(object):
        # Minimal stand-in exposing only the attribute debugstate reads.
        dirstate = lfutil.openlfdirstate(ui, repo)

    orig(ui, fakerepo, *pats, **opts)
502 502
503 503
# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    """Ignore collisions with working-copy files that are really largefiles."""
    normalized = repo.dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
519 519
520 520
521 521 # The manifest merge handles conflicts on the manifest level. We want
522 522 # to handle changes in largefile-ness of files at this level too.
523 523 #
524 524 # The strategy is to run the original calculateupdates and then process
525 525 # the action list it outputs. There are two cases we need to deal with:
526 526 #
527 527 # 1. Normal file in p1, largefile in p2. Here the largefile is
528 528 # detected via its standin file, which will enter the working copy
529 529 # with a "get" action. It is not "merge" since the standin is all
530 530 # Mercurial is concerned with at this level -- the link to the
531 531 # existing normal file is not relevant here.
532 532 #
533 533 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
534 534 # since the largefile will be present in the working copy and
535 535 # different from the normal file in p2. Mercurial therefore
536 536 # triggers a merge action.
537 537 #
538 538 # In both cases, we prompt the user and emit new actions to either
539 539 # remove the standin (if the normal file was kept) or to remove the
540 540 # normal file and get the standin (if the largefile was kept). The
541 541 # default prompt answer is to use the largefile version since it was
542 542 # presumably changed on purpose.
543 543 #
544 544 # Finally, the merge.applyupdates function will then take care of
545 545 # writing the files into the working copy and lfcommands.updatelfiles
546 546 # will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Post-process merge actions for files whose largefile-ness changed.

    See the comment block above for the two cases handled; returns the
    (possibly amended) merge result from the original calculateupdates.
    """
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        # Plain overwrite: no conflict resolution is needed.
        return mresult

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))

        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
            if sm == ACTION_DELETED_CHANGED:
                # Rewrite the deleted/changed args into 'get'-style args.
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(
                    lfile, ACTION_REMOVE, None, b'replaced by standin'
                )
                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        ACTION_KEEP,
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        ACTION_REMOVE,
                        None,
                        b'replaced by non-standin',
                    )
        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
            if lm == ACTION_DELETED_CHANGED:
                # Rewrite the deleted/changed args into 'get'-style args.
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        ACTION_KEEP,
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(
                        standin, ACTION_KEEP, None, b'replaces standin'
                    )
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    ACTION_REMOVE,
                    None,
                    b'replaced by non-standin',
                )

    return mresult
656 656
657 657
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Record 'lfmr' (largefile mark-removed) actions in both dirstates
    before the stock recordupdates runs."""
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        with lfdirstate.parentchange():
            for lfile, args, msg in actions[
                MERGE_ACTION_LARGEFILE_MARK_REMOVED
            ]:
                # this should be executed before 'orig', to execute 'remove'
                # before all other actions
                repo.dirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=False
                )
                # make sure lfile doesn't get synclfdirstate'd as normal
                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
            lfdirstate.write(repo.currenttransaction())

    return orig(repo, actions, branchmerge, getfiledata)
676 676
677 677
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Merge a largefile standin, prompting only on real hash conflicts.

    Non-standins (or merges where a side is absent) defer to the stock
    filemerge.  Returns the ``(return-code, deleted)`` pair filemerge
    callers expect.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    # Compare the largefile hashes stored in the three standins.
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    # Take the other side when it differs from both ancestor and local, and
    # either local is unchanged from the ancestor or the user picks "other".
    if (
        ohash != ahash
        and ohash != dhash
        and (
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0, False
710 710
711 711
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Translate standin names in a copy map back to largefile names."""
    copies = orig(ctx1, ctx2, match=match)

    def delift(name):
        # Map a standin back to its largefile name; leave others alone.
        return lfutil.splitstandin(name) or name

    return {
        delift(src): delift(dst)
        for src, dst in pycompat.iteritems(copies)
    }
721 721
722 722
723 723 # Copy first changes the matchers to match standins instead of
724 724 # largefiles. Then it overrides util.copyfile in that function it
725 725 # checks if the destination largefile already exists. It also keeps a
726 726 # list of copied files so that the largefiles can be copied and the
727 727 # dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename normal files, then copy/rename largefiles separately.

    Runs the original copy once restricted to normal files, then again
    restricted to standins, and finally copies/renames the largefiles
    themselves while updating the largefiles dirstate.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # Pass 1 matcher: normal files only.
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            # "no files to copy" here may just mean everything matched was
            # a largefile; remember it and decide after the second pass.
            if e.message != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # Absolute filesystem path of the standin for a cwd-relative path.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Pass 2 matcher: standins of matched largefiles only.
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            # Refuse to clobber an existing largefile unless --force, and
            # remember every (src, dest) pair so the largefiles themselves
            # can be copied/renamed after the standins.
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                # Recover the largefile names from the standin paths.
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.set_untracked(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                lfdirstate.set_tracked(destlfile)
        lfdirstate.write(repo.currenttransaction())
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    if nolfiles and nonormalfiles:
        # Both passes found nothing: re-raise the original complaint.
        raise error.Abort(_(b'no files to copy'))

    return result
889 889
890 890
891 891 # When the user calls revert, we have to be careful to not revert any
892 892 # changes to other largefiles accidentally. This means we have to keep
893 893 # track of the largefiles that are being reverted so we only pull down
894 894 # the necessary largefiles.
895 895 #
896 896 # Standins are only updated (to match the hash of largefiles) before
897 897 # commits. Update the standins then run the original revert, changing
898 898 # the matcher to hit standins instead of largefiles. Based on the
899 899 # resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    """Revert files, translating largefile names to standins and back.

    See the comment block above: standins are refreshed first, the stock
    revert runs against a standin-aware matcher, and the largefiles are
    then updated from the resulting standins.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        # Bring standins in sync with the working copy before reverting.
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
976 976
977 977
978 978 # after pulling changesets, we need to take some extra care to get
979 979 # largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Pull, then fetch largefiles for the revisions requested via --lfrev.

    The deprecated --all-largefiles maps to the pulled() revset.  Revsets
    are evaluated after the pull so the new changesets are visible to them.
    """
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
1020 1020
1021 1021
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs.

    The resolved revisions are picked up later by the wrapped
    exchange.pushoperation constructor.
    """
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        opargs = kwargs.setdefault('opargs', {})
        opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)
1041 1041
1042 1042
@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Wrap pushoperation so the extension-only lfrevs argument survives."""
    # Strip our keyword before calling the real constructor, then attach
    # the value to the resulting push operation object.
    revs = kwargs.pop('lfrevs', None)
    pushop = orig(*args, **kwargs)
    pushop.lfrevs = revs
    return pushop
1050 1050
1051 1051
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """
    # repo.firstpulled is only set by overridepull() while --lfrev is
    # being processed; outside that window this predicate is an error.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
1077 1077
1078 1078
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Refuse --all-largefiles clones to non-local destinations.

    Caching every largefile revision requires a local store to put it in.
    """
    dest_path = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(dest_path):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % dest_path
        )

    return orig(ui, source, dest, **opts)
1101 1101
1102 1102
@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone to honor --all-largefiles on the destination repo."""
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return result

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer.  Therefore the largefiles can't be downloaded and the
    # hgrc can't be updated.
    if not repo:
        return result

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point.  The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get(b'all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo)
        if missing != 0:
            return None

    return result
1127 1127
1128 1128
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Run rebase with largefiles commit hooks and in-memory mode disabled."""
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # Silence largefiles status output during the automated commits.
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        # In-memory merge is incompatible with largefiles (see mergeupdate).
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1145 1145
1146 1146
@eh.extsetup
def overriderebase(ui):
    """At extension setup time, force the rebase extension out of
    in-memory mode, which largefiles does not support."""
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        # rebase extension not enabled: nothing to wrap
        return

    def _dorebase(orig, *args, **kwargs):
        kwargs['inmemory'] = False
        return orig(*args, **kwargs)

    extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1160 1160
1161 1161
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run the archive command with largefiles status tracking enabled."""
    # The unfiltered repo is used so the lfstatus flag is visible to the
    # wrapped archival code (see overridearchive).
    with lfstatus(repo.unfiltered()):
        return orig(ui, repo.unfiltered(), dest, **opts)
1166 1166
1167 1167
@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    """hgweb archive wrapper: enable largefiles status reporting."""
    with lfstatus(web.repo):
        return orig(web)
1172 1172
1173 1173
@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Archive ``node``, writing real largefile contents in place of standins.

    Falls through to the original implementation unless lfstatus was set
    (see overridearchivecmd / hgwebarchive).
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # f is a standin: archive the largefile under its real name,
            # with data read from the store/cache rather than history.
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive.  That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1269 1269
1270 1270
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive an hg subrepo, substituting largefile contents for standins."""
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function.  That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1333 1333
1334 1334
1335 1335 # If a largefile is modified, the change is not reflected in its
1336 1336 # standin until a commit. cmdutil.bailifchanged() raises an exception
1337 1337 # if the repo has uncommitted changes. Wrap it to also check if
1338 1338 # largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Also abort when there are uncommitted largefile changes."""
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        s = repo.status()
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_(b'uncommitted changes'))
1346 1346
1347 1347
@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    """Compute post-commit status with largefiles reported as such."""
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1352 1352
1353 1353
@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Forget both normal files and largefiles.

    Normal files are delegated to the original implementation; matching
    largefiles have their standins untracked and removed.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1404 1404
1405 1405
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        key = (fn, lfhash)
        if key not in knowns:
            knowns.add(key)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if not lfhashes:
        return
    # Ask the remote store in a single batch which hashes it already has.
    lfexists = storefactory.openstore(repo, other).exists(lfhashes)
    for fn, lfhash in knowns:
        if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
            addfunc(fn, lfhash)
1430 1430
1431 1431
def outgoinghook(ui, repo, other, opts, missing):
    """Report which largefiles would be uploaded for `hg outgoing --large`."""
    if not opts.pop(b'large', None):
        return

    lfhashes = set()
    if ui.debugflag:
        # In debug mode remember every hash per file so they can be printed.
        toupload = {}

        def addfunc(fn, lfhash):
            toupload.setdefault(fn, []).append(lfhash)
            lfhashes.add(lfhash)

        def showhashes(fn):
            for lfhash in sorted(toupload[fn]):
                ui.debug(b' %s\n' % lfhash)

    else:
        toupload = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        def showhashes(fn):
            pass

    _getoutgoings(repo, other, missing, addfunc)

    if not toupload:
        ui.status(_(b'largefiles: no files to upload\n'))
    else:
        ui.status(
            _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
        )
        for file in sorted(toupload):
            ui.status(lfutil.splitstandin(file) + b'\n')
            showhashes(file)
        ui.status(b'\n')
1470 1470
1471 1471
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    """Add --large to outgoing; outgoinghook does the actual reporting."""
    return orig(*args, **kwargs)
1479 1479
1480 1480
def summaryremotehook(ui, repo, opts, changes):
    """summary --large hook: report outgoing largefile entity/file counts."""
    largeopt = opts.get(b'large', False)
    if changes is None:
        # First invocation: tell summary which remote checks are needed.
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )
1513 1513
1514 1514
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run summary with largefiles reported under their visible names."""
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1521 1521
1522 1522
@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    """addremove wrapper: handle largefiles before delegating to the
    original implementation for the remaining normal files."""
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s, mtime_boundary = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove.  Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how.  Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest.  Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)
1572 1572
1573 1573
1574 1574 # Calling purge with --all will cause the largefiles to be deleted.
1575 1575 # Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    """purge wrapper that hides largefiles from the status purge consumes,
    so `purge --all` does not delete them."""
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # Drop files tracked by the largefiles dirstate from the
        # unknown/ignored lists so purge leaves them alone.
        unknown = [
            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
        ]
        ignored = [
            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
        ]
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
1616 1616
1617 1617
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """rollback wrapper that restores standins to match the new parent."""
    with repo.wlock():
        before = repo.dirstate.parents()
        # Standins tracked before the rollback; any no longer tracked
        # afterwards are orphans and must be deleted from the working copy.
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate.get_entry(f).removed:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        return result
1649 1649
1650 1650
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with automated largefile commit hooks installed."""
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # Silence largefiles status output during the automated commits.
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1662 1662
1663 1663
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """cat wrapper that emits largefile contents for largefile paths,
    fetching them into the user cache first if necessary."""
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1734 1734
1735 1735
@eh.wrapfunction(merge, b'_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    """Wrap merge._update to keep standins and largefiles synchronized."""
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s, mtime_boundary = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.set_possibly_dirty(lfile)
        lfdirstate.write(repo.currenttransaction())

        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        with lfdirstate.parentchange():
            result = orig(repo, node, branchmerge, force, *args, **kwargs)

            newstandins = lfutil.getstandinsstate(repo)
            filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

            # to avoid leaving all largefiles as dirty and thus rehash them, mark
            # all the ones that didn't change as clean
            for lfile in oldclean.difference(filelist):
                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
            lfdirstate.write(repo.currenttransaction())

            if branchmerge or force or partial:
                filelist.extend(s.deleted + s.removed)

            lfcommands.updatelfiles(
                repo.ui, repo, filelist=filelist, normallookup=partial
            )

        return result
1822 1822
1823 1823
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After marking files touched, refresh largefiles for any standins."""
    result = orig(repo, files, *args, **kwargs)

    filelist = [
        lf for lf in map(lfutil.splitstandin, files) if lf is not None
    ]
    if filelist:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=filelist,
            printmessage=False,
            normallookup=True,
        )

    return result
1843 1843
1844 1844
@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Keep the largefiles requirement across repository upgrades."""
    requirements = orig(repo)
    if b'largefiles' in repo.requirements:
        requirements.add(b'largefiles')
    return requirements
1852 1852
1853 1853
# URL scheme recognized by the wrapped urlmod.open(): the remainder of the
# URL is a largefile id fetched via storefactory.getlfile().
1854 1854 _lfscheme = b'largefile://'
1855 1855
1856 1856
@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None, **kwargs):
    """Serve 'largefile://<id>' URLs from the largefile store."""
    if not url_.startswith(_lfscheme):
        return orig(ui, url_, data=data, **kwargs)
    # A request body makes no sense when reading from the store.
    if data:
        msg = b"cannot use data on a 'largefile://' url"
        raise error.ProgrammingError(msg)
    lfid = url_[len(_lfscheme) :]
    return storefactory.getlfile(ui, lfid)
@@ -1,2490 +1,2493 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import struct
13 13
14 14 from .i18n import _
15 15 from .node import nullrev
16 16 from .thirdparty import attr
17 17 from .utils import stringutil
18 18 from .dirstateutils import timestamp
19 19 from . import (
20 20 copies,
21 21 encoding,
22 22 error,
23 23 filemerge,
24 24 match as matchmod,
25 25 mergestate as mergestatemod,
26 26 obsutil,
27 27 pathutil,
28 28 pycompat,
29 29 scmutil,
30 30 subrepoutil,
31 31 util,
32 32 worker,
33 33 )
34 34
# Short local aliases for struct (de)serialization helpers.
_pack = struct.pack
_unpack = struct.unpack
37 37
38 38
39 39 def _getcheckunknownconfig(repo, section, name):
40 40 config = repo.ui.config(section, name)
41 41 valid = [b'abort', b'ignore', b'warn']
42 42 if config not in valid:
43 43 validstr = b', '.join([b"'" + v + b"'" for v in valid])
44 44 msg = _(b"%s.%s not valid ('%s' is none of %s)")
45 45 msg %= (section, name, config, validstr)
46 46 raise error.ConfigError(msg)
47 47 return config
48 48
49 49
50 50 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
51 51 if wctx.isinmemory():
52 52 # Nothing to do in IMM because nothing in the "working copy" can be an
53 53 # unknown file.
54 54 #
55 55 # Note that we should bail out here, not in ``_checkunknownfiles()``,
56 56 # because that function does other useful work.
57 57 return False
58 58
59 59 if f2 is None:
60 60 f2 = f
61 61 return (
62 62 repo.wvfs.audit.check(f)
63 63 and repo.wvfs.isfileorlink(f)
64 64 and repo.dirstate.normalize(f) not in repo.dirstate
65 65 and mctx[f2].cmp(wctx[f])
66 66 )
67 67
68 68
class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """

    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        # Returns the shortest conflicting prefix of ``f``, ``f`` itself when
        # it is a directory containing untracked files, or a falsy value
        # (None / bare return / False) when there is no conflict.
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        # Reversing finddirs() yields prefixes shortest-first, so the
        # shortest conflicting prefix is the one reported.
        for p in reversed(list(pathutil.finddirs(f))):
            if p in self._missingdircache:
                # A known-absent ancestor: nothing below it can conflict.
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (
                    repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate
                ):
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None
123 123
124 124
def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Mutates ``mresult`` in place (rewriting CREATED/CREATED_MERGE entries
    into GET or MERGE actions) and raises StateError when the configured
    policy is to abort on conflicting untracked files.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
    pathconfig = repo.ui.configbool(
        b'experimental', b'merge.checkpathconflicts'
    )
    if not force:

        def collectconflicts(conflicts, config):
            # Route conflicts into the abort or warn buckets per policy;
            # b'ignore' leaves them out of both.
            if config == b'abort':
                abortconflicts.update(conflicts)
            elif config == b'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f in mresult.files(
            (
                mergestatemod.ACTION_CREATED,
                mergestatemod.ACTION_DELETED_CHANGED,
            )
        ):
            if _checkunknownfile(repo, wctx, mctx, f):
                fileconflicts.add(f)
            elif pathconfig and f not in wctx:
                path = checkunknowndirs(repo, wctx, f)
                if path is not None:
                    pathconflicts.add(path)
        for f, args, msg in mresult.getactions(
            [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
        ):
            # args[0] is the rename source on the other side; compare
            # against that rather than f itself.
            if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, args, msg in list(
            mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
        ):
            fl2, anc = args
            different = _checkunknownfile(repo, wctx, mctx, f)
            if repo.dirstate._ignore(f):
                config = ignoredconfig
            else:
                config = unknownconfig

            # The behavior when force is True is described by this table:
            #  config  different  mergeforce  |    action    backup
            #    *         n          *       |      get        n
            #    *         y          y       |     merge       -
            #   abort      y          n       |     merge       -   (1)
            #    warn      y          n       |  warn + get     y
            #   ignore     y          n       |      get        y
            #
            # (1) this is probably the wrong behavior here -- we should
            #     probably abort, but some actions like rebases currently
            #     don't like an abort happening in the middle of
            #     merge.update.
            if not different:
                mresult.addfile(
                    f,
                    mergestatemod.ACTION_GET,
                    (fl2, False),
                    b'remote created',
                )
            elif mergeforce or config == b'abort':
                mresult.addfile(
                    f,
                    mergestatemod.ACTION_MERGE,
                    (f, f, None, False, anc),
                    b'remote differs from untracked local',
                )
            elif config == b'abort':
                # NOTE(review): unreachable -- the branch above already
                # matches whenever config == b'abort' (see table note (1)),
                # so abortconflicts is never populated on this path.
                abortconflicts.add(f)
            else:
                if config == b'warn':
                    warnconflicts.add(f)
                mresult.addfile(
                    f,
                    mergestatemod.ACTION_GET,
                    (fl2, True),
                    b'remote created',
                )

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_(b"%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_(b"%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_(b"%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.StateError(
            _(
                b"untracked files in working directory "
                b"differ from files in requested revision"
            )
        )

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)

    # Downgrade every remaining CREATED action to GET, requesting a backup
    # of the working-copy file when it was found to conflict.
    for f, args, msg in list(
        mresult.getactions([mergestatemod.ACTION_CREATED])
    ):
        backup = (
            f in fileconflicts
            or f in pathconflicts
            or any(p in pathconflicts for p in pathutil.finddirs(f))
        )
        (flags,) = args
        mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
254 254
255 255
def _forgetremoved(wctx, mctx, branchmerge, mresult):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """
    # Deleted (missing from disk) files become REMOVE on a branch merge,
    # plain FORGET otherwise.
    if branchmerge:
        deletedaction = mergestatemod.ACTION_REMOVE
    else:
        deletedaction = mergestatemod.ACTION_FORGET
    for f in wctx.deleted():
        if f not in mctx:
            mresult.addfile(f, deletedaction, None, b"forget deleted")

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                mresult.addfile(
                    f,
                    mergestatemod.ACTION_FORGET,
                    None,
                    b"forget removed",
                )
287 287
288 288
def _checkcollision(repo, wmf, mresult):
    """
    Check for case-folding collisions.

    ``wmf`` is the working manifest; ``mresult`` (optional) contributes the
    files the merge will add or remove. Raises StateError when two distinct
    paths in the provisional merged manifest fold to the same name, or when
    a file's folded name collides with a directory's folded name.
    """
    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        pmmf = set(wmf.walk(narrowmatch))
        if mresult:
            for f in list(mresult.files()):
                if not narrowmatch(f):
                    mresult.removefile(f)
    else:
        # build provisional merged manifest up
        pmmf = set(wmf)

    # Apply the pending actions to pmmf so it reflects the post-merge
    # file set.
    if mresult:
        # KEEP and EXEC are no-op
        for f in mresult.files(
            (
                mergestatemod.ACTION_ADD,
                mergestatemod.ACTION_ADD_MODIFIED,
                mergestatemod.ACTION_FORGET,
                mergestatemod.ACTION_GET,
                mergestatemod.ACTION_CHANGED_DELETED,
                mergestatemod.ACTION_DELETED_CHANGED,
            )
        ):
            pmmf.add(f)
        for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
            pmmf.discard(f)
        for f, args, msg in mresult.getactions(
            [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
        ):
            # a directory rename replaces f2 with f
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
            pmmf.add(f)
        for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            msg = _(b"case-folding collision between %s and %s")
            msg %= (f, foldmap[fold])
            raise error.StateError(msg)
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = b''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            msg = _(b"case-folding collision between %s and directory of %s")
            msg %= (lastfull, f)
            raise error.StateError(msg)
        foldprefix = fold + b'/'
        unfoldprefix = f + b'/'
        lastfull = f
355 355
356 356
def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for filename in manifest:
        # Report only the first (deepest-first per finddirs) matching dir.
        containing = next(
            (d for d in pathutil.finddirs(filename) if d in dirs), None
        )
        if containing is not None:
            yield filename, containing
368 368
369 369
def checkpathconflicts(repo, wctx, mctx, mresult):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.

    Mutates ``mresult`` in place: local conflicting files are renamed via
    PATH_CONFLICT_RESOLVE actions, remote conflicting creations are turned
    into renamed gets, and PATH_CONFLICT markers are recorded. Raises
    StateError when the remote manifest itself is internally inconsistent
    (a path is both a file and a directory).
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f in mresult.files(
        (
            mergestatemod.ACTION_CREATED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_MERGE,
            mergestatemod.ACTION_CREATED_MERGE,
        )
    ):
        # This action may create a new local file.
        createdfiledirs.update(pathutil.finddirs(f))
        if mf.hasdir(f):
            # The file aliases a local directory. This might be ok if all
            # the files in the local directory are being deleted. This
            # will be checked once we know what all the deleted files are.
            remoteconflicts.add(f)
    # Track the names of all deleted files.
    for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
        deletedfiles.add(f)
    for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
        # A merge with move=True deletes its source file f1.
        f1, f2, fa, move, anc = args
        if move:
            deletedfiles.add(f1)
    for (f, args, msg) in mresult.getactions(
        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
    ):
        f2, flags = args
        deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        pd = mresult.getfile(p)
        if pd and pd[0] in (
            mergestatemod.ACTION_CREATED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_MERGE,
            mergestatemod.ACTION_CREATED_MERGE,
        ):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip(b'+')
            pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
            porig = wctx[p].copysource() or p
            mresult.addfile(
                pnew,
                mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                (p, porig),
                b'local path conflict',
            )
            mresult.addfile(
                p,
                mergestatemod.ACTION_PATH_CONFLICT,
                (pnew, b'l'),
                b'path conflict',
            )

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip(b'+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = mresult.getfile(p)
                pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
                if m in (
                    mergestatemod.ACTION_DELETED_CHANGED,
                    mergestatemod.ACTION_MERGE,
                ):
                    # Action was merge, just update target.
                    mresult.addfile(pnew, m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    mresult.addfile(
                        pnew,
                        mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                        (p, fl),
                        b'remote path conflict',
                    )
                mresult.addfile(
                    p,
                    mergestatemod.ACTION_PATH_CONFLICT,
                    (pnew, b'r'),
                    b'path conflict',
                )
                remoteconflicts.remove(p)
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
        raise error.StateError(
            _(b"destination manifest contains path conflicts")
        )
503 503
504 504
def _filternarrowactions(narrowmatch, branchmerge, mresult):
    """
    Filters out actions that can be ignored because the repo is narrowed.

    Raise an exception if the merge cannot be completed because the repo is
    narrowed.
    """
    # TODO: handle with nonconflicttypes
    nonconflicttypes = {
        mergestatemod.ACTION_ADD,
        mergestatemod.ACTION_ADD_MODIFIED,
        mergestatemod.ACTION_CREATED,
        mergestatemod.ACTION_CREATED_MERGE,
        mergestatemod.ACTION_FORGET,
        mergestatemod.ACTION_GET,
        mergestatemod.ACTION_REMOVE,
        mergestatemod.ACTION_EXEC,
    }
    # We mutate the items in the dict during iteration, so iterate
    # over a copy.
    for f, action in mresult.filemap():
        if narrowmatch(f):
            # Inside the narrowspec: keep the action untouched.
            continue
        if not branchmerge:
            # just updating, ignore changes outside clone
            mresult.removefile(f)
        elif action[0] in mergestatemod.NO_OP_ACTIONS:
            # merge does not affect file
            mresult.removefile(f)
        elif action[0] in nonconflicttypes:
            msg = _(
                b'merge affects file \'%s\' outside narrow, '
                b'which is not yet supported'
            )
            hint = _(b'merging in the other direction may work')
            raise error.Abort(msg % f, hint=hint)
        else:
            msg = _(b'conflict in file \'%s\' is outside narrow clone')
            raise error.StateError(msg % f)
542 542
543 543
class mergeresult(object):
    """An object representing result of merging manifests.

    It has information about what actions need to be performed on dirstate
    mapping of divergent renames and other such cases."""

    def __init__(self):
        """
        filemapping: dict of filename as keys and action related info as values
        diverge: mapping of source name -> list of dest name for
                 divergent renames
        renamedelete: mapping of source name -> list of destinations for files
                      deleted on one side and renamed on other.
        commitinfo: dict containing data which should be used on commit
                    contains a filename -> info mapping
        actionmapping: dict of action names as keys and values are dict of
                       filename as key and related data as values
        """
        self._filemapping = {}
        self._diverge = {}
        self._renamedelete = {}
        self._commitinfo = collections.defaultdict(dict)
        self._actionmapping = collections.defaultdict(dict)

    def updatevalues(self, diverge, renamedelete):
        """replaces the diverge and renamedelete mappings wholesale"""
        self._diverge = diverge
        self._renamedelete = renamedelete

    def addfile(self, filename, action, data, message):
        """adds a new file to the mergeresult object

        filename: file which we are adding
        action: one of mergestatemod.ACTION_*
        data: a tuple of information like fctx and ctx related to this merge
        message: a message about the merge
        """
        # if the file already existed, we need to delete its old
        # entry from _actionmapping too
        if filename in self._filemapping:
            a, d, m = self._filemapping[filename]
            del self._actionmapping[a][filename]

        self._filemapping[filename] = (action, data, message)
        self._actionmapping[action][filename] = (data, message)

    def getfile(self, filename, default_return=None):
        """returns (action, args, msg) about this file

        returns default_return if the file is not present"""
        if filename in self._filemapping:
            return self._filemapping[filename]
        return default_return

    def files(self, actions=None):
        """returns files on which provided action needs to be performed

        If actions is None, all files are returned
        """
        # TODO: think whether we should return renamedelete and
        # diverge filenames also
        if actions is None:
            for f in self._filemapping:
                yield f

        else:
            for a in actions:
                for f in self._actionmapping[a]:
                    yield f

    def removefile(self, filename):
        """removes a file from the mergeresult object as the file might
        not be merged anymore"""
        action, data, message = self._filemapping[filename]
        del self._filemapping[filename]
        del self._actionmapping[action][filename]

    def getactions(self, actions, sort=False):
        """get list of files which are marked with these actions
        if sort is true, files for each action is sorted and then added

        Returns a list of tuple of form (filename, data, message)
        """
        for a in actions:
            if sort:
                for f in sorted(self._actionmapping[a]):
                    args, msg = self._actionmapping[a][f]
                    yield f, args, msg
            else:
                for f, (args, msg) in pycompat.iteritems(
                    self._actionmapping[a]
                ):
                    yield f, args, msg

    def len(self, actions=None):
        """returns number of files which needs actions

        if actions is passed, total of number of files in that action
        only is returned"""

        if actions is None:
            return len(self._filemapping)

        return sum(len(self._actionmapping[a]) for a in actions)

    def filemap(self, sort=False):
        """yields (filename, (action, data, message)) pairs for all files

        if sort is true, the pairs are yielded in sorted filename order.
        A snapshot of the mapping is taken before yielding, so callers may
        safely add or remove files while iterating.
        """
        # BUG FIX: this previously tested the *builtin* ``sorted`` (always
        # truthy) instead of the ``sort`` parameter, so the flag was
        # silently ignored and output was always sorted. The snapshot via
        # list()/sorted() is kept in both branches because callers mutate
        # the mapping mid-iteration.
        if sort:
            items = sorted(pycompat.iteritems(self._filemapping))
        else:
            items = list(pycompat.iteritems(self._filemapping))
        for key, val in items:
            yield key, val

    def addcommitinfo(self, filename, key, value):
        """adds key-value information about filename which will be required
        while committing this merge"""
        self._commitinfo[filename][key] = value

    @property
    def diverge(self):
        return self._diverge

    @property
    def renamedelete(self):
        return self._renamedelete

    @property
    def commitinfo(self):
        return self._commitinfo

    @property
    def actionsdict(self):
        """returns a dictionary of actions to be performed with action as key
        and a list of files and related arguments as values"""
        res = collections.defaultdict(list)
        for a, d in pycompat.iteritems(self._actionmapping):
            for f, (args, msg) in pycompat.iteritems(d):
                res[a].append((f, args, msg))
        return res

    def setactions(self, actions):
        """replaces the file mapping wholesale and rebuilds the
        per-action index from it"""
        self._filemapping = actions
        self._actionmapping = collections.defaultdict(dict)
        for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
            self._actionmapping[act][f] = data, msg

    def hasconflicts(self):
        """tells whether this merge resulted in some actions which can
        result in conflicts or not"""
        for a in self._actionmapping.keys():
            if (
                a
                not in (
                    mergestatemod.ACTION_GET,
                    mergestatemod.ACTION_EXEC,
                    mergestatemod.ACTION_REMOVE,
                    mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                )
                and self._actionmapping[a]
                and a not in mergestatemod.NO_OP_ACTIONS
            ):
                return True

        return False
707 707
708 708
709 709 def manifestmerge(
710 710 repo,
711 711 wctx,
712 712 p2,
713 713 pa,
714 714 branchmerge,
715 715 force,
716 716 matcher,
717 717 acceptremote,
718 718 followcopies,
719 719 forcefulldiff=False,
720 720 ):
721 721 """
722 722 Merge wctx and p2 with ancestor pa and generate merge action list
723 723
724 724 branchmerge and force are as passed in to update
725 725 matcher = matcher to filter file lists
726 726 acceptremote = accept the incoming changes without prompting
727 727
728 728 Returns an object of mergeresult class
729 729 """
730 730 mresult = mergeresult()
731 731 if matcher is not None and matcher.always():
732 732 matcher = None
733 733
734 734 # manifests fetched in order are going to be faster, so prime the caches
735 735 [
736 736 x.manifest()
737 737 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
738 738 ]
739 739
740 740 branch_copies1 = copies.branch_copies()
741 741 branch_copies2 = copies.branch_copies()
742 742 diverge = {}
743 743 # information from merge which is needed at commit time
744 744 # for example choosing filelog of which parent to commit
745 745 # TODO: use specific constants in future for this mapping
746 746 if followcopies:
747 747 branch_copies1, branch_copies2, diverge = copies.mergecopies(
748 748 repo, wctx, p2, pa
749 749 )
750 750
751 751 boolbm = pycompat.bytestr(bool(branchmerge))
752 752 boolf = pycompat.bytestr(bool(force))
753 753 boolm = pycompat.bytestr(bool(matcher))
754 754 repo.ui.note(_(b"resolving manifests\n"))
755 755 repo.ui.debug(
756 756 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
757 757 )
758 758 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
759 759
760 760 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
761 761 copied1 = set(branch_copies1.copy.values())
762 762 copied1.update(branch_copies1.movewithdir.values())
763 763 copied2 = set(branch_copies2.copy.values())
764 764 copied2.update(branch_copies2.movewithdir.values())
765 765
766 766 if b'.hgsubstate' in m1 and wctx.rev() is None:
767 767 # Check whether sub state is modified, and overwrite the manifest
768 768 # to flag the change. If wctx is a committed revision, we shouldn't
769 769 # care for the dirty state of the working directory.
770 770 if any(wctx.sub(s).dirty() for s in wctx.substate):
771 771 m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
772 772
773 773 # Don't use m2-vs-ma optimization if:
774 774 # - ma is the same as m1 or m2, which we're just going to diff again later
775 775 # - The caller specifically asks for a full diff, which is useful during bid
776 776 # merge.
777 777 # - we are tracking salvaged files specifically hence should process all
778 778 # files
779 779 if (
780 780 pa not in ([wctx, p2] + wctx.parents())
781 781 and not forcefulldiff
782 782 and not (
783 783 repo.ui.configbool(b'experimental', b'merge-track-salvaged')
784 784 or repo.filecopiesmode == b'changeset-sidedata'
785 785 )
786 786 ):
787 787 # Identify which files are relevant to the merge, so we can limit the
788 788 # total m1-vs-m2 diff to just those files. This has significant
789 789 # performance benefits in large repositories.
790 790 relevantfiles = set(ma.diff(m2).keys())
791 791
792 792 # For copied and moved files, we need to add the source file too.
793 793 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
794 794 if copyvalue in relevantfiles:
795 795 relevantfiles.add(copykey)
796 796 for movedirkey in branch_copies1.movewithdir:
797 797 relevantfiles.add(movedirkey)
798 798 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
799 799 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
800 800
801 801 diff = m1.diff(m2, match=matcher)
802 802
803 803 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
804 804 if n1 and n2: # file exists on both local and remote side
805 805 if f not in ma:
806 806 # TODO: what if they're renamed from different sources?
807 807 fa = branch_copies1.copy.get(
808 808 f, None
809 809 ) or branch_copies2.copy.get(f, None)
810 810 args, msg = None, None
811 811 if fa is not None:
812 812 args = (f, f, fa, False, pa.node())
813 813 msg = b'both renamed from %s' % fa
814 814 else:
815 815 args = (f, f, None, False, pa.node())
816 816 msg = b'both created'
817 817 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
818 818 elif f in branch_copies1.copy:
819 819 fa = branch_copies1.copy[f]
820 820 mresult.addfile(
821 821 f,
822 822 mergestatemod.ACTION_MERGE,
823 823 (f, fa, fa, False, pa.node()),
824 824 b'local replaced from %s' % fa,
825 825 )
826 826 elif f in branch_copies2.copy:
827 827 fa = branch_copies2.copy[f]
828 828 mresult.addfile(
829 829 f,
830 830 mergestatemod.ACTION_MERGE,
831 831 (fa, f, fa, False, pa.node()),
832 832 b'other replaced from %s' % fa,
833 833 )
834 834 else:
835 835 a = ma[f]
836 836 fla = ma.flags(f)
837 837 nol = b'l' not in fl1 + fl2 + fla
838 838 if n2 == a and fl2 == fla:
839 839 mresult.addfile(
840 840 f,
841 841 mergestatemod.ACTION_KEEP,
842 842 (),
843 843 b'remote unchanged',
844 844 )
845 845 elif n1 == a and fl1 == fla: # local unchanged - use remote
846 846 if n1 == n2: # optimization: keep local content
847 847 mresult.addfile(
848 848 f,
849 849 mergestatemod.ACTION_EXEC,
850 850 (fl2,),
851 851 b'update permissions',
852 852 )
853 853 else:
854 854 mresult.addfile(
855 855 f,
856 856 mergestatemod.ACTION_GET,
857 857 (fl2, False),
858 858 b'remote is newer',
859 859 )
860 860 if branchmerge:
861 861 mresult.addcommitinfo(
862 862 f, b'filenode-source', b'other'
863 863 )
864 864 elif nol and n2 == a: # remote only changed 'x'
865 865 mresult.addfile(
866 866 f,
867 867 mergestatemod.ACTION_EXEC,
868 868 (fl2,),
869 869 b'update permissions',
870 870 )
871 871 elif nol and n1 == a: # local only changed 'x'
872 872 mresult.addfile(
873 873 f,
874 874 mergestatemod.ACTION_GET,
875 875 (fl1, False),
876 876 b'remote is newer',
877 877 )
878 878 if branchmerge:
879 879 mresult.addcommitinfo(f, b'filenode-source', b'other')
880 880 else: # both changed something
881 881 mresult.addfile(
882 882 f,
883 883 mergestatemod.ACTION_MERGE,
884 884 (f, f, f, False, pa.node()),
885 885 b'versions differ',
886 886 )
887 887 elif n1: # file exists only on local side
888 888 if f in copied2:
889 889 pass # we'll deal with it on m2 side
890 890 elif (
891 891 f in branch_copies1.movewithdir
892 892 ): # directory rename, move local
893 893 f2 = branch_copies1.movewithdir[f]
894 894 if f2 in m2:
895 895 mresult.addfile(
896 896 f2,
897 897 mergestatemod.ACTION_MERGE,
898 898 (f, f2, None, True, pa.node()),
899 899 b'remote directory rename, both created',
900 900 )
901 901 else:
902 902 mresult.addfile(
903 903 f2,
904 904 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
905 905 (f, fl1),
906 906 b'remote directory rename - move from %s' % f,
907 907 )
908 908 elif f in branch_copies1.copy:
909 909 f2 = branch_copies1.copy[f]
910 910 mresult.addfile(
911 911 f,
912 912 mergestatemod.ACTION_MERGE,
913 913 (f, f2, f2, False, pa.node()),
914 914 b'local copied/moved from %s' % f2,
915 915 )
916 916 elif f in ma: # clean, a different, no remote
917 917 if n1 != ma[f]:
918 918 if acceptremote:
919 919 mresult.addfile(
920 920 f,
921 921 mergestatemod.ACTION_REMOVE,
922 922 None,
923 923 b'remote delete',
924 924 )
925 925 else:
926 926 mresult.addfile(
927 927 f,
928 928 mergestatemod.ACTION_CHANGED_DELETED,
929 929 (f, None, f, False, pa.node()),
930 930 b'prompt changed/deleted',
931 931 )
932 932 if branchmerge:
933 933 mresult.addcommitinfo(
934 934 f, b'merge-removal-candidate', b'yes'
935 935 )
936 936 elif n1 == repo.nodeconstants.addednodeid:
937 937 # This file was locally added. We should forget it instead of
938 938 # deleting it.
939 939 mresult.addfile(
940 940 f,
941 941 mergestatemod.ACTION_FORGET,
942 942 None,
943 943 b'remote deleted',
944 944 )
945 945 else:
946 946 mresult.addfile(
947 947 f,
948 948 mergestatemod.ACTION_REMOVE,
949 949 None,
950 950 b'other deleted',
951 951 )
952 952 if branchmerge:
953 953 # the file must be absent after merging,
954 954 # however the user might make
955 955 # the file reappear using revert and if they do,
956 956 # we force create a new node
957 957 mresult.addcommitinfo(
958 958 f, b'merge-removal-candidate', b'yes'
959 959 )
960 960
961 961 else: # file not in ancestor, not in remote
962 962 mresult.addfile(
963 963 f,
964 964 mergestatemod.ACTION_KEEP_NEW,
965 965 None,
966 966 b'ancestor missing, remote missing',
967 967 )
968 968
969 969 elif n2: # file exists only on remote side
970 970 if f in copied1:
971 971 pass # we'll deal with it on m1 side
972 972 elif f in branch_copies2.movewithdir:
973 973 f2 = branch_copies2.movewithdir[f]
974 974 if f2 in m1:
975 975 mresult.addfile(
976 976 f2,
977 977 mergestatemod.ACTION_MERGE,
978 978 (f2, f, None, False, pa.node()),
979 979 b'local directory rename, both created',
980 980 )
981 981 else:
982 982 mresult.addfile(
983 983 f2,
984 984 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
985 985 (f, fl2),
986 986 b'local directory rename - get from %s' % f,
987 987 )
988 988 elif f in branch_copies2.copy:
989 989 f2 = branch_copies2.copy[f]
990 990 msg, args = None, None
991 991 if f2 in m2:
992 992 args = (f2, f, f2, False, pa.node())
993 993 msg = b'remote copied from %s' % f2
994 994 else:
995 995 args = (f2, f, f2, True, pa.node())
996 996 msg = b'remote moved from %s' % f2
997 997 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
998 998 elif f not in ma:
999 999 # local unknown, remote created: the logic is described by the
1000 1000 # following table:
1001 1001 #
1002 1002 # force branchmerge different | action
1003 1003 # n * * | create
1004 1004 # y n * | create
1005 1005 # y y n | create
1006 1006 # y y y | merge
1007 1007 #
1008 1008 # Checking whether the files are different is expensive, so we
1009 1009 # don't do that when we can avoid it.
1010 1010 if not force:
1011 1011 mresult.addfile(
1012 1012 f,
1013 1013 mergestatemod.ACTION_CREATED,
1014 1014 (fl2,),
1015 1015 b'remote created',
1016 1016 )
1017 1017 elif not branchmerge:
1018 1018 mresult.addfile(
1019 1019 f,
1020 1020 mergestatemod.ACTION_CREATED,
1021 1021 (fl2,),
1022 1022 b'remote created',
1023 1023 )
1024 1024 else:
1025 1025 mresult.addfile(
1026 1026 f,
1027 1027 mergestatemod.ACTION_CREATED_MERGE,
1028 1028 (fl2, pa.node()),
1029 1029 b'remote created, get or merge',
1030 1030 )
1031 1031 elif n2 != ma[f]:
1032 1032 df = None
1033 1033 for d in branch_copies1.dirmove:
1034 1034 if f.startswith(d):
1035 1035 # new file added in a directory that was moved
1036 1036 df = branch_copies1.dirmove[d] + f[len(d) :]
1037 1037 break
1038 1038 if df is not None and df in m1:
1039 1039 mresult.addfile(
1040 1040 df,
1041 1041 mergestatemod.ACTION_MERGE,
1042 1042 (df, f, f, False, pa.node()),
1043 1043 b'local directory rename - respect move '
1044 1044 b'from %s' % f,
1045 1045 )
1046 1046 elif acceptremote:
1047 1047 mresult.addfile(
1048 1048 f,
1049 1049 mergestatemod.ACTION_CREATED,
1050 1050 (fl2,),
1051 1051 b'remote recreating',
1052 1052 )
1053 1053 else:
1054 1054 mresult.addfile(
1055 1055 f,
1056 1056 mergestatemod.ACTION_DELETED_CHANGED,
1057 1057 (None, f, f, False, pa.node()),
1058 1058 b'prompt deleted/changed',
1059 1059 )
1060 1060 if branchmerge:
1061 1061 mresult.addcommitinfo(
1062 1062 f, b'merge-removal-candidate', b'yes'
1063 1063 )
1064 1064 else:
1065 1065 mresult.addfile(
1066 1066 f,
1067 1067 mergestatemod.ACTION_KEEP_ABSENT,
1068 1068 None,
1069 1069 b'local not present, remote unchanged',
1070 1070 )
1071 1071 if branchmerge:
1072 1072 # the file must be absent after merging
1073 1073 # however the user might make
1074 1074 # the file reappear using revert and if they do,
1075 1075 # we force create a new node
1076 1076 mresult.addcommitinfo(f, b'merge-removal-candidate', b'yes')
1077 1077
1078 1078 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1079 1079 # If we are merging, look for path conflicts.
1080 1080 checkpathconflicts(repo, wctx, p2, mresult)
1081 1081
1082 1082 narrowmatch = repo.narrowmatch()
1083 1083 if not narrowmatch.always():
1084 1084 # Updates "actions" in place
1085 1085 _filternarrowactions(narrowmatch, branchmerge, mresult)
1086 1086
1087 1087 renamedelete = branch_copies1.renamedelete
1088 1088 renamedelete.update(branch_copies2.renamedelete)
1089 1089
1090 1090 mresult.updatevalues(diverge, renamedelete)
1091 1091 return mresult
1092 1092
1093 1093
def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
    """Downgrade false conflicts whose nodeid changed but whose content
    is identical to the merge ancestor's.

    Mutates ``mresult`` in place: changed/deleted entries whose local
    content matches the ancestor become plain removes, and deleted/changed
    entries whose remote content matches the ancestor are dropped entirely
    (keeping the local deletion).
    """
    # Snapshot the file lists up front because we mutate mresult while
    # resolving the trivial conflicts.
    cd_files = list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,)))
    for fname in cd_files:
        if fname not in ancestor:
            continue
        if wctx[fname].cmp(ancestor[fname]):
            continue
        # The local edit ended up reproducing the ancestor content exactly,
        # so the "changed" side is vacuous: honor the remote deletion.
        mresult.addfile(
            fname, mergestatemod.ACTION_REMOVE, None, b'prompt same'
        )

    dc_files = list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,)))
    for fname in dc_files:
        if fname in ancestor and not mctx[fname].cmp(ancestor[fname]):
            # Remote edit matches the ancestor content; dropping the action
            # means we do not "get" the file and the local delete wins.
            mresult.removefile(fname)
1110 1110
1111 1111
def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """
    Calculate the actions needed to merge mctx into wctx using ancestors

    Uses manifestmerge() to merge manifest and get list of actions required to
    perform for merging two manifests. If there are multiple ancestors, uses bid
    merge if enabled: every ancestor produces a candidate action ("bid") per
    file and an auction below picks the best one.

    Also filters out actions which are unrequired if repository is sparse.

    Returns mergeresult object same as manifestmerge().
    """
    # Avoid cycle.
    from . import sparse

    mresult = None
    if len(ancestors) == 1:  # default
        mresult = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # mapping filename to bids (action method to list of actions)
        # {FILENAME1 : BID1, FILENAME2 : BID2}
        # BID is another dictionary which contains
        # mapping of following form:
        # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
        fbids = {}
        mresult = mergeresult()
        diverge, renamedelete = None, None
        # One manifestmerge() pass per ancestor; each pass contributes bids.
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            mresult1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(mresult1.diverge) < len(diverge):
                diverge = mresult1.diverge
            if renamedelete is None or len(renamedelete) < len(
                mresult1.renamedelete
            ):
                renamedelete = mresult1.renamedelete

            # blindly update final mergeresult commitinfo with what we get
            # from mergeresult object for each ancestor
            # TODO: some commitinfo depends on what bid merge choose and hence
            # we will need to make commitinfo also depend on bid merge logic
            mresult._commitinfo.update(mresult1._commitinfo)

            for f, a in mresult1.filemap(sort=True):
                m, args, msg = a
                # MergeAction objects stringify via __bytes__ (py2/py3 compat:
                # bytes(m) would not call __bytes__ on py2).
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m.__bytes__()))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Call for bids
        # Pick the best bid for each file
        repo.ui.note(
            _(b'\nauction for merging merge bids (%d ancestors)\n')
            % len(ancestors)
        )
        for f, bids in sorted(fbids.items()):
            if repo.ui.debugflag:
                repo.ui.debug(b" list of bids for %s:\n" % f)
                for m, l in sorted(bids.items()):
                    for _f, args, msg in l:
                        repo.ui.debug(b' %s -> %s\n' % (msg, m.__bytes__()))
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(
                        _(b" %s: consensus for %s\n") % (f, m.__bytes__())
                    )
                    mresult.addfile(f, *l[0])
                    continue
            # If keep is an option, just do it.
            if mergestatemod.ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
                continue
            # If keep absent is an option, just do that
            if mergestatemod.ACTION_KEEP_ABSENT in bids:
                repo.ui.note(_(b" %s: picking 'keep absent' action\n") % f)
                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_ABSENT][0])
                continue
            # ACTION_KEEP_NEW and ACTION_CHANGED_DELETED are conflicting actions
            # as one says that the file is new while the other says the file
            # was present earlier too and has a change delete conflict
            # Let's fall back to conflicting ACTION_CHANGED_DELETED and let user
            # do the right thing
            if (
                mergestatemod.ACTION_CHANGED_DELETED in bids
                and mergestatemod.ACTION_KEEP_NEW in bids
            ):
                repo.ui.note(_(b" %s: picking 'changed/deleted' action\n") % f)
                mresult.addfile(
                    f, *bids[mergestatemod.ACTION_CHANGED_DELETED][0]
                )
                continue
            # If keep new is an option, let's just do that
            if mergestatemod.ACTION_KEEP_NEW in bids:
                repo.ui.note(_(b" %s: picking 'keep new' action\n") % f)
                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_NEW][0])
                continue
            # ACTION_GET and ACTION_DELETE_CHANGED are conflicting actions as
            # one action states the file is newer/created on remote side and
            # other states that file is deleted locally and changed on remote
            # side. Let's fallback and rely on a conflicting action to let user
            # do the right thing
            if (
                mergestatemod.ACTION_DELETED_CHANGED in bids
                and mergestatemod.ACTION_GET in bids
            ):
                repo.ui.note(_(b" %s: picking 'delete/changed' action\n") % f)
                mresult.addfile(
                    f, *bids[mergestatemod.ACTION_DELETED_CHANGED][0]
                )
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if mergestatemod.ACTION_GET in bids:
                ga0 = bids[mergestatemod.ACTION_GET][0]
                if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    mresult.addfile(f, *ga0)
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b' %s -> %s\n' % (msg, m.__bytes__()))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n')
                % (f, m.__bytes__())
            )
            mresult.addfile(f, *l[0])
            continue
        repo.ui.note(_(b'end of auction\n\n'))
        mresult.updatevalues(diverge, renamedelete)

    if wctx.rev() is None:
        _forgetremoved(wctx, mctx, branchmerge, mresult)

    sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
    _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)

    return mresult
1307 1310
1308 1311
def _getcwd():
    """Return the current working directory, or ``None`` if it no longer
    exists on disk; any other OS error is propagated."""
    try:
        return encoding.getcwd()
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        return None
1316 1319
1317 1320
def batchremove(repo, wctx, actions):
    """Remove working-directory files for the given remove actions.

    ``actions`` yields ``(filename, args, message)`` tuples.  This is a
    generator producing ``(count, filename)`` tuples for progress updates,
    batching at most 100 files per tuple.  If removing files deleted the
    process's current directory, a warning is printed at the end.
    """
    be_verbose = repo.ui.verbose
    startingcwd = _getcwd()
    pending = 0
    fname = None
    for fname, args, msg in actions:
        repo.ui.debug(b" %s: %s -> r\n" % (fname, msg))
        if be_verbose:
            repo.ui.note(_(b"removing %s\n") % fname)
        wctx[fname].audit()
        try:
            wctx[fname].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(
                _(b"update failed to remove %s: %s!\n")
                % (fname, stringutil.forcebytestr(inst.strerror))
            )
        # Flush a progress batch once 100 removals have accumulated.
        if pending == 100:
            yield pending, fname
            pending = 0
        pending += 1
    if pending > 0:
        yield pending, fname

    if startingcwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )
1355 1358
1356 1359
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    # backgroundclosing lets file closes happen off-thread for throughput.
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    for p in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            wfctx.clearunknown()
            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                # XXX note that there is a race window between the time we
                # write the clean data into the file and we stat it. So another
                # writing process meddling with the file content right after we
                # wrote it could cause bad stat data to be gathered.
                #
                # There are 2 pieces of data we gather here
                # - the mode:
                #       That we actually just wrote, we should not need to read
                #       it from disk, (except not all mode might have survived
                #       the disk round-trip, which is another issue: we should
                #       not depends on this)
                # - the mtime,
                #       On system that support nanosecond precision, the mtime
                #       could be accurate enough to tell the two writes apart.
                #       However gathering it in a racy way make the mtime we
                #       gather "unreliable".
                #
                # (note: we get the size from the data we write, which is sane)
                #
                # So in theory the data returned here are fully racy, but in
                # practice "it works mostly fine".
                #
                # Do not be surprised if you end up reading this while looking
                # for the causes of some buggy status. Feel free to improve
                # this in the future, but we cannot simply stop gathering
                # information. Otherwise `hg status` call made after a large `hg
                # update` runs would have to redo a similar amount of work to
                # restore and compare all files content.
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = timestamp.mtime_of(s)
                # for dirstate.update_file's parentfiledata argument:
                filedata[f] = (mode, size, mtime)
            # Batch progress updates: flush every 100 files written.
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
    if i > 0:
        yield False, (i, f)
    yield True, filedata
1441 1444
1442 1445
def _prefetchfiles(repo, ctx, mresult):
    """Prefetch (via ``scmutil.prefetchfiles()``) the file contents that the
    merge actions will read from ``ctx``, the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    relevant_actions = [
        mergestatemod.ACTION_GET,
        mergestatemod.ACTION_DELETED_CHANGED,
        mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
        mergestatemod.ACTION_MERGE,
    ]
    files = mresult.files(relevant_actions)

    matcher = scmutil.matchfiles(repo, files)
    scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
1470 1473
1471 1474
@attr.s(frozen=True)
class updateresult(object):
    """Immutable summary of how many files an update/merge touched."""

    updatedcount = attr.ib()
    mergedcount = attr.ib()
    removedcount = attr.ib()
    unresolvedcount = attr.ib()

    def isempty(self):
        """Return True when nothing was updated, merged, removed or left
        unresolved."""
        counts = (
            self.updatedcount,
            self.mergedcount,
            self.removedcount,
            self.unresolvedcount,
        )
        return not any(counts)
1486 1489
1487 1490
def applyupdates(
    repo,
    mresult,
    wctx,
    mctx,
    overwrite,
    wantfiledata,
    labels=None,
):
    """apply the merge action list to the working directory

    mresult is a mergeresult object representing result of the merge
    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of (counts, filedata), where counts is a tuple
    (updated, merged, removed, unresolved) that describes how many
    files were affected by the update, and filedata is as described in
    batchget.

    The phases below run in a fixed order: path-conflict recording,
    removes, path-conflict resolution, gets, log-only actions, directory
    renames, exec-bit changes, and finally the real file merges.
    """

    _prefetchfiles(repo, mctx, mresult)

    updated, merged, removed = 0, 0, 0
    ms = wctx.mergestate(clean=True)
    ms.start(wctx.p1().node(), mctx.node(), labels)

    for f, op in pycompat.iteritems(mresult.commitinfo):
        # the other side of filenode was chosen while merging, store this in
        # mergestate so that it can be reused on commit
        ms.addcommitinfo(f, op)

    # no-op actions do not count towards progress
    numupdates = mresult.len() - mresult.len(mergestatemod.NO_OP_ACTIONS)
    progress = repo.ui.makeprogress(
        _(b'updating'), unit=_(b'files'), total=numupdates
    )

    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in mresult.getactions(
        [mergestatemod.ACTION_PATH_CONFLICT], sort=True
    ):
        f1, fo = args
        s = repo.ui.status
        s(
            _(
                b"%s: path conflict - a file or link has the same name as a "
                b"directory\n"
            )
            % f
        )
        if fo == b'l':
            s(_(b"the local file has been renamed to %s\n") % f1)
        else:
            s(_(b"the remote file has been renamed to %s\n") % f1)
        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpathconflict(f, f1, fo)
        progress.increment(item=f)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(
        repo.ui,
        cost,
        batchremove,
        (repo, wctx),
        list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
    )
    for i, item in prog:
        progress.increment(step=i, item=item)
    removed = mresult.len((mergestatemod.ACTION_REMOVE,))

    # resolve path conflicts (must come before getting)
    for f, args, msg in mresult.getactions(
        [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
    ):
        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
        (f0, origf0) = args
        if wctx[f0].lexists():
            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        progress.increment(item=f)

    # get in parallel.
    threadsafe = repo.ui.configbool(
        b'experimental', b'worker.wdir-get-thread-safe'
    )
    prog = worker.worker(
        repo.ui,
        cost,
        batchget,
        (repo, mctx, wctx, wantfiledata),
        list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
        threadsafe=threadsafe,
        hasretval=True,
    )
    getfiledata = {}
    for final, res in prog:
        if final:
            # final value is the filedata dict described in batchget
            getfiledata = res
        else:
            i, item = res
            progress.increment(step=i, item=item)

    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_FORGET,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
        progress.increment(item=f)

    # re-add (manifest only, just log it)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_ADD,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
        progress.increment(item=f)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
        progress.increment(item=f)

    # keep (noop, just log it)
    for a in mergestatemod.NO_OP_ACTIONS:
        for f, args, msg in mresult.getactions((a,), sort=True):
            # actions are MergeAction objects; __bytes__ gives the short code
            repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a.__bytes__()))
            # no progress

    # directory rename, move local
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()

    # local directory rename, get
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)

    # exec
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_EXEC,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
        progress.increment(item=f)
        (flags,) = args
        wctx[f].audit()
        wctx[f].setflags(b'l' in flags, b'x' in flags)

    moves = []

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = list(
        mresult.getactions(
            [
                mergestatemod.ACTION_CHANGED_DELETED,
                mergestatemod.ACTION_DELETED_CHANGED,
                mergestatemod.ACTION_MERGE,
            ],
            sort=True,
        )
    )
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == b'.hgsubstate':  # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug(b"removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    # these actions update the file
    updated = mresult.len(
        (
            mergestatemod.ACTION_GET,
            mergestatemod.ACTION_EXEC,
            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
            mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
        )
    )

    try:
        for f, args, msg in mergeactions:
            repo.ui.debug(b" %s: %s -> m\n" % (f, msg))
            ms.addcommitinfo(f, {b'merged': b'yes'})
            progress.increment(item=f)
            if f == b'.hgsubstate':  # subrepo states need updating
                subrepoutil.submerge(
                    repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
                )
                continue
            wctx[f].audit()
            ms.resolve(f, wctx)

    except error.InterventionRequired:
        # If the user has merge.on-failure=halt, catch the error and close the
        # merge state "properly".
        pass
    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()

    progress.complete()
    return (
        updateresult(updated, merged, removed, unresolved),
        getfiledata,
        extraactions,
    )
1749 1752
1750 1753
def _advertisefsmonitor(repo, num_gets, p1node):
    """Print a hint suggesting the fsmonitor extension when it would help.

    We only advertise when performing an update from an empty working
    directory (typically an initial clone), when the number of files
    fetched crosses a configurable threshold, on Linux/macOS (where
    fsmonitor is considered stable), and only if the user has not
    disabled the hint via ``fsmonitor.warn_when_unused``.
    """
    warn_enabled = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
    threshold = repo.ui.configint(b'fsmonitor', b'warn_update_file_count')
    # avoid cycle dirstate -> sparse -> merge -> dirstate
    from . import dirstate

    if dirstate.rustmod is not None:
        # When using rust status, fsmonitor becomes necessary at higher sizes
        threshold = repo.ui.configint(
            b'fsmonitor',
            b'warn_update_file_count_rust',
        )

    try:
        # avoid cycle: extensions -> cmdutil -> merge
        from . import extensions

        extensions.find(b'fsmonitor')
        fsmonitor_enabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
        # We intentionally don't look at whether fsmonitor has disabled
        # itself because a) fsmonitor may have already printed a warning
        # b) we only care about the config state here.
    except KeyError:
        fsmonitor_enabled = False

    should_warn = (
        warn_enabled
        and not fsmonitor_enabled
        and p1node == repo.nullid
        and num_gets >= threshold
        and pycompat.sysplatform.startswith((b'linux', b'darwin'))
    )
    if should_warn:
        repo.ui.warn(
            _(
                b'(warning: large working directory being used without '
                b'fsmonitor enabled; enable fsmonitor to improve performance; '
                b'see "hg help -e fsmonitor")\n'
            )
        )
1802 1805
1803 1806
# Valid values for the update-check behavior (see the -c/--check handling
# documented on _update() below).
UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
1808 1811
1809 1812
def _update(
    repo,
    node,
    branchmerge,
    force,
    ancestor=None,
    mergeancestor=False,
    labels=None,
    matcher=None,
    mergeforce=False,
    updatedirstate=True,
    updatecheck=None,
    wc=None,
):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |   merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |   merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like objects. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better suppport some of these callers.
        if updatecheck is None:
            updatecheck = UPDATECHECK_LINEAR
        okay = (UPDATECHECK_NONE, UPDATECHECK_LINEAR, UPDATECHECK_NO_CONFLICT)
        if updatecheck not in okay:
            msg = r'Invalid updatecheck %r (can accept %r)'
            msg %= (updatecheck, okay)
            raise ValueError(msg)
    # In-memory merges do not touch the working copy, so no wlock is needed.
    if wc is not None and wc.isinmemory():
        maybe_wlock = util.nullcontextmanager()
    else:
        maybe_wlock = repo.wlock()
    with maybe_wlock:
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        p2 = repo[node]
        if ancestor is not None:
            pas = [repo[ancestor]]
        else:
            if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [repo.nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        overwrite = force and not branchmerge
        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.StateError(_(b"outstanding uncommitted merge"))
            ms = wc.mergestate()
            if ms.unresolvedcount():
                msg = _(b"outstanding merge conflicts")
                hint = _(b"use 'hg resolve' to resolve")
                raise error.StateError(msg, hint=hint)
        if branchmerge:
            m_a = _(b"merging with a working directory ancestor has no effect")
            if pas == [p2]:
                raise error.Abort(m_a)
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    msg = _(b"nothing to merge")
                    hint = _(b"use 'hg update' or check 'hg heads'")
                    raise error.Abort(msg, hint=hint)
            if not force and (wc.files() or wc.deleted()):
                msg = _(b"uncommitted changes")
                hint = _(b"use 'hg status' to list changes")
                raise error.StateError(msg, hint=hint)
            if not wc.isinmemory():
                # subrepos must also be clean before a branch merge
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2:  # no-op update
                # call the hooks and exit early
                repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
                repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
                return updateresult(0, 0, 0, 0)

            if updatecheck == UPDATECHECK_LINEAR and pas not in (
                [p1],
                [p2],
            ):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass  # allow updating to successors
                    else:
                        msg = _(b"uncommitted changes")
                        hint = _(b"commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool(b'merge', b'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        mresult = calculateupdates(
            repo,
            wc,
            p2,
            pas,
            branchmerge,
            force,
            mergeancestor,
            followcopies,
            matcher=matcher,
            mergeforce=mergeforce,
        )

        if updatecheck == UPDATECHECK_NO_CONFLICT:
            if mresult.hasconflicts():
                msg = _(b"conflicting changes")
                hint = _(b"commit or update --clean to discard changes")
                raise error.StateError(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        vals = mresult.getfile(b'.hgsubstate')
        if vals:
            f = b'.hgsubstate'
            m, args, msg = vals
            prompts = filemerge.partextras(labels)
            prompts[b'f'] = f
            if m == mergestatemod.ACTION_CHANGED_DELETED:
                if repo.ui.promptchoice(
                    _(
                        b"local%(l)s changed %(f)s which other%(o)s deleted\n"
                        b"use (c)hanged version or (d)elete?"
                        b"$$ &Changed $$ &Delete"
                    )
                    % prompts,
                    0,
                ):
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_REMOVE,
                        None,
                        b'prompt delete',
                    )
                elif f in p1:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_ADD_MODIFIED,
                        None,
                        b'prompt keep',
                    )
                else:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_ADD,
                        None,
                        b'prompt keep',
                    )
            elif m == mergestatemod.ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if (
                    repo.ui.promptchoice(
                        _(
                            b"other%(o)s changed %(f)s which local%(l)s deleted\n"
                            b"use (c)hanged version or leave (d)eleted?"
                            b"$$ &Changed $$ &Deleted"
                        )
                        % prompts,
                        0,
                    )
                    == 0
                ):
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_GET,
                        (flags, False),
                        b'prompt recreating',
                    )
                else:
                    mresult.removefile(f)

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if not branchmerge and (
                force or not wc.dirty(missing=True, branch=False)
            ):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), mresult)

        # divergent renames
        for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was renamed "
                    b"multiple times to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was deleted "
                    b"and renamed to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, repo.nullid, xp2, b''
        # If we're doing a partial update, we need to skip updating
        # the dirstate.
        always = matcher is None or matcher.always()
        updatedirstate = updatedirstate and always and not wc.isinmemory()
        if updatedirstate:
            repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write(b'updatestate', p2.hex())

        _advertisefsmonitor(
            repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
        )

        wantfiledata = updatedirstate and not branchmerge
        stats, getfiledata, extraactions = applyupdates(
            repo,
            mresult,
            wc,
            p2,
            overwrite,
            wantfiledata,
            labels=labels,
        )

        if updatedirstate:
            if extraactions:
                for k, acts in pycompat.iteritems(extraactions):
                    for a in acts:
                        mresult.addfile(a[0], k, *a[1:])
                    if k == mergestatemod.ACTION_GET and wantfiledata:
                        # no filedata until mergestate is updated to provide it
                        for a in acts:
                            getfiledata[a[0]] = None

            assert len(getfiledata) == (
                mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
            )
            with repo.dirstate.parentchange():
                ### Filter Filedata
                #
                # We gathered "cache" information for the clean file while
                # updating them: mtime, size and mode.
                #
                # At the time this comment is written, they are various issues
                # with how we gather the `mode` and `mtime` information (see
                # the comment in `batchget`).
                #
                # We are going to smooth one of this issue here : mtime ambiguity.
                #
                # i.e. even if the mtime gathered during `batchget` was
                # correct[1] a change happening right after it could change the
                # content while keeping the same mtime[2].
                #
                # When we reach the current code, the "on disk" part of the
                # update operation is finished. We still assume that no other
                # process raced that "on disk" part, but we want to at least
                # prevent later file change to alter the content of the file
                # right after the update operation. So quickly that the same
                # mtime is record for the operation.
                # To prevent such ambiguity to happens, we will only keep the
                # "file data" for files with mtime that are stricly in the past,
                # i.e. whose mtime is strictly lower than the current time.
                #
                # This protect us from race conditions from operation that could
                # run right after this one, especially other Mercurial
                # operation that could be waiting for the wlock to touch files
                # content and the dirstate.
                #
                # In an ideal world, we could only get reliable information in
                # `getfiledata` (from `getbatch`), however the current approach
                # have been a successful compromise since many years.
                #
                # At the time this comment is written, not using any "cache"
                # file data at all here would not be viable. As it would result is
                # a very large amount of work (equivalent to the previous `hg
                # update` during the next status after an update).
                #
                # [1] the current code cannot grantee that the `mtime` and
                # `mode` are correct, but the result is "okay in practice".
                # (see the comment in `batchget`). #
                #
                # [2] using nano-second precision can greatly help here because
                # it makes the "different write with same mtime" issue
                # virtually vanish. However, dirstate v1 cannot store such
                # precision and a bunch of python-runtime, operating-system and
                # filesystem does not provide use with such precision, so we
                # have to operate as if it wasn't available.
                if getfiledata:
                    ambiguous_mtime = {}
                    now = timestamp.get_fs_now(repo.vfs)
                    if now is None:
                        # we can't write to the FS, so we won't actually update
                        # the dirstate content anyway, no need to put cache
                        # information.
                        getfiledata = None
                    else:
                        now_sec = now[0]
                        for f, m in pycompat.iteritems(getfiledata):
                            if m is not None and m[2][0] >= now_sec:
                                ambiguous_mtime[f] = (m[0], m[1], None)
                        for f, m in pycompat.iteritems(ambiguous_mtime):
                            getfiledata[f] = m

                repo.setparents(fp1, fp2)
                mergestatemod.recordupdates(
                    repo, mresult.actionsdict, branchmerge, getfiledata
                )
                # update completed, clear state
                util.unlink(repo.vfs.join(b'updatestate'))

                if not branchmerge:
                    repo.dirstate.setbranch(p2.branch())

        # If we're updating to a location, clean up any stale temporary includes
        # (ex: this happens during hg rebase --abort).
        if not branchmerge:
            sparse.prunetemporaryincludes(repo)

        if updatedirstate:
            repo.hook(
                b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
            )
        return stats
2224 2227
2225 2228
def merge(ctx, labels=None, force=False, wc=None):
    """Merge another topological branch into the working copy.

    force = whether the merge was run with 'merge --force' (deprecated)
    """
    repo = ctx.repo()
    rev = ctx.rev()
    return _update(
        repo,
        rev,
        branchmerge=True,
        force=force,
        mergeforce=force,
        labels=labels,
        wc=wc,
    )
2241 2244
2242 2245
def update(ctx, updatecheck=None, wc=None):
    """Do a regular update to the given commit, aborting if there are conflicts.

    The 'updatecheck' argument can be used to control what to do in case of
    conflicts.

    Note: This is a new, higher-level update() than the one that used to exist
    in this module. That function is now called _update(). You can hopefully
    replace your callers to use this new update(), or clean_update(), merge(),
    revert_to(), or graft().
    """
    labels = [b'working copy', b'destination', b'working copy parent']
    return _update(
        ctx.repo(),
        ctx.rev(),
        branchmerge=False,
        force=False,
        updatecheck=updatecheck,
        labels=labels,
        wc=wc,
    )
2263 2266
2264 2267
def clean_update(ctx, wc=None):
    """Do a clean update to the given commit.

    This involves updating to the commit and discarding any changes in the
    working copy.
    """
    repo = ctx.repo()
    rev = ctx.rev()
    return _update(repo, rev, branchmerge=False, force=True, wc=wc)
2272 2275
2273 2276
def revert_to(ctx, matcher=None, wc=None):
    """Revert the working copy to the given commit.

    The working copy will keep its current parent(s) but its content will
    be the same as in the given commit.
    """
    repo = ctx.repo()
    rev = ctx.rev()
    return _update(
        repo,
        rev,
        branchmerge=False,
        force=True,
        matcher=matcher,
        updatedirstate=False,
        wc=wc,
    )
2290 2293
2291 2294
def graft(
    repo,
    ctx,
    base=None,
    labels=None,
    keepparent=False,
    keepconflictparent=False,
    wctx=None,
):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    base - merge base, or ctx.p1() if not specified
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any
    keepconflictparent - if unresolved, keep parent used for the merge

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    # We also pass mergeancestor=True when base is the same revision as p1. 2)
    # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
    wctx = wctx or repo[None]
    pctx = wctx.p1()
    base = base or ctx.p1()
    mergeancestor = (
        repo.changelog.isancestor(pctx.node(), ctx.node())
        or pctx.rev() == base.rev()
    )

    stats = _update(
        repo,
        ctx.node(),
        True,  # branchmerge
        True,  # force
        base.node(),  # ancestor
        mergeancestor=mergeancestor,
        labels=labels,
        wc=wctx,
    )

    # Decide what the second parent of the working copy should be.
    if keepconflictparent and stats.unresolvedcount:
        pother = ctx.node()
    else:
        pother = repo.nullid
        parents = ctx.parents()
        if keepparent and len(parents) == 2 and base in parents:
            parents.remove(base)
            pother = parents[0].node()
    # Never set both parents equal to each other
    if pother == pctx.node():
        pother = repo.nullid

    if wctx.isinmemory():
        wctx.setparents(pctx.node(), pother)
        # fix up dirstate for copies and renames
        copies.graftcopies(wctx, ctx, base)
    else:
        with repo.dirstate.parentchange():
            repo.setparents(pctx.node(), pother)
            repo.dirstate.write(repo.currenttransaction())
            # fix up dirstate for copies and renames
            copies.graftcopies(wctx, ctx, base)
    return stats
2366 2369
2367 2370
def back_out(ctx, parent=None, wc=None):
    """Reverse-merge ``ctx`` into the working copy (i.e. back it out).

    ``parent`` selects which parent to back out towards and must be given
    for merge commits.
    """
    if parent is None:
        # NOTE(review): changectx.p2() appears to return a context object
        # rather than None, in which case this guard would never fire for
        # merge commits — confirm against the changectx API.
        if ctx.p2() is not None:
            msg = b"must specify parent of merge commit to back out"
            raise error.ProgrammingError(msg)
        parent = ctx.p1()
    return _update(
        ctx.repo(),
        parent,
        branchmerge=True,
        force=True,
        ancestor=ctx.node(),
        mergeancestor=False,
    )
2382 2385
2383 2386
def purge(
    repo,
    matcher,
    unknown=True,
    ignored=False,
    removeemptydirs=True,
    removefiles=True,
    abortonerror=False,
    noop=False,
    confirm=False,
):
    """Purge the working directory of untracked files.

    ``matcher`` is a matcher configured to scan the working directory -
    potentially a subset.

    ``unknown`` controls whether unknown files should be purged.

    ``ignored`` controls whether ignored files should be purged.

    ``removeemptydirs`` controls whether empty directories should be removed.

    ``removefiles`` controls whether files are removed.

    ``abortonerror`` causes an exception to be raised if an error occurs
    deleting a file or directory.

    ``noop`` controls whether to actually remove files. If not defined, actions
    will be taken.

    ``confirm`` ask confirmation before actually removing anything.

    Returns an iterable of relative paths in the working directory that were
    or would be removed.
    """

    def remove(removefn, path):
        # Best-effort removal: on failure either abort or just warn,
        # depending on ``abortonerror``.
        try:
            removefn(path)
        except OSError:
            m = _(b'%s cannot be removed') % path
            if abortonerror:
                raise error.Abort(m)
            else:
                repo.ui.warn(_(b'warning: %s\n') % m)

    # There's no API to copy a matcher. So mutate the passed matcher and
    # restore it when we're done.
    oldtraversedir = matcher.traversedir

    res = []

    try:
        if removeemptydirs:
            # record every traversed directory so empty ones can be removed
            directories = []
            matcher.traversedir = directories.append

        status = repo.status(match=matcher, ignored=ignored, unknown=unknown)

        if confirm:
            nb_ignored = len(status.ignored)
            nb_unknown = len(status.unknown)
            if nb_unknown and nb_ignored:
                msg = _(b"permanently delete %d unknown and %d ignored files?")
                msg %= (nb_unknown, nb_ignored)
            elif nb_unknown:
                msg = _(b"permanently delete %d unknown files?")
                msg %= nb_unknown
            elif nb_ignored:
                msg = _(b"permanently delete %d ignored files?")
                msg %= nb_ignored
            elif removeemptydirs:
                dir_count = 0
                for f in directories:
                    if matcher(f) and not repo.wvfs.listdir(f):
                        dir_count += 1
                if dir_count:
                    msg = _(
                        b"permanently delete at least %d empty directories?"
                    )
                    msg %= dir_count
                else:
                    # XXX we might be missing directory there
                    return res
            msg += b" (yN)$$ &Yes $$ &No"
            if repo.ui.promptchoice(msg, default=1) == 1:
                raise error.CanceledError(_(b'removal cancelled'))

        if removefiles:
            for f in sorted(status.unknown + status.ignored):
                if not noop:
                    repo.ui.note(_(b'removing file %s\n') % f)
                    remove(repo.wvfs.unlink, f)
                res.append(f)

        if removeemptydirs:
            # deepest directories first, so parents empty out progressively
            for f in sorted(directories, reverse=True):
                if matcher(f) and not repo.wvfs.listdir(f):
                    if not noop:
                        repo.ui.note(_(b'removing directory %s\n') % f)
                        remove(repo.wvfs.rmdir, f)
                    res.append(f)

        return res

    finally:
        matcher.traversedir = oldtraversedir
@@ -1,853 +1,883 b''
1 1 from __future__ import absolute_import
2 2
3 3 import collections
4 4 import errno
5 5 import shutil
6 6 import struct
7 7
8 8 from .i18n import _
9 9 from .node import (
10 10 bin,
11 11 hex,
12 12 nullrev,
13 13 )
14 14 from . import (
15 15 error,
16 16 filemerge,
17 17 pycompat,
18 18 util,
19 19 )
20 20 from .utils import hashutil
21 21
22 22 _pack = struct.pack
23 23 _unpack = struct.unpack
24 24
25 25
26 26 def _droponode(data):
27 27 # used for compatibility for v1
28 28 bits = data.split(b'\0')
29 29 bits = bits[:-2] + bits[-1:]
30 30 return b'\0'.join(bits)
31 31
32 32
def _filectxorabsent(hexnode, ctx, f):
    """Return ``ctx[f]``, or an absent filectx when ``hexnode`` is the
    null node (the file does not exist on that side of the merge)."""
    if hexnode != ctx.repo().nodeconstants.nullhex:
        return ctx[f]
    return filemerge.absentfilectx(ctx, f)
38 38
39 39
# Merge state record types. See ``mergestate`` docs for more.

####
# merge records which records metadata about a current merge
# exists only once in a mergestate
#####
RECORD_LOCAL = b'L'
RECORD_OTHER = b'O'
# record merge labels
RECORD_LABELS = b'l'

#####
# record extra information about files, with one entry containing info about one
# file. Hence, multiple of them can exists
#####
RECORD_FILE_VALUES = b'f'

#####
# merge records which represents state of individual merges of files/folders
# These are top level records for each entry containing merge related info.
# Each record of these has info about one file. Hence multiple of them can
# exists
#####
RECORD_MERGED = b'F'
RECORD_CHANGEDELETE_CONFLICT = b'C'
# the path was dir on one side of merge and file on another
RECORD_PATH_CONFLICT = b'P'

#####
# possible state which a merge entry can have. These are stored inside top-level
# merge records mentioned just above.
#####
MERGE_RECORD_UNRESOLVED = b'u'
MERGE_RECORD_RESOLVED = b'r'
MERGE_RECORD_UNRESOLVED_PATH = b'pu'
MERGE_RECORD_RESOLVED_PATH = b'pr'
# represents that the file was automatically merged in favor
# of other version. This info is used on commit.
# This is now deprecated and commit related information is now
# stored in RECORD_FILE_VALUES
MERGE_RECORD_MERGED_OTHER = b'o'

#####
# top level record which stores other unknown records. Multiple of these can
# exists
#####
RECORD_OVERRIDE = b't'

#####
# legacy records which are no longer used but kept to prevent breaking BC
#####
# This record was released in 5.4 and usage was removed in 5.5
LEGACY_RECORD_RESOLVED_OTHER = b'R'
# This record was released in 3.7 and usage was removed in 5.6
LEGACY_RECORD_DRIVER_RESOLVED = b'd'
# This record was released in 3.7 and usage was removed in 5.6
LEGACY_MERGE_DRIVER_STATE = b'm'
# This record was released in 3.7 and usage was removed in 5.6
LEGACY_MERGE_DRIVER_MERGE = b'D'
99 99
100 100
101 ACTION_FORGET = b'f'
102 ACTION_REMOVE = b'r'
103 ACTION_ADD = b'a'
104 ACTION_GET = b'g'
105 ACTION_PATH_CONFLICT = b'p'
106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
107 ACTION_ADD_MODIFIED = b'am'
108 ACTION_CREATED = b'c'
109 ACTION_DELETED_CHANGED = b'dc'
110 ACTION_CHANGED_DELETED = b'cd'
111 ACTION_MERGE = b'm'
112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
114 ACTION_KEEP = b'k'
class MergeAction(object):
    """represent an "action" merge need to take for a given file

    Attributes:

    _short: internal representation used to identify each action
            (the historical one/two-letter bytes code, e.g. b'g', b'dc')
    """

    def __init__(self, short):
        self._short = short

    def __hash__(self):
        return hash(self._short)

    def __repr__(self):
        return 'MergeAction<%s>' % self._short.decode('ascii')

    def __bytes__(self):
        return self._short

    def __eq__(self, other):
        # Comparing against None is common (unset defaults), so allow it.
        # Anything else must already be a MergeAction: the assert catches
        # legacy callers still passing the raw bytes code.
        if other is None:
            return False
        assert isinstance(other, MergeAction)
        return self._short == other._short

    def __ne__(self, other):
        # Needed for Python 2, which does not derive __ne__ from __eq__;
        # without it, `a != b` falls back to identity comparison and two
        # equal actions would compare unequal.
        return not self == other

    def __lt__(self, other):
        return self._short < other._short
129
130
ACTION_FORGET = MergeAction(b'f')
ACTION_REMOVE = MergeAction(b'r')
ACTION_ADD = MergeAction(b'a')
ACTION_GET = MergeAction(b'g')
ACTION_PATH_CONFLICT = MergeAction(b'p')
# must be the bytes b'pr' (not the str 'pr'): a str short code would break
# MergeAction.__bytes__/__repr__ and bytes comparisons on Python 3.
ACTION_PATH_CONFLICT_RESOLVE = MergeAction(b'pr')
ACTION_ADD_MODIFIED = MergeAction(b'am')
ACTION_CREATED = MergeAction(b'c')
ACTION_DELETED_CHANGED = MergeAction(b'dc')
ACTION_CHANGED_DELETED = MergeAction(b'cd')
ACTION_MERGE = MergeAction(b'm')
ACTION_LOCAL_DIR_RENAME_GET = MergeAction(b'dg')
ACTION_DIR_RENAME_MOVE_LOCAL = MergeAction(b'dm')
ACTION_KEEP = MergeAction(b'k')
# the file was absent on local side before merge and we should
# keep it absent (absent means file not present, it can be a result
# of file deletion, rename etc.)
ACTION_KEEP_ABSENT = MergeAction(b'ka')
# the file is absent on the ancestor and remote side of the merge
# hence this file is new and we should keep it
ACTION_KEEP_NEW = MergeAction(b'kn')
ACTION_EXEC = MergeAction(b'e')
ACTION_CREATED_MERGE = MergeAction(b'cm')

# actions which are no op
NO_OP_ACTIONS = (
    ACTION_KEEP,
    ACTION_KEEP_ABSENT,
    ACTION_KEEP_NEW,
)

# Used by convert to detect situations it does not like, not sure what the
# exact criteria is
CONVERT_MERGE_ACTIONS = (
    ACTION_MERGE,
    ACTION_DIR_RENAME_MOVE_LOCAL,
    ACTION_CHANGED_DELETED,
    ACTION_DELETED_CHANGED,
)
140 170
141 171
class _mergestate_base(object):
    """track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    P: a path conflict (file vs directory)
    f: a (filename, dictionary) tuple of optional values for a given file
    l: the labels for the parts of the merge.

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    o: file was merged in favor of other parent of merge (DEPRECATED)

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    """

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        # filename -> list of record fields; index 0 is the resolution state
        self._state = {}
        # filename -> dict of extra key/value info (see addcommitinfo/extras)
        self._stateextras = collections.defaultdict(dict)
        # nodes of the two merge parents, filled in by start()
        self._local = None
        self._other = None
        self._labels = None
        # contains a mapping of form:
        # {filename: (merge_return_value, action_to_be_performed)}
        # these are results of re-running merge process
        # this dict is used to perform actions on dirstate caused by re-running
        # the merge
        self._results = {}
        self._dirty = False

    def reset(self):
        # no persistent data in the base class; subclasses discard their
        # stored state here
        pass

    def start(self, node, other, labels=None):
        """record the two merge parents (and optional merge labels)"""
        self._local = node
        self._other = other
        self._labels = labels

    @util.propertycache
    def local(self):
        """node of the local merge parent; requires start() or a prior read"""
        if self._local is None:
            msg = b"local accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._local

    @util.propertycache
    def localctx(self):
        """changectx of the local merge parent"""
        return self._repo[self.local]

    @util.propertycache
    def other(self):
        """node of the other merge parent; requires start() or a prior read"""
        if self._other is None:
            msg = b"other accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._other

    @util.propertycache
    def otherctx(self):
        """changectx of the other merge parent"""
        return self._repo[self.other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        return bool(self._local) or bool(self._state)

    def commit(self):
        """Write current state on disk (if necessary)"""

    @staticmethod
    def getlocalkey(path):
        """hash the path of a local file context for storage in the .hg/merge
        directory."""

        return hex(hashutil.sha1(path).digest())

    def _make_backup(self, fctx, localkey):
        # subclass hook: save fctx's data under localkey
        raise NotImplementedError()

    def _restore_backup(self, fctx, localkey, flags):
        # subclass hook: write the saved data back through fctx
        raise NotImplementedError()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file to the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            # nullhex marks "no local version to back up"
            localkey = self._repo.nodeconstants.nullhex
        else:
            localkey = mergestate.getlocalkey(fcl.path())
            self._make_backup(fcl, localkey)
        self._state[fd] = [
            MERGE_RECORD_UNRESOLVED,
            localkey,
            fcl.path(),
            fca.path(),
            hex(fca.filenode()),
            fco.path(),
            hex(fco.filenode()),
            fcl.flags(),
        ]
        self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
        self._dirty = True

    def addpathconflict(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
        self._dirty = True

    def addcommitinfo(self, path, data):
        """stores information which is required at commit
        into _stateextras"""
        self._stateextras[path].update(data)
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        # the first record field is the resolution state
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        """set the resolution state of dfile (e.g. MERGE_RECORD_RESOLVED)"""
        self._state[dfile][0] = state
        self._dirty = True

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in pycompat.iteritems(self._state):
            if entry[0] in (
                MERGE_RECORD_UNRESOLVED,
                MERGE_RECORD_UNRESOLVED_PATH,
            ):
                yield f

    def allextras(self):
        """return all extras information stored with the mergestate"""
        return self._stateextras

    def extras(self, filename):
        """return extras stored with the mergestate for the given filename"""
        return self._stateextras[filename]

    def resolve(self, dfile, wctx):
        """run merge process for dfile

        Returns the exit code of the merge."""
        if self[dfile] in (
            MERGE_RECORD_RESOLVED,
            LEGACY_RECORD_DRIVER_RESOLVED,
        ):
            return 0
        stateentry = self._state[dfile]
        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get(b'ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = _filectxorabsent(localkey, wctx, dfile)
        fco = _filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
            if fca.rev() == nullrev and flags != flo:
                self._repo.ui.warn(
                    _(
                        b'warning: cannot merge flags for %s '
                        b'without common ancestor - keeping local flags\n'
                    )
                    % afile
                )
            elif flags == fla:
                flags = flo
        # restore local
        if localkey != self._repo.nodeconstants.nullhex:
            self._restore_backup(wctx[dfile], localkey, flags)
        else:
            wctx[dfile].remove(ignoremissing=True)
        merge_ret, deleted = filemerge.filemerge(
            self._repo,
            wctx,
            self._local,
            lfile,
            fcd,
            fco,
            fca,
            labels=self._labels,
        )
        if merge_ret is None:
            # If return value of merge is None, then there are no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not merge_ret:
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        action = None
        if deleted:
            if fcd.isabsent():
                # dc: local picked. Need to drop if present, which may
                # happen on re-resolves.
                action = ACTION_FORGET
            else:
                # cd: remote picked (or otherwise deleted)
                action = ACTION_REMOVE
        else:
            if fcd.isabsent():  # dc: remote picked
                action = ACTION_GET
            elif fco.isabsent():  # cd: local picked
                if dfile in self.localctx:
                    action = ACTION_ADD_MODIFIED
                else:
                    action = ACTION_ADD
            # else: regular merges (no action necessary)
        self._results[dfile] = merge_ret, action

        return merge_ret

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in pycompat.itervalues(self._results):
            if r is None:
                # None return value from filemerge means "no real conflict"
                updated += 1
            elif r == 0:
                if action == ACTION_REMOVE:
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {
            ACTION_REMOVE: [],
            ACTION_FORGET: [],
            ACTION_ADD: [],
            ACTION_ADD_MODIFIED: [],
            ACTION_GET: [],
        }
        for f, (r, action) in pycompat.iteritems(self._results):
            if action is not None:
                actions[action].append((f, None, b"merge result"))
        return actions
437 467
class mergestate(_mergestate_base):
    """merge state persisted on disk, under the .hg/merge directory"""

    statepathv1 = b'merge/state'
    statepathv2 = b'merge/state2'

    @staticmethod
    def clean(repo):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset()
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function processes "record" entries produced by the
        de-serialization of the on disk file.
        """
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == RECORD_LOCAL:
                self._local = bin(record)
            elif rtype == RECORD_OTHER:
                self._other = bin(record)
            elif rtype == LEGACY_MERGE_DRIVER_STATE:
                pass
            elif rtype in (
                RECORD_MERGED,
                RECORD_CHANGEDELETE_CONFLICT,
                RECORD_PATH_CONFLICT,
                LEGACY_MERGE_DRIVER_MERGE,
                LEGACY_RECORD_RESOLVED_OTHER,
            ):
                bits = record.split(b'\0')
                # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
                # and we now store related information in _stateextras, so
                # let's write to _stateextras directly
                if bits[1] == MERGE_RECORD_MERGED_OTHER:
                    self._stateextras[bits[0]][b'filenode-source'] = b'other'
                else:
                    self._state[bits[0]] = bits[1:]
            elif rtype == RECORD_FILE_VALUES:
                filename, rawextras = record.split(b'\0', 1)
                extraparts = rawextras.split(b'\0')
                extras = {}
                i = 0
                # extraparts alternates key, value, key, value, ...
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split(b'\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                # uppercase record types are mandatory; abort if unknown
                unsupported.add(rtype)

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by versions prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is no contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extra data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append((RECORD_OTHER, mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == RECORD_MERGED:
                    bits = r[1].split(b'\0')
                    bits.insert(-2, b'')
                    v1records[idx] = (r[0], b'\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        """return True if every v1 record also appears in the v2 records"""
        oldv2 = set()  # old format version of v2 record
        for rec in v2records:
            if rec[0] == RECORD_LOCAL:
                oldv2.add(rec)
            elif rec[0] == RECORD_MERGED:
                # drop the onode data (not contained in v1)
                oldv2.add((RECORD_MERGED, _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    # first line is the local node, the rest are "F" records
                    records.append((RECORD_LOCAL, l[:-1]))
                else:
                    records.append((RECORD_MERGED, l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off : off + 1]
                off += 1
                # 4-byte big-endian length prefix
                length = _unpack(b'>I', data[off : (off + 4)])[0]
                off += 4
                record = data[off : (off + length)]
                off += length
                if rtype == RECORD_OVERRIDE:
                    # unwrap the 't' workaround record (see docstring)
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def commit(self):
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        """serialize the current state into a list of (TYPE, data) records"""
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif (
                v[1] == self._repo.nodeconstants.nullhex
                or v[6] == self._repo.nodeconstants.nullhex
            ):
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, b'wb')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == RECORD_LOCAL
        f.write(hex(self._local) + b'\n')
        for rtype, data in irecords:
            if rtype == RECORD_MERGED:
                f.write(b'%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
        f = self._repo.vfs(self.statepathv2, b'wb')
        for key, data in records:
            assert len(key) == 1
            if key not in allowlist:
                key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
            format = b'>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def _make_backup(self, fctx, localkey):
        # keep a copy of the local file content under .hg/merge/<localkey>
        self._repo.vfs.write(b'merge/' + localkey, fctx.data())

    def _restore_backup(self, fctx, localkey, flags):
        with self._repo.vfs(b'merge/' + localkey) as f:
            fctx.write(f.read(), flags)

    def reset(self):
        # remove the whole on-disk merge state directory, ignoring errors
        shutil.rmtree(self._repo.vfs.join(b'merge'), True)
709 739
710 740
class memmergestate(_mergestate_base):
    """A merge state that never touches the filesystem.

    File backups normally written under the ``.hg/merge`` directory are
    instead retained in an in-memory mapping keyed by ``localkey``.
    """

    def __init__(self, repo):
        super(memmergestate, self).__init__(repo)
        # localkey -> backed-up file content (bytes)
        self._backups = {}

    def _make_backup(self, fctx, localkey):
        self._backups[localkey] = fctx.data()

    def _restore_backup(self, fctx, localkey, flags):
        backup = self._backups[localkey]
        fctx.write(backup, flags)
721 751
722 752
def recordupdates(repo, actions, branchmerge, getfiledata):
    """record merge actions to the dirstate

    ``actions`` maps each merge action to a list of ``(file, args, msg)``
    tuples.  ``branchmerge`` is true for a real branch merge (files may be
    tracked in both parents), false for a plain update.  ``getfiledata``,
    when provided, maps a filename to cached parent file data used for
    ACTION_GET entries in the non-merge case.
    """
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
        else:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

    # resolve path conflicts
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0, origf0) = args
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
        else:
            repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.update_file(
                f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
            )
        else:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)

    # exec change
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.update_file(
            f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
        )

    # keep
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # keep deleted
    for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
        pass

    # keep new
    for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
        pass

    # get
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            # tracked in p1 can be True also but update_file should not care
            old_entry = repo.dirstate.get_entry(f)
            p1_tracked = old_entry.any_tracked and not old_entry.added
            repo.dirstate.update_file(
                f,
                p1_tracked=p1_tracked,
                wc_tracked=True,
                p2_info=True,
            )
        else:
            parentfiledata = getfiledata[f] if getfiledata else None
            repo.dirstate.update_file(
                f,
                p1_tracked=True,
                wc_tracked=True,
                parentfiledata=parentfiledata,
            )

    # merge
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            p1_tracked = f1 == f
            repo.dirstate.update_file(
                f,
                p1_tracked=p1_tracked,
                wc_tracked=True,
                p2_info=True,
            )
            if f1 != f2:  # copy/rename
                if move:
                    repo.dirstate.update_file(
                        f1, p1_tracked=True, wc_tracked=False
                    )
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f:  # file not locally copied/moved
                repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
            if move:
                repo.dirstate.update_file(
                    f1, p1_tracked=False, wc_tracked=False
                )

    # directory rename, move local
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
            repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
            repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)

    # directory rename, get
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
General Comments 0
You need to be logged in to leave comments. Login now