mergeresult: introduce getfile() and use it where required...
Pulkit Goyal
r45904:4c6004af default
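
The hunk below (old lines 574-575) replaces direct reads of mresult.actions with the new mergeresult.getfile() accessor. As orientation, here is a minimal, illustrative sketch of what such an accessor provides at these call sites; the real mergeresult class lives in mercurial/mergestate.py and its internal storage differs from this toy dictionary, so only the call-site shape is taken from the diff itself.

# Illustrative sketch only -- not the actual mercurial.mergestate implementation.
class mergeresult(object):
    def __init__(self):
        # filename -> (action, args, message), as filled in by calculateupdates()
        self._actions = {}

    def addfile(self, filename, action, data, message):
        self._actions[filename] = (action, data, message)

    def getfile(self, filename, default_return=None):
        """Return the (action, args, message) recorded for filename,
        or default_return if no action is registered for it."""
        return self._actions.get(filename, default_return)

# Call sites in overrides.py therefore change from reaching into the dict:
#     (lm, largs, lmsg) = mresult.actions.get(lfile, (None, None, None))
# to going through the accessor:
#     (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))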
@@ -1,1836 +1,1836 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial.pycompat import open
18 18
19 19 from mercurial.hgweb import webcommands
20 20
21 21 from mercurial import (
22 22 archival,
23 23 cmdutil,
24 24 copies as copiesmod,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 exthelper,
29 29 filemerge,
30 30 hg,
31 31 logcmdutil,
32 32 match as matchmod,
33 33 merge,
34 34 mergestate as mergestatemod,
35 35 pathutil,
36 36 pycompat,
37 37 scmutil,
38 38 smartset,
39 39 subrepo,
40 40 upgrade,
41 41 url as urlmod,
42 42 util,
43 43 )
44 44
45 45 from . import (
46 46 lfcommands,
47 47 lfutil,
48 48 storefactory,
49 49 )
50 50
51 51 eh = exthelper.exthelper()
52 52
53 53 lfstatus = lfutil.lfstatus
54 54
55 55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
56 56
57 57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
58 58
59 59
60 60 def composelargefilematcher(match, manifest):
61 61 '''create a matcher that matches only the largefiles in the original
62 62 matcher'''
63 63 m = copy.copy(match)
64 64 lfile = lambda f: lfutil.standin(f) in manifest
65 65 m._files = [lf for lf in m._files if lfile(lf)]
66 66 m._fileset = set(m._files)
67 67 m.always = lambda: False
68 68 origmatchfn = m.matchfn
69 69 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
70 70 return m
71 71
72 72
73 73 def composenormalfilematcher(match, manifest, exclude=None):
74 74 excluded = set()
75 75 if exclude is not None:
76 76 excluded.update(exclude)
77 77
78 78 m = copy.copy(match)
79 79 notlfile = lambda f: not (
80 80 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
81 81 )
82 82 m._files = [lf for lf in m._files if notlfile(lf)]
83 83 m._fileset = set(m._files)
84 84 m.always = lambda: False
85 85 origmatchfn = m.matchfn
86 86 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
87 87 return m
88 88
89 89
90 90 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
91 91 large = opts.get('large')
92 92 lfsize = lfutil.getminsize(
93 93 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
94 94 )
95 95
96 96 lfmatcher = None
97 97 if lfutil.islfilesrepo(repo):
98 98 lfpats = ui.configlist(lfutil.longname, b'patterns')
99 99 if lfpats:
100 100 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
101 101
102 102 lfnames = []
103 103 m = matcher
104 104
105 105 wctx = repo[None]
106 106 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
107 107 exact = m.exact(f)
108 108 lfile = lfutil.standin(f) in wctx
109 109 nfile = f in wctx
110 110 exists = lfile or nfile
111 111
112 112 # Don't warn the user when they attempt to add a normal tracked file.
113 113 # The normal add code will do that for us.
114 114 if exact and exists:
115 115 if lfile:
116 116 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
117 117 continue
118 118
119 119 if (exact or not exists) and not lfutil.isstandin(f):
120 120 # In case the file was removed previously, but not committed
121 121 # (issue3507)
122 122 if not repo.wvfs.exists(f):
123 123 continue
124 124
125 125 abovemin = (
126 126 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
127 127 )
128 128 if large or abovemin or (lfmatcher and lfmatcher(f)):
129 129 lfnames.append(f)
130 130 if ui.verbose or not exact:
131 131 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
132 132
133 133 bad = []
134 134
135 135 # Need to lock, otherwise there could be a race condition between
136 136 # when standins are created and added to the repo.
137 137 with repo.wlock():
138 138 if not opts.get('dry_run'):
139 139 standins = []
140 140 lfdirstate = lfutil.openlfdirstate(ui, repo)
141 141 for f in lfnames:
142 142 standinname = lfutil.standin(f)
143 143 lfutil.writestandin(
144 144 repo,
145 145 standinname,
146 146 hash=b'',
147 147 executable=lfutil.getexecutable(repo.wjoin(f)),
148 148 )
149 149 standins.append(standinname)
150 150 if lfdirstate[f] == b'r':
151 151 lfdirstate.normallookup(f)
152 152 else:
153 153 lfdirstate.add(f)
154 154 lfdirstate.write()
155 155 bad += [
156 156 lfutil.splitstandin(f)
157 157 for f in repo[None].add(standins)
158 158 if f in m.files()
159 159 ]
160 160
161 161 added = [f for f in lfnames if f not in bad]
162 162 return added, bad
163 163
164 164
165 165 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
166 166 after = opts.get('after')
167 167 m = composelargefilematcher(matcher, repo[None].manifest())
168 168 with lfstatus(repo):
169 169 s = repo.status(match=m, clean=not isaddremove)
170 170 manifest = repo[None].manifest()
171 171 modified, added, deleted, clean = [
172 172 [f for f in list if lfutil.standin(f) in manifest]
173 173 for list in (s.modified, s.added, s.deleted, s.clean)
174 174 ]
175 175
176 176 def warn(files, msg):
177 177 for f in files:
178 178 ui.warn(msg % uipathfn(f))
179 179 return int(len(files) > 0)
180 180
181 181 if after:
182 182 remove = deleted
183 183 result = warn(
184 184 modified + added + clean, _(b'not removing %s: file still exists\n')
185 185 )
186 186 else:
187 187 remove = deleted + clean
188 188 result = warn(
189 189 modified,
190 190 _(
191 191 b'not removing %s: file is modified (use -f'
192 192 b' to force removal)\n'
193 193 ),
194 194 )
195 195 result = (
196 196 warn(
197 197 added,
198 198 _(
199 199 b'not removing %s: file has been marked for add'
200 200 b' (use forget to undo)\n'
201 201 ),
202 202 )
203 203 or result
204 204 )
205 205
206 206 # Need to lock because standin files are deleted then removed from the
207 207 # repository and we could race in-between.
208 208 with repo.wlock():
209 209 lfdirstate = lfutil.openlfdirstate(ui, repo)
210 210 for f in sorted(remove):
211 211 if ui.verbose or not m.exact(f):
212 212 ui.status(_(b'removing %s\n') % uipathfn(f))
213 213
214 214 if not dryrun:
215 215 if not after:
216 216 repo.wvfs.unlinkpath(f, ignoremissing=True)
217 217
218 218 if dryrun:
219 219 return result
220 220
221 221 remove = [lfutil.standin(f) for f in remove]
222 222 # If this is being called by addremove, let the original addremove
223 223 # function handle this.
224 224 if not isaddremove:
225 225 for f in remove:
226 226 repo.wvfs.unlinkpath(f, ignoremissing=True)
227 227 repo[None].forget(remove)
228 228
229 229 for f in remove:
230 230 lfutil.synclfdirstate(
231 231 repo, lfdirstate, lfutil.splitstandin(f), False
232 232 )
233 233
234 234 lfdirstate.write()
235 235
236 236 return result
237 237
238 238
239 239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 240 # appear at their right place in the manifests.
241 241 @eh.wrapfunction(webcommands, b'decodepath')
242 242 def decodepath(orig, path):
243 243 return lfutil.splitstandin(path) or path
244 244
245 245
246 246 # -- Wrappers: modify existing commands --------------------------------
247 247
248 248
249 249 @eh.wrapcommand(
250 250 b'add',
251 251 opts=[
252 252 (b'', b'large', None, _(b'add as largefile')),
253 253 (b'', b'normal', None, _(b'add as normal file')),
254 254 (
255 255 b'',
256 256 b'lfsize',
257 257 b'',
258 258 _(
259 259 b'add all files above this size (in megabytes) '
260 260 b'as largefiles (default: 10)'
261 261 ),
262 262 ),
263 263 ],
264 264 )
265 265 def overrideadd(orig, ui, repo, *pats, **opts):
266 266 if opts.get('normal') and opts.get('large'):
267 267 raise error.Abort(_(b'--normal cannot be used with --large'))
268 268 return orig(ui, repo, *pats, **opts)
269 269
270 270
271 271 @eh.wrapfunction(cmdutil, b'add')
272 272 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
273 273 # The --normal flag short circuits this override
274 274 if opts.get('normal'):
275 275 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
276 276
277 277 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
278 278 normalmatcher = composenormalfilematcher(
279 279 matcher, repo[None].manifest(), ladded
280 280 )
281 281 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
282 282
283 283 bad.extend(f for f in lbad)
284 284 return bad
285 285
286 286
287 287 @eh.wrapfunction(cmdutil, b'remove')
288 288 def cmdutilremove(
289 289 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
290 290 ):
291 291 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
292 292 result = orig(
293 293 ui,
294 294 repo,
295 295 normalmatcher,
296 296 prefix,
297 297 uipathfn,
298 298 after,
299 299 force,
300 300 subrepos,
301 301 dryrun,
302 302 )
303 303 return (
304 304 removelargefiles(
305 305 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
306 306 )
307 307 or result
308 308 )
309 309
310 310
311 311 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
312 312 def overridestatusfn(orig, repo, rev2, **opts):
313 313 with lfstatus(repo._repo):
314 314 return orig(repo, rev2, **opts)
315 315
316 316
317 317 @eh.wrapcommand(b'status')
318 318 def overridestatus(orig, ui, repo, *pats, **opts):
319 319 with lfstatus(repo):
320 320 return orig(ui, repo, *pats, **opts)
321 321
322 322
323 323 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
324 324 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
325 325 with lfstatus(repo._repo):
326 326 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
327 327
328 328
329 329 @eh.wrapcommand(b'log')
330 330 def overridelog(orig, ui, repo, *pats, **opts):
331 331 def overridematchandpats(
332 332 orig,
333 333 ctx,
334 334 pats=(),
335 335 opts=None,
336 336 globbed=False,
337 337 default=b'relpath',
338 338 badfn=None,
339 339 ):
340 340 """Matcher that merges root directory with .hglf, suitable for log.
341 341 It is still possible to match .hglf directly.
342 342 For any listed files run log on the standin too.
343 343 matchfn tries both the given filename and with .hglf stripped.
344 344 """
345 345 if opts is None:
346 346 opts = {}
347 347 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
348 348 m, p = copy.copy(matchandpats)
349 349
350 350 if m.always():
351 351 # We want to match everything anyway, so there's no benefit trying
352 352 # to add standins.
353 353 return matchandpats
354 354
355 355 pats = set(p)
356 356
357 357 def fixpats(pat, tostandin=lfutil.standin):
358 358 if pat.startswith(b'set:'):
359 359 return pat
360 360
361 361 kindpat = matchmod._patsplit(pat, None)
362 362
363 363 if kindpat[0] is not None:
364 364 return kindpat[0] + b':' + tostandin(kindpat[1])
365 365 return tostandin(kindpat[1])
366 366
367 367 cwd = repo.getcwd()
368 368 if cwd:
369 369 hglf = lfutil.shortname
370 370 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
371 371
372 372 def tostandin(f):
373 373 # The file may already be a standin, so truncate the back
374 374 # prefix and test before mangling it. This avoids turning
375 375 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
376 376 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
377 377 return f
378 378
379 379 # An absolute path is from outside the repo, so truncate the
380 380 # path to the root before building the standin. Otherwise cwd
381 381 # is somewhere in the repo, relative to root, and needs to be
382 382 # prepended before building the standin.
383 383 if os.path.isabs(cwd):
384 384 f = f[len(back) :]
385 385 else:
386 386 f = cwd + b'/' + f
387 387 return back + lfutil.standin(f)
388 388
389 389 else:
390 390
391 391 def tostandin(f):
392 392 if lfutil.isstandin(f):
393 393 return f
394 394 return lfutil.standin(f)
395 395
396 396 pats.update(fixpats(f, tostandin) for f in p)
397 397
398 398 for i in range(0, len(m._files)):
399 399 # Don't add '.hglf' to m.files, since that is already covered by '.'
400 400 if m._files[i] == b'.':
401 401 continue
402 402 standin = lfutil.standin(m._files[i])
403 403 # If the "standin" is a directory, append instead of replace to
404 404 # support naming a directory on the command line with only
405 405 # largefiles. The original directory is kept to support normal
406 406 # files.
407 407 if standin in ctx:
408 408 m._files[i] = standin
409 409 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
410 410 m._files.append(standin)
411 411
412 412 m._fileset = set(m._files)
413 413 m.always = lambda: False
414 414 origmatchfn = m.matchfn
415 415
416 416 def lfmatchfn(f):
417 417 lf = lfutil.splitstandin(f)
418 418 if lf is not None and origmatchfn(lf):
419 419 return True
420 420 r = origmatchfn(f)
421 421 return r
422 422
423 423 m.matchfn = lfmatchfn
424 424
425 425 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
426 426 return m, pats
427 427
428 428 # For hg log --patch, the match object is used in two different senses:
429 429 # (1) to determine what revisions should be printed out, and
430 430 # (2) to determine what files to print out diffs for.
431 431 # The magic matchandpats override should be used for case (1) but not for
432 432 # case (2).
433 433 oldmatchandpats = scmutil.matchandpats
434 434
435 435 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
436 436 wctx = repo[None]
437 437 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
438 438 return lambda ctx: match
439 439
440 440 wrappedmatchandpats = extensions.wrappedfunction(
441 441 scmutil, b'matchandpats', overridematchandpats
442 442 )
443 443 wrappedmakefilematcher = extensions.wrappedfunction(
444 444 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
445 445 )
446 446 with wrappedmatchandpats, wrappedmakefilematcher:
447 447 return orig(ui, repo, *pats, **opts)
448 448
449 449
450 450 @eh.wrapcommand(
451 451 b'verify',
452 452 opts=[
453 453 (
454 454 b'',
455 455 b'large',
456 456 None,
457 457 _(b'verify that all largefiles in current revision exists'),
458 458 ),
459 459 (
460 460 b'',
461 461 b'lfa',
462 462 None,
463 463 _(b'verify largefiles in all revisions, not just current'),
464 464 ),
465 465 (
466 466 b'',
467 467 b'lfc',
468 468 None,
469 469 _(b'verify local largefile contents, not just existence'),
470 470 ),
471 471 ],
472 472 )
473 473 def overrideverify(orig, ui, repo, *pats, **opts):
474 474 large = opts.pop('large', False)
475 475 all = opts.pop('lfa', False)
476 476 contents = opts.pop('lfc', False)
477 477
478 478 result = orig(ui, repo, *pats, **opts)
479 479 if large or all or contents:
480 480 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
481 481 return result
482 482
483 483
484 484 @eh.wrapcommand(
485 485 b'debugstate',
486 486 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
487 487 )
488 488 def overridedebugstate(orig, ui, repo, *pats, **opts):
489 489 large = opts.pop('large', False)
490 490 if large:
491 491
492 492 class fakerepo(object):
493 493 dirstate = lfutil.openlfdirstate(ui, repo)
494 494
495 495 orig(ui, fakerepo, *pats, **opts)
496 496 else:
497 497 orig(ui, repo, *pats, **opts)
498 498
499 499
500 500 # Register the MERGE_ACTION_LARGEFILE_MARK_REMOVED in emptyactions() return type
501 501 @eh.wrapfunction(merge, b'emptyactions')
502 502 def overrideemptyactions(origfn):
503 503 ret = origfn()
504 504 ret[MERGE_ACTION_LARGEFILE_MARK_REMOVED] = []
505 505 return ret
506 506
507 507
508 508 # Before starting the manifest merge, merge.updates will call
509 509 # _checkunknownfile to check if there are any files in the merged-in
510 510 # changeset that collide with unknown files in the working copy.
511 511 #
512 512 # The largefiles are seen as unknown, so this prevents us from merging
513 513 # in a file 'foo' if we already have a largefile with the same name.
514 514 #
515 515 # The overridden function filters the unknown files by removing any
516 516 # largefiles. This makes the merge proceed and we can then handle this
517 517 # case further in the overridden calculateupdates function below.
518 518 @eh.wrapfunction(merge, b'_checkunknownfile')
519 519 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
520 520 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
521 521 return False
522 522 return origfn(repo, wctx, mctx, f, f2)
523 523
524 524
525 525 # The manifest merge handles conflicts on the manifest level. We want
526 526 # to handle changes in largefile-ness of files at this level too.
527 527 #
528 528 # The strategy is to run the original calculateupdates and then process
529 529 # the action list it outputs. There are two cases we need to deal with:
530 530 #
531 531 # 1. Normal file in p1, largefile in p2. Here the largefile is
532 532 # detected via its standin file, which will enter the working copy
533 533 # with a "get" action. It is not "merge" since the standin is all
534 534 # Mercurial is concerned with at this level -- the link to the
535 535 # existing normal file is not relevant here.
536 536 #
537 537 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
538 538 # since the largefile will be present in the working copy and
539 539 # different from the normal file in p2. Mercurial therefore
540 540 # triggers a merge action.
541 541 #
542 542 # In both cases, we prompt the user and emit new actions to either
543 543 # remove the standin (if the normal file was kept) or to remove the
544 544 # normal file and get the standin (if the largefile was kept). The
545 545 # default prompt answer is to use the largefile version since it was
546 546 # presumably changed on purpose.
547 547 #
548 548 # Finally, the merge.applyupdates function will then take care of
549 549 # writing the files into the working copy and lfcommands.updatelfiles
550 550 # will update the largefiles.
551 551 @eh.wrapfunction(merge, b'calculateupdates')
552 552 def overridecalculateupdates(
553 553 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
554 554 ):
555 555 overwrite = force and not branchmerge
556 556 mresult = origfn(
557 557 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
558 558 )
559 559
560 560 if overwrite:
561 561 return mresult
562 562
563 563 # Convert to dictionary with filename as key and action as value.
564 564 lfiles = set()
565 565 for f in mresult.actions:
566 566 splitstandin = lfutil.splitstandin(f)
567 567 if splitstandin is not None and splitstandin in p1:
568 568 lfiles.add(splitstandin)
569 569 elif lfutil.standin(f) in p1:
570 570 lfiles.add(f)
571 571
572 572 for lfile in sorted(lfiles):
573 573 standin = lfutil.standin(lfile)
574 -   (lm, largs, lmsg) = mresult.actions.get(lfile, (None, None, None))
575 -   (sm, sargs, smsg) = mresult.actions.get(standin, (None, None, None))
574 +   (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
575 +   (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
576 576 if sm in (b'g', b'dc') and lm != b'r':
577 577 if sm == b'dc':
578 578 f1, f2, fa, move, anc = sargs
579 579 sargs = (p2[f2].flags(), False)
580 580 # Case 1: normal file in the working copy, largefile in
581 581 # the second parent
582 582 usermsg = (
583 583 _(
584 584 b'remote turned local normal file %s into a largefile\n'
585 585 b'use (l)argefile or keep (n)ormal file?'
586 586 b'$$ &Largefile $$ &Normal file'
587 587 )
588 588 % lfile
589 589 )
590 590 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
591 591 mresult.addfile(lfile, b'r', None, b'replaced by standin')
592 592 mresult.addfile(standin, b'g', sargs, b'replaces standin')
593 593 else: # keep local normal file
594 594 mresult.addfile(lfile, b'k', None, b'replaces standin')
595 595 if branchmerge:
596 596 mresult.addfile(
597 597 standin, b'k', None, b'replaced by non-standin',
598 598 )
599 599 else:
600 600 mresult.addfile(
601 601 standin, b'r', None, b'replaced by non-standin',
602 602 )
603 603 elif lm in (b'g', b'dc') and sm != b'r':
604 604 if lm == b'dc':
605 605 f1, f2, fa, move, anc = largs
606 606 largs = (p2[f2].flags(), False)
607 607 # Case 2: largefile in the working copy, normal file in
608 608 # the second parent
609 609 usermsg = (
610 610 _(
611 611 b'remote turned local largefile %s into a normal file\n'
612 612 b'keep (l)argefile or use (n)ormal file?'
613 613 b'$$ &Largefile $$ &Normal file'
614 614 )
615 615 % lfile
616 616 )
617 617 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
618 618 if branchmerge:
619 619 # largefile can be restored from standin safely
620 620 mresult.addfile(
621 621 lfile, b'k', None, b'replaced by standin',
622 622 )
623 623 mresult.addfile(standin, b'k', None, b'replaces standin')
624 624 else:
625 625 # "lfile" should be marked as "removed" without
626 626 # removal of itself
627 627 mresult.addfile(
628 628 lfile,
629 629 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
630 630 None,
631 631 b'forget non-standin largefile',
632 632 )
633 633
634 634 # linear-merge should treat this largefile as 're-added'
635 635 mresult.addfile(standin, b'a', None, b'keep standin')
636 636 else: # pick remote normal file
637 637 mresult.addfile(lfile, b'g', largs, b'replaces standin')
638 638 mresult.addfile(
639 639 standin, b'r', None, b'replaced by non-standin',
640 640 )
641 641
642 642 return mresult
643 643
644 644
645 645 @eh.wrapfunction(mergestatemod, b'recordupdates')
646 646 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
647 647 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
648 648 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
649 649 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
650 650 # this should be executed before 'orig', to execute 'remove'
651 651 # before all other actions
652 652 repo.dirstate.remove(lfile)
653 653 # make sure lfile doesn't get synclfdirstate'd as normal
654 654 lfdirstate.add(lfile)
655 655 lfdirstate.write()
656 656
657 657 return orig(repo, actions, branchmerge, getfiledata)
658 658
659 659
660 660 # Override filemerge to prompt the user about how they wish to merge
661 661 # largefiles. This will handle identical edits without prompting the user.
662 662 @eh.wrapfunction(filemerge, b'_filemerge')
663 663 def overridefilemerge(
664 664 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
665 665 ):
666 666 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
667 667 return origfn(
668 668 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
669 669 )
670 670
671 671 ahash = lfutil.readasstandin(fca).lower()
672 672 dhash = lfutil.readasstandin(fcd).lower()
673 673 ohash = lfutil.readasstandin(fco).lower()
674 674 if (
675 675 ohash != ahash
676 676 and ohash != dhash
677 677 and (
678 678 dhash == ahash
679 679 or repo.ui.promptchoice(
680 680 _(
681 681 b'largefile %s has a merge conflict\nancestor was %s\n'
682 682 b'you can keep (l)ocal %s or take (o)ther %s.\n'
683 683 b'what do you want to do?'
684 684 b'$$ &Local $$ &Other'
685 685 )
686 686 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
687 687 0,
688 688 )
689 689 == 1
690 690 )
691 691 ):
692 692 repo.wwrite(fcd.path(), fco.data(), fco.flags())
693 693 return True, 0, False
694 694
695 695
696 696 @eh.wrapfunction(copiesmod, b'pathcopies')
697 697 def copiespathcopies(orig, ctx1, ctx2, match=None):
698 698 copies = orig(ctx1, ctx2, match=match)
699 699 updated = {}
700 700
701 701 for k, v in pycompat.iteritems(copies):
702 702 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
703 703
704 704 return updated
705 705
706 706
707 707 # Copy first changes the matchers to match standins instead of
708 708 # largefiles. Then it overrides util.copyfile in that function it
709 709 # checks if the destination largefile already exists. It also keeps a
710 710 # list of copied files so that the largefiles can be copied and the
711 711 # dirstate updated.
712 712 @eh.wrapfunction(cmdutil, b'copy')
713 713 def overridecopy(orig, ui, repo, pats, opts, rename=False):
714 714 # doesn't remove largefile on rename
715 715 if len(pats) < 2:
716 716 # this isn't legal, let the original function deal with it
717 717 return orig(ui, repo, pats, opts, rename)
718 718
719 719 # This could copy both lfiles and normal files in one command,
720 720 # but we don't want to do that. First replace their matcher to
721 721 # only match normal files and run it, then replace it to just
722 722 # match largefiles and run it again.
723 723 nonormalfiles = False
724 724 nolfiles = False
725 725 manifest = repo[None].manifest()
726 726
727 727 def normalfilesmatchfn(
728 728 orig,
729 729 ctx,
730 730 pats=(),
731 731 opts=None,
732 732 globbed=False,
733 733 default=b'relpath',
734 734 badfn=None,
735 735 ):
736 736 if opts is None:
737 737 opts = {}
738 738 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
739 739 return composenormalfilematcher(match, manifest)
740 740
741 741 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
742 742 try:
743 743 result = orig(ui, repo, pats, opts, rename)
744 744 except error.Abort as e:
745 745 if pycompat.bytestr(e) != _(b'no files to copy'):
746 746 raise e
747 747 else:
748 748 nonormalfiles = True
749 749 result = 0
750 750
751 751 # The first rename can cause our current working directory to be removed.
752 752 # In that case there is nothing left to copy/rename so just quit.
753 753 try:
754 754 repo.getcwd()
755 755 except OSError:
756 756 return result
757 757
758 758 def makestandin(relpath):
759 759 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
760 760 return repo.wvfs.join(lfutil.standin(path))
761 761
762 762 fullpats = scmutil.expandpats(pats)
763 763 dest = fullpats[-1]
764 764
765 765 if os.path.isdir(dest):
766 766 if not os.path.isdir(makestandin(dest)):
767 767 os.makedirs(makestandin(dest))
768 768
769 769 try:
770 770 # When we call orig below it creates the standins but we don't add
771 771 # them to the dir state until later so lock during that time.
772 772 wlock = repo.wlock()
773 773
774 774 manifest = repo[None].manifest()
775 775
776 776 def overridematch(
777 777 orig,
778 778 ctx,
779 779 pats=(),
780 780 opts=None,
781 781 globbed=False,
782 782 default=b'relpath',
783 783 badfn=None,
784 784 ):
785 785 if opts is None:
786 786 opts = {}
787 787 newpats = []
788 788 # The patterns were previously mangled to add the standin
789 789 # directory; we need to remove that now
790 790 for pat in pats:
791 791 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
792 792 newpats.append(pat.replace(lfutil.shortname, b''))
793 793 else:
794 794 newpats.append(pat)
795 795 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
796 796 m = copy.copy(match)
797 797 lfile = lambda f: lfutil.standin(f) in manifest
798 798 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
799 799 m._fileset = set(m._files)
800 800 origmatchfn = m.matchfn
801 801
802 802 def matchfn(f):
803 803 lfile = lfutil.splitstandin(f)
804 804 return (
805 805 lfile is not None
806 806 and (f in manifest)
807 807 and origmatchfn(lfile)
808 808 or None
809 809 )
810 810
811 811 m.matchfn = matchfn
812 812 return m
813 813
814 814 listpats = []
815 815 for pat in pats:
816 816 if matchmod.patkind(pat) is not None:
817 817 listpats.append(pat)
818 818 else:
819 819 listpats.append(makestandin(pat))
820 820
821 821 copiedfiles = []
822 822
823 823 def overridecopyfile(orig, src, dest, *args, **kwargs):
824 824 if lfutil.shortname in src and dest.startswith(
825 825 repo.wjoin(lfutil.shortname)
826 826 ):
827 827 destlfile = dest.replace(lfutil.shortname, b'')
828 828 if not opts[b'force'] and os.path.exists(destlfile):
829 829 raise IOError(
830 830 b'', _(b'destination largefile already exists')
831 831 )
832 832 copiedfiles.append((src, dest))
833 833 orig(src, dest, *args, **kwargs)
834 834
835 835 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
836 836 with extensions.wrappedfunction(scmutil, b'match', overridematch):
837 837 result += orig(ui, repo, listpats, opts, rename)
838 838
839 839 lfdirstate = lfutil.openlfdirstate(ui, repo)
840 840 for (src, dest) in copiedfiles:
841 841 if lfutil.shortname in src and dest.startswith(
842 842 repo.wjoin(lfutil.shortname)
843 843 ):
844 844 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
845 845 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
846 846 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
847 847 if not os.path.isdir(destlfiledir):
848 848 os.makedirs(destlfiledir)
849 849 if rename:
850 850 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
851 851
852 852 # The file is gone, but this deletes any empty parent
853 853 # directories as a side-effect.
854 854 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
855 855 lfdirstate.remove(srclfile)
856 856 else:
857 857 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
858 858
859 859 lfdirstate.add(destlfile)
860 860 lfdirstate.write()
861 861 except error.Abort as e:
862 862 if pycompat.bytestr(e) != _(b'no files to copy'):
863 863 raise e
864 864 else:
865 865 nolfiles = True
866 866 finally:
867 867 wlock.release()
868 868
869 869 if nolfiles and nonormalfiles:
870 870 raise error.Abort(_(b'no files to copy'))
871 871
872 872 return result
873 873
874 874
875 875 # When the user calls revert, we have to be careful to not revert any
876 876 # changes to other largefiles accidentally. This means we have to keep
877 877 # track of the largefiles that are being reverted so we only pull down
878 878 # the necessary largefiles.
879 879 #
880 880 # Standins are only updated (to match the hash of largefiles) before
881 881 # commits. Update the standins then run the original revert, changing
882 882 # the matcher to hit standins instead of largefiles. Based on the
883 883 # resulting standins update the largefiles.
884 884 @eh.wrapfunction(cmdutil, b'revert')
885 885 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
886 886 # Because we put the standins in a bad state (by updating them)
887 887 # and then return them to a correct state we need to lock to
888 888 # prevent others from changing them in their incorrect state.
889 889 with repo.wlock():
890 890 lfdirstate = lfutil.openlfdirstate(ui, repo)
891 891 s = lfutil.lfdirstatestatus(lfdirstate, repo)
892 892 lfdirstate.write()
893 893 for lfile in s.modified:
894 894 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
895 895 for lfile in s.deleted:
896 896 fstandin = lfutil.standin(lfile)
897 897 if repo.wvfs.exists(fstandin):
898 898 repo.wvfs.unlink(fstandin)
899 899
900 900 oldstandins = lfutil.getstandinsstate(repo)
901 901
902 902 def overridematch(
903 903 orig,
904 904 mctx,
905 905 pats=(),
906 906 opts=None,
907 907 globbed=False,
908 908 default=b'relpath',
909 909 badfn=None,
910 910 ):
911 911 if opts is None:
912 912 opts = {}
913 913 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
914 914 m = copy.copy(match)
915 915
916 916 # revert supports recursing into subrepos, and though largefiles
917 917 # currently doesn't work correctly in that case, this match is
918 918 # called, so the lfdirstate above may not be the correct one for
919 919 # this invocation of match.
920 920 lfdirstate = lfutil.openlfdirstate(
921 921 mctx.repo().ui, mctx.repo(), False
922 922 )
923 923
924 924 wctx = repo[None]
925 925 matchfiles = []
926 926 for f in m._files:
927 927 standin = lfutil.standin(f)
928 928 if standin in ctx or standin in mctx:
929 929 matchfiles.append(standin)
930 930 elif standin in wctx or lfdirstate[f] == b'r':
931 931 continue
932 932 else:
933 933 matchfiles.append(f)
934 934 m._files = matchfiles
935 935 m._fileset = set(m._files)
936 936 origmatchfn = m.matchfn
937 937
938 938 def matchfn(f):
939 939 lfile = lfutil.splitstandin(f)
940 940 if lfile is not None:
941 941 return origmatchfn(lfile) and (f in ctx or f in mctx)
942 942 return origmatchfn(f)
943 943
944 944 m.matchfn = matchfn
945 945 return m
946 946
947 947 with extensions.wrappedfunction(scmutil, b'match', overridematch):
948 948 orig(ui, repo, ctx, parents, *pats, **opts)
949 949
950 950 newstandins = lfutil.getstandinsstate(repo)
951 951 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
952 952 # lfdirstate should be 'normallookup'-ed for updated files,
953 953 # because reverting doesn't touch dirstate for 'normal' files
954 954 # when target revision is explicitly specified: in such case,
955 955 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
956 956 # of target (standin) file.
957 957 lfcommands.updatelfiles(
958 958 ui, repo, filelist, printmessage=False, normallookup=True
959 959 )
960 960
961 961
962 962 # after pulling changesets, we need to take some extra care to get
963 963 # largefiles updated remotely
964 964 @eh.wrapcommand(
965 965 b'pull',
966 966 opts=[
967 967 (
968 968 b'',
969 969 b'all-largefiles',
970 970 None,
971 971 _(b'download all pulled versions of largefiles (DEPRECATED)'),
972 972 ),
973 973 (
974 974 b'',
975 975 b'lfrev',
976 976 [],
977 977 _(b'download largefiles for these revisions'),
978 978 _(b'REV'),
979 979 ),
980 980 ],
981 981 )
982 982 def overridepull(orig, ui, repo, source=None, **opts):
983 983 revsprepull = len(repo)
984 984 if not source:
985 985 source = b'default'
986 986 repo.lfpullsource = source
987 987 result = orig(ui, repo, source, **opts)
988 988 revspostpull = len(repo)
989 989 lfrevs = opts.get('lfrev', [])
990 990 if opts.get('all_largefiles'):
991 991 lfrevs.append(b'pulled()')
992 992 if lfrevs and revspostpull > revsprepull:
993 993 numcached = 0
994 994 repo.firstpulled = revsprepull # for pulled() revset expression
995 995 try:
996 996 for rev in scmutil.revrange(repo, lfrevs):
997 997 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
998 998 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
999 999 numcached += len(cached)
1000 1000 finally:
1001 1001 del repo.firstpulled
1002 1002 ui.status(_(b"%d largefiles cached\n") % numcached)
1003 1003 return result
1004 1004
1005 1005
1006 1006 @eh.wrapcommand(
1007 1007 b'push',
1008 1008 opts=[
1009 1009 (
1010 1010 b'',
1011 1011 b'lfrev',
1012 1012 [],
1013 1013 _(b'upload largefiles for these revisions'),
1014 1014 _(b'REV'),
1015 1015 )
1016 1016 ],
1017 1017 )
1018 1018 def overridepush(orig, ui, repo, *args, **kwargs):
1019 1019 """Override push command and store --lfrev parameters in opargs"""
1020 1020 lfrevs = kwargs.pop('lfrev', None)
1021 1021 if lfrevs:
1022 1022 opargs = kwargs.setdefault('opargs', {})
1023 1023 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1024 1024 return orig(ui, repo, *args, **kwargs)
1025 1025
1026 1026
1027 1027 @eh.wrapfunction(exchange, b'pushoperation')
1028 1028 def exchangepushoperation(orig, *args, **kwargs):
1029 1029 """Override pushoperation constructor and store lfrevs parameter"""
1030 1030 lfrevs = kwargs.pop('lfrevs', None)
1031 1031 pushop = orig(*args, **kwargs)
1032 1032 pushop.lfrevs = lfrevs
1033 1033 return pushop
1034 1034
1035 1035
1036 1036 @eh.revsetpredicate(b'pulled()')
1037 1037 def pulledrevsetsymbol(repo, subset, x):
1038 1038 """Changesets that just has been pulled.
1039 1039
1040 1040 Only available with largefiles from pull --lfrev expressions.
1041 1041
1042 1042 .. container:: verbose
1043 1043
1044 1044 Some examples:
1045 1045
1046 1046 - pull largefiles for all new changesets::
1047 1047
1048 1048 hg pull -lfrev "pulled()"
1049 1049
1050 1050 - pull largefiles for all new branch heads::
1051 1051
1052 1052 hg pull -lfrev "head(pulled()) and not closed()"
1053 1053
1054 1054 """
1055 1055
1056 1056 try:
1057 1057 firstpulled = repo.firstpulled
1058 1058 except AttributeError:
1059 1059 raise error.Abort(_(b"pulled() only available in --lfrev"))
1060 1060 return smartset.baseset([r for r in subset if r >= firstpulled])
1061 1061
1062 1062
1063 1063 @eh.wrapcommand(
1064 1064 b'clone',
1065 1065 opts=[
1066 1066 (
1067 1067 b'',
1068 1068 b'all-largefiles',
1069 1069 None,
1070 1070 _(b'download all versions of all largefiles'),
1071 1071 )
1072 1072 ],
1073 1073 )
1074 1074 def overrideclone(orig, ui, source, dest=None, **opts):
1075 1075 d = dest
1076 1076 if d is None:
1077 1077 d = hg.defaultdest(source)
1078 1078 if opts.get('all_largefiles') and not hg.islocal(d):
1079 1079 raise error.Abort(
1080 1080 _(b'--all-largefiles is incompatible with non-local destination %s')
1081 1081 % d
1082 1082 )
1083 1083
1084 1084 return orig(ui, source, dest, **opts)
1085 1085
1086 1086
1087 1087 @eh.wrapfunction(hg, b'clone')
1088 1088 def hgclone(orig, ui, opts, *args, **kwargs):
1089 1089 result = orig(ui, opts, *args, **kwargs)
1090 1090
1091 1091 if result is not None:
1092 1092 sourcerepo, destrepo = result
1093 1093 repo = destrepo.local()
1094 1094
1095 1095 # When cloning to a remote repo (like through SSH), no repo is available
1096 1096 # from the peer. Therefore the largefiles can't be downloaded and the
1097 1097 # hgrc can't be updated.
1098 1098 if not repo:
1099 1099 return result
1100 1100
1101 1101 # Caching is implicitly limited to 'rev' option, since the dest repo was
1102 1102 # truncated at that point. The user may expect a download count with
1103 1103 # this option, so attempt whether or not this is a largefile repo.
1104 1104 if opts.get(b'all_largefiles'):
1105 1105 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1106 1106
1107 1107 if missing != 0:
1108 1108 return None
1109 1109
1110 1110 return result
1111 1111
1112 1112
1113 1113 @eh.wrapcommand(b'rebase', extension=b'rebase')
1114 1114 def overriderebase(orig, ui, repo, **opts):
1115 1115 if not util.safehasattr(repo, b'_largefilesenabled'):
1116 1116 return orig(ui, repo, **opts)
1117 1117
1118 1118 resuming = opts.get('continue')
1119 1119 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1120 1120 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1121 1121 try:
1122 1122 return orig(ui, repo, **opts)
1123 1123 finally:
1124 1124 repo._lfstatuswriters.pop()
1125 1125 repo._lfcommithooks.pop()
1126 1126
1127 1127
1128 1128 @eh.wrapcommand(b'archive')
1129 1129 def overridearchivecmd(orig, ui, repo, dest, **opts):
1130 1130 with lfstatus(repo.unfiltered()):
1131 1131 return orig(ui, repo.unfiltered(), dest, **opts)
1132 1132
1133 1133
1134 1134 @eh.wrapfunction(webcommands, b'archive')
1135 1135 def hgwebarchive(orig, web):
1136 1136 with lfstatus(web.repo):
1137 1137 return orig(web)
1138 1138
1139 1139
1140 1140 @eh.wrapfunction(archival, b'archive')
1141 1141 def overridearchive(
1142 1142 orig,
1143 1143 repo,
1144 1144 dest,
1145 1145 node,
1146 1146 kind,
1147 1147 decode=True,
1148 1148 match=None,
1149 1149 prefix=b'',
1150 1150 mtime=None,
1151 1151 subrepos=None,
1152 1152 ):
1153 1153 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1154 1154 # unfiltered repo's attr, so check that as well.
1155 1155 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1156 1156 return orig(
1157 1157 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1158 1158 )
1159 1159
1160 1160 # No need to lock because we are only reading history and
1161 1161 # largefile caches, neither of which are modified.
1162 1162 if node is not None:
1163 1163 lfcommands.cachelfiles(repo.ui, repo, node)
1164 1164
1165 1165 if kind not in archival.archivers:
1166 1166 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1167 1167
1168 1168 ctx = repo[node]
1169 1169
1170 1170 if kind == b'files':
1171 1171 if prefix:
1172 1172 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1173 1173 else:
1174 1174 prefix = archival.tidyprefix(dest, kind, prefix)
1175 1175
1176 1176 def write(name, mode, islink, getdata):
1177 1177 if match and not match(name):
1178 1178 return
1179 1179 data = getdata()
1180 1180 if decode:
1181 1181 data = repo.wwritedata(name, data)
1182 1182 archiver.addfile(prefix + name, mode, islink, data)
1183 1183
1184 1184 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1185 1185
1186 1186 if repo.ui.configbool(b"ui", b"archivemeta"):
1187 1187 write(
1188 1188 b'.hg_archival.txt',
1189 1189 0o644,
1190 1190 False,
1191 1191 lambda: archival.buildmetadata(ctx),
1192 1192 )
1193 1193
1194 1194 for f in ctx:
1195 1195 ff = ctx.flags(f)
1196 1196 getdata = ctx[f].data
1197 1197 lfile = lfutil.splitstandin(f)
1198 1198 if lfile is not None:
1199 1199 if node is not None:
1200 1200 path = lfutil.findfile(repo, getdata().strip())
1201 1201
1202 1202 if path is None:
1203 1203 raise error.Abort(
1204 1204 _(
1205 1205 b'largefile %s not found in repo store or system cache'
1206 1206 )
1207 1207 % lfile
1208 1208 )
1209 1209 else:
1210 1210 path = lfile
1211 1211
1212 1212 f = lfile
1213 1213
1214 1214 getdata = lambda: util.readfile(path)
1215 1215 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1216 1216
1217 1217 if subrepos:
1218 1218 for subpath in sorted(ctx.substate):
1219 1219 sub = ctx.workingsub(subpath)
1220 1220 submatch = matchmod.subdirmatcher(subpath, match)
1221 1221 subprefix = prefix + subpath + b'/'
1222 1222
1223 1223 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1224 1224 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1225 1225 # allow only hgsubrepos to set this, instead of the current scheme
1226 1226 # where the parent sets this for the child.
1227 1227 with (
1228 1228 util.safehasattr(sub, '_repo')
1229 1229 and lfstatus(sub._repo)
1230 1230 or util.nullcontextmanager()
1231 1231 ):
1232 1232 sub.archive(archiver, subprefix, submatch)
1233 1233
1234 1234 archiver.done()
1235 1235
1236 1236
1237 1237 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1238 1238 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1239 1239 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1240 1240 if not lfenabled or not repo._repo.lfstatus:
1241 1241 return orig(repo, archiver, prefix, match, decode)
1242 1242
1243 1243 repo._get(repo._state + (b'hg',))
1244 1244 rev = repo._state[1]
1245 1245 ctx = repo._repo[rev]
1246 1246
1247 1247 if ctx.node() is not None:
1248 1248 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1249 1249
1250 1250 def write(name, mode, islink, getdata):
1251 1251 # At this point, the standin has been replaced with the largefile name,
1252 1252 # so the normal matcher works here without the lfutil variants.
1253 1253 if match and not match(f):
1254 1254 return
1255 1255 data = getdata()
1256 1256 if decode:
1257 1257 data = repo._repo.wwritedata(name, data)
1258 1258
1259 1259 archiver.addfile(prefix + name, mode, islink, data)
1260 1260
1261 1261 for f in ctx:
1262 1262 ff = ctx.flags(f)
1263 1263 getdata = ctx[f].data
1264 1264 lfile = lfutil.splitstandin(f)
1265 1265 if lfile is not None:
1266 1266 if ctx.node() is not None:
1267 1267 path = lfutil.findfile(repo._repo, getdata().strip())
1268 1268
1269 1269 if path is None:
1270 1270 raise error.Abort(
1271 1271 _(
1272 1272 b'largefile %s not found in repo store or system cache'
1273 1273 )
1274 1274 % lfile
1275 1275 )
1276 1276 else:
1277 1277 path = lfile
1278 1278
1279 1279 f = lfile
1280 1280
1281 1281 getdata = lambda: util.readfile(os.path.join(prefix, path))
1282 1282
1283 1283 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1284 1284
1285 1285 for subpath in sorted(ctx.substate):
1286 1286 sub = ctx.workingsub(subpath)
1287 1287 submatch = matchmod.subdirmatcher(subpath, match)
1288 1288 subprefix = prefix + subpath + b'/'
1289 1289 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1290 1290 # infer and possibly set lfstatus at the top of this function. That
1291 1291 # would allow only hgsubrepos to set this, instead of the current scheme
1292 1292 # where the parent sets this for the child.
1293 1293 with (
1294 1294 util.safehasattr(sub, '_repo')
1295 1295 and lfstatus(sub._repo)
1296 1296 or util.nullcontextmanager()
1297 1297 ):
1298 1298 sub.archive(archiver, subprefix, submatch, decode)
1299 1299
1300 1300
1301 1301 # If a largefile is modified, the change is not reflected in its
1302 1302 # standin until a commit. cmdutil.bailifchanged() raises an exception
1303 1303 # if the repo has uncommitted changes. Wrap it to also check if
1304 1304 # largefiles were changed. This is used by bisect, backout and fetch.
1305 1305 @eh.wrapfunction(cmdutil, b'bailifchanged')
1306 1306 def overridebailifchanged(orig, repo, *args, **kwargs):
1307 1307 orig(repo, *args, **kwargs)
1308 1308 with lfstatus(repo):
1309 1309 s = repo.status()
1310 1310 if s.modified or s.added or s.removed or s.deleted:
1311 1311 raise error.Abort(_(b'uncommitted changes'))
1312 1312
1313 1313
1314 1314 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1315 1315 def postcommitstatus(orig, repo, *args, **kwargs):
1316 1316 with lfstatus(repo):
1317 1317 return orig(repo, *args, **kwargs)
1318 1318
1319 1319
1320 1320 @eh.wrapfunction(cmdutil, b'forget')
1321 1321 def cmdutilforget(
1322 1322 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1323 1323 ):
1324 1324 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1325 1325 bad, forgot = orig(
1326 1326 ui,
1327 1327 repo,
1328 1328 normalmatcher,
1329 1329 prefix,
1330 1330 uipathfn,
1331 1331 explicitonly,
1332 1332 dryrun,
1333 1333 interactive,
1334 1334 )
1335 1335 m = composelargefilematcher(match, repo[None].manifest())
1336 1336
1337 1337 with lfstatus(repo):
1338 1338 s = repo.status(match=m, clean=True)
1339 1339 manifest = repo[None].manifest()
1340 1340 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1341 1341 forget = [f for f in forget if lfutil.standin(f) in manifest]
1342 1342
1343 1343 for f in forget:
1344 1344 fstandin = lfutil.standin(f)
1345 1345 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1346 1346 ui.warn(
1347 1347 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1348 1348 )
1349 1349 bad.append(f)
1350 1350
1351 1351 for f in forget:
1352 1352 if ui.verbose or not m.exact(f):
1353 1353 ui.status(_(b'removing %s\n') % uipathfn(f))
1354 1354
1355 1355 # Need to lock because standin files are deleted then removed from the
1356 1356 # repository and we could race in-between.
1357 1357 with repo.wlock():
1358 1358 lfdirstate = lfutil.openlfdirstate(ui, repo)
1359 1359 for f in forget:
1360 1360 if lfdirstate[f] == b'a':
1361 1361 lfdirstate.drop(f)
1362 1362 else:
1363 1363 lfdirstate.remove(f)
1364 1364 lfdirstate.write()
1365 1365 standins = [lfutil.standin(f) for f in forget]
1366 1366 for f in standins:
1367 1367 repo.wvfs.unlinkpath(f, ignoremissing=True)
1368 1368 rejected = repo[None].forget(standins)
1369 1369
1370 1370 bad.extend(f for f in rejected if f in m.files())
1371 1371 forgot.extend(f for f in forget if f not in rejected)
1372 1372 return bad, forgot
1373 1373
1374 1374
1375 1375 def _getoutgoings(repo, other, missing, addfunc):
1376 1376 """get pairs of filename and largefile hash in outgoing revisions
1377 1377 in 'missing'.
1378 1378
1379 1379 largefiles already existing on 'other' repository are ignored.
1380 1380
1381 1381 'addfunc' is invoked with each unique pairs of filename and
1382 1382 largefile hash value.
1383 1383 """
1384 1384 knowns = set()
1385 1385 lfhashes = set()
1386 1386
1387 1387 def dedup(fn, lfhash):
1388 1388 k = (fn, lfhash)
1389 1389 if k not in knowns:
1390 1390 knowns.add(k)
1391 1391 lfhashes.add(lfhash)
1392 1392
1393 1393 lfutil.getlfilestoupload(repo, missing, dedup)
1394 1394 if lfhashes:
1395 1395 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1396 1396 for fn, lfhash in knowns:
1397 1397 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1398 1398 addfunc(fn, lfhash)
1399 1399
1400 1400
1401 1401 def outgoinghook(ui, repo, other, opts, missing):
1402 1402 if opts.pop(b'large', None):
1403 1403 lfhashes = set()
1404 1404 if ui.debugflag:
1405 1405 toupload = {}
1406 1406
1407 1407 def addfunc(fn, lfhash):
1408 1408 if fn not in toupload:
1409 1409 toupload[fn] = []
1410 1410 toupload[fn].append(lfhash)
1411 1411 lfhashes.add(lfhash)
1412 1412
1413 1413 def showhashes(fn):
1414 1414 for lfhash in sorted(toupload[fn]):
1415 1415 ui.debug(b' %s\n' % lfhash)
1416 1416
1417 1417 else:
1418 1418 toupload = set()
1419 1419
1420 1420 def addfunc(fn, lfhash):
1421 1421 toupload.add(fn)
1422 1422 lfhashes.add(lfhash)
1423 1423
1424 1424 def showhashes(fn):
1425 1425 pass
1426 1426
1427 1427 _getoutgoings(repo, other, missing, addfunc)
1428 1428
1429 1429 if not toupload:
1430 1430 ui.status(_(b'largefiles: no files to upload\n'))
1431 1431 else:
1432 1432 ui.status(
1433 1433 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1434 1434 )
1435 1435 for file in sorted(toupload):
1436 1436 ui.status(lfutil.splitstandin(file) + b'\n')
1437 1437 showhashes(file)
1438 1438 ui.status(b'\n')
1439 1439
1440 1440
1441 1441 @eh.wrapcommand(
1442 1442 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1443 1443 )
1444 1444 def _outgoingcmd(orig, *args, **kwargs):
1445 1445 # Nothing to do here other than add the extra help option- the hook above
1446 1446 # processes it.
1447 1447 return orig(*args, **kwargs)
1448 1448
1449 1449
1450 1450 def summaryremotehook(ui, repo, opts, changes):
1451 1451 largeopt = opts.get(b'large', False)
1452 1452 if changes is None:
1453 1453 if largeopt:
1454 1454 return (False, True) # only outgoing check is needed
1455 1455 else:
1456 1456 return (False, False)
1457 1457 elif largeopt:
1458 1458 url, branch, peer, outgoing = changes[1]
1459 1459 if peer is None:
1460 1460 # i18n: column positioning for "hg summary"
1461 1461 ui.status(_(b'largefiles: (no remote repo)\n'))
1462 1462 return
1463 1463
1464 1464 toupload = set()
1465 1465 lfhashes = set()
1466 1466
1467 1467 def addfunc(fn, lfhash):
1468 1468 toupload.add(fn)
1469 1469 lfhashes.add(lfhash)
1470 1470
1471 1471 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1472 1472
1473 1473 if not toupload:
1474 1474 # i18n: column positioning for "hg summary"
1475 1475 ui.status(_(b'largefiles: (no files to upload)\n'))
1476 1476 else:
1477 1477 # i18n: column positioning for "hg summary"
1478 1478 ui.status(
1479 1479 _(b'largefiles: %d entities for %d files to upload\n')
1480 1480 % (len(lfhashes), len(toupload))
1481 1481 )
1482 1482
1483 1483
1484 1484 @eh.wrapcommand(
1485 1485 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1486 1486 )
1487 1487 def overridesummary(orig, ui, repo, *pats, **opts):
1488 1488 with lfstatus(repo):
1489 1489 orig(ui, repo, *pats, **opts)
1490 1490
1491 1491
1492 1492 @eh.wrapfunction(scmutil, b'addremove')
1493 1493 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1494 1494 if opts is None:
1495 1495 opts = {}
1496 1496 if not lfutil.islfilesrepo(repo):
1497 1497 return orig(repo, matcher, prefix, uipathfn, opts)
1498 1498 # Get the list of missing largefiles so we can remove them
1499 1499 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1500 1500 unsure, s = lfdirstate.status(
1501 1501 matchmod.always(),
1502 1502 subrepos=[],
1503 1503 ignored=False,
1504 1504 clean=False,
1505 1505 unknown=False,
1506 1506 )
1507 1507
1508 1508 # Call into the normal remove code, but the removing of the standin, we want
1509 1509 # to have handled by original addremove. Monkey patching here makes sure
1510 1510 # we don't remove the standin in the largefiles code, preventing a very
1511 1511 # confused state later.
1512 1512 if s.deleted:
1513 1513 m = copy.copy(matcher)
1514 1514
1515 1515 # The m._files and m._map attributes are not changed to the deleted list
1516 1516 # because that affects the m.exact() test, which in turn governs whether
1517 1517 # or not the file name is printed, and how. Simply limit the original
1518 1518 # matches to those in the deleted status list.
1519 1519 matchfn = m.matchfn
1520 1520 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1521 1521
1522 1522 removelargefiles(
1523 1523 repo.ui,
1524 1524 repo,
1525 1525 True,
1526 1526 m,
1527 1527 uipathfn,
1528 1528 opts.get(b'dry_run'),
1529 1529 **pycompat.strkwargs(opts)
1530 1530 )
1531 1531 # Call into the normal add code, and any files that *should* be added as
1532 1532 # largefiles will be
1533 1533 added, bad = addlargefiles(
1534 1534 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1535 1535 )
1536 1536 # Now that we've handled largefiles, hand off to the original addremove
1537 1537 # function to take care of the rest. Make sure it doesn't do anything with
1538 1538 # largefiles by passing a matcher that will ignore them.
1539 1539 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1540 1540 return orig(repo, matcher, prefix, uipathfn, opts)
1541 1541
1542 1542
1543 1543 # Calling purge with --all will cause the largefiles to be deleted.
1544 1544 # Override repo.status to prevent this from happening.
1545 1545 @eh.wrapcommand(b'purge', extension=b'purge')
1546 1546 def overridepurge(orig, ui, repo, *dirs, **opts):
1547 1547 # XXX Monkey patching a repoview will not work. The assigned attribute will
1548 1548 # be set on the unfiltered repo, but we will only lookup attributes in the
1549 1549 # unfiltered repo if the lookup in the repoview object itself fails. As the
1550 1550 # monkey patched method exists on the repoview class the lookup will not
1551 1551 # fail. As a result, the original version will shadow the monkey patched
1552 1552 # one, defeating the monkey patch.
1553 1553 #
1554 1554 # As a work around we use an unfiltered repo here. We should do something
1555 1555 # cleaner instead.
1556 1556 repo = repo.unfiltered()
1557 1557 oldstatus = repo.status
1558 1558
1559 1559 def overridestatus(
1560 1560 node1=b'.',
1561 1561 node2=None,
1562 1562 match=None,
1563 1563 ignored=False,
1564 1564 clean=False,
1565 1565 unknown=False,
1566 1566 listsubrepos=False,
1567 1567 ):
1568 1568 r = oldstatus(
1569 1569 node1, node2, match, ignored, clean, unknown, listsubrepos
1570 1570 )
1571 1571 lfdirstate = lfutil.openlfdirstate(ui, repo)
1572 1572 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1573 1573 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1574 1574 return scmutil.status(
1575 1575 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1576 1576 )
1577 1577
1578 1578 repo.status = overridestatus
1579 1579 orig(ui, repo, *dirs, **opts)
1580 1580 repo.status = oldstatus
1581 1581
1582 1582
1583 1583 @eh.wrapcommand(b'rollback')
1584 1584 def overriderollback(orig, ui, repo, **opts):
1585 1585 with repo.wlock():
1586 1586 before = repo.dirstate.parents()
1587 1587 orphans = {
1588 1588 f
1589 1589 for f in repo.dirstate
1590 1590 if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
1591 1591 }
1592 1592 result = orig(ui, repo, **opts)
1593 1593 after = repo.dirstate.parents()
1594 1594 if before == after:
1595 1595 return result # no need to restore standins
1596 1596
1597 1597 pctx = repo[b'.']
1598 1598 for f in repo.dirstate:
1599 1599 if lfutil.isstandin(f):
1600 1600 orphans.discard(f)
1601 1601 if repo.dirstate[f] == b'r':
1602 1602 repo.wvfs.unlinkpath(f, ignoremissing=True)
1603 1603 elif f in pctx:
1604 1604 fctx = pctx[f]
1605 1605 repo.wwrite(f, fctx.data(), fctx.flags())
1606 1606 else:
1607 1607 # content of standin is not so important in 'a',
1608 1608 # 'm' or 'n' (coming from the 2nd parent) cases
1609 1609 lfutil.writestandin(repo, f, b'', False)
1610 1610 for standin in orphans:
1611 1611 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1612 1612
1613 1613 lfdirstate = lfutil.openlfdirstate(ui, repo)
1614 1614 orphans = set(lfdirstate)
1615 1615 lfiles = lfutil.listlfiles(repo)
1616 1616 for file in lfiles:
1617 1617 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1618 1618 orphans.discard(file)
1619 1619 for lfile in orphans:
1620 1620 lfdirstate.drop(lfile)
1621 1621 lfdirstate.write()
1622 1622 return result
1623 1623
1624 1624
1625 1625 @eh.wrapcommand(b'transplant', extension=b'transplant')
1626 1626 def overridetransplant(orig, ui, repo, *revs, **opts):
1627 1627 resuming = opts.get('continue')
1628 1628 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1629 1629 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1630 1630 try:
1631 1631 result = orig(ui, repo, *revs, **opts)
1632 1632 finally:
1633 1633 repo._lfstatuswriters.pop()
1634 1634 repo._lfcommithooks.pop()
1635 1635 return result
1636 1636
1637 1637
1638 1638 @eh.wrapcommand(b'cat')
1639 1639 def overridecat(orig, ui, repo, file1, *pats, **opts):
1640 1640 opts = pycompat.byteskwargs(opts)
1641 1641 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
1642 1642 err = 1
1643 1643 notbad = set()
1644 1644 m = scmutil.match(ctx, (file1,) + pats, opts)
1645 1645 origmatchfn = m.matchfn
1646 1646
1647 1647 def lfmatchfn(f):
1648 1648 if origmatchfn(f):
1649 1649 return True
1650 1650 lf = lfutil.splitstandin(f)
1651 1651 if lf is None:
1652 1652 return False
1653 1653 notbad.add(lf)
1654 1654 return origmatchfn(lf)
1655 1655
1656 1656 m.matchfn = lfmatchfn
1657 1657 origbadfn = m.bad
1658 1658
1659 1659 def lfbadfn(f, msg):
1660 1660 if not f in notbad:
1661 1661 origbadfn(f, msg)
1662 1662
1663 1663 m.bad = lfbadfn
1664 1664
1665 1665 origvisitdirfn = m.visitdir
1666 1666
1667 1667 def lfvisitdirfn(dir):
1668 1668 if dir == lfutil.shortname:
1669 1669 return True
1670 1670 ret = origvisitdirfn(dir)
1671 1671 if ret:
1672 1672 return ret
1673 1673 lf = lfutil.splitstandin(dir)
1674 1674 if lf is None:
1675 1675 return False
1676 1676 return origvisitdirfn(lf)
1677 1677
1678 1678 m.visitdir = lfvisitdirfn
1679 1679
1680 1680 for f in ctx.walk(m):
1681 1681 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1682 1682 lf = lfutil.splitstandin(f)
1683 1683 if lf is None or origmatchfn(f):
1684 1684 # duplicating unreachable code from commands.cat
1685 1685 data = ctx[f].data()
1686 1686 if opts.get(b'decode'):
1687 1687 data = repo.wwritedata(f, data)
1688 1688 fp.write(data)
1689 1689 else:
1690 1690 hash = lfutil.readasstandin(ctx[f])
1691 1691 if not lfutil.inusercache(repo.ui, hash):
1692 1692 store = storefactory.openstore(repo)
1693 1693 success, missing = store.get([(lf, hash)])
1694 1694 if len(success) != 1:
1695 1695 raise error.Abort(
1696 1696 _(
1697 1697 b'largefile %s is not in cache and could not be '
1698 1698 b'downloaded'
1699 1699 )
1700 1700 % lf
1701 1701 )
1702 1702 path = lfutil.usercachepath(repo.ui, hash)
1703 1703 with open(path, b"rb") as fpin:
1704 1704 for chunk in util.filechunkiter(fpin):
1705 1705 fp.write(chunk)
1706 1706 err = 0
1707 1707 return err
1708 1708
1709 1709
1710 1710 @eh.wrapfunction(merge, b'update')
1711 1711 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1712 1712 matcher = kwargs.get('matcher', None)
1713 1713 # note if this is a partial update
1714 1714 partial = matcher and not matcher.always()
1715 1715 with repo.wlock():
1716 1716 # branch | | |
1717 1717 # merge | force | partial | action
1718 1718 # -------+-------+---------+--------------
1719 1719 # x | x | x | linear-merge
1720 1720 # o | x | x | branch-merge
1721 1721 # x | o | x | overwrite (as clean update)
1722 1722 # o | o | x | force-branch-merge (*1)
1723 1723 # x | x | o | (*)
1724 1724 # o | x | o | (*)
1725 1725 # x | o | o | overwrite (as revert)
1726 1726 # o | o | o | (*)
1727 1727 #
1728 1728 # (*) don't care
1729 1729 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1730 1730
1731 1731 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1732 1732 unsure, s = lfdirstate.status(
1733 1733 matchmod.always(),
1734 1734 subrepos=[],
1735 1735 ignored=False,
1736 1736 clean=True,
1737 1737 unknown=False,
1738 1738 )
1739 1739 oldclean = set(s.clean)
1740 1740 pctx = repo[b'.']
1741 1741 dctx = repo[node]
1742 1742 for lfile in unsure + s.modified:
1743 1743 lfileabs = repo.wvfs.join(lfile)
1744 1744 if not repo.wvfs.exists(lfileabs):
1745 1745 continue
1746 1746 lfhash = lfutil.hashfile(lfileabs)
1747 1747 standin = lfutil.standin(lfile)
1748 1748 lfutil.writestandin(
1749 1749 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1750 1750 )
1751 1751 if standin in pctx and lfhash == lfutil.readasstandin(
1752 1752 pctx[standin]
1753 1753 ):
1754 1754 oldclean.add(lfile)
1755 1755 for lfile in s.added:
1756 1756 fstandin = lfutil.standin(lfile)
1757 1757 if fstandin not in dctx:
1758 1758 # in this case, content of standin file is meaningless
1759 1759 # (in dctx, lfile is unknown, or normal file)
1760 1760 continue
1761 1761 lfutil.updatestandin(repo, lfile, fstandin)
1762 1762 # mark all clean largefiles as dirty, just in case the update gets
1763 1763 # interrupted before largefiles and lfdirstate are synchronized
1764 1764 for lfile in oldclean:
1765 1765 lfdirstate.normallookup(lfile)
1766 1766 lfdirstate.write()
1767 1767
1768 1768 oldstandins = lfutil.getstandinsstate(repo)
1769 1769 # Make sure the merge runs on disk, not in-memory. largefiles is not a
1770 1770 # good candidate for in-memory merge (large files, custom dirstate,
1771 1771 # matcher usage).
1772 1772 kwargs['wc'] = repo[None]
1773 1773 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1774 1774
1775 1775 newstandins = lfutil.getstandinsstate(repo)
1776 1776 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1777 1777
1778 1778 # to avoid leaving all largefiles as dirty and thus rehashing them, mark
1779 1779 # all the ones that didn't change as clean
1780 1780 for lfile in oldclean.difference(filelist):
1781 1781 lfdirstate.normal(lfile)
1782 1782 lfdirstate.write()
1783 1783
1784 1784 if branchmerge or force or partial:
1785 1785 filelist.extend(s.deleted + s.removed)
1786 1786
1787 1787 lfcommands.updatelfiles(
1788 1788 repo.ui, repo, filelist=filelist, normallookup=partial
1789 1789 )
1790 1790
1791 1791 return result
1792 1792
1793 1793
1794 1794 @eh.wrapfunction(scmutil, b'marktouched')
1795 1795 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1796 1796 result = orig(repo, files, *args, **kwargs)
1797 1797
1798 1798 filelist = []
1799 1799 for f in files:
1800 1800 lf = lfutil.splitstandin(f)
1801 1801 if lf is not None:
1802 1802 filelist.append(lf)
1803 1803 if filelist:
1804 1804 lfcommands.updatelfiles(
1805 1805 repo.ui,
1806 1806 repo,
1807 1807 filelist=filelist,
1808 1808 printmessage=False,
1809 1809 normallookup=True,
1810 1810 )
1811 1811
1812 1812 return result
1813 1813
1814 1814
1815 1815 @eh.wrapfunction(upgrade, b'preservedrequirements')
1816 1816 @eh.wrapfunction(upgrade, b'supporteddestrequirements')
1817 1817 def upgraderequirements(orig, repo):
1818 1818 reqs = orig(repo)
1819 1819 if b'largefiles' in repo.requirements:
1820 1820 reqs.add(b'largefiles')
1821 1821 return reqs
1822 1822
1823 1823
1824 1824 _lfscheme = b'largefile://'
1825 1825
1826 1826
1827 1827 @eh.wrapfunction(urlmod, b'open')
1828 1828 def openlargefile(orig, ui, url_, data=None):
1829 1829 if url_.startswith(_lfscheme):
1830 1830 if data:
1831 1831 msg = b"cannot use data on a 'largefile://' url"
1832 1832 raise error.ProgrammingError(msg)
1833 1833 lfid = url_[len(_lfscheme) :]
1834 1834 return storefactory.getlfile(ui, lfid)
1835 1835 else:
1836 1836 return orig(ui, url_, data=data)
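The wrapper above dispatches on a custom URL scheme: a 'largefile://<id>' URL bypasses the normal opener and the id is fetched from the largefile store instead. A quick sketch of that dispatch decision (the id below is made up and the actual fetch is elided):

_lfscheme = b'largefile://'

def sketch_dispatch(url_):
    if url_.startswith(_lfscheme):
        lfid = url_[len(_lfscheme):]
        return ('store', lfid)     # the real code calls storefactory.getlfile(ui, lfid)
    return ('http', url_)          # the real code falls back to the wrapped opener

assert sketch_dispatch(b'largefile://' + b'ab' * 20) == ('store', b'ab' * 20)
assert sketch_dispatch(b'https://example.com/f')[0] == 'http'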
@@ -1,2295 +1,2305 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import stat
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 modifiednodeid,
19 19 nullid,
20 20 nullrev,
21 21 )
22 22 from .thirdparty import attr
23 23 from . import (
24 24 copies,
25 25 encoding,
26 26 error,
27 27 filemerge,
28 28 match as matchmod,
29 29 mergestate as mergestatemod,
30 30 obsutil,
31 31 pathutil,
32 32 pycompat,
33 33 scmutil,
34 34 subrepoutil,
35 35 util,
36 36 worker,
37 37 )
38 38
39 39 _pack = struct.pack
40 40 _unpack = struct.unpack
41 41
42 42
43 43 def _getcheckunknownconfig(repo, section, name):
44 44 config = repo.ui.config(section, name)
45 45 valid = [b'abort', b'ignore', b'warn']
46 46 if config not in valid:
47 47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
48 48 raise error.ConfigError(
49 49 _(b"%s.%s not valid ('%s' is none of %s)")
50 50 % (section, name, config, validstr)
51 51 )
52 52 return config
53 53
54 54
55 55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
56 56 if wctx.isinmemory():
57 57 # Nothing to do in IMM because nothing in the "working copy" can be an
58 58 # unknown file.
59 59 #
60 60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
61 61 # because that function does other useful work.
62 62 return False
63 63
64 64 if f2 is None:
65 65 f2 = f
66 66 return (
67 67 repo.wvfs.audit.check(f)
68 68 and repo.wvfs.isfileorlink(f)
69 69 and repo.dirstate.normalize(f) not in repo.dirstate
70 70 and mctx[f2].cmp(wctx[f])
71 71 )
72 72
73 73
74 74 class _unknowndirschecker(object):
75 75 """
76 76 Look for any unknown files or directories that may have a path conflict
77 77 with a file. If any path prefix of the file exists as a file or link,
78 78 then it conflicts. If the file itself is a directory that contains any
79 79 file that is not tracked, then it conflicts.
80 80
81 81 Returns the shortest path at which a conflict occurs, or None if there is
82 82 no conflict.
83 83 """
84 84
85 85 def __init__(self):
86 86 # A set of paths known to be good. This prevents repeated checking of
87 87 # dirs. It will be updated with any new dirs that are checked and found
88 88 # to be safe.
89 89 self._unknowndircache = set()
90 90
91 91 # A set of paths that are known to be absent. This prevents repeated
92 92 # checking of subdirectories that are known not to exist. It will be
93 93 # updated with any new dirs that are checked and found to be absent.
94 94 self._missingdircache = set()
95 95
96 96 def __call__(self, repo, wctx, f):
97 97 if wctx.isinmemory():
98 98 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
99 99 return False
100 100
101 101 # Check for path prefixes that exist as unknown files.
102 102 for p in reversed(list(pathutil.finddirs(f))):
103 103 if p in self._missingdircache:
104 104 return
105 105 if p in self._unknowndircache:
106 106 continue
107 107 if repo.wvfs.audit.check(p):
108 108 if (
109 109 repo.wvfs.isfileorlink(p)
110 110 and repo.dirstate.normalize(p) not in repo.dirstate
111 111 ):
112 112 return p
113 113 if not repo.wvfs.lexists(p):
114 114 self._missingdircache.add(p)
115 115 return
116 116 self._unknowndircache.add(p)
117 117
118 118 # Check if the file conflicts with a directory containing unknown files.
119 119 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
120 120 # Does the directory contain any files that are not in the dirstate?
121 121 for p, dirs, files in repo.wvfs.walk(f):
122 122 for fn in files:
123 123 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
124 124 relf = repo.dirstate.normalize(relf, isknown=True)
125 125 if relf not in repo.dirstate:
126 126 return f
127 127 return None
128 128
129 129
130 130 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
131 131 """
132 132 Considers any actions that care about the presence of conflicting unknown
133 133 files. For some actions, the result is to abort; for others, it is to
134 134 choose a different action.
135 135 """
136 136 fileconflicts = set()
137 137 pathconflicts = set()
138 138 warnconflicts = set()
139 139 abortconflicts = set()
140 140 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
141 141 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
142 142 pathconfig = repo.ui.configbool(
143 143 b'experimental', b'merge.checkpathconflicts'
144 144 )
145 145 if not force:
146 146
147 147 def collectconflicts(conflicts, config):
148 148 if config == b'abort':
149 149 abortconflicts.update(conflicts)
150 150 elif config == b'warn':
151 151 warnconflicts.update(conflicts)
152 152
153 153 checkunknowndirs = _unknowndirschecker()
154 154 for f, args, msg in mresult.getactions(
155 155 [
156 156 mergestatemod.ACTION_CREATED,
157 157 mergestatemod.ACTION_DELETED_CHANGED,
158 158 ]
159 159 ):
160 160 if _checkunknownfile(repo, wctx, mctx, f):
161 161 fileconflicts.add(f)
162 162 elif pathconfig and f not in wctx:
163 163 path = checkunknowndirs(repo, wctx, f)
164 164 if path is not None:
165 165 pathconflicts.add(path)
166 166 for f, args, msg in mresult.getactions(
167 167 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
168 168 ):
169 169 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
170 170 fileconflicts.add(f)
171 171
172 172 allconflicts = fileconflicts | pathconflicts
173 173 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
174 174 unknownconflicts = allconflicts - ignoredconflicts
175 175 collectconflicts(ignoredconflicts, ignoredconfig)
176 176 collectconflicts(unknownconflicts, unknownconfig)
177 177 else:
178 178 for f, args, msg in list(
179 179 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
180 180 ):
181 181 fl2, anc = args
182 182 different = _checkunknownfile(repo, wctx, mctx, f)
183 183 if repo.dirstate._ignore(f):
184 184 config = ignoredconfig
185 185 else:
186 186 config = unknownconfig
187 187
188 188 # The behavior when force is True is described by this table:
189 189 # config different mergeforce | action backup
190 190 # * n * | get n
191 191 # * y y | merge -
192 192 # abort y n | merge - (1)
193 193 # warn y n | warn + get y
194 194 # ignore y n | get y
195 195 #
196 196 # (1) this is probably the wrong behavior here -- we should
197 197 # probably abort, but some actions like rebases currently
198 198 # don't like an abort happening in the middle of
199 199 # merge.update.
200 200 if not different:
201 201 mresult.addfile(
202 202 f,
203 203 mergestatemod.ACTION_GET,
204 204 (fl2, False),
205 205 b'remote created',
206 206 )
207 207 elif mergeforce or config == b'abort':
208 208 mresult.addfile(
209 209 f,
210 210 mergestatemod.ACTION_MERGE,
211 211 (f, f, None, False, anc),
212 212 b'remote differs from untracked local',
213 213 )
214 214 elif config == b'abort':
215 215 abortconflicts.add(f)
216 216 else:
217 217 if config == b'warn':
218 218 warnconflicts.add(f)
219 219 mresult.addfile(
220 220 f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
221 221 )
222 222
223 223 for f in sorted(abortconflicts):
224 224 warn = repo.ui.warn
225 225 if f in pathconflicts:
226 226 if repo.wvfs.isfileorlink(f):
227 227 warn(_(b"%s: untracked file conflicts with directory\n") % f)
228 228 else:
229 229 warn(_(b"%s: untracked directory conflicts with file\n") % f)
230 230 else:
231 231 warn(_(b"%s: untracked file differs\n") % f)
232 232 if abortconflicts:
233 233 raise error.Abort(
234 234 _(
235 235 b"untracked files in working directory "
236 236 b"differ from files in requested revision"
237 237 )
238 238 )
239 239
240 240 for f in sorted(warnconflicts):
241 241 if repo.wvfs.isfileorlink(f):
242 242 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
243 243 else:
244 244 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
245 245
246 246 for f, args, msg in list(
247 247 mresult.getactions([mergestatemod.ACTION_CREATED])
248 248 ):
249 249 backup = (
250 250 f in fileconflicts
251 251 or f in pathconflicts
252 252 or any(p in pathconflicts for p in pathutil.finddirs(f))
253 253 )
254 254 (flags,) = args
255 255 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
256 256
257 257
258 258 def _forgetremoved(wctx, mctx, branchmerge):
259 259 """
260 260 Forget removed files
261 261
262 262 If we're jumping between revisions (as opposed to merging), and if
263 263 neither the working directory nor the target rev has the file,
264 264 then we need to remove it from the dirstate, to prevent the
265 265 dirstate from listing the file when it is no longer in the
266 266 manifest.
267 267
268 268 If we're merging, and the other revision has removed a file
269 269 that is not present in the working directory, we need to mark it
270 270 as removed.
271 271 """
272 272
273 273 actions = {}
274 274 m = mergestatemod.ACTION_FORGET
275 275 if branchmerge:
276 276 m = mergestatemod.ACTION_REMOVE
277 277 for f in wctx.deleted():
278 278 if f not in mctx:
279 279 actions[f] = m, None, b"forget deleted"
280 280
281 281 if not branchmerge:
282 282 for f in wctx.removed():
283 283 if f not in mctx:
284 284 actions[f] = (
285 285 mergestatemod.ACTION_FORGET,
286 286 None,
287 287 b"forget removed",
288 288 )
289 289
290 290 return actions
291 291
292 292
293 293 def _checkcollision(repo, wmf, mresult):
294 294 """
295 295 Check for case-folding collisions.
296 296 """
297 297 # If the repo is narrowed, filter out files outside the narrowspec.
298 298 narrowmatch = repo.narrowmatch()
299 299 if not narrowmatch.always():
300 300 pmmf = set(wmf.walk(narrowmatch))
301 301 if mresult:
302 302 for f, actionsfortype in pycompat.iteritems(mresult.actions):
303 303 if not narrowmatch(f):
304 304 mresult.removefile(f)
305 305 else:
306 306 # build provisional merged manifest up
307 307 pmmf = set(wmf)
308 308
309 309 if mresult:
310 310 # KEEP and EXEC are no-op
311 311 for f, args, msg in mresult.getactions(
312 312 (
313 313 mergestatemod.ACTION_ADD,
314 314 mergestatemod.ACTION_ADD_MODIFIED,
315 315 mergestatemod.ACTION_FORGET,
316 316 mergestatemod.ACTION_GET,
317 317 mergestatemod.ACTION_CHANGED_DELETED,
318 318 mergestatemod.ACTION_DELETED_CHANGED,
319 319 )
320 320 ):
321 321 pmmf.add(f)
322 322 for f, args, msg in mresult.getactions([mergestatemod.ACTION_REMOVE]):
323 323 pmmf.discard(f)
324 324 for f, args, msg in mresult.getactions(
325 325 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
326 326 ):
327 327 f2, flags = args
328 328 pmmf.discard(f2)
329 329 pmmf.add(f)
330 330 for f, args, msg in mresult.getactions(
331 331 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
332 332 ):
333 333 pmmf.add(f)
334 334 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
335 335 f1, f2, fa, move, anc = args
336 336 if move:
337 337 pmmf.discard(f1)
338 338 pmmf.add(f)
339 339
340 340 # check case-folding collision in provisional merged manifest
341 341 foldmap = {}
342 342 for f in pmmf:
343 343 fold = util.normcase(f)
344 344 if fold in foldmap:
345 345 raise error.Abort(
346 346 _(b"case-folding collision between %s and %s")
347 347 % (f, foldmap[fold])
348 348 )
349 349 foldmap[fold] = f
350 350
351 351 # check case-folding of directories
352 352 foldprefix = unfoldprefix = lastfull = b''
353 353 for fold, f in sorted(foldmap.items()):
354 354 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
355 355 # the folded prefix matches but actual casing is different
356 356 raise error.Abort(
357 357 _(b"case-folding collision between %s and directory of %s")
358 358 % (lastfull, f)
359 359 )
360 360 foldprefix = fold + b'/'
361 361 unfoldprefix = f + b'/'
362 362 lastfull = f
363 363
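To make the collision rule above concrete: every path in the provisional merged manifest is folded, and the merge aborts as soon as two distinct paths fold to the same key. The sketch below is illustrative only; it uses plain lower() where the real code relies on util.normcase, which also applies platform-specific normalization:

def sketch_checkcollision(paths):
    foldmap = {}
    for f in paths:
        fold = f.lower()           # stand-in for util.normcase(f)
        if fold in foldmap:
            raise ValueError('case-folding collision between %s and %s'
                             % (f, foldmap[fold]))
        foldmap[fold] = f

sketch_checkcollision(['README', 'docs/a.txt'])   # fine
# sketch_checkcollision(['README', 'readme'])     # would raise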
364 364
365 365 def driverpreprocess(repo, ms, wctx, labels=None):
366 366 """run the preprocess step of the merge driver, if any
367 367
368 368 This is currently not implemented -- it's an extension point."""
369 369 return True
370 370
371 371
372 372 def driverconclude(repo, ms, wctx, labels=None):
373 373 """run the conclude step of the merge driver, if any
374 374
375 375 This is currently not implemented -- it's an extension point."""
376 376 return True
377 377
378 378
379 379 def _filesindirs(repo, manifest, dirs):
380 380 """
381 381 Generator that yields pairs of all the files in the manifest that are found
382 382 inside the directories listed in dirs, and which directory they are found
383 383 in.
384 384 """
385 385 for f in manifest:
386 386 for p in pathutil.finddirs(f):
387 387 if p in dirs:
388 388 yield f, p
389 389 break
390 390
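A quick illustration of the generator contract documented above, with a simple parent-directory walk standing in for pathutil.finddirs (the file names and directories here are made up):

def _parents(path):
    # stand-in for pathutil.finddirs: yield each ancestor directory,
    # deepest first ('a/b/c.txt' -> 'a/b', then 'a')
    parts = path.split('/')[:-1]
    while parts:
        yield '/'.join(parts)
        parts.pop()

def sketch_filesindirs(manifest, dirs):
    for f in manifest:
        for p in _parents(f):
            if p in dirs:
                yield f, p
                break

print(list(sketch_filesindirs(['a/b/c.txt', 'x.txt'], {'a'})))
# -> [('a/b/c.txt', 'a')]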
391 391
392 392 def checkpathconflicts(repo, wctx, mctx, mresult):
393 393 """
394 394 Check if any actions introduce path conflicts in the repository, updating
395 395 actions to record or handle the path conflict accordingly.
396 396 """
397 397 mf = wctx.manifest()
398 398
399 399 # The set of local files that conflict with a remote directory.
400 400 localconflicts = set()
401 401
402 402 # The set of directories that conflict with a remote file, and so may cause
403 403 # conflicts if they still contain any files after the merge.
404 404 remoteconflicts = set()
405 405
406 406 # The set of directories that appear as both a file and a directory in the
407 407 # remote manifest. These indicate an invalid remote manifest, which
408 408 # can't be updated to cleanly.
409 409 invalidconflicts = set()
410 410
411 411 # The set of directories that contain files that are being created.
412 412 createdfiledirs = set()
413 413
414 414 # The set of files deleted by all the actions.
415 415 deletedfiles = set()
416 416
417 417 for (f, args, msg) in mresult.getactions(
418 418 (
419 419 mergestatemod.ACTION_CREATED,
420 420 mergestatemod.ACTION_DELETED_CHANGED,
421 421 mergestatemod.ACTION_MERGE,
422 422 mergestatemod.ACTION_CREATED_MERGE,
423 423 )
424 424 ):
425 425 # This action may create a new local file.
426 426 createdfiledirs.update(pathutil.finddirs(f))
427 427 if mf.hasdir(f):
428 428 # The file aliases a local directory. This might be ok if all
429 429 # the files in the local directory are being deleted. This
430 430 # will be checked once we know what all the deleted files are.
431 431 remoteconflicts.add(f)
432 432 # Track the names of all deleted files.
433 433 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_REMOVE,)):
434 434 deletedfiles.add(f)
435 435 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
436 436 f1, f2, fa, move, anc = args
437 437 if move:
438 438 deletedfiles.add(f1)
439 439 for (f, args, msg) in mresult.getactions(
440 440 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
441 441 ):
442 442 f2, flags = args
443 443 deletedfiles.add(f2)
444 444
445 445 # Check all directories that contain created files for path conflicts.
446 446 for p in createdfiledirs:
447 447 if p in mf:
448 448 if p in mctx:
449 449 # A file is in a directory which aliases both a local
450 450 # and a remote file. This is an internal inconsistency
451 451 # within the remote manifest.
452 452 invalidconflicts.add(p)
453 453 else:
454 454 # A file is in a directory which aliases a local file.
455 455 # We will need to rename the local file.
456 456 localconflicts.add(p)
457 if p in mresult.actions and mresult.actions[p][0] in (
457 pd = mresult.getfile(p)
458 if pd and pd[0] in (
458 459 mergestatemod.ACTION_CREATED,
459 460 mergestatemod.ACTION_DELETED_CHANGED,
460 461 mergestatemod.ACTION_MERGE,
461 462 mergestatemod.ACTION_CREATED_MERGE,
462 463 ):
463 464 # The file is in a directory which aliases a remote file.
464 465 # This is an internal inconsistency within the remote
465 466 # manifest.
466 467 invalidconflicts.add(p)
467 468
468 469 # Rename all local conflicting files that have not been deleted.
469 470 for p in localconflicts:
470 471 if p not in deletedfiles:
471 472 ctxname = bytes(wctx).rstrip(b'+')
472 473 pnew = util.safename(p, ctxname, wctx, set(mresult.actions.keys()))
473 474 porig = wctx[p].copysource() or p
474 475 mresult.addfile(
475 476 pnew,
476 477 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
477 478 (p, porig),
478 479 b'local path conflict',
479 480 )
480 481 mresult.addfile(
481 482 p,
482 483 mergestatemod.ACTION_PATH_CONFLICT,
483 484 (pnew, b'l'),
484 485 b'path conflict',
485 486 )
486 487
487 488 if remoteconflicts:
488 489 # Check if all files in the conflicting directories have been removed.
489 490 ctxname = bytes(mctx).rstrip(b'+')
490 491 for f, p in _filesindirs(repo, mf, remoteconflicts):
491 492 if f not in deletedfiles:
492 m, args, msg = mresult.actions[p]
493 m, args, msg = mresult.getfile(p)
493 494 pnew = util.safename(
494 495 p, ctxname, wctx, set(mresult.actions.keys())
495 496 )
496 497 if m in (
497 498 mergestatemod.ACTION_DELETED_CHANGED,
498 499 mergestatemod.ACTION_MERGE,
499 500 ):
500 501 # Action was merge, just update target.
501 502 mresult.addfile(pnew, m, args, msg)
502 503 else:
503 504 # Action was create, change to renamed get action.
504 505 fl = args[0]
505 506 mresult.addfile(
506 507 pnew,
507 508 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
508 509 (p, fl),
509 510 b'remote path conflict',
510 511 )
511 512 mresult.addfile(
512 513 p,
513 514 mergestatemod.ACTION_PATH_CONFLICT,
514 515 (pnew, mergestatemod.ACTION_REMOVE),
515 516 b'path conflict',
516 517 )
517 518 remoteconflicts.remove(p)
518 519 break
519 520
520 521 if invalidconflicts:
521 522 for p in invalidconflicts:
522 523 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
523 524 raise error.Abort(_(b"destination manifest contains path conflicts"))
524 525
525 526
526 527 def _filternarrowactions(narrowmatch, branchmerge, mresult):
527 528 """
528 529 Filters out actions that can be ignored because the repo is narrowed.
529 530
530 531 Raise an exception if the merge cannot be completed because the repo is
531 532 narrowed.
532 533 """
533 534 # TODO: handle with nonconflicttypes
534 535 nooptypes = {mergestatemod.ACTION_KEEP}
535 536 nonconflicttypes = {
536 537 mergestatemod.ACTION_ADD,
537 538 mergestatemod.ACTION_ADD_MODIFIED,
538 539 mergestatemod.ACTION_CREATED,
539 540 mergestatemod.ACTION_CREATED_MERGE,
540 541 mergestatemod.ACTION_FORGET,
541 542 mergestatemod.ACTION_GET,
542 543 mergestatemod.ACTION_REMOVE,
543 544 mergestatemod.ACTION_EXEC,
544 545 }
545 546 # We mutate the items in the dict during iteration, so iterate
546 547 # over a copy.
547 548 for f, action in list(mresult.actions.items()):
548 549 if narrowmatch(f):
549 550 pass
550 551 elif not branchmerge:
551 552 mresult.removefile(f) # just updating, ignore changes outside clone
552 553 elif action[0] in nooptypes:
553 554 mresult.removefile(f) # merge does not affect file
554 555 elif action[0] in nonconflicttypes:
555 556 raise error.Abort(
556 557 _(
557 558 b'merge affects file \'%s\' outside narrow, '
558 559 b'which is not yet supported'
559 560 )
560 561 % f,
561 562 hint=_(b'merging in the other direction may work'),
562 563 )
563 564 else:
564 565 raise error.Abort(
565 566 _(b'conflict in file \'%s\' is outside narrow clone') % f
566 567 )
567 568
568 569
569 570 class mergeresult(object):
570 571 ''''An object representing result of merging manifests.
571 572
572 573 It has information about what actions need to be performed on dirstate
573 574 mapping of divergent renames and other such cases. '''
574 575
575 576 def __init__(self):
576 577 """
577 578 filemapping: dict with filenames as keys and action-related info as values
578 579 diverge: mapping of source name -> list of dest name for
579 580 divergent renames
580 581 renamedelete: mapping of source name -> list of destinations for files
581 582 deleted on one side and renamed on the other.
582 583 commitinfo: dict containing data which should be used on commit
583 584 contains a filename -> info mapping
584 585 actionmapping: dict with action names as keys; each value is a dict of
585 586 filenames as keys and related data as values
586 587 """
587 588 self._filemapping = {}
588 589 self._diverge = {}
589 590 self._renamedelete = {}
590 591 self._commitinfo = {}
591 592 self._actionmapping = collections.defaultdict(dict)
592 593
593 594 def updatevalues(self, diverge, renamedelete, commitinfo):
594 595 self._diverge = diverge
595 596 self._renamedelete = renamedelete
596 597 self._commitinfo = commitinfo
597 598
598 599 def addfile(self, filename, action, data, message):
599 600 """ adds a new file to the mergeresult object
600 601
601 602 filename: file which we are adding
602 603 action: one of mergestatemod.ACTION_*
603 604 data: a tuple of information like fctx and ctx related to this merge
604 605 message: a message about the merge
605 606 """
606 607 # if the file already existed, we need to delete its old
607 608 # entry from _actionmapping too
608 609 if filename in self._filemapping:
609 610 a, d, m = self._filemapping[filename]
610 611 del self._actionmapping[a][filename]
611 612
612 613 self._filemapping[filename] = (action, data, message)
613 614 self._actionmapping[action][filename] = (data, message)
614 615
616 def getfile(self, filename, default_return=None):
617 """ returns (action, args, msg) about this file
618
619 returns default_return if the file is not present """
620 if filename in self._filemapping:
621 return self._filemapping[filename]
622 return default_return
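This new accessor is the point of the changeset: callers ask for a file's (action, args, msg) tuple and get a default back instead of a KeyError when the file has no pending action, which is what lets checkpathconflicts() above replace its 'p in mresult.actions' double lookups. A minimal, self-contained sketch of that contract (the stand-in class below is illustrative only, not the real mergeresult):

class _sketchmergeresult(object):
    def __init__(self):
        self._filemapping = {}   # filename -> (action, data, message)

    def addfile(self, filename, action, data, message):
        self._filemapping[filename] = (action, data, message)

    def getfile(self, filename, default_return=None):
        # same lookup contract as mergeresult.getfile() above
        if filename in self._filemapping:
            return self._filemapping[filename]
        return default_return

mr = _sketchmergeresult()
mr.addfile(b'a.txt', b'g', (b'', False), b'remote created')
assert mr.getfile(b'a.txt')[0] == b'g'
assert mr.getfile(b'missing') is None                 # default instead of KeyError
assert mr.getfile(b'missing', default_return=()) == ()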
623
615 624 def removefile(self, filename):
616 625 """ removes a file from the mergeresult object as the file might
617 626 not merging anymore """
618 627 action, data, message = self._filemapping[filename]
619 628 del self._filemapping[filename]
620 629 del self._actionmapping[action][filename]
621 630
622 631 def getactions(self, actions, sort=False):
623 632 """ get list of files which are marked with these actions
624 633 if sort is true, files for each action is sorted and then added
625 634
626 635 Returns a list of tuple of form (filename, data, message)
627 636 """
628 637 for a in actions:
629 638 if sort:
630 639 for f in sorted(self._actionmapping[a]):
631 640 args, msg = self._actionmapping[a][f]
632 641 yield f, args, msg
633 642 else:
634 643 for f, (args, msg) in pycompat.iteritems(
635 644 self._actionmapping[a]
636 645 ):
637 646 yield f, args, msg
638 647
639 648 def len(self, actions=None):
640 649 """ returns number of files which needs actions
641 650
642 651 if actions is passed, only the total number of files for those
643 652 actions is returned """
644 653
645 654 if actions is None:
646 655 return len(self._filemapping)
647 656
648 657 return sum(len(self._actionmapping[a]) for a in actions)
649 658
650 659 @property
651 660 def actions(self):
652 661 return self._filemapping
653 662
654 663 @property
655 664 def diverge(self):
656 665 return self._diverge
657 666
658 667 @property
659 668 def renamedelete(self):
660 669 return self._renamedelete
661 670
662 671 @property
663 672 def commitinfo(self):
664 673 return self._commitinfo
665 674
666 675 @property
667 676 def actionsdict(self):
668 677 """ returns a dictionary of actions to be perfomed with action as key
669 678 and a list of files and related arguments as values """
670 679 res = emptyactions()
671 680 for a, d in pycompat.iteritems(self._actionmapping):
672 681 for f, (args, msg) in pycompat.iteritems(d):
673 682 res[a].append((f, args, msg))
674 683 return res
675 684
676 685 def setactions(self, actions):
677 686 self._filemapping = actions
678 687 self._actionmapping = collections.defaultdict(dict)
679 688 for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
680 689 self._actionmapping[act][f] = data, msg
681 690
682 691 def updateactions(self, updates):
683 692 for f, (a, data, msg) in pycompat.iteritems(updates):
684 693 self.addfile(f, a, data, msg)
685 694
686 695 def hasconflicts(self):
687 696 """ tells whether this merge resulted in some actions which can
688 697 result in conflicts or not """
689 698 for a in self._actionmapping.keys():
690 699 if (
691 700 a
692 701 not in (
693 702 mergestatemod.ACTION_GET,
694 703 mergestatemod.ACTION_KEEP,
695 704 mergestatemod.ACTION_EXEC,
696 705 mergestatemod.ACTION_REMOVE,
697 706 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
698 707 )
699 708 and self._actionmapping[a]
700 709 ):
701 710 return True
702 711
703 712 return False
704 713
705 714
706 715 def manifestmerge(
707 716 repo,
708 717 wctx,
709 718 p2,
710 719 pa,
711 720 branchmerge,
712 721 force,
713 722 matcher,
714 723 acceptremote,
715 724 followcopies,
716 725 forcefulldiff=False,
717 726 ):
718 727 """
719 728 Merge wctx and p2 with ancestor pa and generate merge action list
720 729
721 730 branchmerge and force are as passed in to update
722 731 matcher = matcher to filter file lists
723 732 acceptremote = accept the incoming changes without prompting
724 733
725 734 Returns an object of mergeresult class
726 735 """
727 736 mresult = mergeresult()
728 737 if matcher is not None and matcher.always():
729 738 matcher = None
730 739
731 740 # manifests fetched in order are going to be faster, so prime the caches
732 741 [
733 742 x.manifest()
734 743 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
735 744 ]
736 745
737 746 branch_copies1 = copies.branch_copies()
738 747 branch_copies2 = copies.branch_copies()
739 748 diverge = {}
740 749 # information from merge which is needed at commit time
741 750 # for example choosing filelog of which parent to commit
742 751 # TODO: use specific constants in future for this mapping
743 752 commitinfo = {}
744 753 if followcopies:
745 754 branch_copies1, branch_copies2, diverge = copies.mergecopies(
746 755 repo, wctx, p2, pa
747 756 )
748 757
749 758 boolbm = pycompat.bytestr(bool(branchmerge))
750 759 boolf = pycompat.bytestr(bool(force))
751 760 boolm = pycompat.bytestr(bool(matcher))
752 761 repo.ui.note(_(b"resolving manifests\n"))
753 762 repo.ui.debug(
754 763 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
755 764 )
756 765 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
757 766
758 767 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
759 768 copied1 = set(branch_copies1.copy.values())
760 769 copied1.update(branch_copies1.movewithdir.values())
761 770 copied2 = set(branch_copies2.copy.values())
762 771 copied2.update(branch_copies2.movewithdir.values())
763 772
764 773 if b'.hgsubstate' in m1 and wctx.rev() is None:
765 774 # Check whether sub state is modified, and overwrite the manifest
766 775 # to flag the change. If wctx is a committed revision, we shouldn't
767 776 # care for the dirty state of the working directory.
768 777 if any(wctx.sub(s).dirty() for s in wctx.substate):
769 778 m1[b'.hgsubstate'] = modifiednodeid
770 779
771 780 # Don't use m2-vs-ma optimization if:
772 781 # - ma is the same as m1 or m2, which we're just going to diff again later
773 782 # - The caller specifically asks for a full diff, which is useful during bid
774 783 # merge.
775 784 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
776 785 # Identify which files are relevant to the merge, so we can limit the
777 786 # total m1-vs-m2 diff to just those files. This has significant
778 787 # performance benefits in large repositories.
779 788 relevantfiles = set(ma.diff(m2).keys())
780 789
781 790 # For copied and moved files, we need to add the source file too.
782 791 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
783 792 if copyvalue in relevantfiles:
784 793 relevantfiles.add(copykey)
785 794 for movedirkey in branch_copies1.movewithdir:
786 795 relevantfiles.add(movedirkey)
787 796 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
788 797 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
789 798
790 799 diff = m1.diff(m2, match=matcher)
791 800
792 801 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
793 802 if n1 and n2: # file exists on both local and remote side
794 803 if f not in ma:
795 804 # TODO: what if they're renamed from different sources?
796 805 fa = branch_copies1.copy.get(
797 806 f, None
798 807 ) or branch_copies2.copy.get(f, None)
799 808 args, msg = None, None
800 809 if fa is not None:
801 810 args = (f, f, fa, False, pa.node())
802 811 msg = b'both renamed from %s' % fa
803 812 else:
804 813 args = (f, f, None, False, pa.node())
805 814 msg = b'both created'
806 815 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
807 816 else:
808 817 a = ma[f]
809 818 fla = ma.flags(f)
810 819 nol = b'l' not in fl1 + fl2 + fla
811 820 if n2 == a and fl2 == fla:
812 821 mresult.addfile(
813 822 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
814 823 )
815 824 elif n1 == a and fl1 == fla: # local unchanged - use remote
816 825 if n1 == n2: # optimization: keep local content
817 826 mresult.addfile(
818 827 f,
819 828 mergestatemod.ACTION_EXEC,
820 829 (fl2,),
821 830 b'update permissions',
822 831 )
823 832 else:
824 833 mresult.addfile(
825 834 f,
826 835 mergestatemod.ACTION_GET,
827 836 (fl2, False),
828 837 b'remote is newer',
829 838 )
830 839 if branchmerge:
831 840 commitinfo[f] = b'other'
832 841 elif nol and n2 == a: # remote only changed 'x'
833 842 mresult.addfile(
834 843 f,
835 844 mergestatemod.ACTION_EXEC,
836 845 (fl2,),
837 846 b'update permissions',
838 847 )
839 848 elif nol and n1 == a: # local only changed 'x'
840 849 mresult.addfile(
841 850 f,
842 851 mergestatemod.ACTION_GET,
843 852 (fl1, False),
844 853 b'remote is newer',
845 854 )
846 855 if branchmerge:
847 856 commitinfo[f] = b'other'
848 857 else: # both changed something
849 858 mresult.addfile(
850 859 f,
851 860 mergestatemod.ACTION_MERGE,
852 861 (f, f, f, False, pa.node()),
853 862 b'versions differ',
854 863 )
855 864 elif n1: # file exists only on local side
856 865 if f in copied2:
857 866 pass # we'll deal with it on m2 side
858 867 elif (
859 868 f in branch_copies1.movewithdir
860 869 ): # directory rename, move local
861 870 f2 = branch_copies1.movewithdir[f]
862 871 if f2 in m2:
863 872 mresult.addfile(
864 873 f2,
865 874 mergestatemod.ACTION_MERGE,
866 875 (f, f2, None, True, pa.node()),
867 876 b'remote directory rename, both created',
868 877 )
869 878 else:
870 879 mresult.addfile(
871 880 f2,
872 881 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
873 882 (f, fl1),
874 883 b'remote directory rename - move from %s' % f,
875 884 )
876 885 elif f in branch_copies1.copy:
877 886 f2 = branch_copies1.copy[f]
878 887 mresult.addfile(
879 888 f,
880 889 mergestatemod.ACTION_MERGE,
881 890 (f, f2, f2, False, pa.node()),
882 891 b'local copied/moved from %s' % f2,
883 892 )
884 893 elif f in ma: # clean, a different, no remote
885 894 if n1 != ma[f]:
886 895 if acceptremote:
887 896 mresult.addfile(
888 897 f,
889 898 mergestatemod.ACTION_REMOVE,
890 899 None,
891 900 b'remote delete',
892 901 )
893 902 else:
894 903 mresult.addfile(
895 904 f,
896 905 mergestatemod.ACTION_CHANGED_DELETED,
897 906 (f, None, f, False, pa.node()),
898 907 b'prompt changed/deleted',
899 908 )
900 909 elif n1 == addednodeid:
901 910 # This file was locally added. We should forget it instead of
902 911 # deleting it.
903 912 mresult.addfile(
904 913 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
905 914 )
906 915 else:
907 916 mresult.addfile(
908 917 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
909 918 )
910 919 elif n2: # file exists only on remote side
911 920 if f in copied1:
912 921 pass # we'll deal with it on m1 side
913 922 elif f in branch_copies2.movewithdir:
914 923 f2 = branch_copies2.movewithdir[f]
915 924 if f2 in m1:
916 925 mresult.addfile(
917 926 f2,
918 927 mergestatemod.ACTION_MERGE,
919 928 (f2, f, None, False, pa.node()),
920 929 b'local directory rename, both created',
921 930 )
922 931 else:
923 932 mresult.addfile(
924 933 f2,
925 934 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
926 935 (f, fl2),
927 936 b'local directory rename - get from %s' % f,
928 937 )
929 938 elif f in branch_copies2.copy:
930 939 f2 = branch_copies2.copy[f]
931 940 msg, args = None, None
932 941 if f2 in m2:
933 942 args = (f2, f, f2, False, pa.node())
934 943 msg = b'remote copied from %s' % f2
935 944 else:
936 945 args = (f2, f, f2, True, pa.node())
937 946 msg = b'remote moved from %s' % f2
938 947 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
939 948 elif f not in ma:
940 949 # local unknown, remote created: the logic is described by the
941 950 # following table:
942 951 #
943 952 # force branchmerge different | action
944 953 # n * * | create
945 954 # y n * | create
946 955 # y y n | create
947 956 # y y y | merge
948 957 #
949 958 # Checking whether the files are different is expensive, so we
950 959 # don't do that when we can avoid it.
951 960 if not force:
952 961 mresult.addfile(
953 962 f,
954 963 mergestatemod.ACTION_CREATED,
955 964 (fl2,),
956 965 b'remote created',
957 966 )
958 967 elif not branchmerge:
959 968 mresult.addfile(
960 969 f,
961 970 mergestatemod.ACTION_CREATED,
962 971 (fl2,),
963 972 b'remote created',
964 973 )
965 974 else:
966 975 mresult.addfile(
967 976 f,
968 977 mergestatemod.ACTION_CREATED_MERGE,
969 978 (fl2, pa.node()),
970 979 b'remote created, get or merge',
971 980 )
972 981 elif n2 != ma[f]:
973 982 df = None
974 983 for d in branch_copies1.dirmove:
975 984 if f.startswith(d):
976 985 # new file added in a directory that was moved
977 986 df = branch_copies1.dirmove[d] + f[len(d) :]
978 987 break
979 988 if df is not None and df in m1:
980 989 mresult.addfile(
981 990 df,
982 991 mergestatemod.ACTION_MERGE,
983 992 (df, f, f, False, pa.node()),
984 993 b'local directory rename - respect move '
985 994 b'from %s' % f,
986 995 )
987 996 elif acceptremote:
988 997 mresult.addfile(
989 998 f,
990 999 mergestatemod.ACTION_CREATED,
991 1000 (fl2,),
992 1001 b'remote recreating',
993 1002 )
994 1003 else:
995 1004 mresult.addfile(
996 1005 f,
997 1006 mergestatemod.ACTION_DELETED_CHANGED,
998 1007 (None, f, f, False, pa.node()),
999 1008 b'prompt deleted/changed',
1000 1009 )
1001 1010
1002 1011 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1003 1012 # If we are merging, look for path conflicts.
1004 1013 checkpathconflicts(repo, wctx, p2, mresult)
1005 1014
1006 1015 narrowmatch = repo.narrowmatch()
1007 1016 if not narrowmatch.always():
1008 1017 # Updates "actions" in place
1009 1018 _filternarrowactions(narrowmatch, branchmerge, mresult)
1010 1019
1011 1020 renamedelete = branch_copies1.renamedelete
1012 1021 renamedelete.update(branch_copies2.renamedelete)
1013 1022
1014 1023 mresult.updatevalues(diverge, renamedelete, commitinfo)
1015 1024 return mresult
1016 1025
1017 1026
1018 1027 def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
1019 1028 """Resolves false conflicts where the nodeid changed but the content
1020 1029 remained the same."""
1021 1030 # We force a copy of actions.items() because we're going to mutate
1022 1031 # actions as we resolve trivial conflicts.
1023 1032 for f, args, msg in list(
1024 1033 mresult.getactions([mergestatemod.ACTION_CHANGED_DELETED])
1025 1034 ):
1026 1035 if f in ancestor and not wctx[f].cmp(ancestor[f]):
1027 1036 # local did change but ended up with same content
1028 1037 mresult.addfile(
1029 1038 f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
1030 1039 )
1031 1040
1032 1041 for f, args, msg in list(
1033 1042 mresult.getactions([mergestatemod.ACTION_DELETED_CHANGED])
1034 1043 ):
1035 1044 if f in ancestor and not mctx[f].cmp(ancestor[f]):
1036 1045 # remote did change but ended up with same content
1037 1046 mresult.removefile(f) # don't get = keep local deleted
1038 1047
1039 1048
1040 1049 def calculateupdates(
1041 1050 repo,
1042 1051 wctx,
1043 1052 mctx,
1044 1053 ancestors,
1045 1054 branchmerge,
1046 1055 force,
1047 1056 acceptremote,
1048 1057 followcopies,
1049 1058 matcher=None,
1050 1059 mergeforce=False,
1051 1060 ):
1052 1061 """
1053 1062 Calculate the actions needed to merge mctx into wctx using ancestors
1054 1063
1055 1064 Uses manifestmerge() to merge manifest and get list of actions required to
1056 1065 perform for merging two manifests. If there are multiple ancestors, uses bid
1057 1066 merge if enabled.
1058 1067
1059 1068 Also filters out actions which are not required if the repository is sparse.
1060 1069
1061 1070 Returns mergeresult object same as manifestmerge().
1062 1071 """
1063 1072 # Avoid cycle.
1064 1073 from . import sparse
1065 1074
1066 1075 mresult = None
1067 1076 if len(ancestors) == 1: # default
1068 1077 mresult = manifestmerge(
1069 1078 repo,
1070 1079 wctx,
1071 1080 mctx,
1072 1081 ancestors[0],
1073 1082 branchmerge,
1074 1083 force,
1075 1084 matcher,
1076 1085 acceptremote,
1077 1086 followcopies,
1078 1087 )
1079 1088 _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
1080 1089
1081 1090 else: # only when merge.preferancestor=* - the default
1082 1091 repo.ui.note(
1083 1092 _(b"note: merging %s and %s using bids from ancestors %s\n")
1084 1093 % (
1085 1094 wctx,
1086 1095 mctx,
1087 1096 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1088 1097 )
1089 1098 )
1090 1099
1091 1100 # mapping filename to bids (action method to list of actions)
1092 1101 # {FILENAME1 : BID1, FILENAME2 : BID2}
1093 1102 # BID is another dictionary which contains
1094 1103 # mapping of following form:
1095 1104 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
1096 1105 fbids = {}
1097 1106 diverge, renamedelete = None, None
1098 1107 for ancestor in ancestors:
1099 1108 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1100 1109 mresult1 = manifestmerge(
1101 1110 repo,
1102 1111 wctx,
1103 1112 mctx,
1104 1113 ancestor,
1105 1114 branchmerge,
1106 1115 force,
1107 1116 matcher,
1108 1117 acceptremote,
1109 1118 followcopies,
1110 1119 forcefulldiff=True,
1111 1120 )
1112 1121 _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
1113 1122
1114 1123 # Track the shortest set of warning on the theory that bid
1115 1124 # merge will correctly incorporate more information
1116 1125 if diverge is None or len(mresult1.diverge) < len(diverge):
1117 1126 diverge = mresult1.diverge
1118 1127 if renamedelete is None or len(renamedelete) < len(
1119 1128 mresult1.renamedelete
1120 1129 ):
1121 1130 renamedelete = mresult1.renamedelete
1122 1131
1123 1132 for f, a in sorted(pycompat.iteritems(mresult1.actions)):
1124 1133 m, args, msg = a
1125 1134 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1126 1135 if f in fbids:
1127 1136 d = fbids[f]
1128 1137 if m in d:
1129 1138 d[m].append(a)
1130 1139 else:
1131 1140 d[m] = [a]
1132 1141 else:
1133 1142 fbids[f] = {m: [a]}
1134 1143
1135 1144 # Call for bids
1136 1145 # Pick the best bid for each file
1137 1146 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1138 1147 mresult = mergeresult()
1139 1148 for f, bids in sorted(fbids.items()):
1140 1149 # bids is a mapping from action method to list of actions
1141 1150 # Consensus?
1142 1151 if len(bids) == 1: # all bids are the same kind of method
1143 1152 m, l = list(bids.items())[0]
1144 1153 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1145 1154 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1146 1155 mresult.addfile(f, *l[0])
1147 1156 continue
1148 1157 # If keep is an option, just do it.
1149 1158 if mergestatemod.ACTION_KEEP in bids:
1150 1159 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1151 1160 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1152 1161 continue
1153 1162 # If there are gets and they all agree [how could they not?], do it.
1154 1163 if mergestatemod.ACTION_GET in bids:
1155 1164 ga0 = bids[mergestatemod.ACTION_GET][0]
1156 1165 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1157 1166 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1158 1167 mresult.addfile(f, *ga0)
1159 1168 continue
1160 1169 # TODO: Consider other simple actions such as mode changes
1161 1170 # Handle inefficient democrazy.
1162 1171 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1163 1172 for m, l in sorted(bids.items()):
1164 1173 for _f, args, msg in l:
1165 1174 repo.ui.note(b' %s -> %s\n' % (msg, m))
1166 1175 # Pick random action. TODO: Instead, prompt user when resolving
1167 1176 m, l = list(bids.items())[0]
1168 1177 repo.ui.warn(
1169 1178 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1170 1179 )
1171 1180 mresult.addfile(f, *l[0])
1172 1181 continue
1173 1182 repo.ui.note(_(b'end of auction\n\n'))
1174 1183 # TODO: think about commitinfo when bid merge is used
1175 1184 mresult.updatevalues(diverge, renamedelete, {})
1176 1185
1177 1186 if wctx.rev() is None:
1178 1187 fractions = _forgetremoved(wctx, mctx, branchmerge)
1179 1188 mresult.updateactions(fractions)
1180 1189
1181 1190 sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
1182 1191 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
1183 1192
1184 1193 return mresult
1185 1194
1186 1195
1187 1196 def _getcwd():
1188 1197 try:
1189 1198 return encoding.getcwd()
1190 1199 except OSError as err:
1191 1200 if err.errno == errno.ENOENT:
1192 1201 return None
1193 1202 raise
1194 1203
1195 1204
1196 1205 def batchremove(repo, wctx, actions):
1197 1206 """apply removes to the working directory
1198 1207
1199 1208 yields tuples for progress updates
1200 1209 """
1201 1210 verbose = repo.ui.verbose
1202 1211 cwd = _getcwd()
1203 1212 i = 0
1204 1213 for f, args, msg in actions:
1205 1214 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1206 1215 if verbose:
1207 1216 repo.ui.note(_(b"removing %s\n") % f)
1208 1217 wctx[f].audit()
1209 1218 try:
1210 1219 wctx[f].remove(ignoremissing=True)
1211 1220 except OSError as inst:
1212 1221 repo.ui.warn(
1213 1222 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1214 1223 )
1215 1224 if i == 100:
1216 1225 yield i, f
1217 1226 i = 0
1218 1227 i += 1
1219 1228 if i > 0:
1220 1229 yield i, f
1221 1230
1222 1231 if cwd and not _getcwd():
1223 1232 # cwd was removed in the course of removing files; print a helpful
1224 1233 # warning.
1225 1234 repo.ui.warn(
1226 1235 _(
1227 1236 b"current directory was removed\n"
1228 1237 b"(consider changing to repo root: %s)\n"
1229 1238 )
1230 1239 % repo.root
1231 1240 )
1232 1241
1233 1242
1234 1243 def batchget(repo, mctx, wctx, wantfiledata, actions):
1235 1244 """apply gets to the working directory
1236 1245
1237 1246 mctx is the context to get from
1238 1247
1239 1248 Yields arbitrarily many (False, tuple) for progress updates, followed by
1240 1249 exactly one (True, filedata). When wantfiledata is false, filedata is an
1241 1250 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1242 1251 mtime) of the file f written for each action.
1243 1252 """
1244 1253 filedata = {}
1245 1254 verbose = repo.ui.verbose
1246 1255 fctx = mctx.filectx
1247 1256 ui = repo.ui
1248 1257 i = 0
1249 1258 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1250 1259 for f, (flags, backup), msg in actions:
1251 1260 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1252 1261 if verbose:
1253 1262 repo.ui.note(_(b"getting %s\n") % f)
1254 1263
1255 1264 if backup:
1256 1265 # If a file or directory exists with the same name, back that
1257 1266 # up. Otherwise, look to see if there is a file that conflicts
1258 1267 # with a directory this file is in, and if so, back that up.
1259 1268 conflicting = f
1260 1269 if not repo.wvfs.lexists(f):
1261 1270 for p in pathutil.finddirs(f):
1262 1271 if repo.wvfs.isfileorlink(p):
1263 1272 conflicting = p
1264 1273 break
1265 1274 if repo.wvfs.lexists(conflicting):
1266 1275 orig = scmutil.backuppath(ui, repo, conflicting)
1267 1276 util.rename(repo.wjoin(conflicting), orig)
1268 1277 wfctx = wctx[f]
1269 1278 wfctx.clearunknown()
1270 1279 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1271 1280 size = wfctx.write(
1272 1281 fctx(f).data(),
1273 1282 flags,
1274 1283 backgroundclose=True,
1275 1284 atomictemp=atomictemp,
1276 1285 )
1277 1286 if wantfiledata:
1278 1287 s = wfctx.lstat()
1279 1288 mode = s.st_mode
1280 1289 mtime = s[stat.ST_MTIME]
1281 1290 filedata[f] = (mode, size, mtime) # for dirstate.normal
1282 1291 if i == 100:
1283 1292 yield False, (i, f)
1284 1293 i = 0
1285 1294 i += 1
1286 1295 if i > 0:
1287 1296 yield False, (i, f)
1288 1297 yield True, filedata
1289 1298
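The progress protocol described in the docstring above (any number of (False, ...) progress tuples followed by exactly one (True, filedata)) is what applyupdates() consumes later via the worker. A tiny self-contained sketch of that protocol, with made-up file names and filedata values:

def sketch_worker(names):
    for i, name in enumerate(names, 1):
        yield False, (i, name)                              # progress update
    yield True, dict((n, (0o644, 0, 0)) for n in names)     # final filedata

filedata = {}
for final, res in sketch_worker(['a.txt', 'b.txt']):
    if final:
        filedata = res
    else:
        i, item = res
        print('progress: %d %s' % (i, item))
print(sorted(filedata))   # ['a.txt', 'b.txt']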
1290 1299
1291 1300 def _prefetchfiles(repo, ctx, mresult):
1292 1301 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1293 1302 of merge actions. ``ctx`` is the context being merged in."""
1294 1303
1295 1304 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1296 1305 # don't touch the context to be merged in. 'cd' is skipped, because
1297 1306 # changed/deleted never resolves to something from the remote side.
1298 1307 files = []
1299 1308 for f, args, msg in mresult.getactions(
1300 1309 [
1301 1310 mergestatemod.ACTION_GET,
1302 1311 mergestatemod.ACTION_DELETED_CHANGED,
1303 1312 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1304 1313 mergestatemod.ACTION_MERGE,
1305 1314 ]
1306 1315 ):
1307 1316 files.append(f)
1308 1317
1309 1318 prefetch = scmutil.prefetchfiles
1310 1319 matchfiles = scmutil.matchfiles
1311 1320 prefetch(
1312 1321 repo, [(ctx.rev(), matchfiles(repo, files),)],
1313 1322 )
1314 1323
1315 1324
1316 1325 @attr.s(frozen=True)
1317 1326 class updateresult(object):
1318 1327 updatedcount = attr.ib()
1319 1328 mergedcount = attr.ib()
1320 1329 removedcount = attr.ib()
1321 1330 unresolvedcount = attr.ib()
1322 1331
1323 1332 def isempty(self):
1324 1333 return not (
1325 1334 self.updatedcount
1326 1335 or self.mergedcount
1327 1336 or self.removedcount
1328 1337 or self.unresolvedcount
1329 1338 )
1330 1339
1331 1340
1332 1341 def emptyactions():
1333 1342 """create an actions dict, to be populated and passed to applyupdates()"""
1334 1343 return {
1335 1344 m: []
1336 1345 for m in (
1337 1346 mergestatemod.ACTION_ADD,
1338 1347 mergestatemod.ACTION_ADD_MODIFIED,
1339 1348 mergestatemod.ACTION_FORGET,
1340 1349 mergestatemod.ACTION_GET,
1341 1350 mergestatemod.ACTION_CHANGED_DELETED,
1342 1351 mergestatemod.ACTION_DELETED_CHANGED,
1343 1352 mergestatemod.ACTION_REMOVE,
1344 1353 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1345 1354 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1346 1355 mergestatemod.ACTION_MERGE,
1347 1356 mergestatemod.ACTION_EXEC,
1348 1357 mergestatemod.ACTION_KEEP,
1349 1358 mergestatemod.ACTION_PATH_CONFLICT,
1350 1359 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1351 1360 )
1352 1361 }
1353 1362
1354 1363
1355 1364 def applyupdates(
1356 1365 repo,
1357 1366 mresult,
1358 1367 wctx,
1359 1368 mctx,
1360 1369 overwrite,
1361 1370 wantfiledata,
1362 1371 labels=None,
1363 1372 commitinfo=None,
1364 1373 ):
1365 1374 """apply the merge action list to the working directory
1366 1375
1367 1376 mresult is a mergeresult object representing result of the merge
1368 1377 wctx is the working copy context
1369 1378 mctx is the context to be merged into the working copy
1370 1379 commitinfo is a mapping of information which needs to be stored somewhere
1371 1380 (probably mergestate) so that it can be used at commit time.
1372 1381
1373 1382 Return a tuple of (counts, filedata), where counts is a tuple
1374 1383 (updated, merged, removed, unresolved) that describes how many
1375 1384 files were affected by the update, and filedata is as described in
1376 1385 batchget.
1377 1386 """
1378 1387
1379 1388 _prefetchfiles(repo, mctx, mresult)
1380 1389
1381 1390 updated, merged, removed = 0, 0, 0
1382 1391 ms = mergestatemod.mergestate.clean(
1383 1392 repo, wctx.p1().node(), mctx.node(), labels
1384 1393 )
1385 1394
1386 1395 if commitinfo is None:
1387 1396 commitinfo = {}
1388 1397
1389 1398 for f, op in pycompat.iteritems(commitinfo):
1390 1399 # the other side of filenode was chosen while merging, store this in
1391 1400 # mergestate so that it can be reused on commit
1392 1401 if op == b'other':
1393 1402 ms.addmergedother(f)
1394 1403
1395 1404 moves = []
1396 1405
1397 1406 # 'cd' and 'dc' actions are treated like other merge conflicts
1398 1407 mergeactions = list(
1399 1408 mresult.getactions(
1400 1409 [
1401 1410 mergestatemod.ACTION_CHANGED_DELETED,
1402 1411 mergestatemod.ACTION_DELETED_CHANGED,
1403 1412 mergestatemod.ACTION_MERGE,
1404 1413 ],
1405 1414 sort=True,
1406 1415 )
1407 1416 )
1408 1417 for f, args, msg in mergeactions:
1409 1418 f1, f2, fa, move, anc = args
1410 1419 if f == b'.hgsubstate': # merged internally
1411 1420 continue
1412 1421 if f1 is None:
1413 1422 fcl = filemerge.absentfilectx(wctx, fa)
1414 1423 else:
1415 1424 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1416 1425 fcl = wctx[f1]
1417 1426 if f2 is None:
1418 1427 fco = filemerge.absentfilectx(mctx, fa)
1419 1428 else:
1420 1429 fco = mctx[f2]
1421 1430 actx = repo[anc]
1422 1431 if fa in actx:
1423 1432 fca = actx[fa]
1424 1433 else:
1425 1434 # TODO: move to absentfilectx
1426 1435 fca = repo.filectx(f1, fileid=nullrev)
1427 1436 ms.add(fcl, fco, fca, f)
1428 1437 if f1 != f and move:
1429 1438 moves.append(f1)
1430 1439
1431 1440 # remove renamed files after safely stored
1432 1441 for f in moves:
1433 1442 if wctx[f].lexists():
1434 1443 repo.ui.debug(b"removing %s\n" % f)
1435 1444 wctx[f].audit()
1436 1445 wctx[f].remove()
1437 1446
1438 1447 numupdates = mresult.len() - mresult.len((mergestatemod.ACTION_KEEP,))
1439 1448 progress = repo.ui.makeprogress(
1440 1449 _(b'updating'), unit=_(b'files'), total=numupdates
1441 1450 )
1442 1451
1443 1452 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1444 1453 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1445 1454
1446 1455 # record path conflicts
1447 1456 for f, args, msg in mresult.getactions(
1448 1457 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1449 1458 ):
1450 1459 f1, fo = args
1451 1460 s = repo.ui.status
1452 1461 s(
1453 1462 _(
1454 1463 b"%s: path conflict - a file or link has the same name as a "
1455 1464 b"directory\n"
1456 1465 )
1457 1466 % f
1458 1467 )
1459 1468 if fo == b'l':
1460 1469 s(_(b"the local file has been renamed to %s\n") % f1)
1461 1470 else:
1462 1471 s(_(b"the remote file has been renamed to %s\n") % f1)
1463 1472 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1464 1473 ms.addpathconflict(f, f1, fo)
1465 1474 progress.increment(item=f)
1466 1475
1467 1476 # When merging in-memory, we can't support worker processes, so set the
1468 1477 # per-item cost at 0 in that case.
1469 1478 cost = 0 if wctx.isinmemory() else 0.001
1470 1479
1471 1480 # remove in parallel (must come before resolving path conflicts and getting)
1472 1481 prog = worker.worker(
1473 1482 repo.ui,
1474 1483 cost,
1475 1484 batchremove,
1476 1485 (repo, wctx),
1477 1486 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1478 1487 )
1479 1488 for i, item in prog:
1480 1489 progress.increment(step=i, item=item)
1481 1490 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1482 1491
1483 1492 # resolve path conflicts (must come before getting)
1484 1493 for f, args, msg in mresult.getactions(
1485 1494 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1486 1495 ):
1487 1496 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1488 1497 (f0, origf0) = args
1489 1498 if wctx[f0].lexists():
1490 1499 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1491 1500 wctx[f].audit()
1492 1501 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1493 1502 wctx[f0].remove()
1494 1503 progress.increment(item=f)
1495 1504
1496 1505 # get in parallel.
1497 1506 threadsafe = repo.ui.configbool(
1498 1507 b'experimental', b'worker.wdir-get-thread-safe'
1499 1508 )
1500 1509 prog = worker.worker(
1501 1510 repo.ui,
1502 1511 cost,
1503 1512 batchget,
1504 1513 (repo, mctx, wctx, wantfiledata),
1505 1514 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1506 1515 threadsafe=threadsafe,
1507 1516 hasretval=True,
1508 1517 )
1509 1518 getfiledata = {}
1510 1519 for final, res in prog:
1511 1520 if final:
1512 1521 getfiledata = res
1513 1522 else:
1514 1523 i, item = res
1515 1524 progress.increment(step=i, item=item)
1516 1525
1517 1526 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1518 1527 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1519 1528
1520 1529 # forget (manifest only, just log it) (must come first)
1521 1530 for f, args, msg in mresult.getactions(
1522 1531 (mergestatemod.ACTION_FORGET,), sort=True
1523 1532 ):
1524 1533 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1525 1534 progress.increment(item=f)
1526 1535
1527 1536 # re-add (manifest only, just log it)
1528 1537 for f, args, msg in mresult.getactions(
1529 1538 (mergestatemod.ACTION_ADD,), sort=True
1530 1539 ):
1531 1540 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1532 1541 progress.increment(item=f)
1533 1542
1534 1543 # re-add/mark as modified (manifest only, just log it)
1535 1544 for f, args, msg in mresult.getactions(
1536 1545 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1537 1546 ):
1538 1547 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1539 1548 progress.increment(item=f)
1540 1549
1541 1550 # keep (noop, just log it)
1542 1551 for f, args, msg in mresult.getactions(
1543 1552 (mergestatemod.ACTION_KEEP,), sort=True
1544 1553 ):
1545 1554 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1546 1555 # no progress
1547 1556
1548 1557 # directory rename, move local
1549 1558 for f, args, msg in mresult.getactions(
1550 1559 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1551 1560 ):
1552 1561 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1553 1562 progress.increment(item=f)
1554 1563 f0, flags = args
1555 1564 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1556 1565 wctx[f].audit()
1557 1566 wctx[f].write(wctx.filectx(f0).data(), flags)
1558 1567 wctx[f0].remove()
1559 1568
1560 1569 # local directory rename, get
1561 1570 for f, args, msg in mresult.getactions(
1562 1571 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1563 1572 ):
1564 1573 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1565 1574 progress.increment(item=f)
1566 1575 f0, flags = args
1567 1576 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1568 1577 wctx[f].write(mctx.filectx(f0).data(), flags)
1569 1578
1570 1579 # exec
1571 1580 for f, args, msg in mresult.getactions(
1572 1581 (mergestatemod.ACTION_EXEC,), sort=True
1573 1582 ):
1574 1583 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1575 1584 progress.increment(item=f)
1576 1585 (flags,) = args
1577 1586 wctx[f].audit()
1578 1587 wctx[f].setflags(b'l' in flags, b'x' in flags)
1579 1588
1580 1589 # these actions update the file
1581 1590 updated = mresult.len(
1582 1591 (
1583 1592 mergestatemod.ACTION_GET,
1584 1593 mergestatemod.ACTION_EXEC,
1585 1594 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1586 1595 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1587 1596 )
1588 1597 )
1589 1598 # the ordering is important here -- ms.mergedriver will raise if the merge
1590 1599 # driver has changed, and we want to be able to bypass it when overwrite is
1591 1600 # True
1592 1601 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1593 1602
1594 1603 if usemergedriver:
1595 1604 if wctx.isinmemory():
1596 1605 raise error.InMemoryMergeConflictsError(
1597 1606 b"in-memory merge does not support mergedriver"
1598 1607 )
1599 1608 ms.commit()
1600 1609 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1601 1610 # the driver might leave some files unresolved
1602 1611 unresolvedf = set(ms.unresolved())
1603 1612 if not proceed:
1604 1613 # XXX setting unresolved to at least 1 is a hack to make sure we
1605 1614 # error out
1606 1615 return updateresult(
1607 1616 updated, merged, removed, max(len(unresolvedf), 1)
1608 1617 )
1609 1618 newactions = []
1610 1619 for f, args, msg in mergeactions:
1611 1620 if f in unresolvedf:
1612 1621 newactions.append((f, args, msg))
1613 1622 mergeactions = newactions
1614 1623
1615 1624 try:
1616 1625 # premerge
1617 1626 tocomplete = []
1618 1627 for f, args, msg in mergeactions:
1619 1628 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1620 1629 progress.increment(item=f)
1621 1630 if f == b'.hgsubstate': # subrepo states need updating
1622 1631 subrepoutil.submerge(
1623 1632 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1624 1633 )
1625 1634 continue
1626 1635 wctx[f].audit()
1627 1636 complete, r = ms.preresolve(f, wctx)
1628 1637 if not complete:
1629 1638 numupdates += 1
1630 1639 tocomplete.append((f, args, msg))
1631 1640
1632 1641 # merge
1633 1642 for f, args, msg in tocomplete:
1634 1643 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1635 1644 progress.increment(item=f, total=numupdates)
1636 1645 ms.resolve(f, wctx)
1637 1646
1638 1647 finally:
1639 1648 ms.commit()
1640 1649
1641 1650 unresolved = ms.unresolvedcount()
1642 1651
1643 1652 if (
1644 1653 usemergedriver
1645 1654 and not unresolved
1646 1655 and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
1647 1656 ):
1648 1657 if not driverconclude(repo, ms, wctx, labels=labels):
1649 1658 # XXX setting unresolved to at least 1 is a hack to make sure we
1650 1659 # error out
1651 1660 unresolved = max(unresolved, 1)
1652 1661
1653 1662 ms.commit()
1654 1663
1655 1664 msupdated, msmerged, msremoved = ms.counts()
1656 1665 updated += msupdated
1657 1666 merged += msmerged
1658 1667 removed += msremoved
1659 1668
1660 1669 extraactions = ms.actions()
1661 1670 if extraactions:
1662 1671 mfiles = {
1663 1672 a[0] for a in mresult.getactions((mergestatemod.ACTION_MERGE,))
1664 1673 }
1665 1674 for k, acts in pycompat.iteritems(extraactions):
1666 1675 for a in acts:
1667 1676 mresult.addfile(a[0], k, *a[1:])
1668 1677 if k == mergestatemod.ACTION_GET and wantfiledata:
1669 1678 # no filedata until mergestate is updated to provide it
1670 1679 for a in acts:
1671 1680 getfiledata[a[0]] = None
1672 1681 # Remove these files from actions[ACTION_MERGE] as well. This is
1673 1682 # important because in recordupdates, files in actions[ACTION_MERGE]
1674 1683 # are processed after files in other actions, and the merge driver
1675 1684 # might add files to those actions via extraactions above. This can
1676 1685 # lead to a file being recorded twice, with poor results. This is
1677 1686 # especially problematic for actions[ACTION_REMOVE] (currently only
1678 1687 # possible with the merge driver in the initial merge process;
1679 1688 # interrupted merges don't go through this flow).
1680 1689 #
1681 1690 # The real fix here is to have indexes by both file and action so
1682 1691 # that when the action for a file is changed it is automatically
1683 1692 # reflected in the other action lists. But that involves a more
1684 1693 # complex data structure, so this will do for now.
1685 1694 #
1686 1695 # We don't need to do the same operation for 'dc' and 'cd' because
1687 1696 # those lists aren't consulted again.
1688 1697 mfiles.difference_update(a[0] for a in acts)
1689 1698
1690 1699 for a in list(mresult.getactions((mergestatemod.ACTION_MERGE,))):
1691 1700 if a[0] not in mfiles:
1692 1701 mresult.removefile(a[0])
1693 1702
1694 1703 progress.complete()
1695 1704 assert len(getfiledata) == (
1696 1705 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
1697 1706 )
1698 1707 return updateresult(updated, merged, removed, unresolved), getfiledata
1699 1708
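# Illustrative sketch, not part of this changeset: one plausible way a caller
# consumes the (stats, getfiledata) pair described in applyupdates()'s
# docstring above. The arguments are assumed to come from calculateupdates()
# and the usual update() plumbing; the helper name is hypothetical.
def _sketch_applyupdates_caller(repo, mresult, wctx, mctx, labels=None):
    stats, getfiledata = applyupdates(
        repo, mresult, wctx, mctx, overwrite=False, wantfiledata=True,
        labels=labels,
    )
    if stats.unresolvedcount:
        # unresolved files are left in mergestate for 'hg resolve'
        repo.ui.warn(b'use hg resolve to retry unresolved file merges\n')
    return stats, getfiledata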
1700 1709
1701 1710 def _advertisefsmonitor(repo, num_gets, p1node):
1702 1711 # Advertise fsmonitor when its presence could be useful.
1703 1712 #
1704 1713 # We only advertise when performing an update from an empty working
1705 1714 # directory. This typically only occurs during initial clone.
1706 1715 #
1707 1716 # We give users a mechanism to disable the warning in case it is
1708 1717 # annoying.
1709 1718 #
1710 1719 # We only advertise on Linux and MacOS because that's where fsmonitor is
1711 1720 # considered stable.
1712 1721 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1713 1722 fsmonitorthreshold = repo.ui.configint(
1714 1723 b'fsmonitor', b'warn_update_file_count'
1715 1724 )
1716 1725 try:
1717 1726 # avoid cycle: extensions -> cmdutil -> merge
1718 1727 from . import extensions
1719 1728
1720 1729 extensions.find(b'fsmonitor')
1721 1730 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1722 1731 # We intentionally don't look at whether fsmonitor has disabled
1723 1732 # itself because a) fsmonitor may have already printed a warning
1724 1733 # b) we only care about the config state here.
1725 1734 except KeyError:
1726 1735 fsmonitorenabled = False
1727 1736
1728 1737 if (
1729 1738 fsmonitorwarning
1730 1739 and not fsmonitorenabled
1731 1740 and p1node == nullid
1732 1741 and num_gets >= fsmonitorthreshold
1733 1742 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1734 1743 ):
1735 1744 repo.ui.warn(
1736 1745 _(
1737 1746 b'(warning: large working directory being used without '
1738 1747 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1739 1748 b'see "hg help -e fsmonitor")\n'
1740 1749 )
1741 1750 )
1742 1751
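# Illustrative sketch, not part of this changeset: the warning above is driven
# by the two [fsmonitor] config items read in _advertisefsmonitor(), so a
# caller or test could silence it with a plain config override. The helper
# name is hypothetical.
def _sketch_silence_fsmonitor_warning(ui):
    # equivalent to putting "warn_when_unused = false" in the [fsmonitor]
    # section of an hgrc
    ui.setconfig(b'fsmonitor', b'warn_when_unused', False, b'sketch')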
1743 1752
1744 1753 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1745 1754 UPDATECHECK_NONE = b'none'
1746 1755 UPDATECHECK_LINEAR = b'linear'
1747 1756 UPDATECHECK_NO_CONFLICT = b'noconflict'
1748 1757
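# Illustrative sketch, not part of this changeset: mapping the
# experimental.updatecheck config knob (mentioned in update()'s docstring
# below) onto the constants above. The helper name is hypothetical; anything
# unrecognized is left to update()'s own default.
def _sketch_updatecheck_from_config(repo):
    value = repo.ui.config(b'experimental', b'updatecheck')
    if value in (
        UPDATECHECK_ABORT,
        UPDATECHECK_NONE,
        UPDATECHECK_LINEAR,
        UPDATECHECK_NO_CONFLICT,
    ):
        return value
    return None  # update() then falls back to UPDATECHECK_LINEAR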
1749 1758
1750 1759 def update(
1751 1760 repo,
1752 1761 node,
1753 1762 branchmerge,
1754 1763 force,
1755 1764 ancestor=None,
1756 1765 mergeancestor=False,
1757 1766 labels=None,
1758 1767 matcher=None,
1759 1768 mergeforce=False,
1760 1769 updatedirstate=True,
1761 1770 updatecheck=None,
1762 1771 wc=None,
1763 1772 ):
1764 1773 """
1765 1774 Perform a merge between the working directory and the given node
1766 1775
1767 1776 node = the node to update to
1768 1777 branchmerge = whether to merge between branches
1769 1778 force = whether to force branch merging or file overwriting
1770 1779 matcher = a matcher to filter file lists (dirstate not updated)
1771 1780 mergeancestor = whether it is merging with an ancestor. If true,
1772 1781 we should accept the incoming changes for any prompts that occur.
1773 1782 If false, merging with an ancestor (fast-forward) is only allowed
1774 1783 between different named branches. This flag is used by the rebase extension
1775 1784 as a temporary fix and should be avoided in general.
1776 1785 labels = labels to use for base, local and other
1777 1786 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1778 1787 this is True, then 'force' should be True as well.
1779 1788
1780 1789 The table below shows all the behaviors of the update command given the
1781 1790 -c/--check and -C/--clean or no options, whether the working directory is
1782 1791 dirty, whether a revision is specified, and the relationship of the parent
1783 1792 rev to the target rev (linear or not). Match from top first. The -n
1784 1793 option doesn't exist on the command line, but represents the
1785 1794 experimental.updatecheck=noconflict option.
1786 1795
1787 1796 This logic is tested by test-update-branches.t.
1788 1797
1789 1798 -c -C -n -m dirty rev linear | result
1790 1799 y y * * * * * | (1)
1791 1800 y * y * * * * | (1)
1792 1801 y * * y * * * | (1)
1793 1802 * y y * * * * | (1)
1794 1803 * y * y * * * | (1)
1795 1804 * * y y * * * | (1)
1796 1805 * * * * * n n | x
1797 1806 * * * * n * * | ok
1798 1807 n n n n y * y | merge
1799 1808 n n n n y y n | (2)
1800 1809 n n n y y * * | merge
1801 1810 n n y n y * * | merge if no conflict
1802 1811 n y n n y * * | discard
1803 1812 y n n n y * * | (3)
1804 1813
1805 1814 x = can't happen
1806 1815 * = don't-care
1807 1816 1 = incompatible options (checked in commands.py)
1808 1817 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1809 1818 3 = abort: uncommitted changes (checked in commands.py)
1810 1819
1811 1820 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1812 1821 to repo[None] if None is passed.
1813 1822
1814 1823 Return the same tuple as applyupdates().
1815 1824 """
1816 1825 # Avoid cycle.
1817 1826 from . import sparse
1818 1827
1819 1828 # This function used to find the default destination if node was None, but
1820 1829 # that's now in destutil.py.
1821 1830 assert node is not None
1822 1831 if not branchmerge and not force:
1823 1832 # TODO: remove the default once all callers that pass branchmerge=False
1824 1833 # and force=False pass a value for updatecheck. We may want to allow
1825 1834 # updatecheck='abort' to better support some of these callers.
1826 1835 if updatecheck is None:
1827 1836 updatecheck = UPDATECHECK_LINEAR
1828 1837 if updatecheck not in (
1829 1838 UPDATECHECK_NONE,
1830 1839 UPDATECHECK_LINEAR,
1831 1840 UPDATECHECK_NO_CONFLICT,
1832 1841 ):
1833 1842 raise ValueError(
1834 1843 r'Invalid updatecheck %r (can accept %r)'
1835 1844 % (
1836 1845 updatecheck,
1837 1846 (
1838 1847 UPDATECHECK_NONE,
1839 1848 UPDATECHECK_LINEAR,
1840 1849 UPDATECHECK_NO_CONFLICT,
1841 1850 ),
1842 1851 )
1843 1852 )
1844 1853 if wc is not None and wc.isinmemory():
1845 1854 maybe_wlock = util.nullcontextmanager()
1846 1855 else:
1847 1856 maybe_wlock = repo.wlock()
1848 1857 with maybe_wlock:
1849 1858 if wc is None:
1850 1859 wc = repo[None]
1851 1860 pl = wc.parents()
1852 1861 p1 = pl[0]
1853 1862 p2 = repo[node]
1854 1863 if ancestor is not None:
1855 1864 pas = [repo[ancestor]]
1856 1865 else:
1857 1866 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1858 1867 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1859 1868 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1860 1869 else:
1861 1870 pas = [p1.ancestor(p2, warn=branchmerge)]
1862 1871
1863 1872 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1864 1873
1865 1874 overwrite = force and not branchmerge
1866 1875 ### check phase
1867 1876 if not overwrite:
1868 1877 if len(pl) > 1:
1869 1878 raise error.Abort(_(b"outstanding uncommitted merge"))
1870 1879 ms = mergestatemod.mergestate.read(repo)
1871 1880 if list(ms.unresolved()):
1872 1881 raise error.Abort(
1873 1882 _(b"outstanding merge conflicts"),
1874 1883 hint=_(b"use 'hg resolve' to resolve"),
1875 1884 )
1876 1885 if branchmerge:
1877 1886 if pas == [p2]:
1878 1887 raise error.Abort(
1879 1888 _(
1880 1889 b"merging with a working directory ancestor"
1881 1890 b" has no effect"
1882 1891 )
1883 1892 )
1884 1893 elif pas == [p1]:
1885 1894 if not mergeancestor and wc.branch() == p2.branch():
1886 1895 raise error.Abort(
1887 1896 _(b"nothing to merge"),
1888 1897 hint=_(b"use 'hg update' or check 'hg heads'"),
1889 1898 )
1890 1899 if not force and (wc.files() or wc.deleted()):
1891 1900 raise error.Abort(
1892 1901 _(b"uncommitted changes"),
1893 1902 hint=_(b"use 'hg status' to list changes"),
1894 1903 )
1895 1904 if not wc.isinmemory():
1896 1905 for s in sorted(wc.substate):
1897 1906 wc.sub(s).bailifchanged()
1898 1907
1899 1908 elif not overwrite:
1900 1909 if p1 == p2: # no-op update
1901 1910 # call the hooks and exit early
1902 1911 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1903 1912 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1904 1913 return updateresult(0, 0, 0, 0)
1905 1914
1906 1915 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1907 1916 [p1],
1908 1917 [p2],
1909 1918 ): # nonlinear
1910 1919 dirty = wc.dirty(missing=True)
1911 1920 if dirty:
1912 1921 # Branching is a bit strange to ensure we do the minimal
1913 1922 # number of calls to obsutil.foreground.
1914 1923 foreground = obsutil.foreground(repo, [p1.node()])
1915 1924 # note: the <node> variable contains a random identifier
1916 1925 if repo[node].node() in foreground:
1917 1926 pass # allow updating to successors
1918 1927 else:
1919 1928 msg = _(b"uncommitted changes")
1920 1929 hint = _(b"commit or update --clean to discard changes")
1921 1930 raise error.UpdateAbort(msg, hint=hint)
1922 1931 else:
1923 1932 # Allow jumping branches if clean and specific rev given
1924 1933 pass
1925 1934
1926 1935 if overwrite:
1927 1936 pas = [wc]
1928 1937 elif not branchmerge:
1929 1938 pas = [p1]
1930 1939
1931 1940 # deprecated config: merge.followcopies
1932 1941 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1933 1942 if overwrite:
1934 1943 followcopies = False
1935 1944 elif not pas[0]:
1936 1945 followcopies = False
1937 1946 if not branchmerge and not wc.dirty(missing=True):
1938 1947 followcopies = False
1939 1948
1940 1949 ### calculate phase
1941 1950 mresult = calculateupdates(
1942 1951 repo,
1943 1952 wc,
1944 1953 p2,
1945 1954 pas,
1946 1955 branchmerge,
1947 1956 force,
1948 1957 mergeancestor,
1949 1958 followcopies,
1950 1959 matcher=matcher,
1951 1960 mergeforce=mergeforce,
1952 1961 )
1953 1962
1954 1963 if updatecheck == UPDATECHECK_NO_CONFLICT:
1955 1964 if mresult.hasconflicts():
1956 1965 msg = _(b"conflicting changes")
1957 1966 hint = _(b"commit or update --clean to discard changes")
1958 1967 raise error.Abort(msg, hint=hint)
1959 1968
1960 1969 # Prompt and create actions. Most of this is in the resolve phase
1961 1970 # already, but we can't handle .hgsubstate in filemerge or
1962 1971 # subrepoutil.submerge yet so we have to keep prompting for it.
1963 if b'.hgsubstate' in mresult.actions:
1972 vals = mresult.getfile(b'.hgsubstate')
1973 if vals:
1964 1974 f = b'.hgsubstate'
1965 m, args, msg = mresult.actions[f]
1975 m, args, msg = vals
1966 1976 prompts = filemerge.partextras(labels)
1967 1977 prompts[b'f'] = f
1968 1978 if m == mergestatemod.ACTION_CHANGED_DELETED:
1969 1979 if repo.ui.promptchoice(
1970 1980 _(
1971 1981 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1972 1982 b"use (c)hanged version or (d)elete?"
1973 1983 b"$$ &Changed $$ &Delete"
1974 1984 )
1975 1985 % prompts,
1976 1986 0,
1977 1987 ):
1978 1988 mresult.addfile(
1979 1989 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1980 1990 )
1981 1991 elif f in p1:
1982 1992 mresult.addfile(
1983 1993 f,
1984 1994 mergestatemod.ACTION_ADD_MODIFIED,
1985 1995 None,
1986 1996 b'prompt keep',
1987 1997 )
1988 1998 else:
1989 1999 mresult.addfile(
1990 2000 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
1991 2001 )
1992 2002 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1993 2003 f1, f2, fa, move, anc = args
1994 2004 flags = p2[f2].flags()
1995 2005 if (
1996 2006 repo.ui.promptchoice(
1997 2007 _(
1998 2008 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1999 2009 b"use (c)hanged version or leave (d)eleted?"
2000 2010 b"$$ &Changed $$ &Deleted"
2001 2011 )
2002 2012 % prompts,
2003 2013 0,
2004 2014 )
2005 2015 == 0
2006 2016 ):
2007 2017 mresult.addfile(
2008 2018 f,
2009 2019 mergestatemod.ACTION_GET,
2010 2020 (flags, False),
2011 2021 b'prompt recreating',
2012 2022 )
2013 2023 else:
2014 2024 mresult.removefile(f)
2015 2025
2016 2026 if not util.fscasesensitive(repo.path):
2017 2027 # check collision between files only in p2 for clean update
2018 2028 if not branchmerge and (
2019 2029 force or not wc.dirty(missing=True, branch=False)
2020 2030 ):
2021 2031 _checkcollision(repo, p2.manifest(), None)
2022 2032 else:
2023 2033 _checkcollision(repo, wc.manifest(), mresult)
2024 2034
2025 2035 # divergent renames
2026 2036 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
2027 2037 repo.ui.warn(
2028 2038 _(
2029 2039 b"note: possible conflict - %s was renamed "
2030 2040 b"multiple times to:\n"
2031 2041 )
2032 2042 % f
2033 2043 )
2034 2044 for nf in sorted(fl):
2035 2045 repo.ui.warn(b" %s\n" % nf)
2036 2046
2037 2047 # rename and delete
2038 2048 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
2039 2049 repo.ui.warn(
2040 2050 _(
2041 2051 b"note: possible conflict - %s was deleted "
2042 2052 b"and renamed to:\n"
2043 2053 )
2044 2054 % f
2045 2055 )
2046 2056 for nf in sorted(fl):
2047 2057 repo.ui.warn(b" %s\n" % nf)
2048 2058
2049 2059 ### apply phase
2050 2060 if not branchmerge: # just jump to the new rev
2051 2061 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2052 2062 # If we're doing a partial update, we need to skip updating
2053 2063 # the dirstate.
2054 2064 always = matcher is None or matcher.always()
2055 2065 updatedirstate = updatedirstate and always and not wc.isinmemory()
2056 2066 if updatedirstate:
2057 2067 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2058 2068 # note that we're in the middle of an update
2059 2069 repo.vfs.write(b'updatestate', p2.hex())
2060 2070
2061 2071 _advertisefsmonitor(
2062 2072 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2063 2073 )
2064 2074
2065 2075 wantfiledata = updatedirstate and not branchmerge
2066 2076 stats, getfiledata = applyupdates(
2067 2077 repo,
2068 2078 mresult,
2069 2079 wc,
2070 2080 p2,
2071 2081 overwrite,
2072 2082 wantfiledata,
2073 2083 labels=labels,
2074 2084 commitinfo=mresult.commitinfo,
2075 2085 )
2076 2086
2077 2087 if updatedirstate:
2078 2088 with repo.dirstate.parentchange():
2079 2089 repo.setparents(fp1, fp2)
2080 2090 mergestatemod.recordupdates(
2081 2091 repo, mresult.actionsdict, branchmerge, getfiledata
2082 2092 )
2083 2093 # update completed, clear state
2084 2094 util.unlink(repo.vfs.join(b'updatestate'))
2085 2095
2086 2096 if not branchmerge:
2087 2097 repo.dirstate.setbranch(p2.branch())
2088 2098
2089 2099 # If we're updating to a location, clean up any stale temporary includes
2090 2100 # (ex: this happens during hg rebase --abort).
2091 2101 if not branchmerge:
2092 2102 sparse.prunetemporaryincludes(repo)
2093 2103
2094 2104 if updatedirstate:
2095 2105 repo.hook(
2096 2106 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2097 2107 )
2098 2108 return stats
2099 2109
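# Illustrative sketch, not part of this changeset: a plain "hg update"-style
# checkout expressed through update() above, with no branch merge and no
# force, so the dirty/linear rules from the table in the docstring apply.
# The helper name is hypothetical.
def _sketch_checkout(repo, node):
    return update(repo, node, branchmerge=False, force=False)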
2100 2110
2101 2111 def merge(ctx, labels=None, force=False, wc=None):
2102 2112 """Merge another topological branch into the working copy.
2103 2113
2104 2114 force = whether the merge was run with 'merge --force' (deprecated)
2105 2115 """
2106 2116
2107 2117 return update(
2108 2118 ctx.repo(),
2109 2119 ctx.rev(),
2110 2120 labels=labels,
2111 2121 branchmerge=True,
2112 2122 force=force,
2113 2123 mergeforce=force,
2114 2124 wc=wc,
2115 2125 )
2116 2126
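# Illustrative sketch, not part of this changeset: merging another head into
# the working copy through the merge() wrapper above. `rev` and the helper
# name are hypothetical; `stats` is the updateresult returned by update().
def _sketch_merge_other_head(repo, rev):
    stats = merge(repo[rev], labels=[b'working copy', b'merge rev'])
    return stats.unresolvedcount == 0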
2117 2127
2118 2128 def clean_update(ctx, wc=None):
2119 2129 """Do a clean update to the given commit.
2120 2130
2121 2131 This involves updating to the commit and discarding any changes in the
2122 2132 working copy.
2123 2133 """
2124 2134 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2125 2135
2126 2136
2127 2137 def revert_to(ctx, matcher=None, wc=None):
2128 2138 """Revert the working copy to the given commit.
2129 2139
2130 2140 The working copy will keep its current parent(s) but its content will
2131 2141 be the same as in the given commit.
2132 2142 """
2133 2143
2134 2144 return update(
2135 2145 ctx.repo(),
2136 2146 ctx.rev(),
2137 2147 branchmerge=False,
2138 2148 force=True,
2139 2149 updatedirstate=False,
2140 2150 matcher=matcher,
2141 2151 wc=wc,
2142 2152 )
2143 2153
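# Illustrative sketch, not part of this changeset: the two helpers above both
# make the working copy match a revision, but differ in whether the dirstate
# parents move. `rev`, `keep_parents` and the helper name are hypothetical.
def _sketch_restore_revision(repo, rev, keep_parents=False):
    ctx = repo[rev]
    if keep_parents:
        # keep the current parents; only reset file contents
        return revert_to(ctx)
    # move the working copy onto the revision and discard local changes
    return clean_update(ctx)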
2144 2154
2145 2155 def graft(
2146 2156 repo,
2147 2157 ctx,
2148 2158 base=None,
2149 2159 labels=None,
2150 2160 keepparent=False,
2151 2161 keepconflictparent=False,
2152 2162 wctx=None,
2153 2163 ):
2154 2164 """Do a graft-like merge.
2155 2165
2156 2166 This is a merge where the merge ancestor is chosen such that one
2157 2167 or more changesets are grafted onto the current changeset. In
2158 2168 addition to the merge, this fixes up the dirstate to include only
2159 2169 a single parent (if keepparent is False) and tries to duplicate any
2160 2170 renames/copies appropriately.
2161 2171
2162 2172 ctx - changeset to rebase
2163 2173 base - merge base, or ctx.p1() if not specified
2164 2174 labels - merge labels eg ['local', 'graft']
2165 2175 keepparent - keep second parent if any
2166 2176 keepconflictparent - if unresolved, keep parent used for the merge
2167 2177
2168 2178 """
2169 2179 # If we're grafting a descendant onto an ancestor, be sure to pass
2170 2180 # mergeancestor=True to update. This does two things: 1) allows the merge if
2171 2181 # the destination is the same as the parent of the ctx (so we can use graft
2172 2182 # to copy commits), and 2) informs update that the incoming changes are
2173 2183 # newer than the destination so it doesn't prompt about "remote changed foo
2174 2184 # which local deleted".
2175 2185 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2176 2186 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2177 2187 wctx = wctx or repo[None]
2178 2188 pctx = wctx.p1()
2179 2189 base = base or ctx.p1()
2180 2190 mergeancestor = (
2181 2191 repo.changelog.isancestor(pctx.node(), ctx.node())
2182 2192 or pctx.rev() == base.rev()
2183 2193 )
2184 2194
2185 2195 stats = update(
2186 2196 repo,
2187 2197 ctx.node(),
2188 2198 True,
2189 2199 True,
2190 2200 base.node(),
2191 2201 mergeancestor=mergeancestor,
2192 2202 labels=labels,
2193 2203 wc=wctx,
2194 2204 )
2195 2205
2196 2206 if keepconflictparent and stats.unresolvedcount:
2197 2207 pother = ctx.node()
2198 2208 else:
2199 2209 pother = nullid
2200 2210 parents = ctx.parents()
2201 2211 if keepparent and len(parents) == 2 and base in parents:
2202 2212 parents.remove(base)
2203 2213 pother = parents[0].node()
2204 2214 # Never set both parents equal to each other
2205 2215 if pother == pctx.node():
2206 2216 pother = nullid
2207 2217
2208 2218 if wctx.isinmemory():
2209 2219 wctx.setparents(pctx.node(), pother)
2210 2220 # fix up dirstate for copies and renames
2211 2221 copies.graftcopies(wctx, ctx, base)
2212 2222 else:
2213 2223 with repo.dirstate.parentchange():
2214 2224 repo.setparents(pctx.node(), pother)
2215 2225 repo.dirstate.write(repo.currenttransaction())
2216 2226 # fix up dirstate for copies and renames
2217 2227 copies.graftcopies(wctx, ctx, base)
2218 2228 return stats
2219 2229
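# Illustrative sketch, not part of this changeset: grafting a single changeset
# onto the current working directory parent, roughly what the graft command
# does for each revision it processes. `rev` and the helper name are
# hypothetical.
def _sketch_graft_one(repo, rev):
    ctx = repo[rev]
    stats = graft(repo, ctx, base=ctx.p1(), labels=[b'local', b'graft'])
    return stats.unresolvedcount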
2220 2230
2221 2231 def purge(
2222 2232 repo,
2223 2233 matcher,
2224 2234 unknown=True,
2225 2235 ignored=False,
2226 2236 removeemptydirs=True,
2227 2237 removefiles=True,
2228 2238 abortonerror=False,
2229 2239 noop=False,
2230 2240 ):
2231 2241 """Purge the working directory of untracked files.
2232 2242
2233 2243 ``matcher`` is a matcher configured to scan the working directory -
2234 2244 potentially a subset.
2235 2245
2236 2246 ``unknown`` controls whether unknown files should be purged.
2237 2247
2238 2248 ``ignored`` controls whether ignored files should be purged.
2239 2249
2240 2250 ``removeemptydirs`` controls whether empty directories should be removed.
2241 2251
2242 2252 ``removefiles`` controls whether files are removed.
2243 2253
2244 2254 ``abortonerror`` causes an exception to be raised if an error occurs
2245 2255 deleting a file or directory.
2246 2256
2247 2257 ``noop`` controls whether to skip the actual removals. If False (the
2248 2258 default), files and directories are removed.
2249 2259
2250 2260 Returns an iterable of relative paths in the working directory that were
2251 2261 or would be removed.
2252 2262 """
2253 2263
2254 2264 def remove(removefn, path):
2255 2265 try:
2256 2266 removefn(path)
2257 2267 except OSError:
2258 2268 m = _(b'%s cannot be removed') % path
2259 2269 if abortonerror:
2260 2270 raise error.Abort(m)
2261 2271 else:
2262 2272 repo.ui.warn(_(b'warning: %s\n') % m)
2263 2273
2264 2274 # There's no API to copy a matcher. So mutate the passed matcher and
2265 2275 # restore it when we're done.
2266 2276 oldtraversedir = matcher.traversedir
2267 2277
2268 2278 res = []
2269 2279
2270 2280 try:
2271 2281 if removeemptydirs:
2272 2282 directories = []
2273 2283 matcher.traversedir = directories.append
2274 2284
2275 2285 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2276 2286
2277 2287 if removefiles:
2278 2288 for f in sorted(status.unknown + status.ignored):
2279 2289 if not noop:
2280 2290 repo.ui.note(_(b'removing file %s\n') % f)
2281 2291 remove(repo.wvfs.unlink, f)
2282 2292 res.append(f)
2283 2293
2284 2294 if removeemptydirs:
2285 2295 for f in sorted(directories, reverse=True):
2286 2296 if matcher(f) and not repo.wvfs.listdir(f):
2287 2297 if not noop:
2288 2298 repo.ui.note(_(b'removing directory %s\n') % f)
2289 2299 remove(repo.wvfs.rmdir, f)
2290 2300 res.append(f)
2291 2301
2292 2302 return res
2293 2303
2294 2304 finally:
2295 2305 matcher.traversedir = oldtraversedir
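
# Illustrative sketch, not part of this changeset: a dry run that lists what
# purge() would delete without touching the working directory. `matcher` is
# assumed to be a matcher over the working directory as described in the
# docstring above; the helper name is hypothetical.
def _sketch_purge_dry_run(repo, matcher):
    would_remove = purge(repo, matcher, unknown=True, ignored=False, noop=True)
    for path in would_remove:
        repo.ui.status(b'%s\n' % path)
    return would_remove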