merge: introduce mergeresult.addfile() and use it...
Pulkit Goyal
r45839:b442920a default
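For orientation before the hunk: this revision is a mechanical refactor of overrides.py in the largefiles extension. Every call site that previously assigned an (action, args, message) tuple directly into mresult.actions now goes through a new mergeresult.addfile() method. The sketch below is an assumption about what that method encapsulates (the real mergeresult class is not part of this diff); it only shows why the change is behavior-preserving at the call sites.

# Illustration only -- not Mercurial's real mergeresult class. Assumes
# actions are stored as a filename -> (action, args, message) mapping.
class mergeresultsketch(object):
    def __init__(self):
        self.actions = {}

    def addfile(self, filename, action, data, message):
        # A single entry point lets later refactors change the storage
        # layout without touching every call site in this file.
        self.actions[filename] = (action, data, message)

# Call-site pattern visible in the hunk below:
#   before: mresult.actions[lfile] = (b'r', None, b'replaced by standin')
#   after:  mresult.addfile(lfile, b'r', None, b'replaced by standin')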
@@ -1,1833 +1,1823 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial.pycompat import open
18 18
19 19 from mercurial.hgweb import webcommands
20 20
21 21 from mercurial import (
22 22 archival,
23 23 cmdutil,
24 24 copies as copiesmod,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 exthelper,
29 29 filemerge,
30 30 hg,
31 31 logcmdutil,
32 32 match as matchmod,
33 33 merge,
34 34 mergestate as mergestatemod,
35 35 pathutil,
36 36 pycompat,
37 37 scmutil,
38 38 smartset,
39 39 subrepo,
40 40 upgrade,
41 41 url as urlmod,
42 42 util,
43 43 )
44 44
45 45 from . import (
46 46 lfcommands,
47 47 lfutil,
48 48 storefactory,
49 49 )
50 50
51 51 eh = exthelper.exthelper()
52 52
53 53 lfstatus = lfutil.lfstatus
54 54
55 55 # -- Utility functions: commonly/repeatedly needed functionality ---------------
56 56
57 57
58 58 def composelargefilematcher(match, manifest):
59 59 '''create a matcher that matches only the largefiles in the original
60 60 matcher'''
61 61 m = copy.copy(match)
62 62 lfile = lambda f: lfutil.standin(f) in manifest
63 63 m._files = [lf for lf in m._files if lfile(lf)]
64 64 m._fileset = set(m._files)
65 65 m.always = lambda: False
66 66 origmatchfn = m.matchfn
67 67 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
68 68 return m
69 69
70 70
71 71 def composenormalfilematcher(match, manifest, exclude=None):
72 72 excluded = set()
73 73 if exclude is not None:
74 74 excluded.update(exclude)
75 75
76 76 m = copy.copy(match)
77 77 notlfile = lambda f: not (
78 78 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
79 79 )
80 80 m._files = [lf for lf in m._files if notlfile(lf)]
81 81 m._fileset = set(m._files)
82 82 m.always = lambda: False
83 83 origmatchfn = m.matchfn
84 84 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
85 85 return m
86 86
87 87
88 88 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
89 89 large = opts.get('large')
90 90 lfsize = lfutil.getminsize(
91 91 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
92 92 )
93 93
94 94 lfmatcher = None
95 95 if lfutil.islfilesrepo(repo):
96 96 lfpats = ui.configlist(lfutil.longname, b'patterns')
97 97 if lfpats:
98 98 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
99 99
100 100 lfnames = []
101 101 m = matcher
102 102
103 103 wctx = repo[None]
104 104 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
105 105 exact = m.exact(f)
106 106 lfile = lfutil.standin(f) in wctx
107 107 nfile = f in wctx
108 108 exists = lfile or nfile
109 109
110 110 # Don't warn the user when they attempt to add a normal tracked file.
111 111 # The normal add code will do that for us.
112 112 if exact and exists:
113 113 if lfile:
114 114 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
115 115 continue
116 116
117 117 if (exact or not exists) and not lfutil.isstandin(f):
118 118 # In case the file was removed previously, but not committed
119 119 # (issue3507)
120 120 if not repo.wvfs.exists(f):
121 121 continue
122 122
123 123 abovemin = (
124 124 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
125 125 )
126 126 if large or abovemin or (lfmatcher and lfmatcher(f)):
127 127 lfnames.append(f)
128 128 if ui.verbose or not exact:
129 129 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
130 130
131 131 bad = []
132 132
133 133 # Need to lock, otherwise there could be a race condition between
134 134 # when standins are created and added to the repo.
135 135 with repo.wlock():
136 136 if not opts.get('dry_run'):
137 137 standins = []
138 138 lfdirstate = lfutil.openlfdirstate(ui, repo)
139 139 for f in lfnames:
140 140 standinname = lfutil.standin(f)
141 141 lfutil.writestandin(
142 142 repo,
143 143 standinname,
144 144 hash=b'',
145 145 executable=lfutil.getexecutable(repo.wjoin(f)),
146 146 )
147 147 standins.append(standinname)
148 148 if lfdirstate[f] == b'r':
149 149 lfdirstate.normallookup(f)
150 150 else:
151 151 lfdirstate.add(f)
152 152 lfdirstate.write()
153 153 bad += [
154 154 lfutil.splitstandin(f)
155 155 for f in repo[None].add(standins)
156 156 if f in m.files()
157 157 ]
158 158
159 159 added = [f for f in lfnames if f not in bad]
160 160 return added, bad
161 161
162 162
163 163 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
164 164 after = opts.get('after')
165 165 m = composelargefilematcher(matcher, repo[None].manifest())
166 166 with lfstatus(repo):
167 167 s = repo.status(match=m, clean=not isaddremove)
168 168 manifest = repo[None].manifest()
169 169 modified, added, deleted, clean = [
170 170 [f for f in list if lfutil.standin(f) in manifest]
171 171 for list in (s.modified, s.added, s.deleted, s.clean)
172 172 ]
173 173
174 174 def warn(files, msg):
175 175 for f in files:
176 176 ui.warn(msg % uipathfn(f))
177 177 return int(len(files) > 0)
178 178
179 179 if after:
180 180 remove = deleted
181 181 result = warn(
182 182 modified + added + clean, _(b'not removing %s: file still exists\n')
183 183 )
184 184 else:
185 185 remove = deleted + clean
186 186 result = warn(
187 187 modified,
188 188 _(
189 189 b'not removing %s: file is modified (use -f'
190 190 b' to force removal)\n'
191 191 ),
192 192 )
193 193 result = (
194 194 warn(
195 195 added,
196 196 _(
197 197 b'not removing %s: file has been marked for add'
198 198 b' (use forget to undo)\n'
199 199 ),
200 200 )
201 201 or result
202 202 )
203 203
204 204 # Need to lock because standin files are deleted then removed from the
205 205 # repository and we could race in-between.
206 206 with repo.wlock():
207 207 lfdirstate = lfutil.openlfdirstate(ui, repo)
208 208 for f in sorted(remove):
209 209 if ui.verbose or not m.exact(f):
210 210 ui.status(_(b'removing %s\n') % uipathfn(f))
211 211
212 212 if not dryrun:
213 213 if not after:
214 214 repo.wvfs.unlinkpath(f, ignoremissing=True)
215 215
216 216 if dryrun:
217 217 return result
218 218
219 219 remove = [lfutil.standin(f) for f in remove]
220 220 # If this is being called by addremove, let the original addremove
221 221 # function handle this.
222 222 if not isaddremove:
223 223 for f in remove:
224 224 repo.wvfs.unlinkpath(f, ignoremissing=True)
225 225 repo[None].forget(remove)
226 226
227 227 for f in remove:
228 228 lfutil.synclfdirstate(
229 229 repo, lfdirstate, lfutil.splitstandin(f), False
230 230 )
231 231
232 232 lfdirstate.write()
233 233
234 234 return result
235 235
236 236
237 237 # For overriding mercurial.hgweb.webcommands so that largefiles will
238 238 # appear at their right place in the manifests.
239 239 @eh.wrapfunction(webcommands, b'decodepath')
240 240 def decodepath(orig, path):
241 241 return lfutil.splitstandin(path) or path
242 242
243 243
244 244 # -- Wrappers: modify existing commands --------------------------------
245 245
246 246
247 247 @eh.wrapcommand(
248 248 b'add',
249 249 opts=[
250 250 (b'', b'large', None, _(b'add as largefile')),
251 251 (b'', b'normal', None, _(b'add as normal file')),
252 252 (
253 253 b'',
254 254 b'lfsize',
255 255 b'',
256 256 _(
257 257 b'add all files above this size (in megabytes) '
258 258 b'as largefiles (default: 10)'
259 259 ),
260 260 ),
261 261 ],
262 262 )
263 263 def overrideadd(orig, ui, repo, *pats, **opts):
264 264 if opts.get('normal') and opts.get('large'):
265 265 raise error.Abort(_(b'--normal cannot be used with --large'))
266 266 return orig(ui, repo, *pats, **opts)
267 267
268 268
269 269 @eh.wrapfunction(cmdutil, b'add')
270 270 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
271 271 # The --normal flag short circuits this override
272 272 if opts.get('normal'):
273 273 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
274 274
275 275 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
276 276 normalmatcher = composenormalfilematcher(
277 277 matcher, repo[None].manifest(), ladded
278 278 )
279 279 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
280 280
281 281 bad.extend(f for f in lbad)
282 282 return bad
283 283
284 284
285 285 @eh.wrapfunction(cmdutil, b'remove')
286 286 def cmdutilremove(
287 287 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
288 288 ):
289 289 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
290 290 result = orig(
291 291 ui,
292 292 repo,
293 293 normalmatcher,
294 294 prefix,
295 295 uipathfn,
296 296 after,
297 297 force,
298 298 subrepos,
299 299 dryrun,
300 300 )
301 301 return (
302 302 removelargefiles(
303 303 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
304 304 )
305 305 or result
306 306 )
307 307
308 308
309 309 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
310 310 def overridestatusfn(orig, repo, rev2, **opts):
311 311 with lfstatus(repo._repo):
312 312 return orig(repo, rev2, **opts)
313 313
314 314
315 315 @eh.wrapcommand(b'status')
316 316 def overridestatus(orig, ui, repo, *pats, **opts):
317 317 with lfstatus(repo):
318 318 return orig(ui, repo, *pats, **opts)
319 319
320 320
321 321 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
322 322 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
323 323 with lfstatus(repo._repo):
324 324 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
325 325
326 326
327 327 @eh.wrapcommand(b'log')
328 328 def overridelog(orig, ui, repo, *pats, **opts):
329 329 def overridematchandpats(
330 330 orig,
331 331 ctx,
332 332 pats=(),
333 333 opts=None,
334 334 globbed=False,
335 335 default=b'relpath',
336 336 badfn=None,
337 337 ):
338 338 """Matcher that merges root directory with .hglf, suitable for log.
339 339 It is still possible to match .hglf directly.
340 340 For any listed files run log on the standin too.
341 341 matchfn tries both the given filename and with .hglf stripped.
342 342 """
343 343 if opts is None:
344 344 opts = {}
345 345 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
346 346 m, p = copy.copy(matchandpats)
347 347
348 348 if m.always():
349 349 # We want to match everything anyway, so there's no benefit trying
350 350 # to add standins.
351 351 return matchandpats
352 352
353 353 pats = set(p)
354 354
355 355 def fixpats(pat, tostandin=lfutil.standin):
356 356 if pat.startswith(b'set:'):
357 357 return pat
358 358
359 359 kindpat = matchmod._patsplit(pat, None)
360 360
361 361 if kindpat[0] is not None:
362 362 return kindpat[0] + b':' + tostandin(kindpat[1])
363 363 return tostandin(kindpat[1])
364 364
365 365 cwd = repo.getcwd()
366 366 if cwd:
367 367 hglf = lfutil.shortname
368 368 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
369 369
370 370 def tostandin(f):
371 371 # The file may already be a standin, so truncate the back
372 372 # prefix and test before mangling it. This avoids turning
373 373 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
374 374 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
375 375 return f
376 376
377 377 # An absolute path is from outside the repo, so truncate the
378 378 # path to the root before building the standin. Otherwise cwd
379 379 # is somewhere in the repo, relative to root, and needs to be
380 380 # prepended before building the standin.
381 381 if os.path.isabs(cwd):
382 382 f = f[len(back) :]
383 383 else:
384 384 f = cwd + b'/' + f
385 385 return back + lfutil.standin(f)
386 386
387 387 else:
388 388
389 389 def tostandin(f):
390 390 if lfutil.isstandin(f):
391 391 return f
392 392 return lfutil.standin(f)
393 393
394 394 pats.update(fixpats(f, tostandin) for f in p)
395 395
396 396 for i in range(0, len(m._files)):
397 397 # Don't add '.hglf' to m.files, since that is already covered by '.'
398 398 if m._files[i] == b'.':
399 399 continue
400 400 standin = lfutil.standin(m._files[i])
401 401 # If the "standin" is a directory, append instead of replace to
402 402 # support naming a directory on the command line with only
403 403 # largefiles. The original directory is kept to support normal
404 404 # files.
405 405 if standin in ctx:
406 406 m._files[i] = standin
407 407 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
408 408 m._files.append(standin)
409 409
410 410 m._fileset = set(m._files)
411 411 m.always = lambda: False
412 412 origmatchfn = m.matchfn
413 413
414 414 def lfmatchfn(f):
415 415 lf = lfutil.splitstandin(f)
416 416 if lf is not None and origmatchfn(lf):
417 417 return True
418 418 r = origmatchfn(f)
419 419 return r
420 420
421 421 m.matchfn = lfmatchfn
422 422
423 423 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
424 424 return m, pats
425 425
426 426 # For hg log --patch, the match object is used in two different senses:
427 427 # (1) to determine what revisions should be printed out, and
428 428 # (2) to determine what files to print out diffs for.
429 429 # The magic matchandpats override should be used for case (1) but not for
430 430 # case (2).
431 431 oldmatchandpats = scmutil.matchandpats
432 432
433 433 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
434 434 wctx = repo[None]
435 435 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
436 436 return lambda ctx: match
437 437
438 438 wrappedmatchandpats = extensions.wrappedfunction(
439 439 scmutil, b'matchandpats', overridematchandpats
440 440 )
441 441 wrappedmakefilematcher = extensions.wrappedfunction(
442 442 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
443 443 )
444 444 with wrappedmatchandpats, wrappedmakefilematcher:
445 445 return orig(ui, repo, *pats, **opts)
446 446
447 447
448 448 @eh.wrapcommand(
449 449 b'verify',
450 450 opts=[
451 451 (
452 452 b'',
453 453 b'large',
454 454 None,
455 455 _(b'verify that all largefiles in current revision exists'),
456 456 ),
457 457 (
458 458 b'',
459 459 b'lfa',
460 460 None,
461 461 _(b'verify largefiles in all revisions, not just current'),
462 462 ),
463 463 (
464 464 b'',
465 465 b'lfc',
466 466 None,
467 467 _(b'verify local largefile contents, not just existence'),
468 468 ),
469 469 ],
470 470 )
471 471 def overrideverify(orig, ui, repo, *pats, **opts):
472 472 large = opts.pop('large', False)
473 473 all = opts.pop('lfa', False)
474 474 contents = opts.pop('lfc', False)
475 475
476 476 result = orig(ui, repo, *pats, **opts)
477 477 if large or all or contents:
478 478 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
479 479 return result
480 480
481 481
482 482 @eh.wrapcommand(
483 483 b'debugstate',
484 484 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
485 485 )
486 486 def overridedebugstate(orig, ui, repo, *pats, **opts):
487 487 large = opts.pop('large', False)
488 488 if large:
489 489
490 490 class fakerepo(object):
491 491 dirstate = lfutil.openlfdirstate(ui, repo)
492 492
493 493 orig(ui, fakerepo, *pats, **opts)
494 494 else:
495 495 orig(ui, repo, *pats, **opts)
496 496
497 497
498 498 # Before starting the manifest merge, merge.updates will call
499 499 # _checkunknownfile to check if there are any files in the merged-in
500 500 # changeset that collide with unknown files in the working copy.
501 501 #
502 502 # The largefiles are seen as unknown, so this prevents us from merging
503 503 # in a file 'foo' if we already have a largefile with the same name.
504 504 #
505 505 # The overridden function filters the unknown files by removing any
506 506 # largefiles. This makes the merge proceed and we can then handle this
507 507 # case further in the overridden calculateupdates function below.
508 508 @eh.wrapfunction(merge, b'_checkunknownfile')
509 509 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
510 510 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
511 511 return False
512 512 return origfn(repo, wctx, mctx, f, f2)
513 513
514 514
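The check above works because each largefile 'foo' is tracked through a small standin file under the '.hglf/' directory (lfutil.shortname, which other comments in this file reference), so finding the standin in wctx means a largefile with that name already exists. The helpers below are a hypothetical sketch of that naming convention, for orientation only; they are not the real lfutil.standin()/lfutil.splitstandin() implementations.

# Hypothetical sketch of the standin naming convention, assuming standins
# live under the b'.hglf/' prefix mentioned elsewhere in this file.
_SHORTNAME = b'.hglf'

def standin(filename):
    # b'foo/bar.bin' -> b'.hglf/foo/bar.bin'
    return _SHORTNAME + b'/' + filename

def splitstandin(filename):
    # b'.hglf/foo/bar.bin' -> b'foo/bar.bin'; None if not a standin
    if filename.startswith(_SHORTNAME + b'/'):
        return filename[len(_SHORTNAME) + 1:]
    return None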
515 515 # The manifest merge handles conflicts on the manifest level. We want
516 516 # to handle changes in largefile-ness of files at this level too.
517 517 #
518 518 # The strategy is to run the original calculateupdates and then process
519 519 # the action list it outputs. There are two cases we need to deal with:
520 520 #
521 521 # 1. Normal file in p1, largefile in p2. Here the largefile is
522 522 # detected via its standin file, which will enter the working copy
523 523 # with a "get" action. It is not "merge" since the standin is all
524 524 # Mercurial is concerned with at this level -- the link to the
525 525 # existing normal file is not relevant here.
526 526 #
527 527 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
528 528 # since the largefile will be present in the working copy and
529 529 # different from the normal file in p2. Mercurial therefore
530 530 # triggers a merge action.
531 531 #
532 532 # In both cases, we prompt the user and emit new actions to either
533 533 # remove the standin (if the normal file was kept) or to remove the
534 534 # normal file and get the standin (if the largefile was kept). The
535 535 # default prompt answer is to use the largefile version since it was
536 536 # presumably changed on purpose.
537 537 #
538 538 # Finally, the merge.applyupdates function will then take care of
539 539 # writing the files into the working copy and lfcommands.updatelfiles
540 540 # will update the largefiles.
541 541 @eh.wrapfunction(merge, b'calculateupdates')
542 542 def overridecalculateupdates(
543 543 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
544 544 ):
545 545 overwrite = force and not branchmerge
546 546 mresult = origfn(
547 547 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
548 548 )
549 549
550 550 if overwrite:
551 551 return mresult
552 552
553 553 # Convert to dictionary with filename as key and action as value.
554 554 lfiles = set()
555 555 for f in mresult.actions:
556 556 splitstandin = lfutil.splitstandin(f)
557 557 if splitstandin is not None and splitstandin in p1:
558 558 lfiles.add(splitstandin)
559 559 elif lfutil.standin(f) in p1:
560 560 lfiles.add(f)
561 561
562 562 for lfile in sorted(lfiles):
563 563 standin = lfutil.standin(lfile)
564 564 (lm, largs, lmsg) = mresult.actions.get(lfile, (None, None, None))
565 565 (sm, sargs, smsg) = mresult.actions.get(standin, (None, None, None))
566 566 if sm in (b'g', b'dc') and lm != b'r':
567 567 if sm == b'dc':
568 568 f1, f2, fa, move, anc = sargs
569 569 sargs = (p2[f2].flags(), False)
570 570 # Case 1: normal file in the working copy, largefile in
571 571 # the second parent
572 572 usermsg = (
573 573 _(
574 574 b'remote turned local normal file %s into a largefile\n'
575 575 b'use (l)argefile or keep (n)ormal file?'
576 576 b'$$ &Largefile $$ &Normal file'
577 577 )
578 578 % lfile
579 579 )
580 580 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
581 mresult.actions[lfile] = (b'r', None, b'replaced by standin')
582 mresult.actions[standin] = (b'g', sargs, b'replaces standin')
581 mresult.addfile(lfile, b'r', None, b'replaced by standin')
582 mresult.addfile(standin, b'g', sargs, b'replaces standin')
583 583 else: # keep local normal file
584 mresult.actions[lfile] = (b'k', None, b'replaces standin')
584 mresult.addfile(lfile, b'k', None, b'replaces standin')
585 585 if branchmerge:
586 mresult.actions[standin] = (
587 b'k',
588 None,
589 b'replaced by non-standin',
586 mresult.addfile(
587 standin, b'k', None, b'replaced by non-standin',
590 588 )
591 589 else:
592 mresult.actions[standin] = (
593 b'r',
594 None,
595 b'replaced by non-standin',
590 mresult.addfile(
591 standin, b'r', None, b'replaced by non-standin',
596 592 )
597 593 elif lm in (b'g', b'dc') and sm != b'r':
598 594 if lm == b'dc':
599 595 f1, f2, fa, move, anc = largs
600 596 largs = (p2[f2].flags(), False)
601 597 # Case 2: largefile in the working copy, normal file in
602 598 # the second parent
603 599 usermsg = (
604 600 _(
605 601 b'remote turned local largefile %s into a normal file\n'
606 602 b'keep (l)argefile or use (n)ormal file?'
607 603 b'$$ &Largefile $$ &Normal file'
608 604 )
609 605 % lfile
610 606 )
611 607 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
612 608 if branchmerge:
613 609 # largefile can be restored from standin safely
614 mresult.actions[lfile] = (
615 b'k',
616 None,
617 b'replaced by standin',
610 mresult.addfile(
611 lfile, b'k', None, b'replaced by standin',
618 612 )
619 mresult.actions[standin] = (b'k', None, b'replaces standin')
613 mresult.addfile(standin, b'k', None, b'replaces standin')
620 614 else:
621 615 # "lfile" should be marked as "removed" without
622 616 # removal of itself
623 mresult.actions[lfile] = (
624 b'lfmr',
625 None,
626 b'forget non-standin largefile',
617 mresult.addfile(
618 lfile, b'lfmr', None, b'forget non-standin largefile',
627 619 )
628 620
629 621 # linear-merge should treat this largefile as 're-added'
630 mresult.actions[standin] = (b'a', None, b'keep standin')
622 mresult.addfile(standin, b'a', None, b'keep standin')
631 623 else: # pick remote normal file
632 mresult.actions[lfile] = (b'g', largs, b'replaces standin')
633 mresult.actions[standin] = (
634 b'r',
635 None,
636 b'replaced by non-standin',
624 mresult.addfile(lfile, b'g', largs, b'replaces standin')
625 mresult.addfile(
626 standin, b'r', None, b'replaced by non-standin',
637 627 )
638 628
639 629 return mresult
640 630
641 631
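To summarize the second prompt branch of overridecalculateupdates above (largefile in the working copy, normal file in the other parent): the override always emits a paired action for the largefile and its standin through the new addfile() API. The function below is a condensed, illustrative restatement of that branch only, not Mercurial code; mresult and largs are the same objects as in the hunk above.

# Condensed illustration of 'case 2', using the action codes seen in the
# hunk: 'g' get, 'r' remove, 'k' keep, 'a' add, 'lfmr' forget the largefile
# without deleting it from the working directory.
def resolvecase2(mresult, lfile, standin, keeplargefile, branchmerge, largs):
    if keeplargefile:
        if branchmerge:
            # the largefile can be restored from the kept standin
            mresult.addfile(lfile, b'k', None, b'replaced by standin')
            mresult.addfile(standin, b'k', None, b'replaces standin')
        else:
            # linear merge: forget the largefile, treat the standin as re-added
            mresult.addfile(lfile, b'lfmr', None,
                            b'forget non-standin largefile')
            mresult.addfile(standin, b'a', None, b'keep standin')
    else:
        # the remote normal file wins; fetch it and drop the standin
        mresult.addfile(lfile, b'g', largs, b'replaces standin')
        mresult.addfile(standin, b'r', None, b'replaced by non-standin')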
642 632 @eh.wrapfunction(mergestatemod, b'recordupdates')
643 633 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
644 634 if b'lfmr' in actions:
645 635 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
646 636 for lfile, args, msg in actions[b'lfmr']:
647 637 # this should be executed before 'orig', to execute 'remove'
648 638 # before all other actions
649 639 repo.dirstate.remove(lfile)
650 640 # make sure lfile doesn't get synclfdirstate'd as normal
651 641 lfdirstate.add(lfile)
652 642 lfdirstate.write()
653 643
654 644 return orig(repo, actions, branchmerge, getfiledata)
655 645
656 646
657 647 # Override filemerge to prompt the user about how they wish to merge
658 648 # largefiles. This will handle identical edits without prompting the user.
659 649 @eh.wrapfunction(filemerge, b'_filemerge')
660 650 def overridefilemerge(
661 651 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
662 652 ):
663 653 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
664 654 return origfn(
665 655 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
666 656 )
667 657
668 658 ahash = lfutil.readasstandin(fca).lower()
669 659 dhash = lfutil.readasstandin(fcd).lower()
670 660 ohash = lfutil.readasstandin(fco).lower()
671 661 if (
672 662 ohash != ahash
673 663 and ohash != dhash
674 664 and (
675 665 dhash == ahash
676 666 or repo.ui.promptchoice(
677 667 _(
678 668 b'largefile %s has a merge conflict\nancestor was %s\n'
679 669 b'you can keep (l)ocal %s or take (o)ther %s.\n'
680 670 b'what do you want to do?'
681 671 b'$$ &Local $$ &Other'
682 672 )
683 673 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
684 674 0,
685 675 )
686 676 == 1
687 677 )
688 678 ):
689 679 repo.wwrite(fcd.path(), fco.data(), fco.flags())
690 680 return True, 0, False
691 681
692 682
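The condition in overridefilemerge above reduces to a small decision on the three standin hashes. The function below is an illustrative restatement, not a Mercurial API; promptother is a hypothetical stand-in for the ui.promptchoice() call and should return True when the user picks "(o)ther".

# Illustration of the hash comparison: decide which largefile wins given
# the ancestor (ahash), local (dhash) and other (ohash) standin hashes.
def chooselargefile(ahash, dhash, ohash, promptother):
    if ohash == ahash or ohash == dhash:
        return b'local'   # other side unchanged, or both sides already agree
    if dhash == ahash:
        return b'other'   # only the other side changed the largefile
    return b'other' if promptother() else b'local'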
693 683 @eh.wrapfunction(copiesmod, b'pathcopies')
694 684 def copiespathcopies(orig, ctx1, ctx2, match=None):
695 685 copies = orig(ctx1, ctx2, match=match)
696 686 updated = {}
697 687
698 688 for k, v in pycompat.iteritems(copies):
699 689 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
700 690
701 691 return updated
702 692
703 693
704 694 # Copy first changes the matchers to match standins instead of
705 695 # largefiles. Then it overrides util.copyfile in that function it
706 696 # checks if the destination largefile already exists. It also keeps a
707 697 # list of copied files so that the largefiles can be copied and the
708 698 # dirstate updated.
709 699 @eh.wrapfunction(cmdutil, b'copy')
710 700 def overridecopy(orig, ui, repo, pats, opts, rename=False):
711 701 # doesn't remove largefile on rename
712 702 if len(pats) < 2:
713 703 # this isn't legal, let the original function deal with it
714 704 return orig(ui, repo, pats, opts, rename)
715 705
716 706 # This could copy both lfiles and normal files in one command,
717 707 # but we don't want to do that. First replace their matcher to
718 708 # only match normal files and run it, then replace it to just
719 709 # match largefiles and run it again.
720 710 nonormalfiles = False
721 711 nolfiles = False
722 712 manifest = repo[None].manifest()
723 713
724 714 def normalfilesmatchfn(
725 715 orig,
726 716 ctx,
727 717 pats=(),
728 718 opts=None,
729 719 globbed=False,
730 720 default=b'relpath',
731 721 badfn=None,
732 722 ):
733 723 if opts is None:
734 724 opts = {}
735 725 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
736 726 return composenormalfilematcher(match, manifest)
737 727
738 728 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
739 729 try:
740 730 result = orig(ui, repo, pats, opts, rename)
741 731 except error.Abort as e:
742 732 if pycompat.bytestr(e) != _(b'no files to copy'):
743 733 raise e
744 734 else:
745 735 nonormalfiles = True
746 736 result = 0
747 737
748 738 # The first rename can cause our current working directory to be removed.
749 739 # In that case there is nothing left to copy/rename so just quit.
750 740 try:
751 741 repo.getcwd()
752 742 except OSError:
753 743 return result
754 744
755 745 def makestandin(relpath):
756 746 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
757 747 return repo.wvfs.join(lfutil.standin(path))
758 748
759 749 fullpats = scmutil.expandpats(pats)
760 750 dest = fullpats[-1]
761 751
762 752 if os.path.isdir(dest):
763 753 if not os.path.isdir(makestandin(dest)):
764 754 os.makedirs(makestandin(dest))
765 755
766 756 try:
767 757 # When we call orig below it creates the standins but we don't add
768 758 # them to the dir state until later so lock during that time.
769 759 wlock = repo.wlock()
770 760
771 761 manifest = repo[None].manifest()
772 762
773 763 def overridematch(
774 764 orig,
775 765 ctx,
776 766 pats=(),
777 767 opts=None,
778 768 globbed=False,
779 769 default=b'relpath',
780 770 badfn=None,
781 771 ):
782 772 if opts is None:
783 773 opts = {}
784 774 newpats = []
785 775 # The patterns were previously mangled to add the standin
786 776 # directory; we need to remove that now
787 777 for pat in pats:
788 778 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
789 779 newpats.append(pat.replace(lfutil.shortname, b''))
790 780 else:
791 781 newpats.append(pat)
792 782 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
793 783 m = copy.copy(match)
794 784 lfile = lambda f: lfutil.standin(f) in manifest
795 785 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
796 786 m._fileset = set(m._files)
797 787 origmatchfn = m.matchfn
798 788
799 789 def matchfn(f):
800 790 lfile = lfutil.splitstandin(f)
801 791 return (
802 792 lfile is not None
803 793 and (f in manifest)
804 794 and origmatchfn(lfile)
805 795 or None
806 796 )
807 797
808 798 m.matchfn = matchfn
809 799 return m
810 800
811 801 listpats = []
812 802 for pat in pats:
813 803 if matchmod.patkind(pat) is not None:
814 804 listpats.append(pat)
815 805 else:
816 806 listpats.append(makestandin(pat))
817 807
818 808 copiedfiles = []
819 809
820 810 def overridecopyfile(orig, src, dest, *args, **kwargs):
821 811 if lfutil.shortname in src and dest.startswith(
822 812 repo.wjoin(lfutil.shortname)
823 813 ):
824 814 destlfile = dest.replace(lfutil.shortname, b'')
825 815 if not opts[b'force'] and os.path.exists(destlfile):
826 816 raise IOError(
827 817 b'', _(b'destination largefile already exists')
828 818 )
829 819 copiedfiles.append((src, dest))
830 820 orig(src, dest, *args, **kwargs)
831 821
832 822 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
833 823 with extensions.wrappedfunction(scmutil, b'match', overridematch):
834 824 result += orig(ui, repo, listpats, opts, rename)
835 825
836 826 lfdirstate = lfutil.openlfdirstate(ui, repo)
837 827 for (src, dest) in copiedfiles:
838 828 if lfutil.shortname in src and dest.startswith(
839 829 repo.wjoin(lfutil.shortname)
840 830 ):
841 831 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
842 832 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
843 833 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
844 834 if not os.path.isdir(destlfiledir):
845 835 os.makedirs(destlfiledir)
846 836 if rename:
847 837 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
848 838
849 839 # The file is gone, but this deletes any empty parent
850 840 # directories as a side-effect.
851 841 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
852 842 lfdirstate.remove(srclfile)
853 843 else:
854 844 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
855 845
856 846 lfdirstate.add(destlfile)
857 847 lfdirstate.write()
858 848 except error.Abort as e:
859 849 if pycompat.bytestr(e) != _(b'no files to copy'):
860 850 raise e
861 851 else:
862 852 nolfiles = True
863 853 finally:
864 854 wlock.release()
865 855
866 856 if nolfiles and nonormalfiles:
867 857 raise error.Abort(_(b'no files to copy'))
868 858
869 859 return result
870 860
871 861
872 862 # When the user calls revert, we have to be careful to not revert any
873 863 # changes to other largefiles accidentally. This means we have to keep
874 864 # track of the largefiles that are being reverted so we only pull down
875 865 # the necessary largefiles.
876 866 #
877 867 # Standins are only updated (to match the hash of largefiles) before
878 868 # commits. Update the standins then run the original revert, changing
879 869 # the matcher to hit standins instead of largefiles. Based on the
880 870 # resulting standins update the largefiles.
881 871 @eh.wrapfunction(cmdutil, b'revert')
882 872 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
883 873 # Because we put the standins in a bad state (by updating them)
884 874 # and then return them to a correct state we need to lock to
885 875 # prevent others from changing them in their incorrect state.
886 876 with repo.wlock():
887 877 lfdirstate = lfutil.openlfdirstate(ui, repo)
888 878 s = lfutil.lfdirstatestatus(lfdirstate, repo)
889 879 lfdirstate.write()
890 880 for lfile in s.modified:
891 881 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
892 882 for lfile in s.deleted:
893 883 fstandin = lfutil.standin(lfile)
894 884 if repo.wvfs.exists(fstandin):
895 885 repo.wvfs.unlink(fstandin)
896 886
897 887 oldstandins = lfutil.getstandinsstate(repo)
898 888
899 889 def overridematch(
900 890 orig,
901 891 mctx,
902 892 pats=(),
903 893 opts=None,
904 894 globbed=False,
905 895 default=b'relpath',
906 896 badfn=None,
907 897 ):
908 898 if opts is None:
909 899 opts = {}
910 900 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
911 901 m = copy.copy(match)
912 902
913 903 # revert supports recursing into subrepos, and though largefiles
914 904 # currently doesn't work correctly in that case, this match is
915 905 # called, so the lfdirstate above may not be the correct one for
916 906 # this invocation of match.
917 907 lfdirstate = lfutil.openlfdirstate(
918 908 mctx.repo().ui, mctx.repo(), False
919 909 )
920 910
921 911 wctx = repo[None]
922 912 matchfiles = []
923 913 for f in m._files:
924 914 standin = lfutil.standin(f)
925 915 if standin in ctx or standin in mctx:
926 916 matchfiles.append(standin)
927 917 elif standin in wctx or lfdirstate[f] == b'r':
928 918 continue
929 919 else:
930 920 matchfiles.append(f)
931 921 m._files = matchfiles
932 922 m._fileset = set(m._files)
933 923 origmatchfn = m.matchfn
934 924
935 925 def matchfn(f):
936 926 lfile = lfutil.splitstandin(f)
937 927 if lfile is not None:
938 928 return origmatchfn(lfile) and (f in ctx or f in mctx)
939 929 return origmatchfn(f)
940 930
941 931 m.matchfn = matchfn
942 932 return m
943 933
944 934 with extensions.wrappedfunction(scmutil, b'match', overridematch):
945 935 orig(ui, repo, ctx, parents, *pats, **opts)
946 936
947 937 newstandins = lfutil.getstandinsstate(repo)
948 938 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
949 939 # lfdirstate should be 'normallookup'-ed for updated files,
950 940 # because reverting doesn't touch dirstate for 'normal' files
951 941 # when target revision is explicitly specified: in such case,
952 942 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
953 943 # of target (standin) file.
954 944 lfcommands.updatelfiles(
955 945 ui, repo, filelist, printmessage=False, normallookup=True
956 946 )
957 947
958 948
959 949 # after pulling changesets, we need to take some extra care to get
960 950 # largefiles updated remotely
961 951 @eh.wrapcommand(
962 952 b'pull',
963 953 opts=[
964 954 (
965 955 b'',
966 956 b'all-largefiles',
967 957 None,
968 958 _(b'download all pulled versions of largefiles (DEPRECATED)'),
969 959 ),
970 960 (
971 961 b'',
972 962 b'lfrev',
973 963 [],
974 964 _(b'download largefiles for these revisions'),
975 965 _(b'REV'),
976 966 ),
977 967 ],
978 968 )
979 969 def overridepull(orig, ui, repo, source=None, **opts):
980 970 revsprepull = len(repo)
981 971 if not source:
982 972 source = b'default'
983 973 repo.lfpullsource = source
984 974 result = orig(ui, repo, source, **opts)
985 975 revspostpull = len(repo)
986 976 lfrevs = opts.get('lfrev', [])
987 977 if opts.get('all_largefiles'):
988 978 lfrevs.append(b'pulled()')
989 979 if lfrevs and revspostpull > revsprepull:
990 980 numcached = 0
991 981 repo.firstpulled = revsprepull # for pulled() revset expression
992 982 try:
993 983 for rev in scmutil.revrange(repo, lfrevs):
994 984 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
995 985 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
996 986 numcached += len(cached)
997 987 finally:
998 988 del repo.firstpulled
999 989 ui.status(_(b"%d largefiles cached\n") % numcached)
1000 990 return result
1001 991
1002 992
1003 993 @eh.wrapcommand(
1004 994 b'push',
1005 995 opts=[
1006 996 (
1007 997 b'',
1008 998 b'lfrev',
1009 999 [],
1010 1000 _(b'upload largefiles for these revisions'),
1011 1001 _(b'REV'),
1012 1002 )
1013 1003 ],
1014 1004 )
1015 1005 def overridepush(orig, ui, repo, *args, **kwargs):
1016 1006 """Override push command and store --lfrev parameters in opargs"""
1017 1007 lfrevs = kwargs.pop('lfrev', None)
1018 1008 if lfrevs:
1019 1009 opargs = kwargs.setdefault('opargs', {})
1020 1010 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1021 1011 return orig(ui, repo, *args, **kwargs)
1022 1012
1023 1013
1024 1014 @eh.wrapfunction(exchange, b'pushoperation')
1025 1015 def exchangepushoperation(orig, *args, **kwargs):
1026 1016 """Override pushoperation constructor and store lfrevs parameter"""
1027 1017 lfrevs = kwargs.pop('lfrevs', None)
1028 1018 pushop = orig(*args, **kwargs)
1029 1019 pushop.lfrevs = lfrevs
1030 1020 return pushop
1031 1021
1032 1022
1033 1023 @eh.revsetpredicate(b'pulled()')
1034 1024 def pulledrevsetsymbol(repo, subset, x):
1035 1025 """Changesets that just has been pulled.
1036 1026
1037 1027 Only available with largefiles from pull --lfrev expressions.
1038 1028
1039 1029 .. container:: verbose
1040 1030
1041 1031 Some examples:
1042 1032
1043 1033 - pull largefiles for all new changesets::
1044 1034
1045 1035 hg pull -lfrev "pulled()"
1046 1036
1047 1037 - pull largefiles for all new branch heads::
1048 1038
1049 1039 hg pull -lfrev "head(pulled()) and not closed()"
1050 1040
1051 1041 """
1052 1042
1053 1043 try:
1054 1044 firstpulled = repo.firstpulled
1055 1045 except AttributeError:
1056 1046 raise error.Abort(_(b"pulled() only available in --lfrev"))
1057 1047 return smartset.baseset([r for r in subset if r >= firstpulled])
1058 1048
1059 1049
1060 1050 @eh.wrapcommand(
1061 1051 b'clone',
1062 1052 opts=[
1063 1053 (
1064 1054 b'',
1065 1055 b'all-largefiles',
1066 1056 None,
1067 1057 _(b'download all versions of all largefiles'),
1068 1058 )
1069 1059 ],
1070 1060 )
1071 1061 def overrideclone(orig, ui, source, dest=None, **opts):
1072 1062 d = dest
1073 1063 if d is None:
1074 1064 d = hg.defaultdest(source)
1075 1065 if opts.get('all_largefiles') and not hg.islocal(d):
1076 1066 raise error.Abort(
1077 1067 _(b'--all-largefiles is incompatible with non-local destination %s')
1078 1068 % d
1079 1069 )
1080 1070
1081 1071 return orig(ui, source, dest, **opts)
1082 1072
1083 1073
1084 1074 @eh.wrapfunction(hg, b'clone')
1085 1075 def hgclone(orig, ui, opts, *args, **kwargs):
1086 1076 result = orig(ui, opts, *args, **kwargs)
1087 1077
1088 1078 if result is not None:
1089 1079 sourcerepo, destrepo = result
1090 1080 repo = destrepo.local()
1091 1081
1092 1082 # When cloning to a remote repo (like through SSH), no repo is available
1093 1083 # from the peer. Therefore the largefiles can't be downloaded and the
1094 1084 # hgrc can't be updated.
1095 1085 if not repo:
1096 1086 return result
1097 1087
1098 1088 # Caching is implicitly limited to 'rev' option, since the dest repo was
1099 1089 # truncated at that point. The user may expect a download count with
1100 1090 # this option, so attempt whether or not this is a largefile repo.
1101 1091 if opts.get(b'all_largefiles'):
1102 1092 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1103 1093
1104 1094 if missing != 0:
1105 1095 return None
1106 1096
1107 1097 return result
1108 1098
1109 1099
1110 1100 @eh.wrapcommand(b'rebase', extension=b'rebase')
1111 1101 def overriderebase(orig, ui, repo, **opts):
1112 1102 if not util.safehasattr(repo, b'_largefilesenabled'):
1113 1103 return orig(ui, repo, **opts)
1114 1104
1115 1105 resuming = opts.get('continue')
1116 1106 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1117 1107 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1118 1108 try:
1119 1109 return orig(ui, repo, **opts)
1120 1110 finally:
1121 1111 repo._lfstatuswriters.pop()
1122 1112 repo._lfcommithooks.pop()
1123 1113
1124 1114
1125 1115 @eh.wrapcommand(b'archive')
1126 1116 def overridearchivecmd(orig, ui, repo, dest, **opts):
1127 1117 with lfstatus(repo.unfiltered()):
1128 1118 return orig(ui, repo.unfiltered(), dest, **opts)
1129 1119
1130 1120
1131 1121 @eh.wrapfunction(webcommands, b'archive')
1132 1122 def hgwebarchive(orig, web):
1133 1123 with lfstatus(web.repo):
1134 1124 return orig(web)
1135 1125
1136 1126
1137 1127 @eh.wrapfunction(archival, b'archive')
1138 1128 def overridearchive(
1139 1129 orig,
1140 1130 repo,
1141 1131 dest,
1142 1132 node,
1143 1133 kind,
1144 1134 decode=True,
1145 1135 match=None,
1146 1136 prefix=b'',
1147 1137 mtime=None,
1148 1138 subrepos=None,
1149 1139 ):
1150 1140 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1151 1141 # unfiltered repo's attr, so check that as well.
1152 1142 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1153 1143 return orig(
1154 1144 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1155 1145 )
1156 1146
1157 1147 # No need to lock because we are only reading history and
1158 1148 # largefile caches, neither of which are modified.
1159 1149 if node is not None:
1160 1150 lfcommands.cachelfiles(repo.ui, repo, node)
1161 1151
1162 1152 if kind not in archival.archivers:
1163 1153 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1164 1154
1165 1155 ctx = repo[node]
1166 1156
1167 1157 if kind == b'files':
1168 1158 if prefix:
1169 1159 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1170 1160 else:
1171 1161 prefix = archival.tidyprefix(dest, kind, prefix)
1172 1162
1173 1163 def write(name, mode, islink, getdata):
1174 1164 if match and not match(name):
1175 1165 return
1176 1166 data = getdata()
1177 1167 if decode:
1178 1168 data = repo.wwritedata(name, data)
1179 1169 archiver.addfile(prefix + name, mode, islink, data)
1180 1170
1181 1171 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1182 1172
1183 1173 if repo.ui.configbool(b"ui", b"archivemeta"):
1184 1174 write(
1185 1175 b'.hg_archival.txt',
1186 1176 0o644,
1187 1177 False,
1188 1178 lambda: archival.buildmetadata(ctx),
1189 1179 )
1190 1180
1191 1181 for f in ctx:
1192 1182 ff = ctx.flags(f)
1193 1183 getdata = ctx[f].data
1194 1184 lfile = lfutil.splitstandin(f)
1195 1185 if lfile is not None:
1196 1186 if node is not None:
1197 1187 path = lfutil.findfile(repo, getdata().strip())
1198 1188
1199 1189 if path is None:
1200 1190 raise error.Abort(
1201 1191 _(
1202 1192 b'largefile %s not found in repo store or system cache'
1203 1193 )
1204 1194 % lfile
1205 1195 )
1206 1196 else:
1207 1197 path = lfile
1208 1198
1209 1199 f = lfile
1210 1200
1211 1201 getdata = lambda: util.readfile(path)
1212 1202 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1213 1203
1214 1204 if subrepos:
1215 1205 for subpath in sorted(ctx.substate):
1216 1206 sub = ctx.workingsub(subpath)
1217 1207 submatch = matchmod.subdirmatcher(subpath, match)
1218 1208 subprefix = prefix + subpath + b'/'
1219 1209
1220 1210 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1221 1211 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1222 1212 # allow only hgsubrepos to set this, instead of the current scheme
1223 1213 # where the parent sets this for the child.
1224 1214 with (
1225 1215 util.safehasattr(sub, '_repo')
1226 1216 and lfstatus(sub._repo)
1227 1217 or util.nullcontextmanager()
1228 1218 ):
1229 1219 sub.archive(archiver, subprefix, submatch)
1230 1220
1231 1221 archiver.done()
1232 1222
1233 1223
1234 1224 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1235 1225 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1236 1226 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1237 1227 if not lfenabled or not repo._repo.lfstatus:
1238 1228 return orig(repo, archiver, prefix, match, decode)
1239 1229
1240 1230 repo._get(repo._state + (b'hg',))
1241 1231 rev = repo._state[1]
1242 1232 ctx = repo._repo[rev]
1243 1233
1244 1234 if ctx.node() is not None:
1245 1235 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1246 1236
1247 1237 def write(name, mode, islink, getdata):
1248 1238 # At this point, the standin has been replaced with the largefile name,
1249 1239 # so the normal matcher works here without the lfutil variants.
1250 1240 if match and not match(f):
1251 1241 return
1252 1242 data = getdata()
1253 1243 if decode:
1254 1244 data = repo._repo.wwritedata(name, data)
1255 1245
1256 1246 archiver.addfile(prefix + name, mode, islink, data)
1257 1247
1258 1248 for f in ctx:
1259 1249 ff = ctx.flags(f)
1260 1250 getdata = ctx[f].data
1261 1251 lfile = lfutil.splitstandin(f)
1262 1252 if lfile is not None:
1263 1253 if ctx.node() is not None:
1264 1254 path = lfutil.findfile(repo._repo, getdata().strip())
1265 1255
1266 1256 if path is None:
1267 1257 raise error.Abort(
1268 1258 _(
1269 1259 b'largefile %s not found in repo store or system cache'
1270 1260 )
1271 1261 % lfile
1272 1262 )
1273 1263 else:
1274 1264 path = lfile
1275 1265
1276 1266 f = lfile
1277 1267
1278 1268 getdata = lambda: util.readfile(os.path.join(prefix, path))
1279 1269
1280 1270 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1281 1271
1282 1272 for subpath in sorted(ctx.substate):
1283 1273 sub = ctx.workingsub(subpath)
1284 1274 submatch = matchmod.subdirmatcher(subpath, match)
1285 1275 subprefix = prefix + subpath + b'/'
1286 1276 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1287 1277 # infer and possibly set lfstatus at the top of this function. That
1288 1278 # would allow only hgsubrepos to set this, instead of the current scheme
1289 1279 # where the parent sets this for the child.
1290 1280 with (
1291 1281 util.safehasattr(sub, '_repo')
1292 1282 and lfstatus(sub._repo)
1293 1283 or util.nullcontextmanager()
1294 1284 ):
1295 1285 sub.archive(archiver, subprefix, submatch, decode)
1296 1286
1297 1287
1298 1288 # If a largefile is modified, the change is not reflected in its
1299 1289 # standin until a commit. cmdutil.bailifchanged() raises an exception
1300 1290 # if the repo has uncommitted changes. Wrap it to also check if
1301 1291 # largefiles were changed. This is used by bisect, backout and fetch.
1302 1292 @eh.wrapfunction(cmdutil, b'bailifchanged')
1303 1293 def overridebailifchanged(orig, repo, *args, **kwargs):
1304 1294 orig(repo, *args, **kwargs)
1305 1295 with lfstatus(repo):
1306 1296 s = repo.status()
1307 1297 if s.modified or s.added or s.removed or s.deleted:
1308 1298 raise error.Abort(_(b'uncommitted changes'))
1309 1299
1310 1300
1311 1301 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1312 1302 def postcommitstatus(orig, repo, *args, **kwargs):
1313 1303 with lfstatus(repo):
1314 1304 return orig(repo, *args, **kwargs)
1315 1305
1316 1306
1317 1307 @eh.wrapfunction(cmdutil, b'forget')
1318 1308 def cmdutilforget(
1319 1309 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1320 1310 ):
1321 1311 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1322 1312 bad, forgot = orig(
1323 1313 ui,
1324 1314 repo,
1325 1315 normalmatcher,
1326 1316 prefix,
1327 1317 uipathfn,
1328 1318 explicitonly,
1329 1319 dryrun,
1330 1320 interactive,
1331 1321 )
1332 1322 m = composelargefilematcher(match, repo[None].manifest())
1333 1323
1334 1324 with lfstatus(repo):
1335 1325 s = repo.status(match=m, clean=True)
1336 1326 manifest = repo[None].manifest()
1337 1327 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1338 1328 forget = [f for f in forget if lfutil.standin(f) in manifest]
1339 1329
1340 1330 for f in forget:
1341 1331 fstandin = lfutil.standin(f)
1342 1332 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1343 1333 ui.warn(
1344 1334 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1345 1335 )
1346 1336 bad.append(f)
1347 1337
1348 1338 for f in forget:
1349 1339 if ui.verbose or not m.exact(f):
1350 1340 ui.status(_(b'removing %s\n') % uipathfn(f))
1351 1341
1352 1342 # Need to lock because standin files are deleted then removed from the
1353 1343 # repository and we could race in-between.
1354 1344 with repo.wlock():
1355 1345 lfdirstate = lfutil.openlfdirstate(ui, repo)
1356 1346 for f in forget:
1357 1347 if lfdirstate[f] == b'a':
1358 1348 lfdirstate.drop(f)
1359 1349 else:
1360 1350 lfdirstate.remove(f)
1361 1351 lfdirstate.write()
1362 1352 standins = [lfutil.standin(f) for f in forget]
1363 1353 for f in standins:
1364 1354 repo.wvfs.unlinkpath(f, ignoremissing=True)
1365 1355 rejected = repo[None].forget(standins)
1366 1356
1367 1357 bad.extend(f for f in rejected if f in m.files())
1368 1358 forgot.extend(f for f in forget if f not in rejected)
1369 1359 return bad, forgot
1370 1360
1371 1361
1372 1362 def _getoutgoings(repo, other, missing, addfunc):
1373 1363 """get pairs of filename and largefile hash in outgoing revisions
1374 1364 in 'missing'.
1375 1365
1376 1366 largefiles already existing on 'other' repository are ignored.
1377 1367
1378 1368 'addfunc' is invoked with each unique pairs of filename and
1379 1369 largefile hash value.
1380 1370 """
1381 1371 knowns = set()
1382 1372 lfhashes = set()
1383 1373
1384 1374 def dedup(fn, lfhash):
1385 1375 k = (fn, lfhash)
1386 1376 if k not in knowns:
1387 1377 knowns.add(k)
1388 1378 lfhashes.add(lfhash)
1389 1379
1390 1380 lfutil.getlfilestoupload(repo, missing, dedup)
1391 1381 if lfhashes:
1392 1382 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1393 1383 for fn, lfhash in knowns:
1394 1384 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1395 1385 addfunc(fn, lfhash)
1396 1386
1397 1387
1398 1388 def outgoinghook(ui, repo, other, opts, missing):
1399 1389 if opts.pop(b'large', None):
1400 1390 lfhashes = set()
1401 1391 if ui.debugflag:
1402 1392 toupload = {}
1403 1393
1404 1394 def addfunc(fn, lfhash):
1405 1395 if fn not in toupload:
1406 1396 toupload[fn] = []
1407 1397 toupload[fn].append(lfhash)
1408 1398 lfhashes.add(lfhash)
1409 1399
1410 1400 def showhashes(fn):
1411 1401 for lfhash in sorted(toupload[fn]):
1412 1402 ui.debug(b' %s\n' % lfhash)
1413 1403
1414 1404 else:
1415 1405 toupload = set()
1416 1406
1417 1407 def addfunc(fn, lfhash):
1418 1408 toupload.add(fn)
1419 1409 lfhashes.add(lfhash)
1420 1410
1421 1411 def showhashes(fn):
1422 1412 pass
1423 1413
1424 1414 _getoutgoings(repo, other, missing, addfunc)
1425 1415
1426 1416 if not toupload:
1427 1417 ui.status(_(b'largefiles: no files to upload\n'))
1428 1418 else:
1429 1419 ui.status(
1430 1420 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1431 1421 )
1432 1422 for file in sorted(toupload):
1433 1423 ui.status(lfutil.splitstandin(file) + b'\n')
1434 1424 showhashes(file)
1435 1425 ui.status(b'\n')
1436 1426
1437 1427
1438 1428 @eh.wrapcommand(
1439 1429 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1440 1430 )
1441 1431 def _outgoingcmd(orig, *args, **kwargs):
1442 1432 # Nothing to do here other than add the extra help option- the hook above
1443 1433 # processes it.
1444 1434 return orig(*args, **kwargs)
1445 1435
1446 1436
1447 1437 def summaryremotehook(ui, repo, opts, changes):
1448 1438 largeopt = opts.get(b'large', False)
1449 1439 if changes is None:
1450 1440 if largeopt:
1451 1441 return (False, True) # only outgoing check is needed
1452 1442 else:
1453 1443 return (False, False)
1454 1444 elif largeopt:
1455 1445 url, branch, peer, outgoing = changes[1]
1456 1446 if peer is None:
1457 1447 # i18n: column positioning for "hg summary"
1458 1448 ui.status(_(b'largefiles: (no remote repo)\n'))
1459 1449 return
1460 1450
1461 1451 toupload = set()
1462 1452 lfhashes = set()
1463 1453
1464 1454 def addfunc(fn, lfhash):
1465 1455 toupload.add(fn)
1466 1456 lfhashes.add(lfhash)
1467 1457
1468 1458 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1469 1459
1470 1460 if not toupload:
1471 1461 # i18n: column positioning for "hg summary"
1472 1462 ui.status(_(b'largefiles: (no files to upload)\n'))
1473 1463 else:
1474 1464 # i18n: column positioning for "hg summary"
1475 1465 ui.status(
1476 1466 _(b'largefiles: %d entities for %d files to upload\n')
1477 1467 % (len(lfhashes), len(toupload))
1478 1468 )
1479 1469
1480 1470
1481 1471 @eh.wrapcommand(
1482 1472 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1483 1473 )
1484 1474 def overridesummary(orig, ui, repo, *pats, **opts):
1485 1475 with lfstatus(repo):
1486 1476 orig(ui, repo, *pats, **opts)
1487 1477
1488 1478
1489 1479 @eh.wrapfunction(scmutil, b'addremove')
1490 1480 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1491 1481 if opts is None:
1492 1482 opts = {}
1493 1483 if not lfutil.islfilesrepo(repo):
1494 1484 return orig(repo, matcher, prefix, uipathfn, opts)
1495 1485 # Get the list of missing largefiles so we can remove them
1496 1486 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1497 1487 unsure, s = lfdirstate.status(
1498 1488 matchmod.always(),
1499 1489 subrepos=[],
1500 1490 ignored=False,
1501 1491 clean=False,
1502 1492 unknown=False,
1503 1493 )
1504 1494
1505 1495 # Call into the normal remove code, but the removing of the standin, we want
1506 1496 # to have handled by original addremove. Monkey patching here makes sure
1507 1497 # we don't remove the standin in the largefiles code, preventing a very
1508 1498 # confused state later.
1509 1499 if s.deleted:
1510 1500 m = copy.copy(matcher)
1511 1501
1512 1502 # The m._files and m._map attributes are not changed to the deleted list
1513 1503 # because that affects the m.exact() test, which in turn governs whether
1514 1504 # or not the file name is printed, and how. Simply limit the original
1515 1505 # matches to those in the deleted status list.
1516 1506 matchfn = m.matchfn
1517 1507 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1518 1508
1519 1509 removelargefiles(
1520 1510 repo.ui,
1521 1511 repo,
1522 1512 True,
1523 1513 m,
1524 1514 uipathfn,
1525 1515 opts.get(b'dry_run'),
1526 1516 **pycompat.strkwargs(opts)
1527 1517 )
1528 1518 # Call into the normal add code, and any files that *should* be added as
1529 1519 # largefiles will be
1530 1520 added, bad = addlargefiles(
1531 1521 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1532 1522 )
1533 1523 # Now that we've handled largefiles, hand off to the original addremove
1534 1524 # function to take care of the rest. Make sure it doesn't do anything with
1535 1525 # largefiles by passing a matcher that will ignore them.
1536 1526 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1537 1527 return orig(repo, matcher, prefix, uipathfn, opts)
1538 1528
1539 1529
1540 1530 # Calling purge with --all will cause the largefiles to be deleted.
1541 1531 # Override repo.status to prevent this from happening.
1542 1532 @eh.wrapcommand(b'purge', extension=b'purge')
1543 1533 def overridepurge(orig, ui, repo, *dirs, **opts):
1544 1534 # XXX Monkey patching a repoview will not work. The assigned attribute will
1545 1535 # be set on the unfiltered repo, but we will only lookup attributes in the
1546 1536 # unfiltered repo if the lookup in the repoview object itself fails. As the
1547 1537 # monkey patched method exists on the repoview class the lookup will not
1548 1538 # fail. As a result, the original version will shadow the monkey patched
1549 1539 # one, defeating the monkey patch.
1550 1540 #
1551 1541 # As a work around we use an unfiltered repo here. We should do something
1552 1542 # cleaner instead.
1553 1543 repo = repo.unfiltered()
1554 1544 oldstatus = repo.status
1555 1545
1556 1546 def overridestatus(
1557 1547 node1=b'.',
1558 1548 node2=None,
1559 1549 match=None,
1560 1550 ignored=False,
1561 1551 clean=False,
1562 1552 unknown=False,
1563 1553 listsubrepos=False,
1564 1554 ):
1565 1555 r = oldstatus(
1566 1556 node1, node2, match, ignored, clean, unknown, listsubrepos
1567 1557 )
1568 1558 lfdirstate = lfutil.openlfdirstate(ui, repo)
1569 1559 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1570 1560 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1571 1561 return scmutil.status(
1572 1562 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1573 1563 )
1574 1564
1575 1565 repo.status = overridestatus
1576 1566 orig(ui, repo, *dirs, **opts)
1577 1567 repo.status = oldstatus
1578 1568
1579 1569
1580 1570 @eh.wrapcommand(b'rollback')
1581 1571 def overriderollback(orig, ui, repo, **opts):
1582 1572 with repo.wlock():
1583 1573 before = repo.dirstate.parents()
1584 1574 orphans = {
1585 1575 f
1586 1576 for f in repo.dirstate
1587 1577 if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
1588 1578 }
1589 1579 result = orig(ui, repo, **opts)
1590 1580 after = repo.dirstate.parents()
1591 1581 if before == after:
1592 1582 return result # no need to restore standins
1593 1583
1594 1584 pctx = repo[b'.']
1595 1585 for f in repo.dirstate:
1596 1586 if lfutil.isstandin(f):
1597 1587 orphans.discard(f)
1598 1588 if repo.dirstate[f] == b'r':
1599 1589 repo.wvfs.unlinkpath(f, ignoremissing=True)
1600 1590 elif f in pctx:
1601 1591 fctx = pctx[f]
1602 1592 repo.wwrite(f, fctx.data(), fctx.flags())
1603 1593 else:
1604 1594 # content of standin is not so important in 'a',
1605 1595 # 'm' or 'n' (coming from the 2nd parent) cases
1606 1596 lfutil.writestandin(repo, f, b'', False)
1607 1597 for standin in orphans:
1608 1598 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1609 1599
1610 1600 lfdirstate = lfutil.openlfdirstate(ui, repo)
1611 1601 orphans = set(lfdirstate)
1612 1602 lfiles = lfutil.listlfiles(repo)
1613 1603 for file in lfiles:
1614 1604 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1615 1605 orphans.discard(file)
1616 1606 for lfile in orphans:
1617 1607 lfdirstate.drop(lfile)
1618 1608 lfdirstate.write()
1619 1609 return result
1620 1610
1621 1611
1622 1612 @eh.wrapcommand(b'transplant', extension=b'transplant')
1623 1613 def overridetransplant(orig, ui, repo, *revs, **opts):
1624 1614 resuming = opts.get('continue')
1625 1615 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1626 1616 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1627 1617 try:
1628 1618 result = orig(ui, repo, *revs, **opts)
1629 1619 finally:
1630 1620 repo._lfstatuswriters.pop()
1631 1621 repo._lfcommithooks.pop()
1632 1622 return result
1633 1623
1634 1624
1635 1625 @eh.wrapcommand(b'cat')
1636 1626 def overridecat(orig, ui, repo, file1, *pats, **opts):
1637 1627 opts = pycompat.byteskwargs(opts)
1638 1628 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
1639 1629 err = 1
1640 1630 notbad = set()
1641 1631 m = scmutil.match(ctx, (file1,) + pats, opts)
1642 1632 origmatchfn = m.matchfn
1643 1633
1644 1634 def lfmatchfn(f):
1645 1635 if origmatchfn(f):
1646 1636 return True
1647 1637 lf = lfutil.splitstandin(f)
1648 1638 if lf is None:
1649 1639 return False
1650 1640 notbad.add(lf)
1651 1641 return origmatchfn(lf)
1652 1642
1653 1643 m.matchfn = lfmatchfn
1654 1644 origbadfn = m.bad
1655 1645
1656 1646 def lfbadfn(f, msg):
1657 1647 if f not in notbad:
1658 1648 origbadfn(f, msg)
1659 1649
1660 1650 m.bad = lfbadfn
1661 1651
1662 1652 origvisitdirfn = m.visitdir
1663 1653
1664 1654 def lfvisitdirfn(dir):
1665 1655 if dir == lfutil.shortname:
1666 1656 return True
1667 1657 ret = origvisitdirfn(dir)
1668 1658 if ret:
1669 1659 return ret
1670 1660 lf = lfutil.splitstandin(dir)
1671 1661 if lf is None:
1672 1662 return False
1673 1663 return origvisitdirfn(lf)
1674 1664
1675 1665 m.visitdir = lfvisitdirfn
1676 1666
1677 1667 for f in ctx.walk(m):
1678 1668 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1679 1669 lf = lfutil.splitstandin(f)
1680 1670 if lf is None or origmatchfn(f):
1681 1671 # duplicating unreachable code from commands.cat
1682 1672 data = ctx[f].data()
1683 1673 if opts.get(b'decode'):
1684 1674 data = repo.wwritedata(f, data)
1685 1675 fp.write(data)
1686 1676 else:
1687 1677 hash = lfutil.readasstandin(ctx[f])
1688 1678 if not lfutil.inusercache(repo.ui, hash):
1689 1679 store = storefactory.openstore(repo)
1690 1680 success, missing = store.get([(lf, hash)])
1691 1681 if len(success) != 1:
1692 1682 raise error.Abort(
1693 1683 _(
1694 1684 b'largefile %s is not in cache and could not be '
1695 1685 b'downloaded'
1696 1686 )
1697 1687 % lf
1698 1688 )
1699 1689 path = lfutil.usercachepath(repo.ui, hash)
1700 1690 with open(path, b"rb") as fpin:
1701 1691 for chunk in util.filechunkiter(fpin):
1702 1692 fp.write(chunk)
1703 1693 err = 0
1704 1694 return err
1705 1695
1706 1696
1707 1697 @eh.wrapfunction(merge, b'update')
1708 1698 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1709 1699 matcher = kwargs.get('matcher', None)
1710 1700 # note if this is a partial update
1711 1701 partial = matcher and not matcher.always()
1712 1702 with repo.wlock():
1713 1703 # branch | | |
1714 1704 # merge | force | partial | action
1715 1705 # -------+-------+---------+--------------
1716 1706 # x | x | x | linear-merge
1717 1707 # o | x | x | branch-merge
1718 1708 # x | o | x | overwrite (as clean update)
1719 1709 # o | o | x | force-branch-merge (*1)
1720 1710 # x | x | o | (*)
1721 1711 # o | x | o | (*)
1722 1712 # x | o | o | overwrite (as revert)
1723 1713 # o | o | o | (*)
1724 1714 #
1725 1715 # (*) don't care
1726 1716 # (*1) deprecated, but used internally (e.g.: "rebase --collapse")
1727 1717
1728 1718 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1729 1719 unsure, s = lfdirstate.status(
1730 1720 matchmod.always(),
1731 1721 subrepos=[],
1732 1722 ignored=False,
1733 1723 clean=True,
1734 1724 unknown=False,
1735 1725 )
1736 1726 oldclean = set(s.clean)
1737 1727 pctx = repo[b'.']
1738 1728 dctx = repo[node]
1739 1729 for lfile in unsure + s.modified:
1740 1730 lfileabs = repo.wvfs.join(lfile)
1741 1731 if not repo.wvfs.exists(lfileabs):
1742 1732 continue
1743 1733 lfhash = lfutil.hashfile(lfileabs)
1744 1734 standin = lfutil.standin(lfile)
1745 1735 lfutil.writestandin(
1746 1736 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1747 1737 )
1748 1738 if standin in pctx and lfhash == lfutil.readasstandin(
1749 1739 pctx[standin]
1750 1740 ):
1751 1741 oldclean.add(lfile)
1752 1742 for lfile in s.added:
1753 1743 fstandin = lfutil.standin(lfile)
1754 1744 if fstandin not in dctx:
1755 1745 # in this case, content of standin file is meaningless
1756 1746 # (in dctx, lfile is unknown, or normal file)
1757 1747 continue
1758 1748 lfutil.updatestandin(repo, lfile, fstandin)
1759 1749 # mark all clean largefiles as dirty, just in case the update gets
1760 1750 # interrupted before largefiles and lfdirstate are synchronized
1761 1751 for lfile in oldclean:
1762 1752 lfdirstate.normallookup(lfile)
1763 1753 lfdirstate.write()
1764 1754
1765 1755 oldstandins = lfutil.getstandinsstate(repo)
1766 1756 # Make sure the merge runs on disk, not in-memory. largefiles is not a
1767 1757 # good candidate for in-memory merge (large files, custom dirstate,
1768 1758 # matcher usage).
1769 1759 kwargs['wc'] = repo[None]
1770 1760 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1771 1761
1772 1762 newstandins = lfutil.getstandinsstate(repo)
1773 1763 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1774 1764
1775 1765 # to avoid leaving all largefiles as dirty and thus rehashing them, mark
1776 1766 # all the ones that didn't change as clean
1777 1767 for lfile in oldclean.difference(filelist):
1778 1768 lfdirstate.normal(lfile)
1779 1769 lfdirstate.write()
1780 1770
1781 1771 if branchmerge or force or partial:
1782 1772 filelist.extend(s.deleted + s.removed)
1783 1773
1784 1774 lfcommands.updatelfiles(
1785 1775 repo.ui, repo, filelist=filelist, normallookup=partial
1786 1776 )
1787 1777
1788 1778 return result
1789 1779
1790 1780
1791 1781 @eh.wrapfunction(scmutil, b'marktouched')
1792 1782 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1793 1783 result = orig(repo, files, *args, **kwargs)
1794 1784
1795 1785 filelist = []
1796 1786 for f in files:
1797 1787 lf = lfutil.splitstandin(f)
1798 1788 if lf is not None:
1799 1789 filelist.append(lf)
1800 1790 if filelist:
1801 1791 lfcommands.updatelfiles(
1802 1792 repo.ui,
1803 1793 repo,
1804 1794 filelist=filelist,
1805 1795 printmessage=False,
1806 1796 normallookup=True,
1807 1797 )
1808 1798
1809 1799 return result
1810 1800
1811 1801
1812 1802 @eh.wrapfunction(upgrade, b'preservedrequirements')
1813 1803 @eh.wrapfunction(upgrade, b'supporteddestrequirements')
1814 1804 def upgraderequirements(orig, repo):
1815 1805 reqs = orig(repo)
1816 1806 if b'largefiles' in repo.requirements:
1817 1807 reqs.add(b'largefiles')
1818 1808 return reqs
1819 1809
1820 1810
1821 1811 _lfscheme = b'largefile://'
1822 1812
1823 1813
1824 1814 @eh.wrapfunction(urlmod, b'open')
1825 1815 def openlargefile(orig, ui, url_, data=None):
1826 1816 if url_.startswith(_lfscheme):
1827 1817 if data:
1828 1818 msg = b"cannot use data on a 'largefile://' url"
1829 1819 raise error.ProgrammingError(msg)
1830 1820 lfid = url_[len(_lfscheme) :]
1831 1821 return storefactory.getlfile(ui, lfid)
1832 1822 else:
1833 1823 return orig(ui, url_, data=data)
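# A minimal illustrative sketch, not part of this changeset (the hash value
# below is a hypothetical placeholder): with the openlargefile wrapper
# installed, a 'largefile://<hash>' URL handed to urlmod.open() is served
# from the largefiles store via storefactory.getlfile() rather than the
# normal HTTP/file handlers, while any other URL still falls through to
# orig().
#
#     lfid = b'0' * 40                      # hypothetical largefile hash
#     fobj = urlmod.open(ui, b'largefile://' + lfid)
#     # behaves like storefactory.getlfile(ui, lfid); passing data= to this
#     # scheme raises a ProgrammingError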
@@ -1,2195 +1,2216 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import stat
12 12 import struct
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 addednodeid,
17 17 modifiednodeid,
18 18 nullid,
19 19 nullrev,
20 20 )
21 21 from .thirdparty import attr
22 22 from . import (
23 23 copies,
24 24 encoding,
25 25 error,
26 26 filemerge,
27 27 match as matchmod,
28 28 mergestate as mergestatemod,
29 29 obsutil,
30 30 pathutil,
31 31 pycompat,
32 32 scmutil,
33 33 subrepoutil,
34 34 util,
35 35 worker,
36 36 )
37 37
38 38 _pack = struct.pack
39 39 _unpack = struct.unpack
40 40
41 41
42 42 def _getcheckunknownconfig(repo, section, name):
43 43 config = repo.ui.config(section, name)
44 44 valid = [b'abort', b'ignore', b'warn']
45 45 if config not in valid:
46 46 validstr = b', '.join([b"'" + v + b"'" for v in valid])
47 47 raise error.ConfigError(
48 48 _(b"%s.%s not valid ('%s' is none of %s)")
49 49 % (section, name, config, validstr)
50 50 )
51 51 return config
52 52
53 53
54 54 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
55 55 if wctx.isinmemory():
56 56 # Nothing to do in IMM because nothing in the "working copy" can be an
57 57 # unknown file.
58 58 #
59 59 # Note that we should bail out here, not in ``_checkunknownfiles()``,
60 60 # because that function does other useful work.
61 61 return False
62 62
63 63 if f2 is None:
64 64 f2 = f
65 65 return (
66 66 repo.wvfs.audit.check(f)
67 67 and repo.wvfs.isfileorlink(f)
68 68 and repo.dirstate.normalize(f) not in repo.dirstate
69 69 and mctx[f2].cmp(wctx[f])
70 70 )
71 71
72 72
73 73 class _unknowndirschecker(object):
74 74 """
75 75 Look for any unknown files or directories that may have a path conflict
76 76 with a file. If any path prefix of the file exists as a file or link,
77 77 then it conflicts. If the file itself is a directory that contains any
78 78 file that is not tracked, then it conflicts.
79 79
80 80 Returns the shortest path at which a conflict occurs, or None if there is
81 81 no conflict.
82 82 """
83 83
84 84 def __init__(self):
85 85 # A set of paths known to be good. This prevents repeated checking of
86 86 # dirs. It will be updated with any new dirs that are checked and found
87 87 # to be safe.
88 88 self._unknowndircache = set()
89 89
90 90 # A set of paths that are known to be absent. This prevents repeated
91 91 # checking of subdirectories that are known not to exist. It will be
92 92 # updated with any new dirs that are checked and found to be absent.
93 93 self._missingdircache = set()
94 94
95 95 def __call__(self, repo, wctx, f):
96 96 if wctx.isinmemory():
97 97 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
98 98 return False
99 99
100 100 # Check for path prefixes that exist as unknown files.
101 101 for p in reversed(list(pathutil.finddirs(f))):
102 102 if p in self._missingdircache:
103 103 return
104 104 if p in self._unknowndircache:
105 105 continue
106 106 if repo.wvfs.audit.check(p):
107 107 if (
108 108 repo.wvfs.isfileorlink(p)
109 109 and repo.dirstate.normalize(p) not in repo.dirstate
110 110 ):
111 111 return p
112 112 if not repo.wvfs.lexists(p):
113 113 self._missingdircache.add(p)
114 114 return
115 115 self._unknowndircache.add(p)
116 116
117 117 # Check if the file conflicts with a directory containing unknown files.
118 118 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
119 119 # Does the directory contain any files that are not in the dirstate?
120 120 for p, dirs, files in repo.wvfs.walk(f):
121 121 for fn in files:
122 122 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
123 123 relf = repo.dirstate.normalize(relf, isknown=True)
124 124 if relf not in repo.dirstate:
125 125 return f
126 126 return None
127 127
128 128
129 129 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
130 130 """
131 131 Considers any actions that care about the presence of conflicting unknown
132 132 files. For some actions, the result is to abort; for others, it is to
133 133 choose a different action.
134 134 """
135 135 fileconflicts = set()
136 136 pathconflicts = set()
137 137 warnconflicts = set()
138 138 abortconflicts = set()
139 139 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
140 140 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
141 141 pathconfig = repo.ui.configbool(
142 142 b'experimental', b'merge.checkpathconflicts'
143 143 )
144 144 if not force:
145 145
146 146 def collectconflicts(conflicts, config):
147 147 if config == b'abort':
148 148 abortconflicts.update(conflicts)
149 149 elif config == b'warn':
150 150 warnconflicts.update(conflicts)
151 151
152 152 checkunknowndirs = _unknowndirschecker()
153 153 for f, (m, args, msg) in pycompat.iteritems(actions):
154 154 if m in (
155 155 mergestatemod.ACTION_CREATED,
156 156 mergestatemod.ACTION_DELETED_CHANGED,
157 157 ):
158 158 if _checkunknownfile(repo, wctx, mctx, f):
159 159 fileconflicts.add(f)
160 160 elif pathconfig and f not in wctx:
161 161 path = checkunknowndirs(repo, wctx, f)
162 162 if path is not None:
163 163 pathconflicts.add(path)
164 164 elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
165 165 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
166 166 fileconflicts.add(f)
167 167
168 168 allconflicts = fileconflicts | pathconflicts
169 169 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
170 170 unknownconflicts = allconflicts - ignoredconflicts
171 171 collectconflicts(ignoredconflicts, ignoredconfig)
172 172 collectconflicts(unknownconflicts, unknownconfig)
173 173 else:
174 174 for f, (m, args, msg) in pycompat.iteritems(actions):
175 175 if m == mergestatemod.ACTION_CREATED_MERGE:
176 176 fl2, anc = args
177 177 different = _checkunknownfile(repo, wctx, mctx, f)
178 178 if repo.dirstate._ignore(f):
179 179 config = ignoredconfig
180 180 else:
181 181 config = unknownconfig
182 182
183 183 # The behavior when force is True is described by this table:
184 184 # config different mergeforce | action backup
185 185 # * n * | get n
186 186 # * y y | merge -
187 187 # abort y n | merge - (1)
188 188 # warn y n | warn + get y
189 189 # ignore y n | get y
190 190 #
191 191 # (1) this is probably the wrong behavior here -- we should
192 192 # probably abort, but some actions like rebases currently
193 193 # don't like an abort happening in the middle of
194 194 # merge.update.
195 195 if not different:
196 196 actions[f] = (
197 197 mergestatemod.ACTION_GET,
198 198 (fl2, False),
199 199 b'remote created',
200 200 )
201 201 elif mergeforce or config == b'abort':
202 202 actions[f] = (
203 203 mergestatemod.ACTION_MERGE,
204 204 (f, f, None, False, anc),
205 205 b'remote differs from untracked local',
206 206 )
207 207 elif config == b'abort':
208 208 abortconflicts.add(f)
209 209 else:
210 210 if config == b'warn':
211 211 warnconflicts.add(f)
212 212 actions[f] = (
213 213 mergestatemod.ACTION_GET,
214 214 (fl2, True),
215 215 b'remote created',
216 216 )
217 217
218 218 for f in sorted(abortconflicts):
219 219 warn = repo.ui.warn
220 220 if f in pathconflicts:
221 221 if repo.wvfs.isfileorlink(f):
222 222 warn(_(b"%s: untracked file conflicts with directory\n") % f)
223 223 else:
224 224 warn(_(b"%s: untracked directory conflicts with file\n") % f)
225 225 else:
226 226 warn(_(b"%s: untracked file differs\n") % f)
227 227 if abortconflicts:
228 228 raise error.Abort(
229 229 _(
230 230 b"untracked files in working directory "
231 231 b"differ from files in requested revision"
232 232 )
233 233 )
234 234
235 235 for f in sorted(warnconflicts):
236 236 if repo.wvfs.isfileorlink(f):
237 237 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
238 238 else:
239 239 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
240 240
241 241 for f, (m, args, msg) in pycompat.iteritems(actions):
242 242 if m == mergestatemod.ACTION_CREATED:
243 243 backup = (
244 244 f in fileconflicts
245 245 or f in pathconflicts
246 246 or any(p in pathconflicts for p in pathutil.finddirs(f))
247 247 )
248 248 (flags,) = args
249 249 actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg)
250 250
251 251
252 252 def _forgetremoved(wctx, mctx, branchmerge):
253 253 """
254 254 Forget removed files
255 255
256 256 If we're jumping between revisions (as opposed to merging), and if
257 257 neither the working directory nor the target rev has the file,
258 258 then we need to remove it from the dirstate, to prevent the
259 259 dirstate from listing the file when it is no longer in the
260 260 manifest.
261 261
262 262 If we're merging, and the other revision has removed a file
263 263 that is not present in the working directory, we need to mark it
264 264 as removed.
265 265 """
266 266
267 267 actions = {}
268 268 m = mergestatemod.ACTION_FORGET
269 269 if branchmerge:
270 270 m = mergestatemod.ACTION_REMOVE
271 271 for f in wctx.deleted():
272 272 if f not in mctx:
273 273 actions[f] = m, None, b"forget deleted"
274 274
275 275 if not branchmerge:
276 276 for f in wctx.removed():
277 277 if f not in mctx:
278 278 actions[f] = (
279 279 mergestatemod.ACTION_FORGET,
280 280 None,
281 281 b"forget removed",
282 282 )
283 283
284 284 return actions
285 285
286 286
287 287 def _checkcollision(repo, wmf, actions):
288 288 """
289 289 Check for case-folding collisions.
290 290 """
291 291 # If the repo is narrowed, filter out files outside the narrowspec.
292 292 narrowmatch = repo.narrowmatch()
293 293 if not narrowmatch.always():
294 294 pmmf = set(wmf.walk(narrowmatch))
295 295 if actions:
296 296 narrowactions = {}
297 297 for m, actionsfortype in pycompat.iteritems(actions):
298 298 narrowactions[m] = []
299 299 for (f, args, msg) in actionsfortype:
300 300 if narrowmatch(f):
301 301 narrowactions[m].append((f, args, msg))
302 302 actions = narrowactions
303 303 else:
304 304 # build provisional merged manifest up
305 305 pmmf = set(wmf)
306 306
307 307 if actions:
308 308 # KEEP and EXEC are no-op
309 309 for m in (
310 310 mergestatemod.ACTION_ADD,
311 311 mergestatemod.ACTION_ADD_MODIFIED,
312 312 mergestatemod.ACTION_FORGET,
313 313 mergestatemod.ACTION_GET,
314 314 mergestatemod.ACTION_CHANGED_DELETED,
315 315 mergestatemod.ACTION_DELETED_CHANGED,
316 316 ):
317 317 for f, args, msg in actions[m]:
318 318 pmmf.add(f)
319 319 for f, args, msg in actions[mergestatemod.ACTION_REMOVE]:
320 320 pmmf.discard(f)
321 321 for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
322 322 f2, flags = args
323 323 pmmf.discard(f2)
324 324 pmmf.add(f)
325 325 for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
326 326 pmmf.add(f)
327 327 for f, args, msg in actions[mergestatemod.ACTION_MERGE]:
328 328 f1, f2, fa, move, anc = args
329 329 if move:
330 330 pmmf.discard(f1)
331 331 pmmf.add(f)
332 332
333 333 # check case-folding collision in provisional merged manifest
334 334 foldmap = {}
335 335 for f in pmmf:
336 336 fold = util.normcase(f)
337 337 if fold in foldmap:
338 338 raise error.Abort(
339 339 _(b"case-folding collision between %s and %s")
340 340 % (f, foldmap[fold])
341 341 )
342 342 foldmap[fold] = f
343 343
344 344 # check case-folding of directories
345 345 foldprefix = unfoldprefix = lastfull = b''
346 346 for fold, f in sorted(foldmap.items()):
347 347 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
348 348 # the folded prefix matches but actual casing is different
349 349 raise error.Abort(
350 350 _(b"case-folding collision between %s and directory of %s")
351 351 % (lastfull, f)
352 352 )
353 353 foldprefix = fold + b'/'
354 354 unfoldprefix = f + b'/'
355 355 lastfull = f
356 356
357 357
358 358 def driverpreprocess(repo, ms, wctx, labels=None):
359 359 """run the preprocess step of the merge driver, if any
360 360
361 361 This is currently not implemented -- it's an extension point."""
362 362 return True
363 363
364 364
365 365 def driverconclude(repo, ms, wctx, labels=None):
366 366 """run the conclude step of the merge driver, if any
367 367
368 368 This is currently not implemented -- it's an extension point."""
369 369 return True
370 370
371 371
372 372 def _filesindirs(repo, manifest, dirs):
373 373 """
374 374 Generator that yields pairs of all the files in the manifest that are found
375 375 inside the directories listed in dirs, and which directory they are found
376 376 in.
377 377 """
378 378 for f in manifest:
379 379 for p in pathutil.finddirs(f):
380 380 if p in dirs:
381 381 yield f, p
382 382 break
383 383
384 384
385 385 def checkpathconflicts(repo, wctx, mctx, actions):
386 386 """
387 387 Check if any actions introduce path conflicts in the repository, updating
388 388 actions to record or handle the path conflict accordingly.
389 389 """
390 390 mf = wctx.manifest()
391 391
392 392 # The set of local files that conflict with a remote directory.
393 393 localconflicts = set()
394 394
395 395 # The set of directories that conflict with a remote file, and so may cause
396 396 # conflicts if they still contain any files after the merge.
397 397 remoteconflicts = set()
398 398
399 399 # The set of directories that appear as both a file and a directory in the
400 400 # remote manifest. These indicate an invalid remote manifest, which
401 401 # can't be updated to cleanly.
402 402 invalidconflicts = set()
403 403
404 404 # The set of directories that contain files that are being created.
405 405 createdfiledirs = set()
406 406
407 407 # The set of files deleted by all the actions.
408 408 deletedfiles = set()
409 409
410 410 for f, (m, args, msg) in actions.items():
411 411 if m in (
412 412 mergestatemod.ACTION_CREATED,
413 413 mergestatemod.ACTION_DELETED_CHANGED,
414 414 mergestatemod.ACTION_MERGE,
415 415 mergestatemod.ACTION_CREATED_MERGE,
416 416 ):
417 417 # This action may create a new local file.
418 418 createdfiledirs.update(pathutil.finddirs(f))
419 419 if mf.hasdir(f):
420 420 # The file aliases a local directory. This might be ok if all
421 421 # the files in the local directory are being deleted. This
422 422 # will be checked once we know what all the deleted files are.
423 423 remoteconflicts.add(f)
424 424 # Track the names of all deleted files.
425 425 if m == mergestatemod.ACTION_REMOVE:
426 426 deletedfiles.add(f)
427 427 if m == mergestatemod.ACTION_MERGE:
428 428 f1, f2, fa, move, anc = args
429 429 if move:
430 430 deletedfiles.add(f1)
431 431 if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL:
432 432 f2, flags = args
433 433 deletedfiles.add(f2)
434 434
435 435 # Check all directories that contain created files for path conflicts.
436 436 for p in createdfiledirs:
437 437 if p in mf:
438 438 if p in mctx:
439 439 # A file is in a directory which aliases both a local
440 440 # and a remote file. This is an internal inconsistency
441 441 # within the remote manifest.
442 442 invalidconflicts.add(p)
443 443 else:
444 444 # A file is in a directory which aliases a local file.
445 445 # We will need to rename the local file.
446 446 localconflicts.add(p)
447 447 if p in actions and actions[p][0] in (
448 448 mergestatemod.ACTION_CREATED,
449 449 mergestatemod.ACTION_DELETED_CHANGED,
450 450 mergestatemod.ACTION_MERGE,
451 451 mergestatemod.ACTION_CREATED_MERGE,
452 452 ):
453 453 # The file is in a directory which aliases a remote file.
454 454 # This is an internal inconsistency within the remote
455 455 # manifest.
456 456 invalidconflicts.add(p)
457 457
458 458 # Rename all local conflicting files that have not been deleted.
459 459 for p in localconflicts:
460 460 if p not in deletedfiles:
461 461 ctxname = bytes(wctx).rstrip(b'+')
462 462 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
463 463 porig = wctx[p].copysource() or p
464 464 actions[pnew] = (
465 465 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
466 466 (p, porig),
467 467 b'local path conflict',
468 468 )
469 469 actions[p] = (
470 470 mergestatemod.ACTION_PATH_CONFLICT,
471 471 (pnew, b'l'),
472 472 b'path conflict',
473 473 )
474 474
475 475 if remoteconflicts:
476 476 # Check if all files in the conflicting directories have been removed.
477 477 ctxname = bytes(mctx).rstrip(b'+')
478 478 for f, p in _filesindirs(repo, mf, remoteconflicts):
479 479 if f not in deletedfiles:
480 480 m, args, msg = actions[p]
481 481 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
482 482 if m in (
483 483 mergestatemod.ACTION_DELETED_CHANGED,
484 484 mergestatemod.ACTION_MERGE,
485 485 ):
486 486 # Action was merge, just update target.
487 487 actions[pnew] = (m, args, msg)
488 488 else:
489 489 # Action was create, change to renamed get action.
490 490 fl = args[0]
491 491 actions[pnew] = (
492 492 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
493 493 (p, fl),
494 494 b'remote path conflict',
495 495 )
496 496 actions[p] = (
497 497 mergestatemod.ACTION_PATH_CONFLICT,
498 498 (pnew, mergestatemod.ACTION_REMOVE),
499 499 b'path conflict',
500 500 )
501 501 remoteconflicts.remove(p)
502 502 break
503 503
504 504 if invalidconflicts:
505 505 for p in invalidconflicts:
506 506 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
507 507 raise error.Abort(_(b"destination manifest contains path conflicts"))
508 508
509 509
510 510 def _filternarrowactions(narrowmatch, branchmerge, actions):
511 511 """
512 512 Filters out actions that can be ignored because the repo is narrowed.
513 513
514 514 Raise an exception if the merge cannot be completed because the repo is
515 515 narrowed.
516 516 """
517 517 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
518 518 nonconflicttypes = set(b'a am c cm f g gs r e'.split())
519 519 # We mutate the items in the dict during iteration, so iterate
520 520 # over a copy.
521 521 for f, action in list(actions.items()):
522 522 if narrowmatch(f):
523 523 pass
524 524 elif not branchmerge:
525 525 del actions[f] # just updating, ignore changes outside clone
526 526 elif action[0] in nooptypes:
527 527 del actions[f] # merge does not affect file
528 528 elif action[0] in nonconflicttypes:
529 529 raise error.Abort(
530 530 _(
531 531 b'merge affects file \'%s\' outside narrow, '
532 532 b'which is not yet supported'
533 533 )
534 534 % f,
535 535 hint=_(b'merging in the other direction may work'),
536 536 )
537 537 else:
538 538 raise error.Abort(
539 539 _(b'conflict in file \'%s\' is outside narrow clone') % f
540 540 )
541 541
542 542
543 543 class mergeresult(object):
544 544 '''An object representing the result of merging manifests.
545 545 
546 546 It has information about what actions need to be performed on dirstate,
547 547 mapping of divergent renames and other such cases. '''
548 548
549 549 def __init__(self):
550 550 """
551 551 actions: dict with filenames as keys and action-related info as values
552 552 diverge: mapping of source name -> list of dest names for
553 553 divergent renames
554 554 renamedelete: mapping of source name -> list of destinations for files
555 555 deleted on one side and renamed on the other
556 556 commitinfo: dict containing data which should be used on commit;
557 557 it contains a filename -> info mapping
558 558 """
559 559 self._actions = {}
560 560 self._diverge = {}
561 561 self._renamedelete = {}
562 562 self._commitinfo = {}
563 563
564 def updatevalues(self, actions, diverge, renamedelete, commitinfo):
565 self._actions = actions
564 def updatevalues(self, diverge, renamedelete, commitinfo):
566 565 self._diverge = diverge
567 566 self._renamedelete = renamedelete
568 567 self._commitinfo = commitinfo
569 568
569 def addfile(self, filename, action, data, message):
570 """ adds a new file to the mergeresult object
571
572 filename: file which we are adding
573 action: one of mergestatemod.ACTION_*
574 data: a tuple of information like fctx and ctx related to this merge
575 message: a message about the merge
576 """
577 self._actions[filename] = (action, data, message)
578
570 579 @property
571 580 def actions(self):
572 581 return self._actions
573 582
574 583 @property
575 584 def diverge(self):
576 585 return self._diverge
577 586
578 587 @property
579 588 def renamedelete(self):
580 589 return self._renamedelete
581 590
582 591 @property
583 592 def commitinfo(self):
584 593 return self._commitinfo
585 594
586 595 @property
587 596 def actionsdict(self):
588 597 """ returns a dictionary of actions to be perfomed with action as key
589 598 and a list of files and related arguments as values """
590 599 # Convert to dictionary-of-lists format
591 600 actions = emptyactions()
592 601 for f, (m, args, msg) in pycompat.iteritems(self._actions):
593 602 if m not in actions:
594 603 actions[m] = []
595 604 actions[m].append((f, args, msg))
596 605
597 606 return actions
598 607
599 608 def setactions(self, actions):
600 609 self._actions = actions
601 610
602 611 def hasconflicts(self):
603 612 """ tells whether this merge resulted in some actions which can
604 613 result in conflicts or not """
605 614 for _f, (m, _unused, _unused) in pycompat.iteritems(self._actions):
606 615 if m not in (
607 616 mergestatemod.ACTION_GET,
608 617 mergestatemod.ACTION_KEEP,
609 618 mergestatemod.ACTION_EXEC,
610 619 mergestatemod.ACTION_REMOVE,
611 620 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
612 621 ):
613 622 return True
614 623
615 624 return False
616 625
617 626
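# A minimal usage sketch of the mergeresult API defined above; the file
# name, flags tuple and message are made-up examples, not values taken from
# this changeset:
#
#     mresult = mergeresult()
#     mresult.addfile(
#         b'foo.txt',
#         mergestatemod.ACTION_GET,
#         (b'', False),                     # (flags, backup)
#         b'remote is newer',
#     )
#     mresult.actions[b'foo.txt']
#     # -> (mergestatemod.ACTION_GET, (b'', False), b'remote is newer')
#     mresult.actionsdict[mergestatemod.ACTION_GET]
#     # -> [(b'foo.txt', (b'', False), b'remote is newer')]
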
618 627 def manifestmerge(
619 628 repo,
620 629 wctx,
621 630 p2,
622 631 pa,
623 632 branchmerge,
624 633 force,
625 634 matcher,
626 635 acceptremote,
627 636 followcopies,
628 637 forcefulldiff=False,
629 638 ):
630 639 """
631 640 Merge wctx and p2 with ancestor pa and generate merge action list
632 641
633 642 branchmerge and force are as passed in to update
634 643 matcher = matcher to filter file lists
635 644 acceptremote = accept the incoming changes without prompting
636 645
637 646 Returns an object of the mergeresult class
638 647 """
648 mresult = mergeresult()
639 649 if matcher is not None and matcher.always():
640 650 matcher = None
641 651
642 652 # manifests fetched in order are going to be faster, so prime the caches
643 653 [
644 654 x.manifest()
645 655 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
646 656 ]
647 657
648 658 branch_copies1 = copies.branch_copies()
649 659 branch_copies2 = copies.branch_copies()
650 660 diverge = {}
651 661 # information from merge which is needed at commit time
652 662 # for example choosing filelog of which parent to commit
653 663 # TODO: use specific constants in future for this mapping
654 664 commitinfo = {}
655 665 if followcopies:
656 666 branch_copies1, branch_copies2, diverge = copies.mergecopies(
657 667 repo, wctx, p2, pa
658 668 )
659 669
660 670 boolbm = pycompat.bytestr(bool(branchmerge))
661 671 boolf = pycompat.bytestr(bool(force))
662 672 boolm = pycompat.bytestr(bool(matcher))
663 673 repo.ui.note(_(b"resolving manifests\n"))
664 674 repo.ui.debug(
665 675 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
666 676 )
667 677 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
668 678
669 679 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
670 680 copied1 = set(branch_copies1.copy.values())
671 681 copied1.update(branch_copies1.movewithdir.values())
672 682 copied2 = set(branch_copies2.copy.values())
673 683 copied2.update(branch_copies2.movewithdir.values())
674 684
675 685 if b'.hgsubstate' in m1 and wctx.rev() is None:
676 686 # Check whether sub state is modified, and overwrite the manifest
677 687 # to flag the change. If wctx is a committed revision, we shouldn't
678 688 # care for the dirty state of the working directory.
679 689 if any(wctx.sub(s).dirty() for s in wctx.substate):
680 690 m1[b'.hgsubstate'] = modifiednodeid
681 691
682 692 # Don't use m2-vs-ma optimization if:
683 693 # - ma is the same as m1 or m2, which we're just going to diff again later
684 694 # - The caller specifically asks for a full diff, which is useful during bid
685 695 # merge.
686 696 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
687 697 # Identify which files are relevant to the merge, so we can limit the
688 698 # total m1-vs-m2 diff to just those files. This has significant
689 699 # performance benefits in large repositories.
690 700 relevantfiles = set(ma.diff(m2).keys())
691 701
692 702 # For copied and moved files, we need to add the source file too.
693 703 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
694 704 if copyvalue in relevantfiles:
695 705 relevantfiles.add(copykey)
696 706 for movedirkey in branch_copies1.movewithdir:
697 707 relevantfiles.add(movedirkey)
698 708 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
699 709 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
700 710
701 711 diff = m1.diff(m2, match=matcher)
702 712
703 actions = {}
704 713 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
705 714 if n1 and n2: # file exists on both local and remote side
706 715 if f not in ma:
707 716 # TODO: what if they're renamed from different sources?
708 717 fa = branch_copies1.copy.get(
709 718 f, None
710 719 ) or branch_copies2.copy.get(f, None)
711 720 if fa is not None:
712 actions[f] = (
721 mresult.addfile(
722 f,
713 723 mergestatemod.ACTION_MERGE,
714 724 (f, f, fa, False, pa.node()),
715 725 b'both renamed from %s' % fa,
716 726 )
717 727 else:
718 actions[f] = (
728 mresult.addfile(
729 f,
719 730 mergestatemod.ACTION_MERGE,
720 731 (f, f, None, False, pa.node()),
721 732 b'both created',
722 733 )
723 734 else:
724 735 a = ma[f]
725 736 fla = ma.flags(f)
726 737 nol = b'l' not in fl1 + fl2 + fla
727 738 if n2 == a and fl2 == fla:
728 actions[f] = (
729 mergestatemod.ACTION_KEEP,
730 (),
731 b'remote unchanged',
739 mresult.addfile(
740 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
732 741 )
733 742 elif n1 == a and fl1 == fla: # local unchanged - use remote
734 743 if n1 == n2: # optimization: keep local content
735 actions[f] = (
744 mresult.addfile(
745 f,
736 746 mergestatemod.ACTION_EXEC,
737 747 (fl2,),
738 748 b'update permissions',
739 749 )
740 750 else:
741 actions[f] = (
751 mresult.addfile(
752 f,
742 753 mergestatemod.ACTION_GET,
743 754 (fl2, False),
744 755 b'remote is newer',
745 756 )
746 757 if branchmerge:
747 758 commitinfo[f] = b'other'
748 759 elif nol and n2 == a: # remote only changed 'x'
749 actions[f] = (
760 mresult.addfile(
761 f,
750 762 mergestatemod.ACTION_EXEC,
751 763 (fl2,),
752 764 b'update permissions',
753 765 )
754 766 elif nol and n1 == a: # local only changed 'x'
755 actions[f] = (
767 mresult.addfile(
768 f,
756 769 mergestatemod.ACTION_GET,
757 770 (fl1, False),
758 771 b'remote is newer',
759 772 )
760 773 if branchmerge:
761 774 commitinfo[f] = b'other'
762 775 else: # both changed something
763 actions[f] = (
776 mresult.addfile(
777 f,
764 778 mergestatemod.ACTION_MERGE,
765 779 (f, f, f, False, pa.node()),
766 780 b'versions differ',
767 781 )
768 782 elif n1: # file exists only on local side
769 783 if f in copied2:
770 784 pass # we'll deal with it on m2 side
771 785 elif (
772 786 f in branch_copies1.movewithdir
773 787 ): # directory rename, move local
774 788 f2 = branch_copies1.movewithdir[f]
775 789 if f2 in m2:
776 actions[f2] = (
790 mresult.addfile(
791 f2,
777 792 mergestatemod.ACTION_MERGE,
778 793 (f, f2, None, True, pa.node()),
779 794 b'remote directory rename, both created',
780 795 )
781 796 else:
782 actions[f2] = (
797 mresult.addfile(
798 f2,
783 799 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
784 800 (f, fl1),
785 801 b'remote directory rename - move from %s' % f,
786 802 )
787 803 elif f in branch_copies1.copy:
788 804 f2 = branch_copies1.copy[f]
789 actions[f] = (
805 mresult.addfile(
806 f,
790 807 mergestatemod.ACTION_MERGE,
791 808 (f, f2, f2, False, pa.node()),
792 809 b'local copied/moved from %s' % f2,
793 810 )
794 811 elif f in ma: # clean, a different, no remote
795 812 if n1 != ma[f]:
796 813 if acceptremote:
797 actions[f] = (
814 mresult.addfile(
815 f,
798 816 mergestatemod.ACTION_REMOVE,
799 817 None,
800 818 b'remote delete',
801 819 )
802 820 else:
803 actions[f] = (
821 mresult.addfile(
822 f,
804 823 mergestatemod.ACTION_CHANGED_DELETED,
805 824 (f, None, f, False, pa.node()),
806 825 b'prompt changed/deleted',
807 826 )
808 827 elif n1 == addednodeid:
809 828 # This file was locally added. We should forget it instead of
810 829 # deleting it.
811 actions[f] = (
812 mergestatemod.ACTION_FORGET,
813 None,
814 b'remote deleted',
830 mresult.addfile(
831 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
815 832 )
816 833 else:
817 actions[f] = (
818 mergestatemod.ACTION_REMOVE,
819 None,
820 b'other deleted',
834 mresult.addfile(
835 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
821 836 )
822 837 elif n2: # file exists only on remote side
823 838 if f in copied1:
824 839 pass # we'll deal with it on m1 side
825 840 elif f in branch_copies2.movewithdir:
826 841 f2 = branch_copies2.movewithdir[f]
827 842 if f2 in m1:
828 actions[f2] = (
843 mresult.addfile(
844 f2,
829 845 mergestatemod.ACTION_MERGE,
830 846 (f2, f, None, False, pa.node()),
831 847 b'local directory rename, both created',
832 848 )
833 849 else:
834 actions[f2] = (
850 mresult.addfile(
851 f2,
835 852 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
836 853 (f, fl2),
837 854 b'local directory rename - get from %s' % f,
838 855 )
839 856 elif f in branch_copies2.copy:
840 857 f2 = branch_copies2.copy[f]
841 858 if f2 in m2:
842 actions[f] = (
859 mresult.addfile(
860 f,
843 861 mergestatemod.ACTION_MERGE,
844 862 (f2, f, f2, False, pa.node()),
845 863 b'remote copied from %s' % f2,
846 864 )
847 865 else:
848 actions[f] = (
866 mresult.addfile(
867 f,
849 868 mergestatemod.ACTION_MERGE,
850 869 (f2, f, f2, True, pa.node()),
851 870 b'remote moved from %s' % f2,
852 871 )
853 872 elif f not in ma:
854 873 # local unknown, remote created: the logic is described by the
855 874 # following table:
856 875 #
857 876 # force branchmerge different | action
858 877 # n * * | create
859 878 # y n * | create
860 879 # y y n | create
861 880 # y y y | merge
862 881 #
863 882 # Checking whether the files are different is expensive, so we
864 883 # don't do that when we can avoid it.
865 884 if not force:
866 actions[f] = (
885 mresult.addfile(
886 f,
867 887 mergestatemod.ACTION_CREATED,
868 888 (fl2,),
869 889 b'remote created',
870 890 )
871 891 elif not branchmerge:
872 actions[f] = (
892 mresult.addfile(
893 f,
873 894 mergestatemod.ACTION_CREATED,
874 895 (fl2,),
875 896 b'remote created',
876 897 )
877 898 else:
878 actions[f] = (
899 mresult.addfile(
900 f,
879 901 mergestatemod.ACTION_CREATED_MERGE,
880 902 (fl2, pa.node()),
881 903 b'remote created, get or merge',
882 904 )
883 905 elif n2 != ma[f]:
884 906 df = None
885 907 for d in branch_copies1.dirmove:
886 908 if f.startswith(d):
887 909 # new file added in a directory that was moved
888 910 df = branch_copies1.dirmove[d] + f[len(d) :]
889 911 break
890 912 if df is not None and df in m1:
891 actions[df] = (
913 mresult.addfile(
914 df,
892 915 mergestatemod.ACTION_MERGE,
893 916 (df, f, f, False, pa.node()),
894 917 b'local directory rename - respect move '
895 918 b'from %s' % f,
896 919 )
897 920 elif acceptremote:
898 actions[f] = (
921 mresult.addfile(
922 f,
899 923 mergestatemod.ACTION_CREATED,
900 924 (fl2,),
901 925 b'remote recreating',
902 926 )
903 927 else:
904 actions[f] = (
928 mresult.addfile(
929 f,
905 930 mergestatemod.ACTION_DELETED_CHANGED,
906 931 (None, f, f, False, pa.node()),
907 932 b'prompt deleted/changed',
908 933 )
909 934
910 935 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
911 936 # If we are merging, look for path conflicts.
912 checkpathconflicts(repo, wctx, p2, actions)
937 checkpathconflicts(repo, wctx, p2, mresult.actions)
913 938
914 939 narrowmatch = repo.narrowmatch()
915 940 if not narrowmatch.always():
916 941 # Updates "actions" in place
917 _filternarrowactions(narrowmatch, branchmerge, actions)
942 _filternarrowactions(narrowmatch, branchmerge, mresult.actions)
918 943
919 944 renamedelete = branch_copies1.renamedelete
920 945 renamedelete.update(branch_copies2.renamedelete)
921 946
922 mresult = mergeresult()
923 mresult.updatevalues(actions, diverge, renamedelete, commitinfo)
947 mresult.updatevalues(diverge, renamedelete, commitinfo)
924 948 return mresult
925 949
926 950
927 951 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
928 952 """Resolves false conflicts where the nodeid changed but the content
929 953 remained the same."""
930 954 # We force a copy of actions.items() because we're going to mutate
931 955 # actions as we resolve trivial conflicts.
932 956 for f, (m, args, msg) in list(actions.items()):
933 957 if (
934 958 m == mergestatemod.ACTION_CHANGED_DELETED
935 959 and f in ancestor
936 960 and not wctx[f].cmp(ancestor[f])
937 961 ):
938 962 # local did change but ended up with same content
939 963 actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same'
940 964 elif (
941 965 m == mergestatemod.ACTION_DELETED_CHANGED
942 966 and f in ancestor
943 967 and not mctx[f].cmp(ancestor[f])
944 968 ):
945 969 # remote did change but ended up with same content
946 970 del actions[f] # don't get = keep local deleted
947 971
948 972
949 973 def calculateupdates(
950 974 repo,
951 975 wctx,
952 976 mctx,
953 977 ancestors,
954 978 branchmerge,
955 979 force,
956 980 acceptremote,
957 981 followcopies,
958 982 matcher=None,
959 983 mergeforce=False,
960 984 ):
961 985 """
962 986 Calculate the actions needed to merge mctx into wctx using ancestors
963 987
964 988 Uses manifestmerge() to merge the manifests and get the list of actions
965 989 required to merge the two manifests. If there are multiple ancestors, uses
966 990 bid merge if enabled.
967 991 
968 992 Also filters out actions which are not required if the repository is sparse.
969 993 
970 994 Returns a mergeresult object, the same as manifestmerge().
971 995 """
972 996 # Avoid cycle.
973 997 from . import sparse
974 998
975 999 mresult = None
976 1000 if len(ancestors) == 1: # default
977 1001 mresult = manifestmerge(
978 1002 repo,
979 1003 wctx,
980 1004 mctx,
981 1005 ancestors[0],
982 1006 branchmerge,
983 1007 force,
984 1008 matcher,
985 1009 acceptremote,
986 1010 followcopies,
987 1011 )
988 1012 _checkunknownfiles(repo, wctx, mctx, force, mresult.actions, mergeforce)
989 1013
990 1014 else: # only when merge.preferancestor=* - the default
991 1015 repo.ui.note(
992 1016 _(b"note: merging %s and %s using bids from ancestors %s\n")
993 1017 % (
994 1018 wctx,
995 1019 mctx,
996 1020 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
997 1021 )
998 1022 )
999 1023
1000 1024 # mapping filename to bids (action method to list of actions)
1001 1025 # {FILENAME1 : BID1, FILENAME2 : BID2}
1002 1026 # BID is another dictionary which contains
1003 1027 # mapping of following form:
1004 1028 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
1005 1029 fbids = {}
1006 1030 diverge, renamedelete = None, None
1007 1031 for ancestor in ancestors:
1008 1032 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1009 1033 mresult1 = manifestmerge(
1010 1034 repo,
1011 1035 wctx,
1012 1036 mctx,
1013 1037 ancestor,
1014 1038 branchmerge,
1015 1039 force,
1016 1040 matcher,
1017 1041 acceptremote,
1018 1042 followcopies,
1019 1043 forcefulldiff=True,
1020 1044 )
1021 1045 _checkunknownfiles(
1022 1046 repo, wctx, mctx, force, mresult1.actions, mergeforce
1023 1047 )
1024 1048
1025 1049 # Track the shortest set of warnings on the theory that bid
1026 1050 # merge will correctly incorporate more information
1027 1051 if diverge is None or len(mresult1.diverge) < len(diverge):
1028 1052 diverge = mresult1.diverge
1029 1053 if renamedelete is None or len(renamedelete) < len(
1030 1054 mresult1.renamedelete
1031 1055 ):
1032 1056 renamedelete = mresult1.renamedelete
1033 1057
1034 1058 for f, a in sorted(pycompat.iteritems(mresult1.actions)):
1035 1059 m, args, msg = a
1036 1060 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1037 1061 if f in fbids:
1038 1062 d = fbids[f]
1039 1063 if m in d:
1040 1064 d[m].append(a)
1041 1065 else:
1042 1066 d[m] = [a]
1043 1067 else:
1044 1068 fbids[f] = {m: [a]}
1045 1069
1046 1070 # Call for bids
1047 1071 # Pick the best bid for each file
1048 1072 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1049 actions = {}
1073 mresult = mergeresult()
1050 1074 for f, bids in sorted(fbids.items()):
1051 1075 # bids is a mapping from action method to list of actions
1052 1076 # Consensus?
1053 1077 if len(bids) == 1: # all bids are the same kind of method
1054 1078 m, l = list(bids.items())[0]
1055 1079 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1056 1080 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1057 actions[f] = l[0]
1081 mresult.addfile(f, *l[0])
1058 1082 continue
1059 1083 # If keep is an option, just do it.
1060 1084 if mergestatemod.ACTION_KEEP in bids:
1061 1085 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1062 actions[f] = bids[mergestatemod.ACTION_KEEP][0]
1086 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1063 1087 continue
1064 1088 # If there are gets and they all agree [how could they not?], do it.
1065 1089 if mergestatemod.ACTION_GET in bids:
1066 1090 ga0 = bids[mergestatemod.ACTION_GET][0]
1067 1091 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1068 1092 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1069 actions[f] = ga0
1093 mresult.addfile(f, *ga0)
1070 1094 continue
1071 1095 # TODO: Consider other simple actions such as mode changes
1072 1096 # Handle inefficient democrazy.
1073 1097 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1074 1098 for m, l in sorted(bids.items()):
1075 1099 for _f, args, msg in l:
1076 1100 repo.ui.note(b' %s -> %s\n' % (msg, m))
1077 1101 # Pick random action. TODO: Instead, prompt user when resolving
1078 1102 m, l = list(bids.items())[0]
1079 1103 repo.ui.warn(
1080 1104 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1081 1105 )
1082 actions[f] = l[0]
1106 mresult.addfile(f, *l[0])
1083 1107 continue
1084 1108 repo.ui.note(_(b'end of auction\n\n'))
1085 1109 # TODO: think about commitinfo when bid merge is used
1086 mresult = mergeresult()
1087 mresult.updatevalues(actions, diverge, renamedelete, {})
1110 mresult.updatevalues(diverge, renamedelete, {})
1088 1111
1089 1112 if wctx.rev() is None:
1090 1113 fractions = _forgetremoved(wctx, mctx, branchmerge)
1091 1114 mresult.actions.update(fractions)
1092 1115
1093 1116 prunedactions = sparse.filterupdatesactions(
1094 1117 repo, wctx, mctx, branchmerge, mresult.actions
1095 1118 )
1096 1119 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult.actions)
1097 1120
1098 1121 mresult.setactions(prunedactions)
1099 1122 return mresult
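
# Rough sketch of how a caller consumes the mergeresult returned above; the
# argument values are placeholders and the real call sites (merge.update()
# and friends) are outside this hunk:
#
#     mresult = calculateupdates(
#         repo, wctx, mctx, ancestors, branchmerge, force,
#         acceptremote, followcopies, matcher=matcher,
#     )
#     if mresult.hasconflicts():
#         pass  # e.g. an in-memory merge may choose to bail out here
#     counts, filedata = applyupdates(
#         repo, mresult.actionsdict, wctx, mctx, overwrite=False,
#         wantfiledata=True, labels=labels, commitinfo=mresult.commitinfo,
#     )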
1100 1123
1101 1124
1102 1125 def _getcwd():
1103 1126 try:
1104 1127 return encoding.getcwd()
1105 1128 except OSError as err:
1106 1129 if err.errno == errno.ENOENT:
1107 1130 return None
1108 1131 raise
1109 1132
1110 1133
1111 1134 def batchremove(repo, wctx, actions):
1112 1135 """apply removes to the working directory
1113 1136
1114 1137 yields tuples for progress updates
1115 1138 """
1116 1139 verbose = repo.ui.verbose
1117 1140 cwd = _getcwd()
1118 1141 i = 0
1119 1142 for f, args, msg in actions:
1120 1143 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1121 1144 if verbose:
1122 1145 repo.ui.note(_(b"removing %s\n") % f)
1123 1146 wctx[f].audit()
1124 1147 try:
1125 1148 wctx[f].remove(ignoremissing=True)
1126 1149 except OSError as inst:
1127 1150 repo.ui.warn(
1128 1151 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1129 1152 )
1130 1153 if i == 100:
1131 1154 yield i, f
1132 1155 i = 0
1133 1156 i += 1
1134 1157 if i > 0:
1135 1158 yield i, f
1136 1159
1137 1160 if cwd and not _getcwd():
1138 1161 # cwd was removed in the course of removing files; print a helpful
1139 1162 # warning.
1140 1163 repo.ui.warn(
1141 1164 _(
1142 1165 b"current directory was removed\n"
1143 1166 b"(consider changing to repo root: %s)\n"
1144 1167 )
1145 1168 % repo.root
1146 1169 )
1147 1170
1148 1171
1149 1172 def batchget(repo, mctx, wctx, wantfiledata, actions):
1150 1173 """apply gets to the working directory
1151 1174
1152 1175 mctx is the context to get from
1153 1176
1154 1177 Yields arbitrarily many (False, tuple) for progress updates, followed by
1155 1178 exactly one (True, filedata). When wantfiledata is false, filedata is an
1156 1179 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1157 1180 mtime) of the file f written for each action.
1158 1181 """
1159 1182 filedata = {}
1160 1183 verbose = repo.ui.verbose
1161 1184 fctx = mctx.filectx
1162 1185 ui = repo.ui
1163 1186 i = 0
1164 1187 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1165 1188 for f, (flags, backup), msg in actions:
1166 1189 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1167 1190 if verbose:
1168 1191 repo.ui.note(_(b"getting %s\n") % f)
1169 1192
1170 1193 if backup:
1171 1194 # If a file or directory exists with the same name, back that
1172 1195 # up. Otherwise, look to see if there is a file that conflicts
1173 1196 # with a directory this file is in, and if so, back that up.
1174 1197 conflicting = f
1175 1198 if not repo.wvfs.lexists(f):
1176 1199 for p in pathutil.finddirs(f):
1177 1200 if repo.wvfs.isfileorlink(p):
1178 1201 conflicting = p
1179 1202 break
1180 1203 if repo.wvfs.lexists(conflicting):
1181 1204 orig = scmutil.backuppath(ui, repo, conflicting)
1182 1205 util.rename(repo.wjoin(conflicting), orig)
1183 1206 wfctx = wctx[f]
1184 1207 wfctx.clearunknown()
1185 1208 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1186 1209 size = wfctx.write(
1187 1210 fctx(f).data(),
1188 1211 flags,
1189 1212 backgroundclose=True,
1190 1213 atomictemp=atomictemp,
1191 1214 )
1192 1215 if wantfiledata:
1193 1216 s = wfctx.lstat()
1194 1217 mode = s.st_mode
1195 1218 mtime = s[stat.ST_MTIME]
1196 1219 filedata[f] = (mode, size, mtime) # for dirstate.normal
1197 1220 if i == 100:
1198 1221 yield False, (i, f)
1199 1222 i = 0
1200 1223 i += 1
1201 1224 if i > 0:
1202 1225 yield False, (i, f)
1203 1226 yield True, filedata
1204 1227
1205 1228
1206 1229 def _prefetchfiles(repo, ctx, actions):
1207 1230 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1208 1231 of merge actions. ``ctx`` is the context being merged in."""
1209 1232
1210 1233 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1211 1234 # don't touch the context to be merged in. 'cd' is skipped, because
1212 1235 # changed/deleted never resolves to something from the remote side.
1213 1236 oplist = [
1214 1237 actions[a]
1215 1238 for a in (
1216 1239 mergestatemod.ACTION_GET,
1217 1240 mergestatemod.ACTION_DELETED_CHANGED,
1218 1241 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1219 1242 mergestatemod.ACTION_MERGE,
1220 1243 )
1221 1244 ]
1222 1245 prefetch = scmutil.prefetchfiles
1223 1246 matchfiles = scmutil.matchfiles
1224 1247 prefetch(
1225 1248 repo,
1226 1249 [
1227 1250 (
1228 1251 ctx.rev(),
1229 1252 matchfiles(
1230 1253 repo, [f for sublist in oplist for f, args, msg in sublist]
1231 1254 ),
1232 1255 )
1233 1256 ],
1234 1257 )
1235 1258
1236 1259
1237 1260 @attr.s(frozen=True)
1238 1261 class updateresult(object):
1239 1262 updatedcount = attr.ib()
1240 1263 mergedcount = attr.ib()
1241 1264 removedcount = attr.ib()
1242 1265 unresolvedcount = attr.ib()
1243 1266
1244 1267 def isempty(self):
1245 1268 return not (
1246 1269 self.updatedcount
1247 1270 or self.mergedcount
1248 1271 or self.removedcount
1249 1272 or self.unresolvedcount
1250 1273 )
1251 1274
1252 1275
1253 1276 def emptyactions():
1254 1277 """create an actions dict, to be populated and passed to applyupdates()"""
1255 1278 return {
1256 1279 m: []
1257 1280 for m in (
1258 1281 mergestatemod.ACTION_ADD,
1259 1282 mergestatemod.ACTION_ADD_MODIFIED,
1260 1283 mergestatemod.ACTION_FORGET,
1261 1284 mergestatemod.ACTION_GET,
1262 1285 mergestatemod.ACTION_CHANGED_DELETED,
1263 1286 mergestatemod.ACTION_DELETED_CHANGED,
1264 1287 mergestatemod.ACTION_REMOVE,
1265 1288 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1266 1289 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1267 1290 mergestatemod.ACTION_MERGE,
1268 1291 mergestatemod.ACTION_EXEC,
1269 1292 mergestatemod.ACTION_KEEP,
1270 1293 mergestatemod.ACTION_PATH_CONFLICT,
1271 1294 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1272 1295 )
1273 1296 }
1274 1297
1275 1298
1276 1299 def applyupdates(
1277 1300 repo,
1278 1301 actions,
1279 1302 wctx,
1280 1303 mctx,
1281 1304 overwrite,
1282 1305 wantfiledata,
1283 1306 labels=None,
1284 1307 commitinfo=None,
1285 1308 ):
1286 1309 """apply the merge action list to the working directory
1287 1310
1288 1311 wctx is the working copy context
1289 1312 mctx is the context to be merged into the working copy
1290 1313 commitinfo is a mapping of information which needs to be stored somewhere
1291 1314 (probably mergestate) so that it can be used at commit time.
1292 1315
1293 1316 Return a tuple of (counts, filedata), where counts is a tuple
1294 1317 (updated, merged, removed, unresolved) that describes how many
1295 1318 files were affected by the update, and filedata is as described in
1296 1319 batchget.
1297 1320 """
1298 1321
1299 1322 _prefetchfiles(repo, mctx, actions)
1300 1323
1301 1324 updated, merged, removed = 0, 0, 0
1302 1325 ms = mergestatemod.mergestate.clean(
1303 1326 repo, wctx.p1().node(), mctx.node(), labels
1304 1327 )
1305 1328
1306 1329 if commitinfo is None:
1307 1330 commitinfo = {}
1308 1331
1309 1332 for f, op in pycompat.iteritems(commitinfo):
1310 1333 # the other side of filenode was chosen while merging, store this in
1311 1334 # mergestate so that it can be reused on commit
1312 1335 if op == b'other':
1313 1336 ms.addmergedother(f)
1314 1337
1315 1338 moves = []
1316 1339 for m, l in actions.items():
1317 1340 l.sort()
1318 1341
1319 1342 # 'cd' and 'dc' actions are treated like other merge conflicts
1320 1343 mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED])
1321 1344 mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED]))
1322 1345 mergeactions.extend(actions[mergestatemod.ACTION_MERGE])
1323 1346 for f, args, msg in mergeactions:
1324 1347 f1, f2, fa, move, anc = args
1325 1348 if f == b'.hgsubstate': # merged internally
1326 1349 continue
1327 1350 if f1 is None:
1328 1351 fcl = filemerge.absentfilectx(wctx, fa)
1329 1352 else:
1330 1353 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1331 1354 fcl = wctx[f1]
1332 1355 if f2 is None:
1333 1356 fco = filemerge.absentfilectx(mctx, fa)
1334 1357 else:
1335 1358 fco = mctx[f2]
1336 1359 actx = repo[anc]
1337 1360 if fa in actx:
1338 1361 fca = actx[fa]
1339 1362 else:
1340 1363 # TODO: move to absentfilectx
1341 1364 fca = repo.filectx(f1, fileid=nullrev)
1342 1365 ms.add(fcl, fco, fca, f)
1343 1366 if f1 != f and move:
1344 1367 moves.append(f1)
1345 1368
1346 1369 # remove renamed files after safely stored
1347 1370 for f in moves:
1348 1371 if wctx[f].lexists():
1349 1372 repo.ui.debug(b"removing %s\n" % f)
1350 1373 wctx[f].audit()
1351 1374 wctx[f].remove()
1352 1375
1353 1376 numupdates = sum(
1354 1377 len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP
1355 1378 )
1356 1379 progress = repo.ui.makeprogress(
1357 1380 _(b'updating'), unit=_(b'files'), total=numupdates
1358 1381 )
1359 1382
1360 1383 if [
1361 1384 a
1362 1385 for a in actions[mergestatemod.ACTION_REMOVE]
1363 1386 if a[0] == b'.hgsubstate'
1364 1387 ]:
1365 1388 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1366 1389
1367 1390 # record path conflicts
1368 1391 for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]:
1369 1392 f1, fo = args
1370 1393 s = repo.ui.status
1371 1394 s(
1372 1395 _(
1373 1396 b"%s: path conflict - a file or link has the same name as a "
1374 1397 b"directory\n"
1375 1398 )
1376 1399 % f
1377 1400 )
1378 1401 if fo == b'l':
1379 1402 s(_(b"the local file has been renamed to %s\n") % f1)
1380 1403 else:
1381 1404 s(_(b"the remote file has been renamed to %s\n") % f1)
1382 1405 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1383 1406 ms.addpathconflict(f, f1, fo)
1384 1407 progress.increment(item=f)
1385 1408
1386 1409 # When merging in-memory, we can't support worker processes, so set the
1387 1410 # per-item cost at 0 in that case.
1388 1411 cost = 0 if wctx.isinmemory() else 0.001
1389 1412
1390 1413 # remove in parallel (must come before resolving path conflicts and getting)
1391 1414 prog = worker.worker(
1392 1415 repo.ui,
1393 1416 cost,
1394 1417 batchremove,
1395 1418 (repo, wctx),
1396 1419 actions[mergestatemod.ACTION_REMOVE],
1397 1420 )
1398 1421 for i, item in prog:
1399 1422 progress.increment(step=i, item=item)
1400 1423 removed = len(actions[mergestatemod.ACTION_REMOVE])
1401 1424
1402 1425 # resolve path conflicts (must come before getting)
1403 1426 for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]:
1404 1427 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1405 1428 (f0, origf0) = args
1406 1429 if wctx[f0].lexists():
1407 1430 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1408 1431 wctx[f].audit()
1409 1432 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1410 1433 wctx[f0].remove()
1411 1434 progress.increment(item=f)
1412 1435
1413 1436 # get in parallel.
1414 1437 threadsafe = repo.ui.configbool(
1415 1438 b'experimental', b'worker.wdir-get-thread-safe'
1416 1439 )
1417 1440 prog = worker.worker(
1418 1441 repo.ui,
1419 1442 cost,
1420 1443 batchget,
1421 1444 (repo, mctx, wctx, wantfiledata),
1422 1445 actions[mergestatemod.ACTION_GET],
1423 1446 threadsafe=threadsafe,
1424 1447 hasretval=True,
1425 1448 )
1426 1449 getfiledata = {}
1427 1450 for final, res in prog:
1428 1451 if final:
1429 1452 getfiledata = res
1430 1453 else:
1431 1454 i, item = res
1432 1455 progress.increment(step=i, item=item)
1433 1456 updated = len(actions[mergestatemod.ACTION_GET])
1434 1457
1435 1458 if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']:
1436 1459 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1437 1460
1438 1461 # forget (manifest only, just log it) (must come first)
1439 1462 for f, args, msg in actions[mergestatemod.ACTION_FORGET]:
1440 1463 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1441 1464 progress.increment(item=f)
1442 1465
1443 1466 # re-add (manifest only, just log it)
1444 1467 for f, args, msg in actions[mergestatemod.ACTION_ADD]:
1445 1468 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1446 1469 progress.increment(item=f)
1447 1470
1448 1471 # re-add/mark as modified (manifest only, just log it)
1449 1472 for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]:
1450 1473 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1451 1474 progress.increment(item=f)
1452 1475
1453 1476 # keep (noop, just log it)
1454 1477 for f, args, msg in actions[mergestatemod.ACTION_KEEP]:
1455 1478 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1456 1479 # no progress
1457 1480
1458 1481 # directory rename, move local
1459 1482 for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
1460 1483 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1461 1484 progress.increment(item=f)
1462 1485 f0, flags = args
1463 1486 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1464 1487 wctx[f].audit()
1465 1488 wctx[f].write(wctx.filectx(f0).data(), flags)
1466 1489 wctx[f0].remove()
1467 1490 updated += 1
1468 1491
1469 1492 # local directory rename, get
1470 1493 for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
1471 1494 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1472 1495 progress.increment(item=f)
1473 1496 f0, flags = args
1474 1497 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1475 1498 wctx[f].write(mctx.filectx(f0).data(), flags)
1476 1499 updated += 1
1477 1500
1478 1501 # exec
1479 1502 for f, args, msg in actions[mergestatemod.ACTION_EXEC]:
1480 1503 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1481 1504 progress.increment(item=f)
1482 1505 (flags,) = args
1483 1506 wctx[f].audit()
1484 1507 wctx[f].setflags(b'l' in flags, b'x' in flags)
1485 1508 updated += 1
1486 1509
1487 1510 # the ordering is important here -- ms.mergedriver will raise if the merge
1488 1511 # driver has changed, and we want to be able to bypass it when overwrite is
1489 1512 # True
1490 1513 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1491 1514
1492 1515 if usemergedriver:
1493 1516 if wctx.isinmemory():
1494 1517 raise error.InMemoryMergeConflictsError(
1495 1518 b"in-memory merge does not support mergedriver"
1496 1519 )
1497 1520 ms.commit()
1498 1521 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1499 1522 # the driver might leave some files unresolved
1500 1523 unresolvedf = set(ms.unresolved())
1501 1524 if not proceed:
1502 1525 # XXX setting unresolved to at least 1 is a hack to make sure we
1503 1526 # error out
1504 1527 return updateresult(
1505 1528 updated, merged, removed, max(len(unresolvedf), 1)
1506 1529 )
1507 1530 newactions = []
1508 1531 for f, args, msg in mergeactions:
1509 1532 if f in unresolvedf:
1510 1533 newactions.append((f, args, msg))
1511 1534 mergeactions = newactions
1512 1535
1513 1536 try:
1514 1537 # premerge
1515 1538 tocomplete = []
1516 1539 for f, args, msg in mergeactions:
1517 1540 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1518 1541 progress.increment(item=f)
1519 1542 if f == b'.hgsubstate': # subrepo states need updating
1520 1543 subrepoutil.submerge(
1521 1544 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1522 1545 )
1523 1546 continue
1524 1547 wctx[f].audit()
1525 1548 complete, r = ms.preresolve(f, wctx)
1526 1549 if not complete:
1527 1550 numupdates += 1
1528 1551 tocomplete.append((f, args, msg))
1529 1552
1530 1553 # merge
1531 1554 for f, args, msg in tocomplete:
1532 1555 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1533 1556 progress.increment(item=f, total=numupdates)
1534 1557 ms.resolve(f, wctx)
1535 1558
1536 1559 finally:
1537 1560 ms.commit()
1538 1561
1539 1562 unresolved = ms.unresolvedcount()
1540 1563
1541 1564 if (
1542 1565 usemergedriver
1543 1566 and not unresolved
1544 1567 and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
1545 1568 ):
1546 1569 if not driverconclude(repo, ms, wctx, labels=labels):
1547 1570 # XXX setting unresolved to at least 1 is a hack to make sure we
1548 1571 # error out
1549 1572 unresolved = max(unresolved, 1)
1550 1573
1551 1574 ms.commit()
1552 1575
1553 1576 msupdated, msmerged, msremoved = ms.counts()
1554 1577 updated += msupdated
1555 1578 merged += msmerged
1556 1579 removed += msremoved
1557 1580
1558 1581 extraactions = ms.actions()
1559 1582 if extraactions:
1560 1583 mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]}
1561 1584 for k, acts in pycompat.iteritems(extraactions):
1562 1585 actions[k].extend(acts)
1563 1586 if k == mergestatemod.ACTION_GET and wantfiledata:
1564 1587 # no filedata until mergestate is updated to provide it
1565 1588 for a in acts:
1566 1589 getfiledata[a[0]] = None
1567 1590 # Remove these files from actions[ACTION_MERGE] as well. This is
1568 1591 # important because in recordupdates, files in actions[ACTION_MERGE]
1569 1592 # are processed after files in other actions, and the merge driver
1570 1593 # might add files to those actions via extraactions above. This can
1571 1594 # lead to a file being recorded twice, with poor results. This is
1572 1595 # especially problematic for actions[ACTION_REMOVE] (currently only
1573 1596 # possible with the merge driver in the initial merge process;
1574 1597 # interrupted merges don't go through this flow).
1575 1598 #
1576 1599 # The real fix here is to have indexes by both file and action so
1577 1600 # that when the action for a file is changed it is automatically
1578 1601 # reflected in the other action lists. But that involves a more
1579 1602 # complex data structure, so this will do for now.
1580 1603 #
1581 1604 # We don't need to do the same operation for 'dc' and 'cd' because
1582 1605 # those lists aren't consulted again.
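            # For example (hypothetical file name): if the merge driver moved
            # b'a' from ACTION_MERGE into ACTION_REMOVE via extraactions,
            # b'a' must also be dropped from mfiles here so that it is not
            # recorded twice by recordupdates.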
1583 1606 mfiles.difference_update(a[0] for a in acts)
1584 1607
1585 1608 actions[mergestatemod.ACTION_MERGE] = [
1586 1609 a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles
1587 1610 ]
1588 1611
1589 1612 progress.complete()
1590 1613 assert len(getfiledata) == (
1591 1614 len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0
1592 1615 )
1593 1616 return updateresult(updated, merged, removed, unresolved), getfiledata
1594 1617
1595 1618
1596 1619 def _advertisefsmonitor(repo, num_gets, p1node):
1597 1620 # Advertise fsmonitor when its presence could be useful.
1598 1621 #
1599 1622 # We only advertise when performing an update from an empty working
1600 1623 # directory. This typically only occurs during initial clone.
1601 1624 #
1602 1625 # We give users a mechanism to disable the warning in case it is
1603 1626 # annoying.
1604 1627 #
1605 1628 # We only do this on Linux and MacOS because that's where fsmonitor is
1606 1629 # considered stable.
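    #
    # A hypothetical hgrc sketch (values illustrative, not defaults) showing
    # the two knobs consulted below:
    #
    #   [fsmonitor]
    #   warn_when_unused = False
    #   warn_update_file_count = 50000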
1607 1630 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1608 1631 fsmonitorthreshold = repo.ui.configint(
1609 1632 b'fsmonitor', b'warn_update_file_count'
1610 1633 )
1611 1634 try:
1612 1635 # avoid cycle: extensions -> cmdutil -> merge
1613 1636 from . import extensions
1614 1637
1615 1638 extensions.find(b'fsmonitor')
1616 1639 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1617 1640 # We intentionally don't look at whether fsmonitor has disabled
1618 1641 # itself because a) fsmonitor may have already printed a warning
1619 1642 # b) we only care about the config state here.
1620 1643 except KeyError:
1621 1644 fsmonitorenabled = False
1622 1645
1623 1646 if (
1624 1647 fsmonitorwarning
1625 1648 and not fsmonitorenabled
1626 1649 and p1node == nullid
1627 1650 and num_gets >= fsmonitorthreshold
1628 1651 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1629 1652 ):
1630 1653 repo.ui.warn(
1631 1654 _(
1632 1655 b'(warning: large working directory being used without '
1633 1656 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1634 1657 b'see "hg help -e fsmonitor")\n'
1635 1658 )
1636 1659 )
1637 1660
1638 1661
1639 1662 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1640 1663 UPDATECHECK_NONE = b'none'
1641 1664 UPDATECHECK_LINEAR = b'linear'
1642 1665 UPDATECHECK_NO_CONFLICT = b'noconflict'
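# These constants correspond to the values of the experimental.updatecheck
# config option referenced in update() below (for example,
# ``updatecheck=noconflict`` maps to UPDATECHECK_NO_CONFLICT).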
1643 1666
1644 1667
1645 1668 def update(
1646 1669 repo,
1647 1670 node,
1648 1671 branchmerge,
1649 1672 force,
1650 1673 ancestor=None,
1651 1674 mergeancestor=False,
1652 1675 labels=None,
1653 1676 matcher=None,
1654 1677 mergeforce=False,
1655 1678 updatedirstate=True,
1656 1679 updatecheck=None,
1657 1680 wc=None,
1658 1681 ):
1659 1682 """
1660 1683 Perform a merge between the working directory and the given node
1661 1684
1662 1685 node = the node to update to
1663 1686 branchmerge = whether to merge between branches
1664 1687 force = whether to force branch merging or file overwriting
1665 1688 matcher = a matcher to filter file lists (dirstate not updated)
1666 1689 mergeancestor = whether it is merging with an ancestor. If true,
1667 1690 we should accept the incoming changes for any prompts that occur.
1668 1691 If false, merging with an ancestor (fast-forward) is only allowed
1669 1692 between different named branches. This flag is used by the rebase extension
1670 1693 as a temporary fix and should be avoided in general.
1671 1694 labels = labels to use for base, local and other
1672 1695 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1673 1696 this is True, then 'force' should be True as well.
1674 1697
1675 1698 The table below shows all the behaviors of the update command given the
1676 1699 -c/--check and -C/--clean or no options, whether the working directory is
1677 1700 dirty, whether a revision is specified, and the relationship of the parent
1678 1701 rev to the target rev (linear or not). Match from top first. The -n
1679 1702 option doesn't exist on the command line, but represents the
1680 1703 experimental.updatecheck=noconflict option.
1681 1704
1682 1705 This logic is tested by test-update-branches.t.
1683 1706
1684 1707 -c -C -n -m dirty rev linear | result
1685 1708 y y * * * * * | (1)
1686 1709 y * y * * * * | (1)
1687 1710 y * * y * * * | (1)
1688 1711 * y y * * * * | (1)
1689 1712 * y * y * * * | (1)
1690 1713 * * y y * * * | (1)
1691 1714 * * * * * n n | x
1692 1715 * * * * n * * | ok
1693 1716 n n n n y * y | merge
1694 1717 n n n n y y n | (2)
1695 1718 n n n y y * * | merge
1696 1719 n n y n y * * | merge if no conflict
1697 1720 n y n n y * * | discard
1698 1721 y n n n y * * | (3)
1699 1722
1700 1723 x = can't happen
1701 1724 * = don't-care
1702 1725 1 = incompatible options (checked in commands.py)
1703 1726 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1704 1727 3 = abort: uncommitted changes (checked in commands.py)
1705 1728
1706 1729 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1707 1730 to repo[None] if None is passed.
1708 1731
1709 1732 Return an updateresult object (the stats part of applyupdates()'s return).
1710 1733 """
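    # A hedged usage sketch (hypothetical caller, not part of this change):
    #
    #   stats = update(repo, node, branchmerge=False, force=False,
    #                  updatecheck=UPDATECHECK_NO_CONFLICT)
    #   if stats.unresolvedcount:
    #       ...  # surface the conflicts to the caller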
1711 1734 # Avoid cycle.
1712 1735 from . import sparse
1713 1736
1714 1737 # This function used to find the default destination if node was None, but
1715 1738 # that's now in destutil.py.
1716 1739 assert node is not None
1717 1740 if not branchmerge and not force:
1718 1741 # TODO: remove the default once all callers that pass branchmerge=False
1719 1742 # and force=False pass a value for updatecheck. We may want to allow
1720 1743 # updatecheck='abort' to better support some of these callers.
1721 1744 if updatecheck is None:
1722 1745 updatecheck = UPDATECHECK_LINEAR
1723 1746 if updatecheck not in (
1724 1747 UPDATECHECK_NONE,
1725 1748 UPDATECHECK_LINEAR,
1726 1749 UPDATECHECK_NO_CONFLICT,
1727 1750 ):
1728 1751 raise ValueError(
1729 1752 r'Invalid updatecheck %r (can accept %r)'
1730 1753 % (
1731 1754 updatecheck,
1732 1755 (
1733 1756 UPDATECHECK_NONE,
1734 1757 UPDATECHECK_LINEAR,
1735 1758 UPDATECHECK_NO_CONFLICT,
1736 1759 ),
1737 1760 )
1738 1761 )
1739 1762 if wc is not None and wc.isinmemory():
1740 1763 maybe_wlock = util.nullcontextmanager()
1741 1764 else:
1742 1765 maybe_wlock = repo.wlock()
1743 1766 with maybe_wlock:
1744 1767 if wc is None:
1745 1768 wc = repo[None]
1746 1769 pl = wc.parents()
1747 1770 p1 = pl[0]
1748 1771 p2 = repo[node]
1749 1772 if ancestor is not None:
1750 1773 pas = [repo[ancestor]]
1751 1774 else:
1752 1775 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1753 1776 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1754 1777 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1755 1778 else:
1756 1779 pas = [p1.ancestor(p2, warn=branchmerge)]
1757 1780
1758 1781 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1759 1782
1760 1783 overwrite = force and not branchmerge
1761 1784 ### check phase
1762 1785 if not overwrite:
1763 1786 if len(pl) > 1:
1764 1787 raise error.Abort(_(b"outstanding uncommitted merge"))
1765 1788 ms = mergestatemod.mergestate.read(repo)
1766 1789 if list(ms.unresolved()):
1767 1790 raise error.Abort(
1768 1791 _(b"outstanding merge conflicts"),
1769 1792 hint=_(b"use 'hg resolve' to resolve"),
1770 1793 )
1771 1794 if branchmerge:
1772 1795 if pas == [p2]:
1773 1796 raise error.Abort(
1774 1797 _(
1775 1798 b"merging with a working directory ancestor"
1776 1799 b" has no effect"
1777 1800 )
1778 1801 )
1779 1802 elif pas == [p1]:
1780 1803 if not mergeancestor and wc.branch() == p2.branch():
1781 1804 raise error.Abort(
1782 1805 _(b"nothing to merge"),
1783 1806 hint=_(b"use 'hg update' or check 'hg heads'"),
1784 1807 )
1785 1808 if not force and (wc.files() or wc.deleted()):
1786 1809 raise error.Abort(
1787 1810 _(b"uncommitted changes"),
1788 1811 hint=_(b"use 'hg status' to list changes"),
1789 1812 )
1790 1813 if not wc.isinmemory():
1791 1814 for s in sorted(wc.substate):
1792 1815 wc.sub(s).bailifchanged()
1793 1816
1794 1817 elif not overwrite:
1795 1818 if p1 == p2: # no-op update
1796 1819 # call the hooks and exit early
1797 1820 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1798 1821 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1799 1822 return updateresult(0, 0, 0, 0)
1800 1823
1801 1824 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1802 1825 [p1],
1803 1826 [p2],
1804 1827 ): # nonlinear
1805 1828 dirty = wc.dirty(missing=True)
1806 1829 if dirty:
1807 1830 # The branching here is a bit unusual, to keep the number of
1808 1831 # calls to obsutil.foreground to a minimum.
1809 1832 foreground = obsutil.foreground(repo, [p1.node()])
1810 1833 # note: the <node> variable contains a random identifier
1811 1834 if repo[node].node() in foreground:
1812 1835 pass # allow updating to successors
1813 1836 else:
1814 1837 msg = _(b"uncommitted changes")
1815 1838 hint = _(b"commit or update --clean to discard changes")
1816 1839 raise error.UpdateAbort(msg, hint=hint)
1817 1840 else:
1818 1841 # Allow jumping branches if clean and specific rev given
1819 1842 pass
1820 1843
1821 1844 if overwrite:
1822 1845 pas = [wc]
1823 1846 elif not branchmerge:
1824 1847 pas = [p1]
1825 1848
1826 1849 # deprecated config: merge.followcopies
1827 1850 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1828 1851 if overwrite:
1829 1852 followcopies = False
1830 1853 elif not pas[0]:
1831 1854 followcopies = False
1832 1855 if not branchmerge and not wc.dirty(missing=True):
1833 1856 followcopies = False
1834 1857
1835 1858 ### calculate phase
1836 1859 mresult = calculateupdates(
1837 1860 repo,
1838 1861 wc,
1839 1862 p2,
1840 1863 pas,
1841 1864 branchmerge,
1842 1865 force,
1843 1866 mergeancestor,
1844 1867 followcopies,
1845 1868 matcher=matcher,
1846 1869 mergeforce=mergeforce,
1847 1870 )
1848 1871
1849 1872 if updatecheck == UPDATECHECK_NO_CONFLICT:
1850 1873 if mresult.hasconflicts():
1851 1874 msg = _(b"conflicting changes")
1852 1875 hint = _(b"commit or update --clean to discard changes")
1853 1876 raise error.Abort(msg, hint=hint)
1854 1877
1855 1878 # Prompt and create actions. Most of this is in the resolve phase
1856 1879 # already, but we can't handle .hgsubstate in filemerge or
1857 1880 # subrepoutil.submerge yet so we have to keep prompting for it.
1858 1881 if b'.hgsubstate' in mresult.actions:
1859 1882 f = b'.hgsubstate'
1860 1883 m, args, msg = mresult.actions[f]
1861 1884 prompts = filemerge.partextras(labels)
1862 1885 prompts[b'f'] = f
1863 1886 if m == mergestatemod.ACTION_CHANGED_DELETED:
1864 1887 if repo.ui.promptchoice(
1865 1888 _(
1866 1889 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1867 1890 b"use (c)hanged version or (d)elete?"
1868 1891 b"$$ &Changed $$ &Delete"
1869 1892 )
1870 1893 % prompts,
1871 1894 0,
1872 1895 ):
1873 mresult.actions[f] = (
1874 mergestatemod.ACTION_REMOVE,
1875 None,
1876 b'prompt delete',
1896 mresult.addfile(
1897 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1877 1898 )
1878 1899 elif f in p1:
1879 mresult.actions[f] = (
1900 mresult.addfile(
1901 f,
1880 1902 mergestatemod.ACTION_ADD_MODIFIED,
1881 1903 None,
1882 1904 b'prompt keep',
1883 1905 )
1884 1906 else:
1885 mresult.actions[f] = (
1886 mergestatemod.ACTION_ADD,
1887 None,
1888 b'prompt keep',
1907 mresult.addfile(
1908 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
1889 1909 )
1890 1910 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1891 1911 f1, f2, fa, move, anc = args
1892 1912 flags = p2[f2].flags()
1893 1913 if (
1894 1914 repo.ui.promptchoice(
1895 1915 _(
1896 1916 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1897 1917 b"use (c)hanged version or leave (d)eleted?"
1898 1918 b"$$ &Changed $$ &Deleted"
1899 1919 )
1900 1920 % prompts,
1901 1921 0,
1902 1922 )
1903 1923 == 0
1904 1924 ):
1905 mresult.actions[f] = (
1925 mresult.addfile(
1926 f,
1906 1927 mergestatemod.ACTION_GET,
1907 1928 (flags, False),
1908 1929 b'prompt recreating',
1909 1930 )
1910 1931 else:
1911 1932 del mresult.actions[f]
1912 1933
1913 1934 # Convert to dictionary-of-lists format
1914 1935 actions = mresult.actionsdict
1915 1936
1916 1937 if not util.fscasesensitive(repo.path):
1917 1938 # check collision between files only in p2 for clean update
1918 1939 if not branchmerge and (
1919 1940 force or not wc.dirty(missing=True, branch=False)
1920 1941 ):
1921 1942 _checkcollision(repo, p2.manifest(), None)
1922 1943 else:
1923 1944 _checkcollision(repo, wc.manifest(), actions)
1924 1945
1925 1946 # divergent renames
1926 1947 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
1927 1948 repo.ui.warn(
1928 1949 _(
1929 1950 b"note: possible conflict - %s was renamed "
1930 1951 b"multiple times to:\n"
1931 1952 )
1932 1953 % f
1933 1954 )
1934 1955 for nf in sorted(fl):
1935 1956 repo.ui.warn(b" %s\n" % nf)
1936 1957
1937 1958 # rename and delete
1938 1959 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
1939 1960 repo.ui.warn(
1940 1961 _(
1941 1962 b"note: possible conflict - %s was deleted "
1942 1963 b"and renamed to:\n"
1943 1964 )
1944 1965 % f
1945 1966 )
1946 1967 for nf in sorted(fl):
1947 1968 repo.ui.warn(b" %s\n" % nf)
1948 1969
1949 1970 ### apply phase
1950 1971 if not branchmerge: # just jump to the new rev
1951 1972 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
1952 1973 # If we're doing a partial update, we need to skip updating
1953 1974 # the dirstate.
1954 1975 always = matcher is None or matcher.always()
1955 1976 updatedirstate = updatedirstate and always and not wc.isinmemory()
1956 1977 if updatedirstate:
1957 1978 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
1958 1979 # note that we're in the middle of an update
1959 1980 repo.vfs.write(b'updatestate', p2.hex())
1960 1981
1961 1982 _advertisefsmonitor(
1962 1983 repo, len(actions[mergestatemod.ACTION_GET]), p1.node()
1963 1984 )
1964 1985
1965 1986 wantfiledata = updatedirstate and not branchmerge
1966 1987 stats, getfiledata = applyupdates(
1967 1988 repo,
1968 1989 actions,
1969 1990 wc,
1970 1991 p2,
1971 1992 overwrite,
1972 1993 wantfiledata,
1973 1994 labels=labels,
1974 1995 commitinfo=mresult.commitinfo,
1975 1996 )
1976 1997
1977 1998 if updatedirstate:
1978 1999 with repo.dirstate.parentchange():
1979 2000 repo.setparents(fp1, fp2)
1980 2001 mergestatemod.recordupdates(
1981 2002 repo, actions, branchmerge, getfiledata
1982 2003 )
1983 2004 # update completed, clear state
1984 2005 util.unlink(repo.vfs.join(b'updatestate'))
1985 2006
1986 2007 if not branchmerge:
1987 2008 repo.dirstate.setbranch(p2.branch())
1988 2009
1989 2010 # If we're updating to a location, clean up any stale temporary includes
1990 2011 # (ex: this happens during hg rebase --abort).
1991 2012 if not branchmerge:
1992 2013 sparse.prunetemporaryincludes(repo)
1993 2014
1994 2015 if updatedirstate:
1995 2016 repo.hook(
1996 2017 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
1997 2018 )
1998 2019 return stats
1999 2020
2000 2021
2001 2022 def merge(ctx, labels=None, force=False, wc=None):
2002 2023 """Merge another topological branch into the working copy.
2003 2024
2004 2025 force = whether the merge was run with 'merge --force' (deprecated)
2005 2026 """
2006 2027
2007 2028 return update(
2008 2029 ctx.repo(),
2009 2030 ctx.rev(),
2010 2031 labels=labels,
2011 2032 branchmerge=True,
2012 2033 force=force,
2013 2034 mergeforce=force,
2014 2035 wc=wc,
2015 2036 )
2016 2037
2017 2038
2018 2039 def clean_update(ctx, wc=None):
2019 2040 """Do a clean update to the given commit.
2020 2041
2021 2042 This involves updating to the commit and discarding any changes in the
2022 2043 working copy.
2023 2044 """
2024 2045 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2025 2046
2026 2047
2027 2048 def revert_to(ctx, matcher=None, wc=None):
2028 2049 """Revert the working copy to the given commit.
2029 2050
2030 2051 The working copy will keep its current parent(s) but its content will
2031 2052 be the same as in the given commit.
2032 2053 """
2033 2054
2034 2055 return update(
2035 2056 ctx.repo(),
2036 2057 ctx.rev(),
2037 2058 branchmerge=False,
2038 2059 force=True,
2039 2060 updatedirstate=False,
2040 2061 matcher=matcher,
2041 2062 wc=wc,
2042 2063 )
2043 2064
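# A minimal sketch (hypothetical helper, not part of this change) showing how
# the thin wrappers above are typically driven together.
def _example_checkout(repo, rev, discard=False):
    """Merge rev into the working copy, or discard local changes and update
    cleanly to it."""
    ctx = repo[rev]
    if discard:
        return clean_update(ctx)
    return merge(ctx)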
2044 2065
2045 2066 def graft(
2046 2067 repo,
2047 2068 ctx,
2048 2069 base=None,
2049 2070 labels=None,
2050 2071 keepparent=False,
2051 2072 keepconflictparent=False,
2052 2073 wctx=None,
2053 2074 ):
2054 2075 """Do a graft-like merge.
2055 2076
2056 2077 This is a merge where the merge ancestor is chosen such that one
2057 2078 or more changesets are grafted onto the current changeset. In
2058 2079 addition to the merge, this fixes up the dirstate to include only
2059 2080 a single parent (if keepparent is False) and tries to duplicate any
2060 2081 renames/copies appropriately.
2061 2082
2062 2083 ctx - changeset to rebase
2063 2084 base - merge base, or ctx.p1() if not specified
2064 2085 labels - merge labels eg ['local', 'graft']
2065 2086 keepparent - keep second parent if any
2066 2087 keepconflictparent - if unresolved, keep parent used for the merge
2067 2088
2068 2089 """
2069 2090 # If we're grafting a descendant onto an ancestor, be sure to pass
2070 2091 # mergeancestor=True to update. This does two things: 1) allows the merge if
2071 2092 # the destination is the same as the parent of the ctx (so we can use graft
2072 2093 # to copy commits), and 2) informs update that the incoming changes are
2073 2094 # newer than the destination so it doesn't prompt about "remote changed foo
2074 2095 # which local deleted".
2075 2096 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2076 2097 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2077 2098 wctx = wctx or repo[None]
2078 2099 pctx = wctx.p1()
2079 2100 base = base or ctx.p1()
2080 2101 mergeancestor = (
2081 2102 repo.changelog.isancestor(pctx.node(), ctx.node())
2082 2103 or pctx.rev() == base.rev()
2083 2104 )
2084 2105
2085 2106 stats = update(
2086 2107 repo,
2087 2108 ctx.node(),
2088 2109 True,
2089 2110 True,
2090 2111 base.node(),
2091 2112 mergeancestor=mergeancestor,
2092 2113 labels=labels,
2093 2114 wc=wctx,
2094 2115 )
2095 2116
2096 2117 if keepconflictparent and stats.unresolvedcount:
2097 2118 pother = ctx.node()
2098 2119 else:
2099 2120 pother = nullid
2100 2121 parents = ctx.parents()
2101 2122 if keepparent and len(parents) == 2 and base in parents:
2102 2123 parents.remove(base)
2103 2124 pother = parents[0].node()
2104 2125 # Never set both parents equal to each other
2105 2126 if pother == pctx.node():
2106 2127 pother = nullid
2107 2128
2108 2129 if wctx.isinmemory():
2109 2130 wctx.setparents(pctx.node(), pother)
2110 2131 # fix up dirstate for copies and renames
2111 2132 copies.graftcopies(wctx, ctx, base)
2112 2133 else:
2113 2134 with repo.dirstate.parentchange():
2114 2135 repo.setparents(pctx.node(), pother)
2115 2136 repo.dirstate.write(repo.currenttransaction())
2116 2137 # fix up dirstate for copies and renames
2117 2138 copies.graftcopies(wctx, ctx, base)
2118 2139 return stats
2119 2140
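# A hedged sketch (hypothetical helper, not part of this change) of a
# graft-style call, using the label convention from the docstring above.
def _example_graft_one(repo, ctx):
    stats = graft(repo, ctx, base=ctx.p1(), labels=[b'local', b'graft'])
    # leave any conflicts for the caller to resolve
    return stats.unresolvedcount == 0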
2120 2141
2121 2142 def purge(
2122 2143 repo,
2123 2144 matcher,
2124 2145 unknown=True,
2125 2146 ignored=False,
2126 2147 removeemptydirs=True,
2127 2148 removefiles=True,
2128 2149 abortonerror=False,
2129 2150 noop=False,
2130 2151 ):
2131 2152 """Purge the working directory of untracked files.
2132 2153
2133 2154 ``matcher`` is a matcher configured to scan the working directory -
2134 2155 potentially a subset.
2135 2156
2136 2157 ``unknown`` controls whether unknown files should be purged.
2137 2158
2138 2159 ``ignored`` controls whether ignored files should be purged.
2139 2160
2140 2161 ``removeemptydirs`` controls whether empty directories should be removed.
2141 2162
2142 2163 ``removefiles`` controls whether files are removed.
2143 2164
2144 2165 ``abortonerror`` causes an exception to be raised if an error occurs
2145 2166 deleting a file or directory.
2146 2167
2147 2168 ``noop`` controls whether to actually remove files. If True, nothing is
2148 2169 removed; only the would-be removals are reported.
2149 2170
2150 2171 Returns an iterable of relative paths in the working directory that were
2151 2172 or would be removed.
2152 2173 """
2153 2174
2154 2175 def remove(removefn, path):
2155 2176 try:
2156 2177 removefn(path)
2157 2178 except OSError:
2158 2179 m = _(b'%s cannot be removed') % path
2159 2180 if abortonerror:
2160 2181 raise error.Abort(m)
2161 2182 else:
2162 2183 repo.ui.warn(_(b'warning: %s\n') % m)
2163 2184
2164 2185 # There's no API to copy a matcher. So mutate the passed matcher and
2165 2186 # restore it when we're done.
2166 2187 oldtraversedir = matcher.traversedir
2167 2188
2168 2189 res = []
2169 2190
2170 2191 try:
2171 2192 if removeemptydirs:
2172 2193 directories = []
2173 2194 matcher.traversedir = directories.append
2174 2195
2175 2196 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2176 2197
2177 2198 if removefiles:
2178 2199 for f in sorted(status.unknown + status.ignored):
2179 2200 if not noop:
2180 2201 repo.ui.note(_(b'removing file %s\n') % f)
2181 2202 remove(repo.wvfs.unlink, f)
2182 2203 res.append(f)
2183 2204
2184 2205 if removeemptydirs:
2185 2206 for f in sorted(directories, reverse=True):
2186 2207 if matcher(f) and not repo.wvfs.listdir(f):
2187 2208 if not noop:
2188 2209 repo.ui.note(_(b'removing directory %s\n') % f)
2189 2210 remove(repo.wvfs.rmdir, f)
2190 2211 res.append(f)
2191 2212
2192 2213 return res
2193 2214
2194 2215 finally:
2195 2216 matcher.traversedir = oldtraversedir
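

# A minimal sketch (hypothetical helper, not part of this change): with
# noop=True, purge() only reports what it would delete, so a caller can
# preview the removals with a matcher of its choosing.
def _example_purge_preview(repo, matcher):
    return purge(repo, matcher, unknown=True, ignored=False, noop=True)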