##// END OF EJS Templates
merge: make 'cd' and 'dc' actions store the same arguments as 'm'...
Siddharth Agarwal -
r26962:fa2daf0e default
parent child Browse files
Show More
@@ -1,1424 +1,1430
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 15 archival, pathutil, revset, error
16 16 from mercurial.i18n import _
17 17
18 18 import lfutil
19 19 import lfcommands
20 20 import basestore
21 21
22 22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 23
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    def islfile(f):
        # a file is "large" iff its standin is tracked in the manifest
        return lfutil.standin(f) in manifest

    m = copy.copy(match)
    m._files = filter(islfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    basematchfn = m.matchfn
    m.matchfn = lambda f: islfile(f) and basematchfn(f)
    return m
35 35
def composenormalfilematcher(match, manifest, exclude=None):
    '''create a matcher that matches only the normal (non-large) files in
    the original matcher, optionally skipping the names in 'exclude' '''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    def isnormal(f):
        # not a standin, not a file whose standin is tracked, not excluded
        return not (lfutil.isstandin(f) or lfutil.standin(f) in manifest
                    or f in excluded)

    m = copy.copy(match)
    m._files = filter(isnormal, m._files)
    m._fileroots = set(m._files)
    m._always = False
    basematchfn = m.matchfn
    m.matchfn = lambda f: isnormal(f) and basematchfn(f)
    return m
50 50
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        # build the original matcher, then strip the largefiles out of it
        if opts is None:
            opts = {}
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    # 'oldmatch' is bound after the closure is defined, but before the
    # closure can ever run, because installmatchfn() is what publishes it
    # as the replacement scmutil.match
    oldmatch = installmatchfn(overridematch)
60 60
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    previous = scmutil.match
    # remember the replaced function on the wrapper so that
    # restorematchfn() can unwind exactly one level of patching
    f.oldmatch = previous
    scmutil.match = f
    return previous
68 68
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Fall back to the current function so this really is a no-op when
    # scmutil.match was never wrapped (no 'oldmatch' attribute), matching
    # the behavior of restorematchandpatsfn() below.  The previous
    # getattr() call without a default raised AttributeError instead,
    # contradicting the docstring.
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
76 76
def installmatchandpatsfn(f):
    '''monkey patch scmutil.matchandpats with f, returning the replaced
    function so it can later be restored'''
    previous = scmutil.matchandpats
    f.oldmatchandpats = previous
    scmutil.matchandpats = f
    return previous
82 82
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    current = scmutil.matchandpats
    # unwrap one level; if it was never wrapped, keep it as-is
    scmutil.matchandpats = getattr(current, 'oldmatchandpats', current)
92 92
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''add files matched by 'matcher' to the repository as largefiles.

    Returns an (added, bad) pair of lists of largefile names.  A file is
    added as a largefile when --large was given, when its size exceeds
    the configured minimum, or when it matches the configured largefiles
    patterns.
    '''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    # walk with a no-op bad() callback: missing files are reported later
    for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            # lfsize is in megabytes (see lfutil.getminsize)
            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # write an empty standin; the real hash is filled in at
                # commit time
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # translate standins the repo refused to add back into the
            # largefile names the caller knows about
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    finally:
        wlock.release()
    return added, bad
167 167
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    '''remove largefiles matched by 'matcher' from the working directory.

    Returns 1 if any matched file had to be skipped (still exists,
    modified, or marked for add), 0 otherwise.  With --after, only files
    already deleted from the working directory are dropped.
    '''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only the status entries whose standin is actually tracked
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        # warn about every skipped file; report whether any was skipped
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)

        if opts.get('dry_run'):
            return result

        # from here on, operate on the standins rather than the largefiles
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()
    finally:
        wlock.release()

    return result
238 238
239 239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 240 # appear at their right place in the manifests.
def decodepath(orig, path):
    '''map a standin path back to the largefile name it stands for;
    non-standin paths are returned unchanged'''
    lfpath = lfutil.splitstandin(path)
    if lfpath:
        return lfpath
    return path
243 243
244 244 # -- Wrappers: modify existing commands --------------------------------
245 245
def overrideadd(orig, ui, repo, *pats, **opts):
    '''wrapped add: refuse the contradictory --normal/--large combination,
    otherwise defer entirely to the wrapped command'''
    wantsnormal = opts.get('normal')
    if wantsnormal and opts.get('large'):
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
250 250
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    '''wrapped cmdutil.add: add the matched largefiles first, then run the
    original add on the remaining normal files'''
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
    bad.extend(lbad)
    return bad
263 263
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    '''wrapped cmdutil.remove: remove the normal files first, then the
    matched largefiles'''
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    lfresult = removelargefiles(ui, repo, False, matcher, after=after,
                                force=force)
    return lfresult or result
269 269
def overridestatusfn(orig, repo, rev2, **opts):
    '''run the wrapped subrepo status with largefiles status reporting
    enabled on the underlying repository'''
    repo._repo.lfstatus = True
    try:
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
276 276
def overridestatus(orig, ui, repo, *pats, **opts):
    '''run the wrapped status with largefiles status reporting enabled'''
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
283 283
def overridedirty(orig, repo, ignoreupdate=False):
    '''run the wrapped subrepo dirty() check with largefiles status
    reporting enabled on the underlying repository'''
    repo._repo.lfstatus = True
    try:
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
290 290
def overridelog(orig, ui, repo, *pats, **opts):
    '''wrapped log: temporarily patch scmutil.matchandpats (and cmdutil's
    no-follow file matcher) so that log also matches the standins of the
    named largefiles'''
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
                             default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # rewrite one pattern so it addresses the standin, preserving
            # any 'kind:' prefix; filesets ('set:') are left alone
            if pat.startswith('set:'):
                return pat

            kindpat = match_._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            # relative path prefix ('../' chain) from cwd back to the root
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        # range() is evaluated once here, so standins appended below are
        # not themselves revisited by this loop
        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # accept f either directly or via its largefile name
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # always unwind both monkey-patches, even if log itself failed
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
399 399
def overrideverify(orig, ui, repo, *pats, **opts):
    '''wrapped verify: additionally verify largefiles when any of
    --large/--lfa/--lfc was given'''
    large = opts.pop('large', False)
    verifyall = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or verifyall or contents:
        # keep a non-zero result from the plain verify; otherwise take
        # the largefile verification result
        result = result or lfcommands.verifylfiles(ui, repo, verifyall,
                                                   contents)
    return result
409 409
def overridedebugstate(orig, ui, repo, *pats, **opts):
    '''wrapped debugstate: with --large, show the largefiles dirstate
    instead of the normal one'''
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return
    # present the largefiles dirstate through a minimal repo-like shim
    class fakerepo(object):
        dirstate = lfutil.openlfdirstate(ui, repo)
    orig(ui, fakerepo, *pats, **opts)
418 418
419 419 # Before starting the manifest merge, merge.updates will call
420 420 # _checkunknownfile to check if there are any files in the merged-in
421 421 # changeset that collide with unknown files in the working copy.
422 422 #
423 423 # The largefiles are seen as unknown, so this prevents us from merging
424 424 # in a file 'foo' if we already have a largefile with the same name.
425 425 #
426 426 # The overridden function filters the unknown files by removing any
427 427 # largefiles. This makes the merge proceed and we can then handle this
428 428 # case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    '''skip the unknown-file collision check for files whose standin is
    tracked in the working context (they are known largefiles)'''
    standin = lfutil.standin(repo.dirstate.normalize(f))
    if standin in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
433 433
434 434 # The manifest merge handles conflicts on the manifest level. We want
435 435 # to handle changes in largefile-ness of files at this level too.
436 436 #
437 437 # The strategy is to run the original calculateupdates and then process
438 438 # the action list it outputs. There are two cases we need to deal with:
439 439 #
440 440 # 1. Normal file in p1, largefile in p2. Here the largefile is
441 441 # detected via its standin file, which will enter the working copy
442 442 # with a "get" action. It is not "merge" since the standin is all
443 443 # Mercurial is concerned with at this level -- the link to the
444 444 # existing normal file is not relevant here.
445 445 #
446 446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
447 447 # since the largefile will be present in the working copy and
448 448 # different from the normal file in p2. Mercurial therefore
449 449 # triggers a merge action.
450 450 #
451 451 # In both cases, we prompt the user and emit new actions to either
452 452 # remove the standin (if the normal file was kept) or to remove the
453 453 # normal file and get the standin (if the largefile was kept). The
454 454 # default prompt answer is to use the largefile version since it was
455 455 # presumably changed on purpose.
456 456 #
457 457 # Finally, the merge.applyupdates function will then take care of
458 458 # writing the files into the working copy and lfcommands.updatelfiles
459 459 # will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    '''post-process the merge action list to resolve largefile/normal-file
    conflicts (see the case analysis in the comment block above)'''
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
        followcopies)

    if overwrite:
        return actions, diverge, renamedelete

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in actions:
        # collect the largefile name for every file involved either as a
        # standin or directly
        splitstandin = f and lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in lfiles:
        standin = lfutil.standin(lfile)
        # (action, args, message) for the largefile and its standin;
        # (None, None, None) when no action was scheduled for it
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                # a 'dc' action stores the same args as 'm': reduce them
                # to the (flags,) form that the 'g' action emitted below
                # expects
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(),)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                # same 'dc' -> 'g' argument conversion as above, for the
                # largefile side
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(),)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
522 528
def mergerecordupdates(orig, repo, actions, branchmerge):
    '''wrapped merge.recordupdates: process the largefiles-only 'lfmr'
    actions (emitted by overridecalculateupdates above) before handing
    the rest to the original function.

    Here 'actions' maps an action type to a list of (file, args, msg)
    tuples.
    '''
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)
535 541
536 542
537 543 # Override filemerge to prompt the user about how they wish to merge
538 544 # largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=None):
    '''filemerge for standins: compare largefile hashes and only prompt
    the user on a real conflict, instead of running a merge tool'''
    if not lfutil.isstandin(orig):
        return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=labels)

    # standin contents are the largefile hashes
    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if ohash != ahash and ohash != dhash:
        # the other side really changed the largefile; take it silently
        # when our side is unchanged from the ancestor, otherwise ask
        if dhash == ahash or repo.ui.promptchoice(
                _('largefile %s has a merge conflict\nancestor was %s\n'
                  'keep (l)ocal %s or\ntake (o)ther %s?'
                  '$$ &Local $$ &Other') %
                (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0) == 1:
            repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0
559 565
def copiespathcopies(orig, ctx1, ctx2, match=None):
    '''map any standin names in a pathcopies result back to the largefile
    names they stand for'''
    copies = orig(ctx1, ctx2, match=match)
    updated = {}
    for src, dst in copies.iteritems():
        updated[lfutil.splitstandin(src) or src] = (lfutil.splitstandin(dst)
                                                    or dst)
    return updated
568 574
569 575 # Copy first changes the matchers to match standins instead of
570 576 # largefiles. Then it overrides util.copyfile in that function it
571 577 # checks if the destination largefile already exists. It also keeps a
572 578 # list of copied files so that the largefiles can be copied and the
573 579 # dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    '''wrapped copy/rename: run the original once for normal files only,
    then re-run it with the matchers redirected at the standins so the
    largefiles are copied/renamed and the largefiles dirstate is updated'''
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except error.Abort as e:
        # "no files to copy" is tolerated here: the largefile pass below
        # may still find something; any other abort propagates
        if str(e) != _('no files to copy'):
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # absolute path of the standin for a cwd-relative path
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        # NOTE(review): wlock is acquired inside this try block; if
        # repo.wlock() itself raised, the finally clause below would hit
        # an unbound 'wlock' -- confirm whether that can happen.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=(), opts=None, globbed=False,
                default='relpath', badfn=None):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if match_.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            # match only the standins of tracked largefiles
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            m.matchfn = lambda f: (lfutil.isstandin(f) and
                                   (f in manifest) and
                                   origmatchfn(lfutil.splitstandin(f)) or
                                   None)
            return m
        oldmatch = installmatchfn(overridematch)
        listpats = []
        for pat in pats:
            if match_.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            # intercept util.copyfile to record every (src, dest) pair and
            # to refuse overwriting an existing largefile without --force
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest):
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        # mirror each copied standin onto the largefile itself and record
        # it in the largefiles dirstate
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    util.unlinkpath(repo.wjoin(srclfile), True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # only abort if *both* passes found nothing to copy
    if nolfiles and nonormalfiles:
        raise error.Abort(_('no files to copy'))

    return result
706 712
707 713 # When the user calls revert, we have to be careful to not revert any
708 714 # changes to other largefiles accidentally. This means we have to keep
709 715 # track of the largefiles that are being reverted so we only pull down
710 716 # the necessary largefiles.
711 717 #
712 718 # Standins are only updated (to match the hash of largefiles) before
713 719 # commits. Update the standins then run the original revert, changing
714 720 # the matcher to hit standins instead of largefiles. Based on the
715 721 # resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    '''wrapped revert: sync the standins to the working copy, revert with
    the matcher redirected at the standins, then rebuild the largefiles
    from the reverted standins (see the comment block above)'''
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        # refresh standins of modified largefiles, drop standins of
        # largefiles deleted from the working copy
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        # snapshot so we can tell afterwards which standins revert changed
        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                default='relpath', badfn=None):
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            def tostandin(f):
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    return standin
                elif standin in repo[None] or lfdirstate[f] == 'r':
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()
783 789
784 790 # after pulling changesets, we need to take some extra care to get
785 791 # largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    '''wrapped pull: after pulling changesets, also cache the largefiles
    for the revisions selected by --lfrev (and, with --all-largefiles,
    for everything that was just pulled)'''
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    # work on a copy: appending 'pulled()' below must not mutate the
    # caller's opts['lfrev'] list in place
    lfrevs = list(opts.get('lfrev') or [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
808 814
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    # 'firstpulled' is only set (by overridepull) while the --lfrev
    # revisions are being processed, so its absence means this revset is
    # being evaluated outside that context
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_("pulled() only available in --lfrev"))
    return revset.baseset([r for r in subset if r >= firstpulled])
834 840
def overrideclone(orig, ui, source, dest=None, **opts):
    '''Abort 'clone --all-largefiles' early when the destination is not a
    local repository, since largefiles cannot be downloaded into it.'''
    destpath = dest
    if destpath is None:
        destpath = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise error.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            destpath)

    return orig(ui, source, dest, **opts)
845 851
def hgclone(orig, ui, opts, *args, **kwargs):
    '''Wrap hg.clone to enable the largefiles extension in the destination
    when required, and to download largefiles when --all-largefiles is set.

    Returns None when --all-largefiles was requested and some largefiles
    could not be downloaded; otherwise returns orig's (source, dest) result.
    '''
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # If largefiles is required for this repo, permanently enable it locally
        if 'largefiles' in repo.requirements:
            fp = repo.vfs('hgrc', 'a', text=True)
            try:
                fp.write('\n[extensions]\nlargefiles=\n')
            finally:
                fp.close()

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            # signal failure to the caller by returning None instead of the
            # (source, dest) pair
            if missing != 0:
                return None

    return result
877 883
def overriderebase(orig, ui, repo, **opts):
    '''Run rebase with largefiles status output silenced and with standins
    refreshed automatically before each intermediate commit.'''
    if not util.safehasattr(repo, '_largefilesenabled'):
        # largefiles is not active for this repo: plain rebase
        return orig(ui, repo, **opts)

    isresume = opts.get('continue')
    # push our hooks: auto-update standins at commit time, and a no-op
    # status writer to suppress largefiles status output
    repo._lfcommithooks.append(lfutil.automatedcommithook(isresume))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
890 896
def overridearchivecmd(orig, ui, repo, dest, **opts):
    '''Run the archive command with largefiles status reporting enabled so
    archives contain the largefile contents (see overridearchive).'''
    unfiltered = repo.unfiltered()
    unfiltered.lfstatus = True
    try:
        return orig(ui, unfiltered, dest, **opts)
    finally:
        unfiltered.lfstatus = False
898 904
def hgwebarchive(orig, web, req, tmpl):
    '''hgweb archive wrapper: flag the repo so the archive includes
    largefile contents (see overridearchive).'''
    web.repo.lfstatus = True
    try:
        return orig(web, req, tmpl)
    finally:
        # always clear the flag, even when archiving fails
        web.repo.lfstatus = False
906 912
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix='', mtime=None, subrepos=None):
    '''Wrap archival.archive so archives contain largefile contents instead
    of standins. Only active when lfstatus was set by one of the archive
    command wrappers; otherwise delegates unchanged to orig.'''
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # skip entries excluded by the archive's matcher
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if node is not None:
                # archiving a revision: look the largefile up in the store
                # or system cache by the hash recorded in the standin
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                       _('largefile %s not found in repo store or system cache')
                       % lfutil.splitstandin(f))
            else:
                # working-copy archive: read the largefile directly by its
                # plain (non-standin) name
                path = lfutil.splitstandin(f)

            # archive the file under its largefile name, not the standin name
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            # propagate the flag so hgsubrepoarchive handles largefiles too
            sub._repo.lfstatus = True
            sub.archive(archiver, prefix, submatch)

    archiver.done()
982 988
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
    '''Archive a hg subrepository, replacing largefile standins with the
    real largefile contents (fetched into the cache when needed).'''
    if not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match)

    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # Test the file actually being written ('name'), not the enclosing
        # loop variable 'f' that this closure would otherwise capture by
        # late binding (as overridearchive's write() already does).
        if match and not match(name):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if ctx.node() is not None:
                # archiving a revision: locate the largefile by the hash
                # recorded in the standin
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                       _('largefile %s not found in repo store or system cache')
                       % lfutil.splitstandin(f))
            else:
                path = lfutil.splitstandin(f)

            # archive the file under its largefile name, not the standin name
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch)
1037 1043
1038 1044 # If a largefile is modified, the change is not reflected in its
1039 1045 # standin until a commit. cmdutil.bailifchanged() raises an exception
1040 1046 # if the repo has uncommitted changes. Wrap it to also check if
1041 1047 # largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    '''Like cmdutil.bailifchanged, but also abort on uncommitted largefile
    changes (standins do not reflect largefile edits until commit).'''
    orig(repo, *args, **kwargs)
    repo.lfstatus = True
    try:
        s = repo.status()
    finally:
        # restore the flag even if status() raises, matching the
        # try/finally pattern used by the other lfstatus wrappers here
        repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_('uncommitted changes'))
1049 1055
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    '''Wrap cmdutil.forget to also forget largefiles by removing their
    standins from tracking and from the largefiles dirstate.

    Returns the combined (bad, forgot) lists from the normal-file pass and
    the largefile pass.'''
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    # handle the normal files first with the original implementation
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # only forget largefiles whose standin is actually tracked
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                repo.wvfs.isdir(lfutil.standin(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            # 'a' == added but never committed: just drop the entry;
            # otherwise mark it removed
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        rejected = repo[None].forget(standins)
    finally:
        wlock.release()

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1095 1101
def _getoutgoings(repo, other, missing, addfunc):
    """Invoke 'addfunc(filename, lfhash)' for each unique largefile pair
    found in the outgoing revisions 'missing' whose hash is not already
    present on the 'other' repository.
    """
    seen = set()     # unique (filename, hash) pairs collected so far
    hashes = set()   # distinct largefile hashes, for one batched query
    def collect(fn, lfhash):
        pair = (fn, lfhash)
        if pair not in seen:
            seen.add(pair)
            hashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, collect)
    if not hashes:
        return
    # one batched round-trip to ask the store which hashes it already has
    remotehas = basestore._openstore(repo, other).exists(hashes)
    for fn, lfhash in seen:
        if not remotehas[lfhash]:
            addfunc(fn, lfhash)
1118 1124
def outgoinghook(ui, repo, other, opts, missing):
    '''Hook for 'hg outgoing --large': report the largefiles that would be
    uploaded to 'other' for the outgoing changesets 'missing'.'''
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # debug mode: remember every hash per file so showhashes can
            # print them under each file name
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            # normal mode: only file names are shown
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1150 1156
def summaryremotehook(ui, repo, opts, changes):
    '''Hook for 'hg summary --remote --large': report how many largefile
    entities/files would be uploaded to the outgoing target.'''
    largeopt = opts.get('large', False)
    if changes is None:
        # probe invocation: tell the caller which remote checks are needed
        # as an (incoming, outgoing) pair of booleans
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1179 1185
def overridesummary(orig, ui, repo, *pats, **opts):
    '''Run summary with largefiles included in the status counts.'''
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        # always clear the flag, even when summary fails
        repo.lfstatus = False
1186 1192
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    '''Wrap scmutil.addremove: remove missing largefiles and add new ones
    through the largefiles machinery, then delegate everything else to the
    original implementation with largefiles masked out of the matcher.'''
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1221 1227
1222 1228 # Calling purge with --all will cause the largefiles to be deleted.
1223 1229 # Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    '''Wrap purge so files tracked only by the largefiles dirstate are not
    reported as unknown/ignored -- and thus not deleted.'''
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # keep only files the largefiles dirstate knows nothing about
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    try:
        orig(ui, repo, *dirs, **opts)
    finally:
        # restore even when purge raises, so the repo object is not left
        # with a monkey-patched status method
        repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    '''Wrap rollback to restore standin files and the largefiles dirstate
    to match the dirstate parents after the rollback.'''
    wlock = repo.wlock()
    try:
        before = repo.dirstate.parents()
        # standins tracked before the rollback; entries still present in
        # this set afterwards belong to no tracked file and are deleted
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    # marked removed: drop the standin from the working dir
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore the standin content from the new parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # resync the largefiles dirstate with the restored standins and
        # drop entries for largefiles that no longer exist
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
1287 1293
def overridetransplant(orig, ui, repo, *revs, **opts):
    '''Run transplant with largefiles status output suppressed and with
    standins refreshed automatically before each transplanted commit.'''
    isresume = opts.get('continue')
    # push our hooks: auto-update standins at commit time, and a no-op
    # status writer to silence largefiles status output
    repo._lfcommithooks.append(lfutil.automatedcommithook(isresume))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1298 1304
def overridecat(orig, ui, repo, file1, *pats, **opts):
    '''Wrap cat so largefiles can be catted by either name: the matcher is
    extended to map standins to largefile names, and largefile contents
    are fetched from the store into the user cache when needed.

    Returns 0 when at least one file was written, 1 otherwise.'''
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        # remember largefile names matched via their standin so lfbadfn
        # does not report them as bad
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            # the user named a largefile via its standin: make sure the
            # content is in the user cache, then stream it out
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise error.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded')  % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            try:
                for chunk in util.filechunkiter(fpin, 128 * 1024):
                    fp.write(chunk)
            finally:
                # don't leak the cache file handle if writing fails
                fpin.close()
        fp.close()
        err = 0
    return err
1360 1366
def mergeupdate(orig, repo, node, branchmerge, force, partial,
                *args, **kwargs):
    '''Wrap merge.update to refresh standins for changed largefiles before
    the update/merge, and to update largefile working copies afterwards.'''
    wlock = repo.wlock()
    try:
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(match_.always(repo.root,
                                                    repo.getcwd()),
                                      [], False, False, False)
        pctx = repo['.']
        # write current hashes of modified largefiles into their standins
        # so the merge machinery sees the working-copy state
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not os.path.exists(lfileabs):
                continue
            lfhash = lfutil.hashrepofile(repo, lfile)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, '.')):
                # largefile turned out to be unchanged from '.'
                lfdirstate.normal(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)

        # update the working copies of largefiles whose standins changed
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result
    finally:
        wlock.release()
1415 1421
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    '''After files are marked touched, refresh the working copies of any
    largefiles whose standins were among them.'''
    result = orig(repo, files, *args, **kwargs)

    touchedlfiles = []
    for f in files:
        if lfutil.isstandin(f):
            touchedlfiles.append(lfutil.splitstandin(f))
    if touchedlfiles:
        lfcommands.updatelfiles(repo.ui, repo, filelist=touchedlfiles,
                                printmessage=False, normallookup=True)

    return result
@@ -1,1347 +1,1350
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import shutil
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 )
22 22 from . import (
23 23 copies,
24 24 destutil,
25 25 error,
26 26 filemerge,
27 27 obsolete,
28 28 subrepo,
29 29 util,
30 30 worker,
31 31 )
32 32
33 33 _pack = struct.pack
34 34 _unpack = struct.unpack
35 35
36 36 def _droponode(data):
37 37 # used for compatibility for v1
38 38 bits = data.split('\0')
39 39 bits = bits[:-2] + bits[-1:]
40 40 return '\0'.join(bits)
41 41
42 42 class mergestate(object):
43 43 '''track 3-way merge state of individual files
44 44
45 45 it is stored on disk when needed. Two file are used, one with an old
46 46 format, one with a new format. Both contains similar data, but the new
47 47 format can store new kinds of field.
48 48
49 49 Current new format is a list of arbitrary record of the form:
50 50
51 51 [type][length][content]
52 52
53 53 Type is a single character, length is a 4 bytes integer, content is an
54 54 arbitrary suites of bytes of length `length`.
55 55
56 56 Type should be a letter. Capital letter are mandatory record, Mercurial
57 57 should abort if they are unknown. lower case record can be safely ignored.
58 58
59 59 Currently known record:
60 60
61 61 L: the node of the "local" part of the merge (hexified version)
62 62 O: the node of the "other" part of the merge (hexified version)
63 63 F: a file to be merged entry
64 64 D: a file that the external merge driver will merge internally
65 65 (experimental)
66 66 m: the external merge driver defined for this merge plus its run state
67 67 (experimental)
68 68
69 69 Merge driver run states (experimental):
70 70 u: driver-resolved files unmarked -- needs to be run next time we're about
71 71 to resolve or commit
72 72 m: driver-resolved files marked -- only needs to be run before commit
73 73 s: success/skipped -- does not need to be run any more
74 74 '''
75 75 statepathv1 = 'merge/state'
76 76 statepathv2 = 'merge/state2'
77 77
    def __init__(self, repo):
        """Initialize merge state for 'repo', loading any state from disk."""
        self._repo = repo
        # set to True whenever the in-memory state diverges from disk;
        # commit() only writes when this is set
        self._dirty = False
        self._read()
82 82
    def reset(self, node=None, other=None):
        """Discard all merge state, optionally priming a new merge between
        'node' (local side) and 'other'."""
        self._state = {}
        self._local = None
        self._other = None
        # invalidate the cached otherctx property if it has been computed
        if 'otherctx' in vars(self):
            del self.otherctx
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            # 's': does not need to be run any more (see the run-state
            # legend in the class docstring)
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        # drop the saved copies of local file versions (ignore errors)
        shutil.rmtree(self._repo.join('merge'), True)
        self._dirty = False
99 99
    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._local = None
        self._other = None
        # invalidate the cached otherctx property if it has been computed
        if 'otherctx' in vars(self):
            del self.otherctx
        self._readmergedriver = None
        self._mdstate = 's'
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                # merge driver record: "<driver>\0<run state>"
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FD':
                # file record: "<path>\0<field>\0..." -> path maps to fields
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif not rtype.islower():
                # per the class docstring, unknown lowercase records may be
                # ignored, but unknown uppercase records are mandatory
                raise error.Abort(_('unsupported merge state record: %s')
                                  % rtype)
        self._dirty = False
135 135
    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do not need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fail.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records
169 169
170 170 def _v1v2match(self, v1records, v2records):
171 171 oldv2 = set() # old format version of v2 record
172 172 for rec in v2records:
173 173 if rec[0] == 'L':
174 174 oldv2.add(rec)
175 175 elif rec[0] == 'F':
176 176 # drop the onode data (not contained in v1)
177 177 oldv2.add(('F', _droponode(rec[1])))
178 178 for rec in v1records:
179 179 if rec not in oldv2:
180 180 return False
181 181 else:
182 182 return True
183 183
184 184 def _readrecordsv1(self):
185 185 """read on disk merge state for version 1 file
186 186
187 187 returns list of record [(TYPE, data), ...]
188 188
189 189 Note: the "F" data from this file are one entry short
190 190 (no "other file node" entry)
191 191 """
192 192 records = []
193 193 try:
194 194 f = self._repo.vfs(self.statepathv1)
195 195 for i, l in enumerate(f):
196 196 if i == 0:
197 197 records.append(('L', l[:-1]))
198 198 else:
199 199 records.append(('F', l[:-1]))
200 200 f.close()
201 201 except IOError as err:
202 202 if err.errno != errno.ENOENT:
203 203 raise
204 204 return records
205 205
206 206 def _readrecordsv2(self):
207 207 """read on disk merge state for version 2 file
208 208
209 209 returns list of record [(TYPE, data), ...]
210 210 """
211 211 records = []
212 212 try:
213 213 f = self._repo.vfs(self.statepathv2)
214 214 data = f.read()
215 215 off = 0
216 216 end = len(data)
217 217 while off < end:
218 218 rtype = data[off]
219 219 off += 1
220 220 length = _unpack('>I', data[off:(off + 4)])[0]
221 221 off += 4
222 222 record = data[off:(off + length)]
223 223 off += length
224 224 records.append((rtype, record))
225 225 f.close()
226 226 except IOError as err:
227 227 if err.errno != errno.ENOENT:
228 228 raise
229 229 return records
230 230
    @util.propertycache
    def mergedriver(self):
        """The merge driver from 'experimental.mergedriver' config (cached).

        Raises ConfigError when the configured driver no longer matches the
        one recorded when the merge started.
        """
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver
249 249
    @util.propertycache
    def otherctx(self):
        # changectx of the "other" side of the merge; cached, and
        # invalidated by reset()/_read() via 'del self.otherctx'
        return self._repo[self._other]
253 253
254 254 def active(self):
255 255 """Whether mergestate is active.
256 256
257 257 Returns True if there appears to be mergestate. This is a rough proxy
258 258 for "is a merge in progress."
259 259 """
260 260 # Check local variables before looking at filesystem for performance
261 261 # reasons.
262 262 return bool(self._local) or bool(self._state) or \
263 263 self._repo.vfs.exists(self.statepathv1) or \
264 264 self._repo.vfs.exists(self.statepathv2)
265 265
266 266 def commit(self):
267 267 """Write current state on disk (if necessary)"""
268 268 if self._dirty:
269 269 records = []
270 270 records.append(('L', hex(self._local)))
271 271 records.append(('O', hex(self._other)))
272 272 if self.mergedriver:
273 273 records.append(('m', '\0'.join([
274 274 self.mergedriver, self._mdstate])))
275 275 for d, v in self._state.iteritems():
276 276 if v[0] == 'd':
277 277 records.append(('D', '\0'.join([d] + v)))
278 278 else:
279 279 records.append(('F', '\0'.join([d] + v)))
280 280 self._writerecords(records)
281 281 self._dirty = False
282 282
283 283 def _writerecords(self, records):
284 284 """Write current state on disk (both v1 and v2)"""
285 285 self._writerecordsv1(records)
286 286 self._writerecordsv2(records)
287 287
288 288 def _writerecordsv1(self, records):
289 289 """Write current state on disk in a version 1 file"""
290 290 f = self._repo.vfs(self.statepathv1, 'w')
291 291 irecords = iter(records)
292 292 lrecords = irecords.next()
293 293 assert lrecords[0] == 'L'
294 294 f.write(hex(self._local) + '\n')
295 295 for rtype, data in irecords:
296 296 if rtype == 'F':
297 297 f.write('%s\n' % _droponode(data))
298 298 f.close()
299 299
300 300 def _writerecordsv2(self, records):
301 301 """Write current state on disk in a version 2 file"""
302 302 f = self._repo.vfs(self.statepathv2, 'w')
303 303 for key, data in records:
304 304 assert len(key) == 1
305 305 format = '>sI%is' % len(data)
306 306 f.write(_pack(format, key, len(data), data))
307 307 f.close()
308 308
309 309 def add(self, fcl, fco, fca, fd):
310 310 """add a new (potentially?) conflicting file the merge state
311 311 fcl: file context for local,
312 312 fco: file context for remote,
313 313 fca: file context for ancestors,
314 314 fd: file path of the resulting merge.
315 315
316 316 note: also write the local version to the `.hg/merge` directory.
317 317 """
318 318 hash = util.sha1(fcl.path()).hexdigest()
319 319 self._repo.vfs.write('merge/' + hash, fcl.data())
320 320 self._state[fd] = ['u', hash, fcl.path(),
321 321 fca.path(), hex(fca.filenode()),
322 322 fco.path(), hex(fco.filenode()),
323 323 fcl.flags()]
324 324 self._dirty = True
325 325
326 326 def __contains__(self, dfile):
327 327 return dfile in self._state
328 328
329 329 def __getitem__(self, dfile):
330 330 return self._state[dfile][0]
331 331
332 332 def __iter__(self):
333 333 return iter(sorted(self._state))
334 334
335 335 def files(self):
336 336 return self._state.keys()
337 337
338 338 def mark(self, dfile, state):
339 339 self._state[dfile][0] = state
340 340 self._dirty = True
341 341
342 342 def mdstate(self):
343 343 return self._mdstate
344 344
345 345 def unresolved(self):
346 346 """Obtain the paths of unresolved files."""
347 347
348 348 for f, entry in self._state.items():
349 349 if entry[0] == 'u':
350 350 yield f
351 351
352 352 def driverresolved(self):
353 353 """Obtain the paths of driver-resolved files."""
354 354
355 355 for f, entry in self._state.items():
356 356 if entry[0] == 'd':
357 357 yield f
358 358
    def _resolve(self, preresolve, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`

        preresolve: if True, run the premerge step (restoring the backed-up
        local content first); otherwise run the real file merge.
        Returns a (complete, exit code) pair.
        """
        # already resolved ('r') or driver-resolved ('d'): nothing to do
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                # no real ancestor: exec bit cannot be merged automatically
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s\n') % afile)
            elif flags == fla:
                # local side did not change flags; take the other side's
                flags = flo
        if preresolve:
            # restore local
            f = self._repo.vfs('merge/' + hash)
            self._repo.wwrite(dfile, f.read(), flags)
            f.close()
            complete, r = filemerge.premerge(self._repo, self._local, lfile,
                                             fcd, fco, fca, labels=labels)
        else:
            complete, r = filemerge.filemerge(self._repo, self._local, lfile,
                                              fcd, fco, fca, labels=labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not r:
            # merge succeeded: mark resolved
            self.mark(dfile, 'r')
        return complete, r
396 396
397 397 def preresolve(self, dfile, wctx, labels=None):
398 398 """run premerge process for dfile
399 399
400 400 Returns whether the merge is complete, and the exit code."""
401 401 return self._resolve(True, dfile, wctx, labels=labels)
402 402
403 403 def resolve(self, dfile, wctx, labels=None):
404 404 """run merge process (assuming premerge was run) for dfile
405 405
406 406 Returns the exit code of the merge."""
407 407 return self._resolve(False, dfile, wctx, labels=labels)[1]
408 408
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Return whether working-directory file `f` is an untracked file
    whose content differs from `f2` in `mctx` (`f2` defaults to `f`)."""
    if f2 is None:
        f2 = f
    # must exist on disk as a regular file
    if not os.path.isfile(repo.wjoin(f)):
        return False
    # must pass the working-directory path audit
    if not repo.wvfs.audit.check(f):
        return False
    # must not already be tracked by the dirstate
    if repo.dirstate.normalize(f) in repo.dirstate:
        return False
    return mctx[f2].cmp(wctx[f])
416 416
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    aborts = []
    if not force:
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                # remote created (or deleted/changed): a differing untracked
                # local file of the same name is a conflict
                if _checkunknownfile(repo, wctx, mctx, f):
                    aborts.append(f)
            elif m == 'dg':
                # local directory rename: compare against the rename source
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    aborts.append(f)

    for f in sorted(aborts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if aborts:
        raise error.Abort(_("untracked files in working directory differ "
                            "from files in requested revision"))

    # second pass: downgrade 'c'/'cm' actions now that no abort is needed
    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            actions[f] = ('g', args, msg)
        elif m == 'cm':
            fl2, anc = args
            different = _checkunknownfile(repo, wctx, mctx, f)
            if different:
                # untracked local content differs: fall back to a merge
                actions[f] = ('m', (f, f, None, False, anc),
                              "remote differs from untracked local")
            else:
                actions[f] = ('g', (fl2,), "remote created")
450 450
451 451 def _forgetremoved(wctx, mctx, branchmerge):
452 452 """
453 453 Forget removed files
454 454
455 455 If we're jumping between revisions (as opposed to merging), and if
456 456 neither the working directory nor the target rev has the file,
457 457 then we need to remove it from the dirstate, to prevent the
458 458 dirstate from listing the file when it is no longer in the
459 459 manifest.
460 460
461 461 If we're merging, and the other revision has removed a file
462 462 that is not present in the working directory, we need to mark it
463 463 as removed.
464 464 """
465 465
466 466 actions = {}
467 467 m = 'f'
468 468 if branchmerge:
469 469 m = 'r'
470 470 for f in wctx.deleted():
471 471 if f not in mctx:
472 472 actions[f] = m, None, "forget deleted"
473 473
474 474 if not branchmerge:
475 475 for f in wctx.removed():
476 476 if f not in mctx:
477 477 actions[f] = 'f', None, "forget removed"
478 478
479 479 return actions
480 480
def _checkcollision(repo, wmf, actions):
    """Abort if the merged manifest would contain case-folding collisions.

    wmf: working manifest (iterable of file names)
    actions: merge action dict, used to predict added/removed/moved files
    Raises error.Abort on a file/file or file/directory case collision.
    """
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
523 523
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # extensions override this; the stock implementation always proceeds
    return True
529 529
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # extensions override this; the stock implementation always succeeds
    return True
535 535
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
                  acceptremote, followcopies):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    partial = function to filter file lists
    acceptremote = accept the incoming changes without prompting

    Returns (actions, diverge, renamedelete) where actions maps each file
    to an (action type, args, message) tuple.
    """

    copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete = ret

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (bool(branchmerge), bool(force), bool(partial)))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        for s in sorted(wctx.substate):
            if wctx.sub(s).dirty():
                m1['.hgsubstate'] += '+'
                break

    # Compare manifests
    diff = m1.diff(m2)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if partial and not partial(f):
            continue
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2,), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1,), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        # 'cd' stores the same (f1, f2, fa, move, anc) args
                        # as 'm'; the other side (f2) is gone, hence None
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1[20:] == 'a':
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                if acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    # 'dc' stores the same (f1, f2, fa, move, anc) args as
                    # 'm'; the local side (f1) is gone, hence None
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete
679 681
680 682 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
681 683 """Resolves false conflicts where the nodeid changed but the content
682 684 remained the same."""
683 685
684 686 for f, (m, args, msg) in actions.items():
685 687 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
686 688 # local did change but ended up with same content
687 689 actions[f] = 'r', None, "prompt same"
688 690 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
689 691 # remote did change but ended up with same content
690 692 del actions[f] # don't get = keep local deleted
691 693
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
                     acceptremote, followcopies):
    "Calculate the actions needed to merge mctx into wctx using ancestors"

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, partial,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            # collect this ancestor's proposed action for every file
            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    # drop conflicts where changed content turned out identical
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # working-directory target: also forget removed files
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
781 783
def batchremove(repo, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    # hoist frequently used callables out of the loop
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    audit = repo.wvfs.audit
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        audit(f)
        try:
            unlink(wjoin(f), ignoremissing=True)
        except OSError as inst:
            # best-effort: report and keep going
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        # emit a progress tuple roughly every 100 files
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
808 810
def batchget(repo, mctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    # hoist frequently used callables out of the loop
    fctx = mctx.filectx
    wwrite = repo.wwrite
    pending = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> g\n" % (f, msg))
        if verbose:
            repo.ui.note(_("getting %s\n") % f)
        wwrite(f, fctx(f).data(), args[0])
        # emit a progress tuple roughly every 100 files
        if pending == 100:
            yield pending, f
            pending = 0
        pending += 1
    if pending > 0:
        yield pending, f
831 833
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node(), mctx.node())
    moves = []
    # process each action class in a stable (sorted) file order
    for m, l in actions.items():
        l.sort()

    # prescan for merges
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
        fcl = wctx[f1]
        fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # no common ancestor version of this file
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    # 'k' (keep) actions are no-ops, so they don't count towards progress
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        repo.wwrite(f, wctx.filectx(f0).data(), flags)
        util.unlinkpath(repo.wjoin(f0))
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        repo.wwrite(f, mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
        updated += 1

    mergeactions = actions['m']
    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        # only merge files the driver left unresolved
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite)
            continue
        audit(f)
        complete, r = ms.preresolve(f, wctx, labels=labels)
        if complete:
            if r is not None and r > 0:
                unresolved += 1
            else:
                # r is None: no real conflict; r == 0: merged cleanly
                if r is None:
                    updated += 1
                else:
                    merged += 1
        else:
            # premerge did not finish; real merge happens below
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        r = ms.resolve(f, wctx, labels=labels)
        if r is not None and r > 0:
            unresolved += 1
        else:
            if r is None:
                updated += 1
            else:
                merged += 1

    ms.commit()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(unresolved, 1)

    ms.commit()

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1022 1024
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions['r']:
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions['f']:
        repo.dirstate.drop(f)

    # re-add
    for f, args, msg in actions['a']:
        if not branchmerge:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions['e']:
        repo.dirstate.normallookup(f)

    # keep (nothing to record)
    for f, args, msg in actions['k']:
        pass

    # get
    for f, args, msg in actions['g']:
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions['dm']:
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions['dg']:
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
1100 1102
1101 1103 def update(repo, node, branchmerge, force, partial, ancestor=None,
1102 1104 mergeancestor=False, labels=None):
1103 1105 """
1104 1106 Perform a merge between the working directory and the given node
1105 1107
1106 1108 node = the node to update to, or None if unspecified
1107 1109 branchmerge = whether to merge between branches
1108 1110 force = whether to force branch merging or file overwriting
1109 1111 partial = a function to filter file lists (dirstate not updated)
1110 1112 mergeancestor = whether it is merging with an ancestor. If true,
1111 1113 we should accept the incoming changes for any prompts that occur.
1112 1114 If false, merging with an ancestor (fast-forward) is only allowed
1113 1115 between different named branches. This flag is used by rebase extension
1114 1116 as a temporary fix and should be avoided in general.
1115 1117
1116 1118 The table below shows all the behaviors of the update command
1117 1119 given the -c and -C or no options, whether the working directory
1118 1120 is dirty, whether a revision is specified, and the relationship of
1119 1121 the parent rev to the target rev (linear, on the same named
1120 1122 branch, or on another named branch).
1121 1123
1122 1124 This logic is tested by test-update-branches.t.
1123 1125
1124 1126 -c -C dirty rev | linear same cross
1125 1127 n n n n | ok (1) x
1126 1128 n n n y | ok ok ok
1127 1129 n n y n | merge (2) (2)
1128 1130 n n y y | merge (3) (3)
1129 1131 n y * * | discard discard discard
1130 1132 y n y * | (4) (4) (4)
1131 1133 y n n * | ok ok ok
1132 1134 y y * * | (5) (5) (5)
1133 1135
1134 1136 x = can't happen
1135 1137 * = don't-care
1136 1138 1 = abort: not a linear update (merge or update --check to force update)
1137 1139 2 = abort: uncommitted changes (commit and merge, or update --clean to
1138 1140 discard changes)
1139 1141 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1140 1142 4 = abort: uncommitted changes (checked in commands.py)
1141 1143 5 = incompatible options (checked in commands.py)
1142 1144
1143 1145 Return the same tuple as applyupdates().
1144 1146 """
1145 1147
1146 1148 onode = node
1147 1149 wlock = repo.wlock()
1148 1150 try:
1149 1151 wc = repo[None]
1150 1152 pl = wc.parents()
1151 1153 p1 = pl[0]
1152 1154 pas = [None]
1153 1155 if ancestor is not None:
1154 1156 pas = [repo[ancestor]]
1155 1157
1156 1158 if node is None:
1157 1159 if (repo.ui.configbool('devel', 'all-warnings')
1158 1160 or repo.ui.configbool('devel', 'oldapi')):
1159 1161 repo.ui.develwarn('update with no target')
1160 1162 rev, _mark, _act = destutil.destupdate(repo)
1161 1163 node = repo[rev].node()
1162 1164
1163 1165 overwrite = force and not branchmerge
1164 1166
1165 1167 p2 = repo[node]
1166 1168 if pas[0] is None:
1167 1169 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1168 1170 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1169 1171 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1170 1172 else:
1171 1173 pas = [p1.ancestor(p2, warn=branchmerge)]
1172 1174
1173 1175 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1174 1176
1175 1177 ### check phase
1176 1178 if not overwrite and len(pl) > 1:
1177 1179 raise error.Abort(_("outstanding uncommitted merge"))
1178 1180 if branchmerge:
1179 1181 if pas == [p2]:
1180 1182 raise error.Abort(_("merging with a working directory ancestor"
1181 1183 " has no effect"))
1182 1184 elif pas == [p1]:
1183 1185 if not mergeancestor and p1.branch() == p2.branch():
1184 1186 raise error.Abort(_("nothing to merge"),
1185 1187 hint=_("use 'hg update' "
1186 1188 "or check 'hg heads'"))
1187 1189 if not force and (wc.files() or wc.deleted()):
1188 1190 raise error.Abort(_("uncommitted changes"),
1189 1191 hint=_("use 'hg status' to list changes"))
1190 1192 for s in sorted(wc.substate):
1191 1193 wc.sub(s).bailifchanged()
1192 1194
1193 1195 elif not overwrite:
1194 1196 if p1 == p2: # no-op update
1195 1197 # call the hooks and exit early
1196 1198 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1197 1199 repo.hook('update', parent1=xp2, parent2='', error=0)
1198 1200 return 0, 0, 0, 0
1199 1201
1200 1202 if pas not in ([p1], [p2]): # nonlinear
1201 1203 dirty = wc.dirty(missing=True)
1202 1204 if dirty or onode is None:
1203 1205 # Branching is a bit strange to ensure we do the minimal
1204 1206 # amount of call to obsolete.background.
1205 1207 foreground = obsolete.foreground(repo, [p1.node()])
1206 1208 # note: the <node> variable contains a random identifier
1207 1209 if repo[node].node() in foreground:
1208 1210 pas = [p1] # allow updating to successors
1209 1211 elif dirty:
1210 1212 msg = _("uncommitted changes")
1211 1213 if onode is None:
1212 1214 hint = _("commit and merge, or update --clean to"
1213 1215 " discard changes")
1214 1216 else:
1215 1217 hint = _("commit or update --clean to discard"
1216 1218 " changes")
1217 1219 raise error.Abort(msg, hint=hint)
1218 1220 else: # node is none
1219 1221 msg = _("not a linear update")
1220 1222 hint = _("merge or update --check to force update")
1221 1223 raise error.Abort(msg, hint=hint)
1222 1224 else:
1223 1225 # Allow jumping branches if clean and specific rev given
1224 1226 pas = [p1]
1225 1227
1226 1228 # deprecated config: merge.followcopies
1227 1229 followcopies = False
1228 1230 if overwrite:
1229 1231 pas = [wc]
1230 1232 elif pas == [p2]: # backwards
1231 1233 pas = [wc.p1()]
1232 1234 elif not branchmerge and not wc.dirty(missing=True):
1233 1235 pass
1234 1236 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1235 1237 followcopies = True
1236 1238
1237 1239 ### calculate phase
1238 1240 actionbyfile, diverge, renamedelete = calculateupdates(
1239 1241 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1240 1242 followcopies)
1241 1243 # Convert to dictionary-of-lists format
1242 1244 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
1243 1245 for f, (m, args, msg) in actionbyfile.iteritems():
1244 1246 if m not in actions:
1245 1247 actions[m] = []
1246 1248 actions[m].append((f, args, msg))
1247 1249
1248 1250 if not util.checkcase(repo.path):
1249 1251 # check collision between files only in p2 for clean update
1250 1252 if (not branchmerge and
1251 1253 (force or not wc.dirty(missing=True, branch=False))):
1252 1254 _checkcollision(repo, p2.manifest(), None)
1253 1255 else:
1254 1256 _checkcollision(repo, wc.manifest(), actions)
1255 1257
1256 1258 # Prompt and create actions. TODO: Move this towards resolve phase.
1257 1259 for f, args, msg in sorted(actions['cd']):
1258 1260 if repo.ui.promptchoice(
1259 1261 _("local changed %s which remote deleted\n"
1260 1262 "use (c)hanged version or (d)elete?"
1261 1263 "$$ &Changed $$ &Delete") % f, 0):
1262 1264 actions['r'].append((f, None, "prompt delete"))
1263 1265 else:
1264 1266 actions['a'].append((f, None, "prompt keep"))
1265 1267
1266 1268 for f, args, msg in sorted(actions['dc']):
1267 flags, = args
1269 f1, f2, fa, move, anc = args
1270 flags = p2[f2].flags()
1268 1271 if repo.ui.promptchoice(
1269 1272 _("remote changed %s which local deleted\n"
1270 1273 "use (c)hanged version or leave (d)eleted?"
1271 1274 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1272 1275 actions['g'].append((f, (flags,), "prompt recreating"))
1273 1276
1274 1277 # divergent renames
1275 1278 for f, fl in sorted(diverge.iteritems()):
1276 1279 repo.ui.warn(_("note: possible conflict - %s was renamed "
1277 1280 "multiple times to:\n") % f)
1278 1281 for nf in fl:
1279 1282 repo.ui.warn(" %s\n" % nf)
1280 1283
1281 1284 # rename and delete
1282 1285 for f, fl in sorted(renamedelete.iteritems()):
1283 1286 repo.ui.warn(_("note: possible conflict - %s was deleted "
1284 1287 "and renamed to:\n") % f)
1285 1288 for nf in fl:
1286 1289 repo.ui.warn(" %s\n" % nf)
1287 1290
1288 1291 ### apply phase
1289 1292 if not branchmerge: # just jump to the new rev
1290 1293 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1291 1294 if not partial:
1292 1295 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1293 1296 # note that we're in the middle of an update
1294 1297 repo.vfs.write('updatestate', p2.hex())
1295 1298
1296 1299 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1297 1300
1298 1301 if not partial:
1299 1302 repo.dirstate.beginparentchange()
1300 1303 repo.setparents(fp1, fp2)
1301 1304 recordupdates(repo, actions, branchmerge)
1302 1305 # update completed, clear state
1303 1306 util.unlink(repo.join('updatestate'))
1304 1307
1305 1308 if not branchmerge:
1306 1309 repo.dirstate.setbranch(p2.branch())
1307 1310 repo.dirstate.endparentchange()
1308 1311 finally:
1309 1312 wlock.release()
1310 1313
1311 1314 if not partial:
1312 1315 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1313 1316 return stats
1314 1317
def graft(repo, ctx, pctx, labels):
    """Perform a graft-style merge of ``ctx`` onto the working directory.

    The merge ancestor is chosen so that one or more changesets are
    effectively replayed onto the current changeset.  After the merge the
    dirstate is collapsed back to a single parent and any renames/copies
    introduced by ``ctx`` are duplicated into the working copy.

    ctx    - changeset to rebase
    pctx   - merge base, usually ctx.p1()
    labels - merge labels, e.g. ['local', 'graft']
    """
    # When grafting a descendant onto an ancestor, pass mergeancestor=True
    # to update().  That (1) permits merging when the destination equals
    # ctx's parent (so graft can copy commits), and (2) tells update() the
    # incoming side is newer, suppressing the "remote changed foo which
    # local deleted" prompt.
    incomingisdescendant = repo.changelog.isancestor(repo['.'].node(),
                                                     ctx.node())

    stats = update(repo, ctx.node(), True, True, False, pctx.node(),
                   mergeancestor=incomingisdescendant, labels=labels)

    # Collapse back to a single parent: drop the second merge parent,
    # then replicate ctx's copy/rename records into the dirstate.
    repo.dirstate.beginparentchange()
    repo.setparents(repo['.'].node(), nullid)
    repo.dirstate.write(repo.currenttransaction())
    copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    repo.dirstate.endparentchange()
    return stats
General Comments 0
You need to be logged in to leave comments. Login now