merge: add a new 'backup' argument to get actions...
Siddharth Agarwal
r27655:af13eaf9 default
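The diff below touches two files: the largefiles extension's overrides module (first hunk) and mercurial/merge.py (second hunk). The visible change in the first hunk is that the args tuple attached to 'g' (get) actions gains a second element, the new backup flag named in the commit message, so the override now builds (flags, False) instead of (flags,). A minimal sketch of the resulting tuple shapes, with the flags value and message invented for illustration:

# Hypothetical action entries showing the tuple shapes only.
old_action = ('g', ('',), 'remote created')        # before: args carried just the flags
new_action = ('g', ('', False), 'remote created')  # after: (flags, backup), backup=False here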
@@ -1,1435 +1,1435
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 15 archival, pathutil, revset, error
16 16 from mercurial.i18n import _
17 17
18 18 import lfutil
19 19 import lfcommands
20 20 import basestore
21 21
22 22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 23
24 24 def composelargefilematcher(match, manifest):
25 25 '''create a matcher that matches only the largefiles in the original
26 26 matcher'''
27 27 m = copy.copy(match)
28 28 lfile = lambda f: lfutil.standin(f) in manifest
29 29 m._files = filter(lfile, m._files)
30 30 m._fileroots = set(m._files)
31 31 m._always = False
32 32 origmatchfn = m.matchfn
33 33 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
34 34 return m
35 35
36 36 def composenormalfilematcher(match, manifest, exclude=None):
37 37 excluded = set()
38 38 if exclude is not None:
39 39 excluded.update(exclude)
40 40
41 41 m = copy.copy(match)
42 42 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
43 43 manifest or f in excluded)
44 44 m._files = filter(notlfile, m._files)
45 45 m._fileroots = set(m._files)
46 46 m._always = False
47 47 origmatchfn = m.matchfn
48 48 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
49 49 return m
50 50
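composelargefilematcher() and composenormalfilematcher() split one user-supplied matcher into complementary views; the command wrappers further down (for example cmdutilremove()) use them roughly like this (a sketch only, assuming matcher came from scmutil.match for the current command):

# Illustrative fragment mirroring how cmdutilremove() drives the two helpers.
manifest = repo[None].manifest()
lfmatcher = composelargefilematcher(matcher, manifest)       # matches only largefiles
normalmatcher = composenormalfilematcher(matcher, manifest)  # matches everything else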
51 51 def installnormalfilesmatchfn(manifest):
52 52 '''installmatchfn with a matchfn that ignores all largefiles'''
53 53 def overridematch(ctx, pats=(), opts=None, globbed=False,
54 54 default='relpath', badfn=None):
55 55 if opts is None:
56 56 opts = {}
57 57 match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
58 58 return composenormalfilematcher(match, manifest)
59 59 oldmatch = installmatchfn(overridematch)
60 60
61 61 def installmatchfn(f):
62 62 '''monkey patch the scmutil module with a custom match function.
63 63 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
64 64 oldmatch = scmutil.match
65 65 setattr(f, 'oldmatch', oldmatch)
66 66 scmutil.match = f
67 67 return oldmatch
68 68
69 69 def restorematchfn():
70 70 '''restores scmutil.match to what it was before installmatchfn
71 71 was called. No-op if scmutil.match is its original function.
72 72
73 73 Note that n calls to installmatchfn will require n calls to
74 74 restore the original matchfn.'''
75 75 scmutil.match = getattr(scmutil.match, 'oldmatch')
76 76
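installmatchfn() stashes the previous scmutil.match on the wrapper itself, so each restorematchfn() call unwinds exactly one level of wrapping. A usage sketch (overridematch stands in for any wrapper, as in installnormalfilesmatchfn above):

# Illustrative pairing only; overridecopy() and overriderevert() below follow this pattern.
oldmatch = installmatchfn(overridematch)   # scmutil.match is now the wrapper
try:
    pass  # run the wrapped command while the monkey patch is active
finally:
    restorematchfn()                       # scmutil.match is restored to oldmatch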
77 77 def installmatchandpatsfn(f):
78 78 oldmatchandpats = scmutil.matchandpats
79 79 setattr(f, 'oldmatchandpats', oldmatchandpats)
80 80 scmutil.matchandpats = f
81 81 return oldmatchandpats
82 82
83 83 def restorematchandpatsfn():
84 84 '''restores scmutil.matchandpats to what it was before
85 85 installmatchandpatsfn was called. No-op if scmutil.matchandpats
86 86 is its original function.
87 87
88 88 Note that n calls to installmatchandpatsfn will require n calls
89 89 to restore the original matchfn.'''
90 90 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
91 91 scmutil.matchandpats)
92 92
93 93 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
94 94 large = opts.get('large')
95 95 lfsize = lfutil.getminsize(
96 96 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
97 97
98 98 lfmatcher = None
99 99 if lfutil.islfilesrepo(repo):
100 100 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
101 101 if lfpats:
102 102 lfmatcher = match_.match(repo.root, '', list(lfpats))
103 103
104 104 lfnames = []
105 105 m = matcher
106 106
107 107 wctx = repo[None]
108 108 for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
109 109 exact = m.exact(f)
110 110 lfile = lfutil.standin(f) in wctx
111 111 nfile = f in wctx
112 112 exists = lfile or nfile
113 113
114 114 # addremove in core gets fancy with the name, add doesn't
115 115 if isaddremove:
116 116 name = m.uipath(f)
117 117 else:
118 118 name = m.rel(f)
119 119
120 120 # Don't warn the user when they attempt to add a normal tracked file.
121 121 # The normal add code will do that for us.
122 122 if exact and exists:
123 123 if lfile:
124 124 ui.warn(_('%s already a largefile\n') % name)
125 125 continue
126 126
127 127 if (exact or not exists) and not lfutil.isstandin(f):
128 128 # In case the file was removed previously, but not committed
129 129 # (issue3507)
130 130 if not repo.wvfs.exists(f):
131 131 continue
132 132
133 133 abovemin = (lfsize and
134 134 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
135 135 if large or abovemin or (lfmatcher and lfmatcher(f)):
136 136 lfnames.append(f)
137 137 if ui.verbose or not exact:
138 138 ui.status(_('adding %s as a largefile\n') % name)
139 139
140 140 bad = []
141 141
142 142 # Need to lock, otherwise there could be a race condition between
143 143 # when standins are created and added to the repo.
144 144 wlock = repo.wlock()
145 145 try:
146 146 if not opts.get('dry_run'):
147 147 standins = []
148 148 lfdirstate = lfutil.openlfdirstate(ui, repo)
149 149 for f in lfnames:
150 150 standinname = lfutil.standin(f)
151 151 lfutil.writestandin(repo, standinname, hash='',
152 152 executable=lfutil.getexecutable(repo.wjoin(f)))
153 153 standins.append(standinname)
154 154 if lfdirstate[f] == 'r':
155 155 lfdirstate.normallookup(f)
156 156 else:
157 157 lfdirstate.add(f)
158 158 lfdirstate.write()
159 159 bad += [lfutil.splitstandin(f)
160 160 for f in repo[None].add(standins)
161 161 if f in m.files()]
162 162
163 163 added = [f for f in lfnames if f not in bad]
164 164 finally:
165 165 wlock.release()
166 166 return added, bad
167 167
168 168 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
169 169 after = opts.get('after')
170 170 m = composelargefilematcher(matcher, repo[None].manifest())
171 171 try:
172 172 repo.lfstatus = True
173 173 s = repo.status(match=m, clean=not isaddremove)
174 174 finally:
175 175 repo.lfstatus = False
176 176 manifest = repo[None].manifest()
177 177 modified, added, deleted, clean = [[f for f in list
178 178 if lfutil.standin(f) in manifest]
179 179 for list in (s.modified, s.added,
180 180 s.deleted, s.clean)]
181 181
182 182 def warn(files, msg):
183 183 for f in files:
184 184 ui.warn(msg % m.rel(f))
185 185 return int(len(files) > 0)
186 186
187 187 result = 0
188 188
189 189 if after:
190 190 remove = deleted
191 191 result = warn(modified + added + clean,
192 192 _('not removing %s: file still exists\n'))
193 193 else:
194 194 remove = deleted + clean
195 195 result = warn(modified, _('not removing %s: file is modified (use -f'
196 196 ' to force removal)\n'))
197 197 result = warn(added, _('not removing %s: file has been marked for add'
198 198 ' (use forget to undo)\n')) or result
199 199
200 200 # Need to lock because standin files are deleted then removed from the
201 201 # repository and we could race in-between.
202 202 wlock = repo.wlock()
203 203 try:
204 204 lfdirstate = lfutil.openlfdirstate(ui, repo)
205 205 for f in sorted(remove):
206 206 if ui.verbose or not m.exact(f):
207 207 # addremove in core gets fancy with the name, remove doesn't
208 208 if isaddremove:
209 209 name = m.uipath(f)
210 210 else:
211 211 name = m.rel(f)
212 212 ui.status(_('removing %s\n') % name)
213 213
214 214 if not opts.get('dry_run'):
215 215 if not after:
216 216 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
217 217
218 218 if opts.get('dry_run'):
219 219 return result
220 220
221 221 remove = [lfutil.standin(f) for f in remove]
222 222 # If this is being called by addremove, let the original addremove
223 223 # function handle this.
224 224 if not isaddremove:
225 225 for f in remove:
226 226 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
227 227 repo[None].forget(remove)
228 228
229 229 for f in remove:
230 230 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
231 231 False)
232 232
233 233 lfdirstate.write()
234 234 finally:
235 235 wlock.release()
236 236
237 237 return result
238 238
239 239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 240 # appear at their right place in the manifests.
241 241 def decodepath(orig, path):
242 242 return lfutil.splitstandin(path) or path
243 243
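Since lfutil.splitstandin() returns a path with the '.hglf/' standin prefix stripped, or None for any other path, decodepath() maps manifest entries back to user-visible names; for example (values invented):

# Illustrative calls; the unused 'orig' argument is the wrapped function.
decodepath(None, '.hglf/data/big.bin')   # -> 'data/big.bin'
decodepath(None, 'README')               # -> 'README' (not a standin, returned unchanged)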
244 244 # -- Wrappers: modify existing commands --------------------------------
245 245
246 246 def overrideadd(orig, ui, repo, *pats, **opts):
247 247 if opts.get('normal') and opts.get('large'):
248 248 raise error.Abort(_('--normal cannot be used with --large'))
249 249 return orig(ui, repo, *pats, **opts)
250 250
251 251 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
252 252 # The --normal flag short circuits this override
253 253 if opts.get('normal'):
254 254 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
255 255
256 256 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
257 257 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
258 258 ladded)
259 259 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
260 260
261 261 bad.extend(f for f in lbad)
262 262 return bad
263 263
264 264 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
265 265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
266 266 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
267 267 return removelargefiles(ui, repo, False, matcher, after=after,
268 268 force=force) or result
269 269
270 270 def overridestatusfn(orig, repo, rev2, **opts):
271 271 try:
272 272 repo._repo.lfstatus = True
273 273 return orig(repo, rev2, **opts)
274 274 finally:
275 275 repo._repo.lfstatus = False
276 276
277 277 def overridestatus(orig, ui, repo, *pats, **opts):
278 278 try:
279 279 repo.lfstatus = True
280 280 return orig(ui, repo, *pats, **opts)
281 281 finally:
282 282 repo.lfstatus = False
283 283
284 284 def overridedirty(orig, repo, ignoreupdate=False):
285 285 try:
286 286 repo._repo.lfstatus = True
287 287 return orig(repo, ignoreupdate)
288 288 finally:
289 289 repo._repo.lfstatus = False
290 290
291 291 def overridelog(orig, ui, repo, *pats, **opts):
292 292 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
293 293 default='relpath', badfn=None):
294 294 """Matcher that merges root directory with .hglf, suitable for log.
295 295 It is still possible to match .hglf directly.
296 296 For any listed files run log on the standin too.
297 297 matchfn tries both the given filename and with .hglf stripped.
298 298 """
299 299 if opts is None:
300 300 opts = {}
301 301 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
302 302 badfn=badfn)
303 303 m, p = copy.copy(matchandpats)
304 304
305 305 if m.always():
306 306 # We want to match everything anyway, so there's no benefit trying
307 307 # to add standins.
308 308 return matchandpats
309 309
310 310 pats = set(p)
311 311
312 312 def fixpats(pat, tostandin=lfutil.standin):
313 313 if pat.startswith('set:'):
314 314 return pat
315 315
316 316 kindpat = match_._patsplit(pat, None)
317 317
318 318 if kindpat[0] is not None:
319 319 return kindpat[0] + ':' + tostandin(kindpat[1])
320 320 return tostandin(kindpat[1])
321 321
322 322 if m._cwd:
323 323 hglf = lfutil.shortname
324 324 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
325 325
326 326 def tostandin(f):
327 327 # The file may already be a standin, so truncate the back
328 328 # prefix and test before mangling it. This avoids turning
329 329 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
330 330 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
331 331 return f
332 332
333 333 # An absolute path is from outside the repo, so truncate the
334 334 # path to the root before building the standin. Otherwise cwd
335 335 # is somewhere in the repo, relative to root, and needs to be
336 336 # prepended before building the standin.
337 337 if os.path.isabs(m._cwd):
338 338 f = f[len(back):]
339 339 else:
340 340 f = m._cwd + '/' + f
341 341 return back + lfutil.standin(f)
342 342
343 343 pats.update(fixpats(f, tostandin) for f in p)
344 344 else:
345 345 def tostandin(f):
346 346 if lfutil.splitstandin(f):
347 347 return f
348 348 return lfutil.standin(f)
349 349 pats.update(fixpats(f, tostandin) for f in p)
350 350
351 351 for i in range(0, len(m._files)):
352 352 # Don't add '.hglf' to m.files, since that is already covered by '.'
353 353 if m._files[i] == '.':
354 354 continue
355 355 standin = lfutil.standin(m._files[i])
356 356 # If the "standin" is a directory, append instead of replace to
357 357 # support naming a directory on the command line with only
358 358 # largefiles. The original directory is kept to support normal
359 359 # files.
360 360 if standin in repo[ctx.node()]:
361 361 m._files[i] = standin
362 362 elif m._files[i] not in repo[ctx.node()] \
363 363 and repo.wvfs.isdir(standin):
364 364 m._files.append(standin)
365 365
366 366 m._fileroots = set(m._files)
367 367 m._always = False
368 368 origmatchfn = m.matchfn
369 369 def lfmatchfn(f):
370 370 lf = lfutil.splitstandin(f)
371 371 if lf is not None and origmatchfn(lf):
372 372 return True
373 373 r = origmatchfn(f)
374 374 return r
375 375 m.matchfn = lfmatchfn
376 376
377 377 ui.debug('updated patterns: %s\n' % sorted(pats))
378 378 return m, pats
379 379
380 380 # For hg log --patch, the match object is used in two different senses:
381 381 # (1) to determine what revisions should be printed out, and
382 382 # (2) to determine what files to print out diffs for.
383 383 # The magic matchandpats override should be used for case (1) but not for
384 384 # case (2).
385 385 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
386 386 wctx = repo[None]
387 387 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
388 388 return lambda rev: match
389 389
390 390 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
391 391 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
392 392 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
393 393
394 394 try:
395 395 return orig(ui, repo, *pats, **opts)
396 396 finally:
397 397 restorematchandpatsfn()
398 398 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
399 399
400 400 def overrideverify(orig, ui, repo, *pats, **opts):
401 401 large = opts.pop('large', False)
402 402 all = opts.pop('lfa', False)
403 403 contents = opts.pop('lfc', False)
404 404
405 405 result = orig(ui, repo, *pats, **opts)
406 406 if large or all or contents:
407 407 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
408 408 return result
409 409
410 410 def overridedebugstate(orig, ui, repo, *pats, **opts):
411 411 large = opts.pop('large', False)
412 412 if large:
413 413 class fakerepo(object):
414 414 dirstate = lfutil.openlfdirstate(ui, repo)
415 415 orig(ui, fakerepo, *pats, **opts)
416 416 else:
417 417 orig(ui, repo, *pats, **opts)
418 418
419 419 # Before starting the manifest merge, merge.updates will call
420 420 # _checkunknownfile to check if there are any files in the merged-in
421 421 # changeset that collide with unknown files in the working copy.
422 422 #
423 423 # The largefiles are seen as unknown, so this prevents us from merging
424 424 # in a file 'foo' if we already have a largefile with the same name.
425 425 #
426 426 # The overridden function filters the unknown files by removing any
427 427 # largefiles. This makes the merge proceed and we can then handle this
428 428 # case further in the overridden calculateupdates function below.
429 429 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
430 430 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
431 431 return False
432 432 return origfn(repo, wctx, mctx, f, f2)
433 433
434 434 # The manifest merge handles conflicts on the manifest level. We want
435 435 # to handle changes in largefile-ness of files at this level too.
436 436 #
437 437 # The strategy is to run the original calculateupdates and then process
438 438 # the action list it outputs. There are two cases we need to deal with:
439 439 #
440 440 # 1. Normal file in p1, largefile in p2. Here the largefile is
441 441 # detected via its standin file, which will enter the working copy
442 442 # with a "get" action. It is not "merge" since the standin is all
443 443 # Mercurial is concerned with at this level -- the link to the
444 444 # existing normal file is not relevant here.
445 445 #
446 446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
447 447 # since the largefile will be present in the working copy and
448 448 # different from the normal file in p2. Mercurial therefore
449 449 # triggers a merge action.
450 450 #
451 451 # In both cases, we prompt the user and emit new actions to either
452 452 # remove the standin (if the normal file was kept) or to remove the
453 453 # normal file and get the standin (if the largefile was kept). The
454 454 # default prompt answer is to use the largefile version since it was
455 455 # presumably changed on purpose.
456 456 #
457 457 # Finally, the merge.applyupdates function will then take care of
458 458 # writing the files into the working copy and lfcommands.updatelfiles
459 459 # will update the largefiles.
460 460 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
461 461 acceptremote, followcopies, matcher=None):
462 462 overwrite = force and not branchmerge
463 463 actions, diverge, renamedelete = origfn(
464 464 repo, p1, p2, pas, branchmerge, force, acceptremote,
465 465 followcopies, matcher=matcher)
466 466
467 467 if overwrite:
468 468 return actions, diverge, renamedelete
469 469
470 470 # Convert to dictionary with filename as key and action as value.
471 471 lfiles = set()
472 472 for f in actions:
473 473 splitstandin = f and lfutil.splitstandin(f)
474 474 if splitstandin in p1:
475 475 lfiles.add(splitstandin)
476 476 elif lfutil.standin(f) in p1:
477 477 lfiles.add(f)
478 478
479 479 for lfile in lfiles:
480 480 standin = lfutil.standin(lfile)
481 481 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
482 482 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
483 483 if sm in ('g', 'dc') and lm != 'r':
484 484 if sm == 'dc':
485 485 f1, f2, fa, move, anc = sargs
486 sargs = (p2[f2].flags(),)
486 sargs = (p2[f2].flags(), False)
487 487 # Case 1: normal file in the working copy, largefile in
488 488 # the second parent
489 489 usermsg = _('remote turned local normal file %s into a largefile\n'
490 490 'use (l)argefile or keep (n)ormal file?'
491 491 '$$ &Largefile $$ &Normal file') % lfile
492 492 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
493 493 actions[lfile] = ('r', None, 'replaced by standin')
494 494 actions[standin] = ('g', sargs, 'replaces standin')
495 495 else: # keep local normal file
496 496 actions[lfile] = ('k', None, 'replaces standin')
497 497 if branchmerge:
498 498 actions[standin] = ('k', None, 'replaced by non-standin')
499 499 else:
500 500 actions[standin] = ('r', None, 'replaced by non-standin')
501 501 elif lm in ('g', 'dc') and sm != 'r':
502 502 if lm == 'dc':
503 503 f1, f2, fa, move, anc = largs
504 largs = (p2[f2].flags(),)
504 largs = (p2[f2].flags(), False)
505 505 # Case 2: largefile in the working copy, normal file in
506 506 # the second parent
507 507 usermsg = _('remote turned local largefile %s into a normal file\n'
508 508 'keep (l)argefile or use (n)ormal file?'
509 509 '$$ &Largefile $$ &Normal file') % lfile
510 510 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
511 511 if branchmerge:
512 512 # largefile can be restored from standin safely
513 513 actions[lfile] = ('k', None, 'replaced by standin')
514 514 actions[standin] = ('k', None, 'replaces standin')
515 515 else:
516 516 # "lfile" should be marked as "removed" without
517 517 # removal of itself
518 518 actions[lfile] = ('lfmr', None,
519 519 'forget non-standin largefile')
520 520
521 521 # linear-merge should treat this largefile as 're-added'
522 522 actions[standin] = ('a', None, 'keep standin')
523 523 else: # pick remote normal file
524 524 actions[lfile] = ('g', largs, 'replaces standin')
525 525 actions[standin] = ('r', None, 'replaced by non-standin')
526 526
527 527 return actions, diverge, renamedelete
528 528
529 529 def mergerecordupdates(orig, repo, actions, branchmerge):
530 530 if 'lfmr' in actions:
531 531 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
532 532 for lfile, args, msg in actions['lfmr']:
533 533 # this should be executed before 'orig', to execute 'remove'
534 534 # before all other actions
535 535 repo.dirstate.remove(lfile)
536 536 # make sure lfile doesn't get synclfdirstate'd as normal
537 537 lfdirstate.add(lfile)
538 538 lfdirstate.write()
539 539
540 540 return orig(repo, actions, branchmerge)
541 541
542 542
543 543 # Override filemerge to prompt the user about how they wish to merge
544 544 # largefiles. This will handle identical edits without prompting the user.
545 545 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
546 546 labels=None):
547 547 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
548 548 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
549 549 labels=labels)
550 550
551 551 ahash = fca.data().strip().lower()
552 552 dhash = fcd.data().strip().lower()
553 553 ohash = fco.data().strip().lower()
554 554 if (ohash != ahash and
555 555 ohash != dhash and
556 556 (dhash == ahash or
557 557 repo.ui.promptchoice(
558 558 _('largefile %s has a merge conflict\nancestor was %s\n'
559 559 'keep (l)ocal %s or\ntake (o)ther %s?'
560 560 '$$ &Local $$ &Other') %
561 561 (lfutil.splitstandin(orig), ahash, dhash, ohash),
562 562 0) == 1)):
563 563 repo.wwrite(fcd.path(), fco.data(), fco.flags())
564 564 return True, 0, False
565 565
566 566 def copiespathcopies(orig, ctx1, ctx2, match=None):
567 567 copies = orig(ctx1, ctx2, match=match)
568 568 updated = {}
569 569
570 570 for k, v in copies.iteritems():
571 571 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
572 572
573 573 return updated
574 574
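Its effect on a copy map is simply to translate standin paths back to largefile names on both sides of each entry, e.g. (values invented):

# Illustrative input/output for copiespathcopies' rewriting step.
copies  = {'.hglf/new.bin': '.hglf/old.bin', 'doc/readme': 'README'}
updated = {'new.bin': 'old.bin', 'doc/readme': 'README'}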
575 575 # Copy first changes the matchers to match standins instead of
576 576 # largefiles. Then it overrides util.copyfile; the overriding function
577 577 # checks whether the destination largefile already exists. It also keeps a
578 578 # list of copied files so that the largefiles can be copied and the
579 579 # dirstate updated.
580 580 def overridecopy(orig, ui, repo, pats, opts, rename=False):
581 581 # doesn't remove largefile on rename
582 582 if len(pats) < 2:
583 583 # this isn't legal, let the original function deal with it
584 584 return orig(ui, repo, pats, opts, rename)
585 585
586 586 # This could copy both lfiles and normal files in one command,
587 587 # but we don't want to do that. First replace their matcher to
588 588 # only match normal files and run it, then replace it to just
589 589 # match largefiles and run it again.
590 590 nonormalfiles = False
591 591 nolfiles = False
592 592 installnormalfilesmatchfn(repo[None].manifest())
593 593 try:
594 594 result = orig(ui, repo, pats, opts, rename)
595 595 except error.Abort as e:
596 596 if str(e) != _('no files to copy'):
597 597 raise e
598 598 else:
599 599 nonormalfiles = True
600 600 result = 0
601 601 finally:
602 602 restorematchfn()
603 603
604 604 # The first rename can cause our current working directory to be removed.
605 605 # In that case there is nothing left to copy/rename so just quit.
606 606 try:
607 607 repo.getcwd()
608 608 except OSError:
609 609 return result
610 610
611 611 def makestandin(relpath):
612 612 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
613 613 return os.path.join(repo.wjoin(lfutil.standin(path)))
614 614
615 615 fullpats = scmutil.expandpats(pats)
616 616 dest = fullpats[-1]
617 617
618 618 if os.path.isdir(dest):
619 619 if not os.path.isdir(makestandin(dest)):
620 620 os.makedirs(makestandin(dest))
621 621
622 622 try:
623 623 # When we call orig below it creates the standins but we don't add
624 624 # them to the dirstate until later, so lock during that time.
625 625 wlock = repo.wlock()
626 626
627 627 manifest = repo[None].manifest()
628 628 def overridematch(ctx, pats=(), opts=None, globbed=False,
629 629 default='relpath', badfn=None):
630 630 if opts is None:
631 631 opts = {}
632 632 newpats = []
633 633 # The patterns were previously mangled to add the standin
634 634 # directory; we need to remove that now
635 635 for pat in pats:
636 636 if match_.patkind(pat) is None and lfutil.shortname in pat:
637 637 newpats.append(pat.replace(lfutil.shortname, ''))
638 638 else:
639 639 newpats.append(pat)
640 640 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
641 641 m = copy.copy(match)
642 642 lfile = lambda f: lfutil.standin(f) in manifest
643 643 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
644 644 m._fileroots = set(m._files)
645 645 origmatchfn = m.matchfn
646 646 m.matchfn = lambda f: (lfutil.isstandin(f) and
647 647 (f in manifest) and
648 648 origmatchfn(lfutil.splitstandin(f)) or
649 649 None)
650 650 return m
651 651 oldmatch = installmatchfn(overridematch)
652 652 listpats = []
653 653 for pat in pats:
654 654 if match_.patkind(pat) is not None:
655 655 listpats.append(pat)
656 656 else:
657 657 listpats.append(makestandin(pat))
658 658
659 659 try:
660 660 origcopyfile = util.copyfile
661 661 copiedfiles = []
662 662 def overridecopyfile(src, dest):
663 663 if (lfutil.shortname in src and
664 664 dest.startswith(repo.wjoin(lfutil.shortname))):
665 665 destlfile = dest.replace(lfutil.shortname, '')
666 666 if not opts['force'] and os.path.exists(destlfile):
667 667 raise IOError('',
668 668 _('destination largefile already exists'))
669 669 copiedfiles.append((src, dest))
670 670 origcopyfile(src, dest)
671 671
672 672 util.copyfile = overridecopyfile
673 673 result += orig(ui, repo, listpats, opts, rename)
674 674 finally:
675 675 util.copyfile = origcopyfile
676 676
677 677 lfdirstate = lfutil.openlfdirstate(ui, repo)
678 678 for (src, dest) in copiedfiles:
679 679 if (lfutil.shortname in src and
680 680 dest.startswith(repo.wjoin(lfutil.shortname))):
681 681 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
682 682 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
683 683 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
684 684 if not os.path.isdir(destlfiledir):
685 685 os.makedirs(destlfiledir)
686 686 if rename:
687 687 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
688 688
689 689 # The file is gone, but this deletes any empty parent
690 690 # directories as a side-effect.
691 691 util.unlinkpath(repo.wjoin(srclfile), True)
692 692 lfdirstate.remove(srclfile)
693 693 else:
694 694 util.copyfile(repo.wjoin(srclfile),
695 695 repo.wjoin(destlfile))
696 696
697 697 lfdirstate.add(destlfile)
698 698 lfdirstate.write()
699 699 except error.Abort as e:
700 700 if str(e) != _('no files to copy'):
701 701 raise e
702 702 else:
703 703 nolfiles = True
704 704 finally:
705 705 restorematchfn()
706 706 wlock.release()
707 707
708 708 if nolfiles and nonormalfiles:
709 709 raise error.Abort(_('no files to copy'))
710 710
711 711 return result
712 712
713 713 # When the user calls revert, we have to be careful to not revert any
714 714 # changes to other largefiles accidentally. This means we have to keep
715 715 # track of the largefiles that are being reverted so we only pull down
716 716 # the necessary largefiles.
717 717 #
718 718 # Standins are only updated (to match the hash of largefiles) before
719 719 # commits. Update the standins then run the original revert, changing
720 720 # the matcher to hit standins instead of largefiles. Based on the
721 721 # resulting standins update the largefiles.
722 722 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
723 723 # Because we put the standins in a bad state (by updating them)
724 724 # and then return them to a correct state we need to lock to
725 725 # prevent others from changing them in their incorrect state.
726 726 wlock = repo.wlock()
727 727 try:
728 728 lfdirstate = lfutil.openlfdirstate(ui, repo)
729 729 s = lfutil.lfdirstatestatus(lfdirstate, repo)
730 730 lfdirstate.write()
731 731 for lfile in s.modified:
732 732 lfutil.updatestandin(repo, lfutil.standin(lfile))
733 733 for lfile in s.deleted:
734 734 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
735 735 os.unlink(repo.wjoin(lfutil.standin(lfile)))
736 736
737 737 oldstandins = lfutil.getstandinsstate(repo)
738 738
739 739 def overridematch(mctx, pats=(), opts=None, globbed=False,
740 740 default='relpath', badfn=None):
741 741 if opts is None:
742 742 opts = {}
743 743 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
744 744 m = copy.copy(match)
745 745
746 746 # revert supports recursing into subrepos, and though largefiles
747 747 # currently doesn't work correctly in that case, this match is
748 748 # called, so the lfdirstate above may not be the correct one for
749 749 # this invocation of match.
750 750 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
751 751 False)
752 752
753 753 def tostandin(f):
754 754 standin = lfutil.standin(f)
755 755 if standin in ctx or standin in mctx:
756 756 return standin
757 757 elif standin in repo[None] or lfdirstate[f] == 'r':
758 758 return None
759 759 return f
760 760 m._files = [tostandin(f) for f in m._files]
761 761 m._files = [f for f in m._files if f is not None]
762 762 m._fileroots = set(m._files)
763 763 origmatchfn = m.matchfn
764 764 def matchfn(f):
765 765 if lfutil.isstandin(f):
766 766 return (origmatchfn(lfutil.splitstandin(f)) and
767 767 (f in ctx or f in mctx))
768 768 return origmatchfn(f)
769 769 m.matchfn = matchfn
770 770 return m
771 771 oldmatch = installmatchfn(overridematch)
772 772 try:
773 773 orig(ui, repo, ctx, parents, *pats, **opts)
774 774 finally:
775 775 restorematchfn()
776 776
777 777 newstandins = lfutil.getstandinsstate(repo)
778 778 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
779 779 # lfdirstate should be 'normallookup'-ed for updated files,
780 780 # because reverting doesn't touch dirstate for 'normal' files
781 781 # when target revision is explicitly specified: in such case,
782 782 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
783 783 # of target (standin) file.
784 784 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
785 785 normallookup=True)
786 786
787 787 finally:
788 788 wlock.release()
789 789
790 790 # after pulling changesets, we need to take some extra care to get
791 791 # largefiles updated remotely
792 792 def overridepull(orig, ui, repo, source=None, **opts):
793 793 revsprepull = len(repo)
794 794 if not source:
795 795 source = 'default'
796 796 repo.lfpullsource = source
797 797 result = orig(ui, repo, source, **opts)
798 798 revspostpull = len(repo)
799 799 lfrevs = opts.get('lfrev', [])
800 800 if opts.get('all_largefiles'):
801 801 lfrevs.append('pulled()')
802 802 if lfrevs and revspostpull > revsprepull:
803 803 numcached = 0
804 804 repo.firstpulled = revsprepull # for pulled() revset expression
805 805 try:
806 806 for rev in scmutil.revrange(repo, lfrevs):
807 807 ui.note(_('pulling largefiles for revision %s\n') % rev)
808 808 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
809 809 numcached += len(cached)
810 810 finally:
811 811 del repo.firstpulled
812 812 ui.status(_("%d largefiles cached\n") % numcached)
813 813 return result
814 814
815 815 revsetpredicate = revset.extpredicate()
816 816
817 817 @revsetpredicate('pulled()')
818 818 def pulledrevsetsymbol(repo, subset, x):
819 819 """Changesets that have just been pulled.
820 820
821 821 Only available with largefiles from pull --lfrev expressions.
822 822
823 823 .. container:: verbose
824 824
825 825 Some examples:
826 826
827 827 - pull largefiles for all new changesets::
828 828
829 829 hg pull --lfrev "pulled()"
830 830
831 831 - pull largefiles for all new branch heads::
832 832
833 833 hg pull --lfrev "head(pulled()) and not closed()"
834 834
835 835 """
836 836
837 837 try:
838 838 firstpulled = repo.firstpulled
839 839 except AttributeError:
840 840 raise error.Abort(_("pulled() only available in --lfrev"))
841 841 return revset.baseset([r for r in subset if r >= firstpulled])
842 842
843 843 def overrideclone(orig, ui, source, dest=None, **opts):
844 844 d = dest
845 845 if d is None:
846 846 d = hg.defaultdest(source)
847 847 if opts.get('all_largefiles') and not hg.islocal(d):
848 848 raise error.Abort(_(
849 849 '--all-largefiles is incompatible with non-local destination %s') %
850 850 d)
851 851
852 852 return orig(ui, source, dest, **opts)
853 853
854 854 def hgclone(orig, ui, opts, *args, **kwargs):
855 855 result = orig(ui, opts, *args, **kwargs)
856 856
857 857 if result is not None:
858 858 sourcerepo, destrepo = result
859 859 repo = destrepo.local()
860 860
861 861 # When cloning to a remote repo (like through SSH), no repo is available
862 862 # from the peer. Therefore the largefiles can't be downloaded and the
863 863 # hgrc can't be updated.
864 864 if not repo:
865 865 return result
866 866
867 867 # If largefiles is required for this repo, permanently enable it locally
868 868 if 'largefiles' in repo.requirements:
869 869 fp = repo.vfs('hgrc', 'a', text=True)
870 870 try:
871 871 fp.write('\n[extensions]\nlargefiles=\n')
872 872 finally:
873 873 fp.close()
874 874
875 875 # Caching is implicitly limited to the 'rev' option, since the dest repo was
876 876 # truncated at that point. The user may expect a download count with
877 877 # this option, so attempt it whether or not this is a largefile repo.
878 878 if opts.get('all_largefiles'):
879 879 success, missing = lfcommands.downloadlfiles(ui, repo, None)
880 880
881 881 if missing != 0:
882 882 return None
883 883
884 884 return result
885 885
886 886 def overriderebase(orig, ui, repo, **opts):
887 887 if not util.safehasattr(repo, '_largefilesenabled'):
888 888 return orig(ui, repo, **opts)
889 889
890 890 resuming = opts.get('continue')
891 891 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
892 892 repo._lfstatuswriters.append(lambda *msg, **opts: None)
893 893 try:
894 894 return orig(ui, repo, **opts)
895 895 finally:
896 896 repo._lfstatuswriters.pop()
897 897 repo._lfcommithooks.pop()
898 898
899 899 def overridearchivecmd(orig, ui, repo, dest, **opts):
900 900 repo.unfiltered().lfstatus = True
901 901
902 902 try:
903 903 return orig(ui, repo.unfiltered(), dest, **opts)
904 904 finally:
905 905 repo.unfiltered().lfstatus = False
906 906
907 907 def hgwebarchive(orig, web, req, tmpl):
908 908 web.repo.lfstatus = True
909 909
910 910 try:
911 911 return orig(web, req, tmpl)
912 912 finally:
913 913 web.repo.lfstatus = False
914 914
915 915 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
916 916 prefix='', mtime=None, subrepos=None):
917 917 # For some reason setting repo.lfstatus in hgwebarchive only changes the
918 918 # unfiltered repo's attr, so check that as well.
919 919 if not repo.lfstatus and not repo.unfiltered().lfstatus:
920 920 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
921 921 subrepos)
922 922
923 923 # No need to lock because we are only reading history and
924 924 # largefile caches, neither of which are modified.
925 925 if node is not None:
926 926 lfcommands.cachelfiles(repo.ui, repo, node)
927 927
928 928 if kind not in archival.archivers:
929 929 raise error.Abort(_("unknown archive type '%s'") % kind)
930 930
931 931 ctx = repo[node]
932 932
933 933 if kind == 'files':
934 934 if prefix:
935 935 raise error.Abort(
936 936 _('cannot give prefix when archiving to files'))
937 937 else:
938 938 prefix = archival.tidyprefix(dest, kind, prefix)
939 939
940 940 def write(name, mode, islink, getdata):
941 941 if matchfn and not matchfn(name):
942 942 return
943 943 data = getdata()
944 944 if decode:
945 945 data = repo.wwritedata(name, data)
946 946 archiver.addfile(prefix + name, mode, islink, data)
947 947
948 948 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
949 949
950 950 if repo.ui.configbool("ui", "archivemeta", True):
951 951 write('.hg_archival.txt', 0o644, False,
952 952 lambda: archival.buildmetadata(ctx))
953 953
954 954 for f in ctx:
955 955 ff = ctx.flags(f)
956 956 getdata = ctx[f].data
957 957 if lfutil.isstandin(f):
958 958 if node is not None:
959 959 path = lfutil.findfile(repo, getdata().strip())
960 960
961 961 if path is None:
962 962 raise error.Abort(
963 963 _('largefile %s not found in repo store or system cache')
964 964 % lfutil.splitstandin(f))
965 965 else:
966 966 path = lfutil.splitstandin(f)
967 967
968 968 f = lfutil.splitstandin(f)
969 969
970 970 def getdatafn():
971 971 fd = None
972 972 try:
973 973 fd = open(path, 'rb')
974 974 return fd.read()
975 975 finally:
976 976 if fd:
977 977 fd.close()
978 978
979 979 getdata = getdatafn
980 980 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
981 981
982 982 if subrepos:
983 983 for subpath in sorted(ctx.substate):
984 984 sub = ctx.workingsub(subpath)
985 985 submatch = match_.narrowmatcher(subpath, matchfn)
986 986 sub._repo.lfstatus = True
987 987 sub.archive(archiver, prefix, submatch)
988 988
989 989 archiver.done()
990 990
991 991 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
992 992 if not repo._repo.lfstatus:
993 993 return orig(repo, archiver, prefix, match)
994 994
995 995 repo._get(repo._state + ('hg',))
996 996 rev = repo._state[1]
997 997 ctx = repo._repo[rev]
998 998
999 999 if ctx.node() is not None:
1000 1000 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1001 1001
1002 1002 def write(name, mode, islink, getdata):
1003 1003 # At this point, the standin has been replaced with the largefile name,
1004 1004 # so the normal matcher works here without the lfutil variants.
1005 1005 if match and not match(f):
1006 1006 return
1007 1007 data = getdata()
1008 1008
1009 1009 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1010 1010
1011 1011 for f in ctx:
1012 1012 ff = ctx.flags(f)
1013 1013 getdata = ctx[f].data
1014 1014 if lfutil.isstandin(f):
1015 1015 if ctx.node() is not None:
1016 1016 path = lfutil.findfile(repo._repo, getdata().strip())
1017 1017
1018 1018 if path is None:
1019 1019 raise error.Abort(
1020 1020 _('largefile %s not found in repo store or system cache')
1021 1021 % lfutil.splitstandin(f))
1022 1022 else:
1023 1023 path = lfutil.splitstandin(f)
1024 1024
1025 1025 f = lfutil.splitstandin(f)
1026 1026
1027 1027 def getdatafn():
1028 1028 fd = None
1029 1029 try:
1030 1030 fd = open(os.path.join(prefix, path), 'rb')
1031 1031 return fd.read()
1032 1032 finally:
1033 1033 if fd:
1034 1034 fd.close()
1035 1035
1036 1036 getdata = getdatafn
1037 1037
1038 1038 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1039 1039
1040 1040 for subpath in sorted(ctx.substate):
1041 1041 sub = ctx.workingsub(subpath)
1042 1042 submatch = match_.narrowmatcher(subpath, match)
1043 1043 sub._repo.lfstatus = True
1044 1044 sub.archive(archiver, prefix + repo._path + '/', submatch)
1045 1045
1046 1046 # If a largefile is modified, the change is not reflected in its
1047 1047 # standin until a commit. cmdutil.bailifchanged() raises an exception
1048 1048 # if the repo has uncommitted changes. Wrap it to also check if
1049 1049 # largefiles were changed. This is used by bisect, backout and fetch.
1050 1050 def overridebailifchanged(orig, repo, *args, **kwargs):
1051 1051 orig(repo, *args, **kwargs)
1052 1052 repo.lfstatus = True
1053 1053 s = repo.status()
1054 1054 repo.lfstatus = False
1055 1055 if s.modified or s.added or s.removed or s.deleted:
1056 1056 raise error.Abort(_('uncommitted changes'))
1057 1057
1058 1058 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1059 1059 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1060 1060 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1061 1061 m = composelargefilematcher(match, repo[None].manifest())
1062 1062
1063 1063 try:
1064 1064 repo.lfstatus = True
1065 1065 s = repo.status(match=m, clean=True)
1066 1066 finally:
1067 1067 repo.lfstatus = False
1068 1068 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1069 1069 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1070 1070
1071 1071 for f in forget:
1072 1072 if lfutil.standin(f) not in repo.dirstate and not \
1073 1073 repo.wvfs.isdir(lfutil.standin(f)):
1074 1074 ui.warn(_('not removing %s: file is already untracked\n')
1075 1075 % m.rel(f))
1076 1076 bad.append(f)
1077 1077
1078 1078 for f in forget:
1079 1079 if ui.verbose or not m.exact(f):
1080 1080 ui.status(_('removing %s\n') % m.rel(f))
1081 1081
1082 1082 # Need to lock because standin files are deleted then removed from the
1083 1083 # repository and we could race in-between.
1084 1084 wlock = repo.wlock()
1085 1085 try:
1086 1086 lfdirstate = lfutil.openlfdirstate(ui, repo)
1087 1087 for f in forget:
1088 1088 if lfdirstate[f] == 'a':
1089 1089 lfdirstate.drop(f)
1090 1090 else:
1091 1091 lfdirstate.remove(f)
1092 1092 lfdirstate.write()
1093 1093 standins = [lfutil.standin(f) for f in forget]
1094 1094 for f in standins:
1095 1095 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1096 1096 rejected = repo[None].forget(standins)
1097 1097 finally:
1098 1098 wlock.release()
1099 1099
1100 1100 bad.extend(f for f in rejected if f in m.files())
1101 1101 forgot.extend(f for f in forget if f not in rejected)
1102 1102 return bad, forgot
1103 1103
1104 1104 def _getoutgoings(repo, other, missing, addfunc):
1105 1105 """get pairs of filename and largefile hash in outgoing revisions
1106 1106 in 'missing'.
1107 1107
1108 1108 largefiles already existing on 'other' repository are ignored.
1109 1109
1110 1110 'addfunc' is invoked with each unique pairs of filename and
1111 1111 largefile hash value.
1112 1112 """
1113 1113 knowns = set()
1114 1114 lfhashes = set()
1115 1115 def dedup(fn, lfhash):
1116 1116 k = (fn, lfhash)
1117 1117 if k not in knowns:
1118 1118 knowns.add(k)
1119 1119 lfhashes.add(lfhash)
1120 1120 lfutil.getlfilestoupload(repo, missing, dedup)
1121 1121 if lfhashes:
1122 1122 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1123 1123 for fn, lfhash in knowns:
1124 1124 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1125 1125 addfunc(fn, lfhash)
1126 1126
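outgoinghook() and summaryremotehook() below both drive this helper with a small collecting callback; stripped down, the pattern looks like this (the peer 'other' and the 'missing' changeset list are placeholders):

# Illustrative fragment mirroring summaryremotehook().
toupload, lfhashes = set(), set()
def addfunc(fn, lfhash):
    toupload.add(fn)      # file names to report
    lfhashes.add(lfhash)  # distinct largefile hashes absent from 'other'
_getoutgoings(repo, other, missing, addfunc)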
1127 1127 def outgoinghook(ui, repo, other, opts, missing):
1128 1128 if opts.pop('large', None):
1129 1129 lfhashes = set()
1130 1130 if ui.debugflag:
1131 1131 toupload = {}
1132 1132 def addfunc(fn, lfhash):
1133 1133 if fn not in toupload:
1134 1134 toupload[fn] = []
1135 1135 toupload[fn].append(lfhash)
1136 1136 lfhashes.add(lfhash)
1137 1137 def showhashes(fn):
1138 1138 for lfhash in sorted(toupload[fn]):
1139 1139 ui.debug(' %s\n' % (lfhash))
1140 1140 else:
1141 1141 toupload = set()
1142 1142 def addfunc(fn, lfhash):
1143 1143 toupload.add(fn)
1144 1144 lfhashes.add(lfhash)
1145 1145 def showhashes(fn):
1146 1146 pass
1147 1147 _getoutgoings(repo, other, missing, addfunc)
1148 1148
1149 1149 if not toupload:
1150 1150 ui.status(_('largefiles: no files to upload\n'))
1151 1151 else:
1152 1152 ui.status(_('largefiles to upload (%d entities):\n')
1153 1153 % (len(lfhashes)))
1154 1154 for file in sorted(toupload):
1155 1155 ui.status(lfutil.splitstandin(file) + '\n')
1156 1156 showhashes(file)
1157 1157 ui.status('\n')
1158 1158
1159 1159 def summaryremotehook(ui, repo, opts, changes):
1160 1160 largeopt = opts.get('large', False)
1161 1161 if changes is None:
1162 1162 if largeopt:
1163 1163 return (False, True) # only outgoing check is needed
1164 1164 else:
1165 1165 return (False, False)
1166 1166 elif largeopt:
1167 1167 url, branch, peer, outgoing = changes[1]
1168 1168 if peer is None:
1169 1169 # i18n: column positioning for "hg summary"
1170 1170 ui.status(_('largefiles: (no remote repo)\n'))
1171 1171 return
1172 1172
1173 1173 toupload = set()
1174 1174 lfhashes = set()
1175 1175 def addfunc(fn, lfhash):
1176 1176 toupload.add(fn)
1177 1177 lfhashes.add(lfhash)
1178 1178 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1179 1179
1180 1180 if not toupload:
1181 1181 # i18n: column positioning for "hg summary"
1182 1182 ui.status(_('largefiles: (no files to upload)\n'))
1183 1183 else:
1184 1184 # i18n: column positioning for "hg summary"
1185 1185 ui.status(_('largefiles: %d entities for %d files to upload\n')
1186 1186 % (len(lfhashes), len(toupload)))
1187 1187
1188 1188 def overridesummary(orig, ui, repo, *pats, **opts):
1189 1189 try:
1190 1190 repo.lfstatus = True
1191 1191 orig(ui, repo, *pats, **opts)
1192 1192 finally:
1193 1193 repo.lfstatus = False
1194 1194
1195 1195 def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
1196 1196 similarity=None):
1197 1197 if opts is None:
1198 1198 opts = {}
1199 1199 if not lfutil.islfilesrepo(repo):
1200 1200 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1201 1201 # Get the list of missing largefiles so we can remove them
1202 1202 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1203 1203 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1204 1204 False, False, False)
1205 1205
1206 1206 # Call into the normal remove code, but we want the original addremove to
1207 1207 # handle removing the standin. Monkey patching here makes sure
1208 1208 # we don't remove the standin in the largefiles code, preventing a very
1209 1209 # confused state later.
1210 1210 if s.deleted:
1211 1211 m = copy.copy(matcher)
1212 1212
1213 1213 # The m._files and m._map attributes are not changed to the deleted list
1214 1214 # because that affects the m.exact() test, which in turn governs whether
1215 1215 # or not the file name is printed, and how. Simply limit the original
1216 1216 # matches to those in the deleted status list.
1217 1217 matchfn = m.matchfn
1218 1218 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1219 1219
1220 1220 removelargefiles(repo.ui, repo, True, m, **opts)
1221 1221 # Call into the normal add code, and any files that *should* be added as
1222 1222 # largefiles will be
1223 1223 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1224 1224 # Now that we've handled largefiles, hand off to the original addremove
1225 1225 # function to take care of the rest. Make sure it doesn't do anything with
1226 1226 # largefiles by passing a matcher that will ignore them.
1227 1227 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1228 1228 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1229 1229
1230 1230 # Calling purge with --all will cause the largefiles to be deleted.
1231 1231 # Override repo.status to prevent this from happening.
1232 1232 def overridepurge(orig, ui, repo, *dirs, **opts):
1233 1233 # XXX Monkey patching a repoview will not work. The assigned attribute will
1234 1234 # be set on the unfiltered repo, but we will only lookup attributes in the
1235 1235 # unfiltered repo if the lookup in the repoview object itself fails. As the
1236 1236 # monkey patched method exists on the repoview class the lookup will not
1237 1237 # fail. As a result, the original version will shadow the monkey patched
1238 1238 # one, defeating the monkey patch.
1239 1239 #
1240 1240 # As a workaround, we use an unfiltered repo here. We should do something
1241 1241 # cleaner instead.
1242 1242 repo = repo.unfiltered()
1243 1243 oldstatus = repo.status
1244 1244 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1245 1245 clean=False, unknown=False, listsubrepos=False):
1246 1246 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1247 1247 listsubrepos)
1248 1248 lfdirstate = lfutil.openlfdirstate(ui, repo)
1249 1249 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1250 1250 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1251 1251 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1252 1252 unknown, ignored, r.clean)
1253 1253 repo.status = overridestatus
1254 1254 orig(ui, repo, *dirs, **opts)
1255 1255 repo.status = oldstatus
1256 1256 def overriderollback(orig, ui, repo, **opts):
1257 1257 wlock = repo.wlock()
1258 1258 try:
1259 1259 before = repo.dirstate.parents()
1260 1260 orphans = set(f for f in repo.dirstate
1261 1261 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1262 1262 result = orig(ui, repo, **opts)
1263 1263 after = repo.dirstate.parents()
1264 1264 if before == after:
1265 1265 return result # no need to restore standins
1266 1266
1267 1267 pctx = repo['.']
1268 1268 for f in repo.dirstate:
1269 1269 if lfutil.isstandin(f):
1270 1270 orphans.discard(f)
1271 1271 if repo.dirstate[f] == 'r':
1272 1272 repo.wvfs.unlinkpath(f, ignoremissing=True)
1273 1273 elif f in pctx:
1274 1274 fctx = pctx[f]
1275 1275 repo.wwrite(f, fctx.data(), fctx.flags())
1276 1276 else:
1277 1277 # content of standin is not so important in 'a',
1278 1278 # 'm' or 'n' (coming from the 2nd parent) cases
1279 1279 lfutil.writestandin(repo, f, '', False)
1280 1280 for standin in orphans:
1281 1281 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1282 1282
1283 1283 lfdirstate = lfutil.openlfdirstate(ui, repo)
1284 1284 orphans = set(lfdirstate)
1285 1285 lfiles = lfutil.listlfiles(repo)
1286 1286 for file in lfiles:
1287 1287 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1288 1288 orphans.discard(file)
1289 1289 for lfile in orphans:
1290 1290 lfdirstate.drop(lfile)
1291 1291 lfdirstate.write()
1292 1292 finally:
1293 1293 wlock.release()
1294 1294 return result
1295 1295
1296 1296 def overridetransplant(orig, ui, repo, *revs, **opts):
1297 1297 resuming = opts.get('continue')
1298 1298 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1299 1299 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1300 1300 try:
1301 1301 result = orig(ui, repo, *revs, **opts)
1302 1302 finally:
1303 1303 repo._lfstatuswriters.pop()
1304 1304 repo._lfcommithooks.pop()
1305 1305 return result
1306 1306
1307 1307 def overridecat(orig, ui, repo, file1, *pats, **opts):
1308 1308 ctx = scmutil.revsingle(repo, opts.get('rev'))
1309 1309 err = 1
1310 1310 notbad = set()
1311 1311 m = scmutil.match(ctx, (file1,) + pats, opts)
1312 1312 origmatchfn = m.matchfn
1313 1313 def lfmatchfn(f):
1314 1314 if origmatchfn(f):
1315 1315 return True
1316 1316 lf = lfutil.splitstandin(f)
1317 1317 if lf is None:
1318 1318 return False
1319 1319 notbad.add(lf)
1320 1320 return origmatchfn(lf)
1321 1321 m.matchfn = lfmatchfn
1322 1322 origbadfn = m.bad
1323 1323 def lfbadfn(f, msg):
1324 1324 if not f in notbad:
1325 1325 origbadfn(f, msg)
1326 1326 m.bad = lfbadfn
1327 1327
1328 1328 origvisitdirfn = m.visitdir
1329 1329 def lfvisitdirfn(dir):
1330 1330 if dir == lfutil.shortname:
1331 1331 return True
1332 1332 ret = origvisitdirfn(dir)
1333 1333 if ret:
1334 1334 return ret
1335 1335 lf = lfutil.splitstandin(dir)
1336 1336 if lf is None:
1337 1337 return False
1338 1338 return origvisitdirfn(lf)
1339 1339 m.visitdir = lfvisitdirfn
1340 1340
1341 1341 for f in ctx.walk(m):
1342 1342 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1343 1343 pathname=f)
1344 1344 lf = lfutil.splitstandin(f)
1345 1345 if lf is None or origmatchfn(f):
1346 1346 # duplicating unreachable code from commands.cat
1347 1347 data = ctx[f].data()
1348 1348 if opts.get('decode'):
1349 1349 data = repo.wwritedata(f, data)
1350 1350 fp.write(data)
1351 1351 else:
1352 1352 hash = lfutil.readstandin(repo, lf, ctx.rev())
1353 1353 if not lfutil.inusercache(repo.ui, hash):
1354 1354 store = basestore._openstore(repo)
1355 1355 success, missing = store.get([(lf, hash)])
1356 1356 if len(success) != 1:
1357 1357 raise error.Abort(
1358 1358 _('largefile %s is not in cache and could not be '
1359 1359 'downloaded') % lf)
1360 1360 path = lfutil.usercachepath(repo.ui, hash)
1361 1361 fpin = open(path, "rb")
1362 1362 for chunk in util.filechunkiter(fpin, 128 * 1024):
1363 1363 fp.write(chunk)
1364 1364 fpin.close()
1365 1365 fp.close()
1366 1366 err = 0
1367 1367 return err
1368 1368
1369 1369 def mergeupdate(orig, repo, node, branchmerge, force,
1370 1370 *args, **kwargs):
1371 1371 matcher = kwargs.get('matcher', None)
1372 1372 # note if this is a partial update
1373 1373 partial = matcher and not matcher.always()
1374 1374 wlock = repo.wlock()
1375 1375 try:
1376 1376 # branch | | |
1377 1377 # merge | force | partial | action
1378 1378 # -------+-------+---------+--------------
1379 1379 # x | x | x | linear-merge
1380 1380 # o | x | x | branch-merge
1381 1381 # x | o | x | overwrite (as clean update)
1382 1382 # o | o | x | force-branch-merge (*1)
1383 1383 # x | x | o | (*)
1384 1384 # o | x | o | (*)
1385 1385 # x | o | o | overwrite (as revert)
1386 1386 # o | o | o | (*)
1387 1387 #
1388 1388 # (*) don't care
1389 1389 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1390 1390
1391 1391 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1392 1392 unsure, s = lfdirstate.status(match_.always(repo.root,
1393 1393 repo.getcwd()),
1394 1394 [], False, False, False)
1395 1395 pctx = repo['.']
1396 1396 for lfile in unsure + s.modified:
1397 1397 lfileabs = repo.wvfs.join(lfile)
1398 1398 if not os.path.exists(lfileabs):
1399 1399 continue
1400 1400 lfhash = lfutil.hashrepofile(repo, lfile)
1401 1401 standin = lfutil.standin(lfile)
1402 1402 lfutil.writestandin(repo, standin, lfhash,
1403 1403 lfutil.getexecutable(lfileabs))
1404 1404 if (standin in pctx and
1405 1405 lfhash == lfutil.readstandin(repo, lfile, '.')):
1406 1406 lfdirstate.normal(lfile)
1407 1407 for lfile in s.added:
1408 1408 lfutil.updatestandin(repo, lfutil.standin(lfile))
1409 1409 lfdirstate.write()
1410 1410
1411 1411 oldstandins = lfutil.getstandinsstate(repo)
1412 1412
1413 1413 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1414 1414
1415 1415 newstandins = lfutil.getstandinsstate(repo)
1416 1416 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1417 1417 if branchmerge or force or partial:
1418 1418 filelist.extend(s.deleted + s.removed)
1419 1419
1420 1420 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1421 1421 normallookup=partial)
1422 1422
1423 1423 return result
1424 1424 finally:
1425 1425 wlock.release()
1426 1426
1427 1427 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1428 1428 result = orig(repo, files, *args, **kwargs)
1429 1429
1430 1430 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1431 1431 if filelist:
1432 1432 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1433 1433 printmessage=False, normallookup=True)
1434 1434
1435 1435 return result
@@ -1,1543 +1,1544
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import shutil
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullhex,
20 20 nullid,
21 21 nullrev,
22 22 )
23 23 from . import (
24 24 copies,
25 25 destutil,
26 26 error,
27 27 filemerge,
28 28 obsolete,
29 29 subrepo,
30 30 util,
31 31 worker,
32 32 )
33 33
34 34 _pack = struct.pack
35 35 _unpack = struct.unpack
36 36
37 37 def _droponode(data):
38 38 # used for compatibility for v1
39 39 bits = data.split('\0')
40 40 bits = bits[:-2] + bits[-1:]
41 41 return '\0'.join(bits)
42 42
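Concretely, _droponode() just drops the second-to-last NUL-separated field, which v1 readers do not understand; for example:

# Illustrative only: the second-to-last field of the record payload is removed.
_droponode('a\0b\0c\0d')   # -> 'a\0b\0d'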
43 43 class mergestate(object):
44 44 '''track 3-way merge state of individual files
45 45
46 46 The merge state is stored on disk when needed. Two files are used: one with
47 47 an old format (version 1), and one with a new format (version 2). Version 2
48 48 stores a superset of the data in version 1, including new kinds of records
49 49 in the future. For more about the new format, see the documentation for
50 50 `_readrecordsv2`.
51 51
52 52 Each record can contain arbitrary content, and has an associated type. This
53 53 `type` should be a letter. If `type` is uppercase, the record is mandatory:
54 54 versions of Mercurial that don't support it should abort. If `type` is
55 55 lowercase, the record can be safely ignored.
56 56
57 57 Currently known records:
58 58
59 59 L: the node of the "local" part of the merge (hexified version)
60 60 O: the node of the "other" part of the merge (hexified version)
61 61 F: a file to be merged entry
62 62 C: a change/delete or delete/change conflict
63 63 D: a file that the external merge driver will merge internally
64 64 (experimental)
65 65 m: the external merge driver defined for this merge plus its run state
66 66 (experimental)
67 67 X: unsupported mandatory record type (used in tests)
68 68 x: unsupported advisory record type (used in tests)
69 69
70 70 Merge driver run states (experimental):
71 71 u: driver-resolved files unmarked -- needs to be run next time we're about
72 72 to resolve or commit
73 73 m: driver-resolved files marked -- only needs to be run before commit
74 74 s: success/skipped -- does not need to be run any more
75 75
76 76 '''
77 77 statepathv1 = 'merge/state'
78 78 statepathv2 = 'merge/state2'
79 79
80 80 @staticmethod
81 81 def clean(repo, node=None, other=None):
82 82 """Initialize a brand new merge state, removing any existing state on
83 83 disk."""
84 84 ms = mergestate(repo)
85 85 ms.reset(node, other)
86 86 return ms
87 87
88 88 @staticmethod
89 89 def read(repo):
90 90 """Initialize the merge state, reading it from disk."""
91 91 ms = mergestate(repo)
92 92 ms._read()
93 93 return ms
94 94
95 95 def __init__(self, repo):
96 96 """Initialize the merge state.
97 97
98 98 Do not use this directly! Instead call read() or clean()."""
99 99 self._repo = repo
100 100 self._dirty = False
101 101
102 102 def reset(self, node=None, other=None):
103 103 self._state = {}
104 104 self._local = None
105 105 self._other = None
106 106 for var in ('localctx', 'otherctx'):
107 107 if var in vars(self):
108 108 delattr(self, var)
109 109 if node:
110 110 self._local = node
111 111 self._other = other
112 112 self._readmergedriver = None
113 113 if self.mergedriver:
114 114 self._mdstate = 's'
115 115 else:
116 116 self._mdstate = 'u'
117 117 shutil.rmtree(self._repo.join('merge'), True)
118 118 self._results = {}
119 119 self._dirty = False
120 120
121 121 def _read(self):
122 122 """Analyse each record content to restore a serialized state from disk
123 123
124 124         This function processes "record" entries produced by the
125 125         de-serialization of the on-disk file.
126 126 """
127 127 self._state = {}
128 128 self._local = None
129 129 self._other = None
130 130 for var in ('localctx', 'otherctx'):
131 131 if var in vars(self):
132 132 delattr(self, var)
133 133 self._readmergedriver = None
134 134 self._mdstate = 's'
135 135 unsupported = set()
136 136 records = self._readrecords()
137 137 for rtype, record in records:
138 138 if rtype == 'L':
139 139 self._local = bin(record)
140 140 elif rtype == 'O':
141 141 self._other = bin(record)
142 142 elif rtype == 'm':
143 143 bits = record.split('\0', 1)
144 144 mdstate = bits[1]
145 145 if len(mdstate) != 1 or mdstate not in 'ums':
146 146 # the merge driver should be idempotent, so just rerun it
147 147 mdstate = 'u'
148 148
149 149 self._readmergedriver = bits[0]
150 150 self._mdstate = mdstate
151 151 elif rtype in 'FDC':
152 152 bits = record.split('\0')
153 153 self._state[bits[0]] = bits[1:]
154 154 elif not rtype.islower():
155 155 unsupported.add(rtype)
156 156 self._results = {}
157 157 self._dirty = False
158 158
159 159 if unsupported:
160 160 raise error.UnsupportedMergeRecords(unsupported)
161 161
162 162 def _readrecords(self):
163 163         """Read merge state from disk and return a list of records (TYPE, data)
164 164
165 165 We read data from both v1 and v2 files and decide which one to use.
166 166
167 167         V1 has been used by versions prior to 2.9.1 and contains less data
168 168         than v2. We read both versions and check whether any data in v2
169 169         contradicts v1. If there is no contradiction we can safely assume
170 170         that both v1 and v2 were written at the same time and use the extra
171 171         data in v2. If there is a contradiction we ignore the v2 content, as
172 172         we assume an old version of Mercurial has overwritten the mergestate
173 173         file and left an old v2 file around.
174 174 
175 175         returns a list of records [(TYPE, data), ...]"""
176 176 v1records = self._readrecordsv1()
177 177 v2records = self._readrecordsv2()
178 178 if self._v1v2match(v1records, v2records):
179 179 return v2records
180 180 else:
181 181 # v1 file is newer than v2 file, use it
182 182 # we have to infer the "other" changeset of the merge
183 183 # we cannot do better than that with v1 of the format
184 184 mctx = self._repo[None].parents()[-1]
185 185 v1records.append(('O', mctx.hex()))
186 186             # add placeholder "other" file node information
187 187             # nobody is using it yet so we do not need to fetch the data
188 188             # if mctx was wrong `mctx[bits[-2]]` may fail.
189 189 for idx, r in enumerate(v1records):
190 190 if r[0] == 'F':
191 191 bits = r[1].split('\0')
192 192 bits.insert(-2, '')
193 193 v1records[idx] = (r[0], '\0'.join(bits))
194 194 return v1records
195 195
196 196 def _v1v2match(self, v1records, v2records):
197 197 oldv2 = set() # old format version of v2 record
198 198 for rec in v2records:
199 199 if rec[0] == 'L':
200 200 oldv2.add(rec)
201 201 elif rec[0] == 'F':
202 202 # drop the onode data (not contained in v1)
203 203 oldv2.add(('F', _droponode(rec[1])))
204 204 for rec in v1records:
205 205 if rec not in oldv2:
206 206 return False
207 207 else:
208 208 return True
209 209
210 210 def _readrecordsv1(self):
211 211 """read on disk merge state for version 1 file
212 212
213 213         returns a list of records [(TYPE, data), ...]
214 214
215 215 Note: the "F" data from this file are one entry short
216 216 (no "other file node" entry)
217 217 """
218 218 records = []
219 219 try:
220 220 f = self._repo.vfs(self.statepathv1)
221 221 for i, l in enumerate(f):
222 222 if i == 0:
223 223 records.append(('L', l[:-1]))
224 224 else:
225 225 records.append(('F', l[:-1]))
226 226 f.close()
227 227 except IOError as err:
228 228 if err.errno != errno.ENOENT:
229 229 raise
230 230 return records
231 231
232 232 def _readrecordsv2(self):
233 233 """read on disk merge state for version 2 file
234 234
235 235 This format is a list of arbitrary records of the form:
236 236
237 237 [type][length][content]
238 238
239 239 `type` is a single character, `length` is a 4 byte integer, and
240 240 `content` is an arbitrary byte sequence of length `length`.
241 241
242 242 Mercurial versions prior to 3.7 have a bug where if there are
243 243 unsupported mandatory merge records, attempting to clear out the merge
244 244 state with hg update --clean or similar aborts. The 't' record type
245 245 works around that by writing out what those versions treat as an
246 246         advisory record, but which later versions interpret as special: the first
247 247 character is the 'real' record type and everything onwards is the data.
248 248
249 249 Returns list of records [(TYPE, data), ...]."""
250 250 records = []
251 251 try:
252 252 f = self._repo.vfs(self.statepathv2)
253 253 data = f.read()
254 254 off = 0
255 255 end = len(data)
256 256 while off < end:
257 257 rtype = data[off]
258 258 off += 1
259 259 length = _unpack('>I', data[off:(off + 4)])[0]
260 260 off += 4
261 261 record = data[off:(off + length)]
262 262 off += length
263 263 if rtype == 't':
264 264 rtype, record = record[0], record[1:]
265 265 records.append((rtype, record))
266 266 f.close()
267 267 except IOError as err:
268 268 if err.errno != errno.ENOENT:
269 269 raise
270 270 return records
271 271
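As a quick illustration of the [type][length][content] framing described in _readrecordsv2 above, here is a small self-contained sketch (not part of Mercurial; the record content is made up) that packs one advisory record the same way _writerecordsv2 does and reads it back:

    import struct

    def packrecord(rtype, data):
        # 1-byte type, 4-byte big-endian length, then the raw content,
        # mirroring the '>sI%is' format used by _writerecordsv2
        return struct.pack('>sI%is' % len(data), rtype, len(data), data)

    def unpackrecord(buf):
        rtype = buf[0:1]
        length = struct.unpack('>I', buf[1:5])[0]
        return rtype, buf[5:5 + length]

    buf = packrecord('x', 'advisory payload')
    assert unpackrecord(buf) == ('x', 'advisory payload')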
272 272 @util.propertycache
273 273 def mergedriver(self):
274 274 # protect against the following:
275 275 # - A configures a malicious merge driver in their hgrc, then
276 276 # pauses the merge
277 277 # - A edits their hgrc to remove references to the merge driver
278 278 # - A gives a copy of their entire repo, including .hg, to B
279 279 # - B inspects .hgrc and finds it to be clean
280 280 # - B then continues the merge and the malicious merge driver
281 281 # gets invoked
282 282 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
283 283 if (self._readmergedriver is not None
284 284 and self._readmergedriver != configmergedriver):
285 285 raise error.ConfigError(
286 286 _("merge driver changed since merge started"),
287 287 hint=_("revert merge driver change or abort merge"))
288 288
289 289 return configmergedriver
290 290
291 291 @util.propertycache
292 292 def localctx(self):
293 293 if self._local is None:
294 294 raise RuntimeError("localctx accessed but self._local isn't set")
295 295 return self._repo[self._local]
296 296
297 297 @util.propertycache
298 298 def otherctx(self):
299 299 if self._other is None:
300 300             raise RuntimeError("otherctx accessed but self._other isn't set")
301 301 return self._repo[self._other]
302 302
303 303 def active(self):
304 304 """Whether mergestate is active.
305 305
306 306 Returns True if there appears to be mergestate. This is a rough proxy
307 307 for "is a merge in progress."
308 308 """
309 309 # Check local variables before looking at filesystem for performance
310 310 # reasons.
311 311 return bool(self._local) or bool(self._state) or \
312 312 self._repo.vfs.exists(self.statepathv1) or \
313 313 self._repo.vfs.exists(self.statepathv2)
314 314
315 315 def commit(self):
316 316 """Write current state on disk (if necessary)"""
317 317 if self._dirty:
318 318 records = self._makerecords()
319 319 self._writerecords(records)
320 320 self._dirty = False
321 321
322 322 def _makerecords(self):
323 323 records = []
324 324 records.append(('L', hex(self._local)))
325 325 records.append(('O', hex(self._other)))
326 326 if self.mergedriver:
327 327 records.append(('m', '\0'.join([
328 328 self.mergedriver, self._mdstate])))
329 329 for d, v in self._state.iteritems():
330 330 if v[0] == 'd':
331 331 records.append(('D', '\0'.join([d] + v)))
332 332 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
333 333 # older versions of Mercurial
334 334 elif v[1] == nullhex or v[6] == nullhex:
335 335 records.append(('C', '\0'.join([d] + v)))
336 336 else:
337 337 records.append(('F', '\0'.join([d] + v)))
338 338 return records
339 339
340 340 def _writerecords(self, records):
341 341 """Write current state on disk (both v1 and v2)"""
342 342 self._writerecordsv1(records)
343 343 self._writerecordsv2(records)
344 344
345 345 def _writerecordsv1(self, records):
346 346 """Write current state on disk in a version 1 file"""
347 347 f = self._repo.vfs(self.statepathv1, 'w')
348 348 irecords = iter(records)
349 349 lrecords = irecords.next()
350 350 assert lrecords[0] == 'L'
351 351 f.write(hex(self._local) + '\n')
352 352 for rtype, data in irecords:
353 353 if rtype == 'F':
354 354 f.write('%s\n' % _droponode(data))
355 355 f.close()
356 356
357 357 def _writerecordsv2(self, records):
358 358 """Write current state on disk in a version 2 file
359 359
360 360 See the docstring for _readrecordsv2 for why we use 't'."""
361 361 # these are the records that all version 2 clients can read
362 362 whitelist = 'LOF'
363 363 f = self._repo.vfs(self.statepathv2, 'w')
364 364 for key, data in records:
365 365 assert len(key) == 1
366 366 if key not in whitelist:
367 367 key, data = 't', '%s%s' % (key, data)
368 368 format = '>sI%is' % len(data)
369 369 f.write(_pack(format, key, len(data), data))
370 370 f.close()
371 371
372 372 def add(self, fcl, fco, fca, fd):
373 373         """add a new (potentially?) conflicting file to the merge state
374 374 fcl: file context for local,
375 375 fco: file context for remote,
376 376 fca: file context for ancestors,
377 377 fd: file path of the resulting merge.
378 378
379 379 note: also write the local version to the `.hg/merge` directory.
380 380 """
381 381 if fcl.isabsent():
382 382 hash = nullhex
383 383 else:
384 384 hash = util.sha1(fcl.path()).hexdigest()
385 385 self._repo.vfs.write('merge/' + hash, fcl.data())
386 386 self._state[fd] = ['u', hash, fcl.path(),
387 387 fca.path(), hex(fca.filenode()),
388 388 fco.path(), hex(fco.filenode()),
389 389 fcl.flags()]
390 390 self._dirty = True
391 391
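To make the per-file entry layout stored by add() above concrete, here is an illustrative, self-contained sketch; every path, node and hash below is a made-up placeholder, not data from any real repository:

    # hypothetical entry, in the same field order add() stores it:
    # state, key of the saved local copy under .hg/merge, local path,
    # ancestor path, ancestor node, other path, other node, flags
    fd = 'src/module.py'
    entry = ['u',                 # unresolved
             'd' * 40,            # placeholder key of the local copy in .hg/merge
             'src/module.py',     # local path
             'src/module.py',     # ancestor path
             'a' * 40,            # placeholder ancestor file node (hex)
             'src/module.py',     # other path
             'b' * 40,            # placeholder other file node (hex)
             'x']                 # flags (executable here)
    record = '\0'.join([fd] + entry)   # the payload of an 'F' record
    assert record.split('\0')[1] == 'u'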
392 392 def __contains__(self, dfile):
393 393 return dfile in self._state
394 394
395 395 def __getitem__(self, dfile):
396 396 return self._state[dfile][0]
397 397
398 398 def __iter__(self):
399 399 return iter(sorted(self._state))
400 400
401 401 def files(self):
402 402 return self._state.keys()
403 403
404 404 def mark(self, dfile, state):
405 405 self._state[dfile][0] = state
406 406 self._dirty = True
407 407
408 408 def mdstate(self):
409 409 return self._mdstate
410 410
411 411 def unresolved(self):
412 412 """Obtain the paths of unresolved files."""
413 413
414 414 for f, entry in self._state.items():
415 415 if entry[0] == 'u':
416 416 yield f
417 417
418 418 def driverresolved(self):
419 419 """Obtain the paths of driver-resolved files."""
420 420
421 421 for f, entry in self._state.items():
422 422 if entry[0] == 'd':
423 423 yield f
424 424
425 425 def _resolve(self, preresolve, dfile, wctx, labels=None):
426 426 """rerun merge process for file path `dfile`"""
427 427 if self[dfile] in 'rd':
428 428 return True, 0
429 429 stateentry = self._state[dfile]
430 430 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
431 431 octx = self._repo[self._other]
432 432 fcd = self._filectxorabsent(hash, wctx, dfile)
433 433 fco = self._filectxorabsent(onode, octx, ofile)
434 434 # TODO: move this to filectxorabsent
435 435 fca = self._repo.filectx(afile, fileid=anode)
436 436 # "premerge" x flags
437 437 flo = fco.flags()
438 438 fla = fca.flags()
439 439 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
440 440 if fca.node() == nullid:
441 441 if preresolve:
442 442 self._repo.ui.warn(
443 443 _('warning: cannot merge flags for %s\n') % afile)
444 444 elif flags == fla:
445 445 flags = flo
446 446 if preresolve:
447 447 # restore local
448 448 if hash != nullhex:
449 449 f = self._repo.vfs('merge/' + hash)
450 450 self._repo.wwrite(dfile, f.read(), flags)
451 451 f.close()
452 452 else:
453 453 self._repo.wvfs.unlinkpath(dfile, ignoremissing=True)
454 454 complete, r, deleted = filemerge.premerge(self._repo, self._local,
455 455 lfile, fcd, fco, fca,
456 456 labels=labels)
457 457 else:
458 458 complete, r, deleted = filemerge.filemerge(self._repo, self._local,
459 459 lfile, fcd, fco, fca,
460 460 labels=labels)
461 461 if r is None:
462 462 # no real conflict
463 463 del self._state[dfile]
464 464 self._dirty = True
465 465 elif not r:
466 466 self.mark(dfile, 'r')
467 467
468 468 if complete:
469 469 action = None
470 470 if deleted:
471 471 if fcd.isabsent():
472 472 # dc: local picked. Need to drop if present, which may
473 473 # happen on re-resolves.
474 474 action = 'f'
475 475 else:
476 476 # cd: remote picked (or otherwise deleted)
477 477 action = 'r'
478 478 else:
479 479 if fcd.isabsent(): # dc: remote picked
480 480 action = 'g'
481 481 elif fco.isabsent(): # cd: local picked
482 482 if dfile in self.localctx:
483 483 action = 'am'
484 484 else:
485 485 action = 'a'
486 486 # else: regular merges (no action necessary)
487 487 self._results[dfile] = r, action
488 488
489 489 return complete, r
490 490
491 491 def _filectxorabsent(self, hexnode, ctx, f):
492 492 if hexnode == nullhex:
493 493 return filemerge.absentfilectx(ctx, f)
494 494 else:
495 495 return ctx[f]
496 496
497 497 def preresolve(self, dfile, wctx, labels=None):
498 498 """run premerge process for dfile
499 499
500 500 Returns whether the merge is complete, and the exit code."""
501 501 return self._resolve(True, dfile, wctx, labels=labels)
502 502
503 503 def resolve(self, dfile, wctx, labels=None):
504 504 """run merge process (assuming premerge was run) for dfile
505 505
506 506 Returns the exit code of the merge."""
507 507 return self._resolve(False, dfile, wctx, labels=labels)[1]
508 508
509 509 def counts(self):
510 510 """return counts for updated, merged and removed files in this
511 511 session"""
512 512 updated, merged, removed = 0, 0, 0
513 513 for r, action in self._results.itervalues():
514 514 if r is None:
515 515 updated += 1
516 516 elif r == 0:
517 517 if action == 'r':
518 518 removed += 1
519 519 else:
520 520 merged += 1
521 521 return updated, merged, removed
522 522
523 523 def unresolvedcount(self):
524 524 """get unresolved count for this merge (persistent)"""
525 525 return len([True for f, entry in self._state.iteritems()
526 526 if entry[0] == 'u'])
527 527
528 528 def actions(self):
529 529 """return lists of actions to perform on the dirstate"""
530 530 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
531 531 for f, (r, action) in self._results.iteritems():
532 532 if action is not None:
533 533 actions[action].append((f, None, "merge result"))
534 534 return actions
535 535
536 536 def recordactions(self):
537 537 """record remove/add/get actions in the dirstate"""
538 538 branchmerge = self._repo.dirstate.p2() != nullid
539 539 recordupdates(self._repo, self.actions(), branchmerge)
540 540
541 541 def queueremove(self, f):
542 542 """queues a file to be removed from the dirstate
543 543
544 544 Meant for use by custom merge drivers."""
545 545 self._results[f] = 0, 'r'
546 546
547 547 def queueadd(self, f):
548 548 """queues a file to be added to the dirstate
549 549
550 550 Meant for use by custom merge drivers."""
551 551 self._results[f] = 0, 'a'
552 552
553 553 def queueget(self, f):
554 554 """queues a file to be marked modified in the dirstate
555 555
556 556 Meant for use by custom merge drivers."""
557 557 self._results[f] = 0, 'g'
558 558
559 559 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
560 560 if f2 is None:
561 561 f2 = f
562 562 return (repo.wvfs.isfileorlink(f)
563 563 and repo.wvfs.audit.check(f)
564 564 and repo.dirstate.normalize(f) not in repo.dirstate
565 565 and mctx[f2].cmp(wctx[f]))
566 566
567 567 def _checkunknownfiles(repo, wctx, mctx, force, actions):
568 568 """
569 569 Considers any actions that care about the presence of conflicting unknown
570 570 files. For some actions, the result is to abort; for others, it is to
571 571 choose a different action.
572 572 """
573 573 conflicts = set()
574 574 if not force:
575 575 for f, (m, args, msg) in actions.iteritems():
576 576 if m in ('c', 'dc'):
577 577 if _checkunknownfile(repo, wctx, mctx, f):
578 578 conflicts.add(f)
579 579 elif m == 'dg':
580 580 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
581 581 conflicts.add(f)
582 582
583 583 for f in sorted(conflicts):
584 584 repo.ui.warn(_("%s: untracked file differs\n") % f)
585 585 if conflicts:
586 586 raise error.Abort(_("untracked files in working directory differ "
587 587 "from files in requested revision"))
588 588
589 589 for f, (m, args, msg) in actions.iteritems():
590 590 if m == 'c':
591 actions[f] = ('g', args, msg)
591 flags, = args
592 actions[f] = ('g', (flags, False), msg)
592 593 elif m == 'cm':
593 594 fl2, anc = args
594 595 different = _checkunknownfile(repo, wctx, mctx, f)
595 596 if different:
596 597 actions[f] = ('m', (f, f, None, False, anc),
597 598 "remote differs from untracked local")
598 599 else:
599 actions[f] = ('g', (fl2,), "remote created")
600 actions[f] = ('g', (fl2, False), "remote created")
600 601
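The pairs of changed lines above (and the similar ones further down in manifestmerge and update) are the point of this changeset: the args tuple of every 'g' (get) action grows from (flags,) to two elements, the second being the new backup indicator, hard-coded to False for now. A minimal sketch of the new shape, with made-up file names:

    # hypothetical actions dict: file name -> (action code, args, reason)
    actions = {
        'foo.txt':  ('g', ('',  False), "remote created"),   # plain file
        'bin/tool': ('g', ('x', False), "remote is newer"),   # executable flag kept
    }
    for f, (m, args, msg) in sorted(actions.items()):
        flags, backup = args
        print '%s: %s (flags=%r, backup=%r)' % (f, msg, flags, backup)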
601 602 def _forgetremoved(wctx, mctx, branchmerge):
602 603 """
603 604 Forget removed files
604 605
605 606 If we're jumping between revisions (as opposed to merging), and if
606 607 neither the working directory nor the target rev has the file,
607 608 then we need to remove it from the dirstate, to prevent the
608 609 dirstate from listing the file when it is no longer in the
609 610 manifest.
610 611
611 612 If we're merging, and the other revision has removed a file
612 613 that is not present in the working directory, we need to mark it
613 614 as removed.
614 615 """
615 616
616 617 actions = {}
617 618 m = 'f'
618 619 if branchmerge:
619 620 m = 'r'
620 621 for f in wctx.deleted():
621 622 if f not in mctx:
622 623 actions[f] = m, None, "forget deleted"
623 624
624 625 if not branchmerge:
625 626 for f in wctx.removed():
626 627 if f not in mctx:
627 628 actions[f] = 'f', None, "forget removed"
628 629
629 630 return actions
630 631
631 632 def _checkcollision(repo, wmf, actions):
632 633 # build provisional merged manifest up
633 634 pmmf = set(wmf)
634 635
635 636 if actions:
636 637 # k, dr, e and rd are no-op
637 638 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
638 639 for f, args, msg in actions[m]:
639 640 pmmf.add(f)
640 641 for f, args, msg in actions['r']:
641 642 pmmf.discard(f)
642 643 for f, args, msg in actions['dm']:
643 644 f2, flags = args
644 645 pmmf.discard(f2)
645 646 pmmf.add(f)
646 647 for f, args, msg in actions['dg']:
647 648 pmmf.add(f)
648 649 for f, args, msg in actions['m']:
649 650 f1, f2, fa, move, anc = args
650 651 if move:
651 652 pmmf.discard(f1)
652 653 pmmf.add(f)
653 654
654 655 # check case-folding collision in provisional merged manifest
655 656 foldmap = {}
656 657 for f in sorted(pmmf):
657 658 fold = util.normcase(f)
658 659 if fold in foldmap:
659 660 raise error.Abort(_("case-folding collision between %s and %s")
660 661 % (f, foldmap[fold]))
661 662 foldmap[fold] = f
662 663
663 664 # check case-folding of directories
664 665 foldprefix = unfoldprefix = lastfull = ''
665 666 for fold, f in sorted(foldmap.items()):
666 667 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
667 668 # the folded prefix matches but actual casing is different
668 669 raise error.Abort(_("case-folding collision between "
669 670 "%s and directory of %s") % (lastfull, f))
670 671 foldprefix = fold + '/'
671 672 unfoldprefix = f + '/'
672 673 lastfull = f
673 674
674 675 def driverpreprocess(repo, ms, wctx, labels=None):
675 676 """run the preprocess step of the merge driver, if any
676 677
677 678 This is currently not implemented -- it's an extension point."""
678 679 return True
679 680
680 681 def driverconclude(repo, ms, wctx, labels=None):
681 682 """run the conclude step of the merge driver, if any
682 683
683 684 This is currently not implemented -- it's an extension point."""
684 685 return True
685 686
686 687 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
687 688 acceptremote, followcopies):
688 689 """
689 690 Merge p1 and p2 with ancestor pa and generate merge action list
690 691
691 692 branchmerge and force are as passed in to update
692 693 matcher = matcher to filter file lists
693 694 acceptremote = accept the incoming changes without prompting
694 695 """
695 696 if matcher is not None and matcher.always():
696 697 matcher = None
697 698
698 699 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
699 700
700 701 # manifests fetched in order are going to be faster, so prime the caches
701 702 [x.manifest() for x in
702 703 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
703 704
704 705 if followcopies:
705 706 ret = copies.mergecopies(repo, wctx, p2, pa)
706 707 copy, movewithdir, diverge, renamedelete = ret
707 708
708 709 repo.ui.note(_("resolving manifests\n"))
709 710 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
710 711 % (bool(branchmerge), bool(force), bool(matcher)))
711 712 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
712 713
713 714 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
714 715 copied = set(copy.values())
715 716 copied.update(movewithdir.values())
716 717
717 718 if '.hgsubstate' in m1:
718 719 # check whether sub state is modified
719 720 for s in sorted(wctx.substate):
720 721 if wctx.sub(s).dirty():
721 722 m1['.hgsubstate'] += '+'
722 723 break
723 724
724 725 # Compare manifests
725 726 if matcher is not None:
726 727 m1 = m1.matches(matcher)
727 728 m2 = m2.matches(matcher)
728 729 diff = m1.diff(m2)
729 730
730 731 actions = {}
731 732 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
732 733 if n1 and n2: # file exists on both local and remote side
733 734 if f not in ma:
734 735 fa = copy.get(f, None)
735 736 if fa is not None:
736 737 actions[f] = ('m', (f, f, fa, False, pa.node()),
737 738 "both renamed from " + fa)
738 739 else:
739 740 actions[f] = ('m', (f, f, None, False, pa.node()),
740 741 "both created")
741 742 else:
742 743 a = ma[f]
743 744 fla = ma.flags(f)
744 745 nol = 'l' not in fl1 + fl2 + fla
745 746 if n2 == a and fl2 == fla:
746 747 actions[f] = ('k' , (), "remote unchanged")
747 748 elif n1 == a and fl1 == fla: # local unchanged - use remote
748 749 if n1 == n2: # optimization: keep local content
749 750 actions[f] = ('e', (fl2,), "update permissions")
750 751 else:
751 actions[f] = ('g', (fl2,), "remote is newer")
752 actions[f] = ('g', (fl2, False), "remote is newer")
752 753 elif nol and n2 == a: # remote only changed 'x'
753 754 actions[f] = ('e', (fl2,), "update permissions")
754 755 elif nol and n1 == a: # local only changed 'x'
755 actions[f] = ('g', (fl1,), "remote is newer")
756 actions[f] = ('g', (fl1, False), "remote is newer")
756 757 else: # both changed something
757 758 actions[f] = ('m', (f, f, f, False, pa.node()),
758 759 "versions differ")
759 760 elif n1: # file exists only on local side
760 761 if f in copied:
761 762 pass # we'll deal with it on m2 side
762 763 elif f in movewithdir: # directory rename, move local
763 764 f2 = movewithdir[f]
764 765 if f2 in m2:
765 766 actions[f2] = ('m', (f, f2, None, True, pa.node()),
766 767 "remote directory rename, both created")
767 768 else:
768 769 actions[f2] = ('dm', (f, fl1),
769 770 "remote directory rename - move from " + f)
770 771 elif f in copy:
771 772 f2 = copy[f]
772 773 actions[f] = ('m', (f, f2, f2, False, pa.node()),
773 774 "local copied/moved from " + f2)
774 775 elif f in ma: # clean, a different, no remote
775 776 if n1 != ma[f]:
776 777 if acceptremote:
777 778 actions[f] = ('r', None, "remote delete")
778 779 else:
779 780 actions[f] = ('cd', (f, None, f, False, pa.node()),
780 781 "prompt changed/deleted")
781 782 elif n1[20:] == 'a':
782 783 # This extra 'a' is added by working copy manifest to mark
783 784 # the file as locally added. We should forget it instead of
784 785 # deleting it.
785 786 actions[f] = ('f', None, "remote deleted")
786 787 else:
787 788 actions[f] = ('r', None, "other deleted")
788 789 elif n2: # file exists only on remote side
789 790 if f in copied:
790 791 pass # we'll deal with it on m1 side
791 792 elif f in movewithdir:
792 793 f2 = movewithdir[f]
793 794 if f2 in m1:
794 795 actions[f2] = ('m', (f2, f, None, False, pa.node()),
795 796 "local directory rename, both created")
796 797 else:
797 798 actions[f2] = ('dg', (f, fl2),
798 799 "local directory rename - get from " + f)
799 800 elif f in copy:
800 801 f2 = copy[f]
801 802 if f2 in m2:
802 803 actions[f] = ('m', (f2, f, f2, False, pa.node()),
803 804 "remote copied from " + f2)
804 805 else:
805 806 actions[f] = ('m', (f2, f, f2, True, pa.node()),
806 807 "remote moved from " + f2)
807 808 elif f not in ma:
808 809 # local unknown, remote created: the logic is described by the
809 810 # following table:
810 811 #
811 812 # force branchmerge different | action
812 813 # n * * | create
813 814 # y n * | create
814 815 # y y n | create
815 816 # y y y | merge
816 817 #
817 818 # Checking whether the files are different is expensive, so we
818 819 # don't do that when we can avoid it.
819 820 if not force:
820 821 actions[f] = ('c', (fl2,), "remote created")
821 822 elif not branchmerge:
822 823 actions[f] = ('c', (fl2,), "remote created")
823 824 else:
824 825 actions[f] = ('cm', (fl2, pa.node()),
825 826 "remote created, get or merge")
826 827 elif n2 != ma[f]:
827 828 if acceptremote:
828 829 actions[f] = ('c', (fl2,), "remote recreating")
829 830 else:
830 831 actions[f] = ('dc', (None, f, f, False, pa.node()),
831 832 "prompt deleted/changed")
832 833
833 834 return actions, diverge, renamedelete
834 835
835 836 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
836 837 """Resolves false conflicts where the nodeid changed but the content
837 838 remained the same."""
838 839
839 840 for f, (m, args, msg) in actions.items():
840 841 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
841 842 # local did change but ended up with same content
842 843 actions[f] = 'r', None, "prompt same"
843 844 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
844 845 # remote did change but ended up with same content
845 846 del actions[f] # don't get = keep local deleted
846 847
847 848 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
848 849 acceptremote, followcopies, matcher=None):
849 850 "Calculate the actions needed to merge mctx into wctx using ancestors"
850 851 if len(ancestors) == 1: # default
851 852 actions, diverge, renamedelete = manifestmerge(
852 853 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
853 854 acceptremote, followcopies)
854 855 _checkunknownfiles(repo, wctx, mctx, force, actions)
855 856
856 857 else: # only when merge.preferancestor=* - the default
857 858 repo.ui.note(
858 859 _("note: merging %s and %s using bids from ancestors %s\n") %
859 860 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
860 861
861 862 # Call for bids
862 863         fbids = {} # mapping filename to bids (action method to list of actions)
863 864 diverge, renamedelete = None, None
864 865 for ancestor in ancestors:
865 866 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
866 867 actions, diverge1, renamedelete1 = manifestmerge(
867 868 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
868 869 acceptremote, followcopies)
869 870 _checkunknownfiles(repo, wctx, mctx, force, actions)
870 871
871 872             # Track the shortest set of warnings on the theory that bid
872 873             # merge will correctly incorporate more information
873 874 if diverge is None or len(diverge1) < len(diverge):
874 875 diverge = diverge1
875 876 if renamedelete is None or len(renamedelete) < len(renamedelete1):
876 877 renamedelete = renamedelete1
877 878
878 879 for f, a in sorted(actions.iteritems()):
879 880 m, args, msg = a
880 881 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
881 882 if f in fbids:
882 883 d = fbids[f]
883 884 if m in d:
884 885 d[m].append(a)
885 886 else:
886 887 d[m] = [a]
887 888 else:
888 889 fbids[f] = {m: [a]}
889 890
890 891 # Pick the best bid for each file
891 892 repo.ui.note(_('\nauction for merging merge bids\n'))
892 893 actions = {}
893 894 for f, bids in sorted(fbids.items()):
894 895             # bids is a mapping from action method to list of actions
895 896 # Consensus?
896 897 if len(bids) == 1: # all bids are the same kind of method
897 898 m, l = bids.items()[0]
898 899 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
899 900 repo.ui.note(" %s: consensus for %s\n" % (f, m))
900 901 actions[f] = l[0]
901 902 continue
902 903 # If keep is an option, just do it.
903 904 if 'k' in bids:
904 905 repo.ui.note(" %s: picking 'keep' action\n" % f)
905 906 actions[f] = bids['k'][0]
906 907 continue
907 908 # If there are gets and they all agree [how could they not?], do it.
908 909 if 'g' in bids:
909 910 ga0 = bids['g'][0]
910 911 if all(a == ga0 for a in bids['g'][1:]):
911 912 repo.ui.note(" %s: picking 'get' action\n" % f)
912 913 actions[f] = ga0
913 914 continue
914 915 # TODO: Consider other simple actions such as mode changes
915 916 # Handle inefficient democrazy.
916 917 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
917 918 for m, l in sorted(bids.items()):
918 919 for _f, args, msg in l:
919 920 repo.ui.note(' %s -> %s\n' % (msg, m))
920 921 # Pick random action. TODO: Instead, prompt user when resolving
921 922 m, l = bids.items()[0]
922 923 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
923 924 (f, m))
924 925 actions[f] = l[0]
925 926 continue
926 927 repo.ui.note(_('end of auction\n\n'))
927 928
928 929 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
929 930
930 931 if wctx.rev() is None:
931 932 fractions = _forgetremoved(wctx, mctx, branchmerge)
932 933 actions.update(fractions)
933 934
934 935 return actions, diverge, renamedelete
935 936
936 937 def batchremove(repo, actions):
937 938 """apply removes to the working directory
938 939
939 940 yields tuples for progress updates
940 941 """
941 942 verbose = repo.ui.verbose
942 943 unlink = util.unlinkpath
943 944 wjoin = repo.wjoin
944 945 audit = repo.wvfs.audit
945 946 i = 0
946 947 for f, args, msg in actions:
947 948 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
948 949 if verbose:
949 950 repo.ui.note(_("removing %s\n") % f)
950 951 audit(f)
951 952 try:
952 953 unlink(wjoin(f), ignoremissing=True)
953 954 except OSError as inst:
954 955 repo.ui.warn(_("update failed to remove %s: %s!\n") %
955 956 (f, inst.strerror))
956 957 if i == 100:
957 958 yield i, f
958 959 i = 0
959 960 i += 1
960 961 if i > 0:
961 962 yield i, f
962 963
963 964 def batchget(repo, mctx, actions):
964 965 """apply gets to the working directory
965 966
966 967 mctx is the context to get from
967 968
968 969 yields tuples for progress updates
969 970 """
970 971 verbose = repo.ui.verbose
971 972 fctx = mctx.filectx
972 973 wwrite = repo.wwrite
973 974 i = 0
974 975 for f, args, msg in actions:
975 976 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
976 977 if verbose:
977 978 repo.ui.note(_("getting %s\n") % f)
978 979 wwrite(f, fctx(f).data(), args[0])
979 980 if i == 100:
980 981 yield i, f
981 982 i = 0
982 983 i += 1
983 984 if i > 0:
984 985 yield i, f
985 986
986 987 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
987 988 """apply the merge action list to the working directory
988 989
989 990 wctx is the working copy context
990 991 mctx is the context to be merged into the working copy
991 992
992 993 Return a tuple of counts (updated, merged, removed, unresolved) that
993 994 describes how many files were affected by the update.
994 995 """
995 996
996 997 updated, merged, removed = 0, 0, 0
997 998 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node())
998 999 moves = []
999 1000 for m, l in actions.items():
1000 1001 l.sort()
1001 1002
1002 1003 # 'cd' and 'dc' actions are treated like other merge conflicts
1003 1004 mergeactions = sorted(actions['cd'])
1004 1005 mergeactions.extend(sorted(actions['dc']))
1005 1006 mergeactions.extend(actions['m'])
1006 1007 for f, args, msg in mergeactions:
1007 1008 f1, f2, fa, move, anc = args
1008 1009 if f == '.hgsubstate': # merged internally
1009 1010 continue
1010 1011 if f1 is None:
1011 1012 fcl = filemerge.absentfilectx(wctx, fa)
1012 1013 else:
1013 1014 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1014 1015 fcl = wctx[f1]
1015 1016 if f2 is None:
1016 1017 fco = filemerge.absentfilectx(mctx, fa)
1017 1018 else:
1018 1019 fco = mctx[f2]
1019 1020 actx = repo[anc]
1020 1021 if fa in actx:
1021 1022 fca = actx[fa]
1022 1023 else:
1023 1024 # TODO: move to absentfilectx
1024 1025 fca = repo.filectx(f1, fileid=nullrev)
1025 1026 ms.add(fcl, fco, fca, f)
1026 1027 if f1 != f and move:
1027 1028 moves.append(f1)
1028 1029
1029 1030 audit = repo.wvfs.audit
1030 1031 _updating = _('updating')
1031 1032 _files = _('files')
1032 1033 progress = repo.ui.progress
1033 1034
1034 1035 # remove renamed files after safely stored
1035 1036 for f in moves:
1036 1037 if os.path.lexists(repo.wjoin(f)):
1037 1038 repo.ui.debug("removing %s\n" % f)
1038 1039 audit(f)
1039 1040 util.unlinkpath(repo.wjoin(f))
1040 1041
1041 1042 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1042 1043
1043 1044 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1044 1045 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
1045 1046
1046 1047 # remove in parallel (must come first)
1047 1048 z = 0
1048 1049 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
1049 1050 for i, item in prog:
1050 1051 z += i
1051 1052 progress(_updating, z, item=item, total=numupdates, unit=_files)
1052 1053 removed = len(actions['r'])
1053 1054
1054 1055 # get in parallel
1055 1056 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
1056 1057 for i, item in prog:
1057 1058 z += i
1058 1059 progress(_updating, z, item=item, total=numupdates, unit=_files)
1059 1060 updated = len(actions['g'])
1060 1061
1061 1062 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1062 1063 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
1063 1064
1064 1065 # forget (manifest only, just log it) (must come first)
1065 1066 for f, args, msg in actions['f']:
1066 1067 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1067 1068 z += 1
1068 1069 progress(_updating, z, item=f, total=numupdates, unit=_files)
1069 1070
1070 1071 # re-add (manifest only, just log it)
1071 1072 for f, args, msg in actions['a']:
1072 1073 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1073 1074 z += 1
1074 1075 progress(_updating, z, item=f, total=numupdates, unit=_files)
1075 1076
1076 1077 # re-add/mark as modified (manifest only, just log it)
1077 1078 for f, args, msg in actions['am']:
1078 1079 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1079 1080 z += 1
1080 1081 progress(_updating, z, item=f, total=numupdates, unit=_files)
1081 1082
1082 1083 # keep (noop, just log it)
1083 1084 for f, args, msg in actions['k']:
1084 1085 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1085 1086 # no progress
1086 1087
1087 1088 # directory rename, move local
1088 1089 for f, args, msg in actions['dm']:
1089 1090 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1090 1091 z += 1
1091 1092 progress(_updating, z, item=f, total=numupdates, unit=_files)
1092 1093 f0, flags = args
1093 1094 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1094 1095 audit(f)
1095 1096 repo.wwrite(f, wctx.filectx(f0).data(), flags)
1096 1097 util.unlinkpath(repo.wjoin(f0))
1097 1098 updated += 1
1098 1099
1099 1100 # local directory rename, get
1100 1101 for f, args, msg in actions['dg']:
1101 1102 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1102 1103 z += 1
1103 1104 progress(_updating, z, item=f, total=numupdates, unit=_files)
1104 1105 f0, flags = args
1105 1106 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1106 1107 repo.wwrite(f, mctx.filectx(f0).data(), flags)
1107 1108 updated += 1
1108 1109
1109 1110 # exec
1110 1111 for f, args, msg in actions['e']:
1111 1112 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1112 1113 z += 1
1113 1114 progress(_updating, z, item=f, total=numupdates, unit=_files)
1114 1115 flags, = args
1115 1116 audit(f)
1116 1117 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
1117 1118 updated += 1
1118 1119
1119 1120 # the ordering is important here -- ms.mergedriver will raise if the merge
1120 1121 # driver has changed, and we want to be able to bypass it when overwrite is
1121 1122 # True
1122 1123 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1123 1124
1124 1125 if usemergedriver:
1125 1126 ms.commit()
1126 1127 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1127 1128 # the driver might leave some files unresolved
1128 1129 unresolvedf = set(ms.unresolved())
1129 1130 if not proceed:
1130 1131 # XXX setting unresolved to at least 1 is a hack to make sure we
1131 1132 # error out
1132 1133 return updated, merged, removed, max(len(unresolvedf), 1)
1133 1134 newactions = []
1134 1135 for f, args, msg in mergeactions:
1135 1136 if f in unresolvedf:
1136 1137 newactions.append((f, args, msg))
1137 1138 mergeactions = newactions
1138 1139
1139 1140 # premerge
1140 1141 tocomplete = []
1141 1142 for f, args, msg in mergeactions:
1142 1143 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1143 1144 z += 1
1144 1145 progress(_updating, z, item=f, total=numupdates, unit=_files)
1145 1146 if f == '.hgsubstate': # subrepo states need updating
1146 1147 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1147 1148 overwrite)
1148 1149 continue
1149 1150 audit(f)
1150 1151 complete, r = ms.preresolve(f, wctx, labels=labels)
1151 1152 if not complete:
1152 1153 numupdates += 1
1153 1154 tocomplete.append((f, args, msg))
1154 1155
1155 1156 # merge
1156 1157 for f, args, msg in tocomplete:
1157 1158 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1158 1159 z += 1
1159 1160 progress(_updating, z, item=f, total=numupdates, unit=_files)
1160 1161 ms.resolve(f, wctx, labels=labels)
1161 1162
1162 1163 ms.commit()
1163 1164
1164 1165 unresolved = ms.unresolvedcount()
1165 1166
1166 1167 if usemergedriver and not unresolved and ms.mdstate() != 's':
1167 1168 if not driverconclude(repo, ms, wctx, labels=labels):
1168 1169 # XXX setting unresolved to at least 1 is a hack to make sure we
1169 1170 # error out
1170 1171 unresolved = max(unresolved, 1)
1171 1172
1172 1173 ms.commit()
1173 1174
1174 1175 msupdated, msmerged, msremoved = ms.counts()
1175 1176 updated += msupdated
1176 1177 merged += msmerged
1177 1178 removed += msremoved
1178 1179
1179 1180 extraactions = ms.actions()
1180 1181 for k, acts in extraactions.iteritems():
1181 1182 actions[k].extend(acts)
1182 1183
1183 1184 progress(_updating, None, total=numupdates, unit=_files)
1184 1185
1185 1186 return updated, merged, removed, unresolved
1186 1187
1187 1188 def recordupdates(repo, actions, branchmerge):
1188 1189 "record merge actions to the dirstate"
1189 1190 # remove (must come first)
1190 1191 for f, args, msg in actions.get('r', []):
1191 1192 if branchmerge:
1192 1193 repo.dirstate.remove(f)
1193 1194 else:
1194 1195 repo.dirstate.drop(f)
1195 1196
1196 1197 # forget (must come first)
1197 1198 for f, args, msg in actions.get('f', []):
1198 1199 repo.dirstate.drop(f)
1199 1200
1200 1201 # re-add
1201 1202 for f, args, msg in actions.get('a', []):
1202 1203 repo.dirstate.add(f)
1203 1204
1204 1205 # re-add/mark as modified
1205 1206 for f, args, msg in actions.get('am', []):
1206 1207 if branchmerge:
1207 1208 repo.dirstate.normallookup(f)
1208 1209 else:
1209 1210 repo.dirstate.add(f)
1210 1211
1211 1212 # exec change
1212 1213 for f, args, msg in actions.get('e', []):
1213 1214 repo.dirstate.normallookup(f)
1214 1215
1215 1216 # keep
1216 1217 for f, args, msg in actions.get('k', []):
1217 1218 pass
1218 1219
1219 1220 # get
1220 1221 for f, args, msg in actions.get('g', []):
1221 1222 if branchmerge:
1222 1223 repo.dirstate.otherparent(f)
1223 1224 else:
1224 1225 repo.dirstate.normal(f)
1225 1226
1226 1227 # merge
1227 1228 for f, args, msg in actions.get('m', []):
1228 1229 f1, f2, fa, move, anc = args
1229 1230 if branchmerge:
1230 1231 # We've done a branch merge, mark this file as merged
1231 1232 # so that we properly record the merger later
1232 1233 repo.dirstate.merge(f)
1233 1234 if f1 != f2: # copy/rename
1234 1235 if move:
1235 1236 repo.dirstate.remove(f1)
1236 1237 if f1 != f:
1237 1238 repo.dirstate.copy(f1, f)
1238 1239 else:
1239 1240 repo.dirstate.copy(f2, f)
1240 1241 else:
1241 1242 # We've update-merged a locally modified file, so
1242 1243 # we set the dirstate to emulate a normal checkout
1243 1244 # of that file some time in the past. Thus our
1244 1245 # merge will appear as a normal local file
1245 1246 # modification.
1246 1247 if f2 == f: # file not locally copied/moved
1247 1248 repo.dirstate.normallookup(f)
1248 1249 if move:
1249 1250 repo.dirstate.drop(f1)
1250 1251
1251 1252 # directory rename, move local
1252 1253 for f, args, msg in actions.get('dm', []):
1253 1254 f0, flag = args
1254 1255 if branchmerge:
1255 1256 repo.dirstate.add(f)
1256 1257 repo.dirstate.remove(f0)
1257 1258 repo.dirstate.copy(f0, f)
1258 1259 else:
1259 1260 repo.dirstate.normal(f)
1260 1261 repo.dirstate.drop(f0)
1261 1262
1262 1263 # directory rename, get
1263 1264 for f, args, msg in actions.get('dg', []):
1264 1265 f0, flag = args
1265 1266 if branchmerge:
1266 1267 repo.dirstate.add(f)
1267 1268 repo.dirstate.copy(f0, f)
1268 1269 else:
1269 1270 repo.dirstate.normal(f)
1270 1271
1271 1272 def update(repo, node, branchmerge, force, ancestor=None,
1272 1273 mergeancestor=False, labels=None, matcher=None):
1273 1274 """
1274 1275 Perform a merge between the working directory and the given node
1275 1276
1276 1277 node = the node to update to, or None if unspecified
1277 1278 branchmerge = whether to merge between branches
1278 1279 force = whether to force branch merging or file overwriting
1279 1280 matcher = a matcher to filter file lists (dirstate not updated)
1280 1281 mergeancestor = whether it is merging with an ancestor. If true,
1281 1282 we should accept the incoming changes for any prompts that occur.
1282 1283 If false, merging with an ancestor (fast-forward) is only allowed
1283 1284 between different named branches. This flag is used by rebase extension
1284 1285 as a temporary fix and should be avoided in general.
1285 1286
1286 1287 The table below shows all the behaviors of the update command
1287 1288 given the -c and -C or no options, whether the working directory
1288 1289 is dirty, whether a revision is specified, and the relationship of
1289 1290 the parent rev to the target rev (linear, on the same named
1290 1291 branch, or on another named branch).
1291 1292
1292 1293 This logic is tested by test-update-branches.t.
1293 1294
1294 1295 -c -C dirty rev | linear same cross
1295 1296 n n n n | ok (1) x
1296 1297 n n n y | ok ok ok
1297 1298 n n y n | merge (2) (2)
1298 1299 n n y y | merge (3) (3)
1299 1300 n y * * | discard discard discard
1300 1301 y n y * | (4) (4) (4)
1301 1302 y n n * | ok ok ok
1302 1303 y y * * | (5) (5) (5)
1303 1304
1304 1305 x = can't happen
1305 1306 * = don't-care
1306 1307 1 = abort: not a linear update (merge or update --check to force update)
1307 1308 2 = abort: uncommitted changes (commit and merge, or update --clean to
1308 1309 discard changes)
1309 1310 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1310 1311 4 = abort: uncommitted changes (checked in commands.py)
1311 1312 5 = incompatible options (checked in commands.py)
1312 1313
1313 1314 Return the same tuple as applyupdates().
1314 1315 """
1315 1316
1316 1317 onode = node
1317 1318 wlock = repo.wlock()
1318 1319 # If we're doing a partial update, we need to skip updating
1319 1320 # the dirstate, so make a note of any partial-ness to the
1320 1321 # update here.
1321 1322 if matcher is None or matcher.always():
1322 1323 partial = False
1323 1324 else:
1324 1325 partial = True
1325 1326 try:
1326 1327 wc = repo[None]
1327 1328 pl = wc.parents()
1328 1329 p1 = pl[0]
1329 1330 pas = [None]
1330 1331 if ancestor is not None:
1331 1332 pas = [repo[ancestor]]
1332 1333
1333 1334 if node is None:
1334 1335 if (repo.ui.configbool('devel', 'all-warnings')
1335 1336 or repo.ui.configbool('devel', 'oldapi')):
1336 1337 repo.ui.develwarn('update with no target')
1337 1338 rev, _mark, _act = destutil.destupdate(repo)
1338 1339 node = repo[rev].node()
1339 1340
1340 1341 overwrite = force and not branchmerge
1341 1342
1342 1343 p2 = repo[node]
1343 1344 if pas[0] is None:
1344 1345 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1345 1346 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1346 1347 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1347 1348 else:
1348 1349 pas = [p1.ancestor(p2, warn=branchmerge)]
1349 1350
1350 1351 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1351 1352
1352 1353 ### check phase
1353 1354 if not overwrite:
1354 1355 if len(pl) > 1:
1355 1356 raise error.Abort(_("outstanding uncommitted merge"))
1356 1357 ms = mergestate.read(repo)
1357 1358 if list(ms.unresolved()):
1358 1359 raise error.Abort(_("outstanding merge conflicts"))
1359 1360 if branchmerge:
1360 1361 if pas == [p2]:
1361 1362 raise error.Abort(_("merging with a working directory ancestor"
1362 1363 " has no effect"))
1363 1364 elif pas == [p1]:
1364 1365 if not mergeancestor and p1.branch() == p2.branch():
1365 1366 raise error.Abort(_("nothing to merge"),
1366 1367 hint=_("use 'hg update' "
1367 1368 "or check 'hg heads'"))
1368 1369 if not force and (wc.files() or wc.deleted()):
1369 1370 raise error.Abort(_("uncommitted changes"),
1370 1371 hint=_("use 'hg status' to list changes"))
1371 1372 for s in sorted(wc.substate):
1372 1373 wc.sub(s).bailifchanged()
1373 1374
1374 1375 elif not overwrite:
1375 1376 if p1 == p2: # no-op update
1376 1377 # call the hooks and exit early
1377 1378 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1378 1379 repo.hook('update', parent1=xp2, parent2='', error=0)
1379 1380 return 0, 0, 0, 0
1380 1381
1381 1382 if pas not in ([p1], [p2]): # nonlinear
1382 1383 dirty = wc.dirty(missing=True)
1383 1384 if dirty or onode is None:
1384 1385                 # The branching here is a bit strange, to ensure we make the
1385 1386                 # minimal number of calls to obsolete.foreground.
1386 1387 foreground = obsolete.foreground(repo, [p1.node()])
1387 1388 # note: the <node> variable contains a random identifier
1388 1389 if repo[node].node() in foreground:
1389 1390 pas = [p1] # allow updating to successors
1390 1391 elif dirty:
1391 1392 msg = _("uncommitted changes")
1392 1393 if onode is None:
1393 1394 hint = _("commit and merge, or update --clean to"
1394 1395 " discard changes")
1395 1396 else:
1396 1397 hint = _("commit or update --clean to discard"
1397 1398 " changes")
1398 1399 raise error.Abort(msg, hint=hint)
1399 1400 else: # node is none
1400 1401 msg = _("not a linear update")
1401 1402 hint = _("merge or update --check to force update")
1402 1403 raise error.Abort(msg, hint=hint)
1403 1404 else:
1404 1405 # Allow jumping branches if clean and specific rev given
1405 1406 pas = [p1]
1406 1407
1407 1408 # deprecated config: merge.followcopies
1408 1409 followcopies = False
1409 1410 if overwrite:
1410 1411 pas = [wc]
1411 1412 elif pas == [p2]: # backwards
1412 1413 pas = [wc.p1()]
1413 1414 elif not branchmerge and not wc.dirty(missing=True):
1414 1415 pass
1415 1416 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1416 1417 followcopies = True
1417 1418
1418 1419 ### calculate phase
1419 1420 actionbyfile, diverge, renamedelete = calculateupdates(
1420 1421 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1421 1422 followcopies, matcher=matcher)
1422 1423 # Convert to dictionary-of-lists format
1423 1424 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1424 1425 for f, (m, args, msg) in actionbyfile.iteritems():
1425 1426 if m not in actions:
1426 1427 actions[m] = []
1427 1428 actions[m].append((f, args, msg))
1428 1429
1429 1430 if not util.checkcase(repo.path):
1430 1431 # check collision between files only in p2 for clean update
1431 1432 if (not branchmerge and
1432 1433 (force or not wc.dirty(missing=True, branch=False))):
1433 1434 _checkcollision(repo, p2.manifest(), None)
1434 1435 else:
1435 1436 _checkcollision(repo, wc.manifest(), actions)
1436 1437
1437 1438 # Prompt and create actions. Most of this is in the resolve phase
1438 1439 # already, but we can't handle .hgsubstate in filemerge or
1439 1440 # subrepo.submerge yet so we have to keep prompting for it.
1440 1441 for f, args, msg in sorted(actions['cd']):
1441 1442 if f != '.hgsubstate':
1442 1443 continue
1443 1444 if repo.ui.promptchoice(
1444 1445 _("local changed %s which remote deleted\n"
1445 1446 "use (c)hanged version or (d)elete?"
1446 1447 "$$ &Changed $$ &Delete") % f, 0):
1447 1448 actions['r'].append((f, None, "prompt delete"))
1448 1449 elif f in p1:
1449 1450 actions['am'].append((f, None, "prompt keep"))
1450 1451 else:
1451 1452 actions['a'].append((f, None, "prompt keep"))
1452 1453
1453 1454 for f, args, msg in sorted(actions['dc']):
1454 1455 if f != '.hgsubstate':
1455 1456 continue
1456 1457 f1, f2, fa, move, anc = args
1457 1458 flags = p2[f2].flags()
1458 1459 if repo.ui.promptchoice(
1459 1460 _("remote changed %s which local deleted\n"
1460 1461 "use (c)hanged version or leave (d)eleted?"
1461 1462 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1462 actions['g'].append((f, (flags,), "prompt recreating"))
1463 actions['g'].append((f, (flags, False), "prompt recreating"))
1463 1464
1464 1465 # divergent renames
1465 1466 for f, fl in sorted(diverge.iteritems()):
1466 1467 repo.ui.warn(_("note: possible conflict - %s was renamed "
1467 1468 "multiple times to:\n") % f)
1468 1469 for nf in fl:
1469 1470 repo.ui.warn(" %s\n" % nf)
1470 1471
1471 1472 # rename and delete
1472 1473 for f, fl in sorted(renamedelete.iteritems()):
1473 1474 repo.ui.warn(_("note: possible conflict - %s was deleted "
1474 1475 "and renamed to:\n") % f)
1475 1476 for nf in fl:
1476 1477 repo.ui.warn(" %s\n" % nf)
1477 1478
1478 1479 ### apply phase
1479 1480 if not branchmerge: # just jump to the new rev
1480 1481 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1481 1482 if not partial:
1482 1483 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1483 1484 # note that we're in the middle of an update
1484 1485 repo.vfs.write('updatestate', p2.hex())
1485 1486
1486 1487 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1487 1488
1488 1489 if not partial:
1489 1490 repo.dirstate.beginparentchange()
1490 1491 repo.setparents(fp1, fp2)
1491 1492 recordupdates(repo, actions, branchmerge)
1492 1493 # update completed, clear state
1493 1494 util.unlink(repo.join('updatestate'))
1494 1495
1495 1496 if not branchmerge:
1496 1497 repo.dirstate.setbranch(p2.branch())
1497 1498 repo.dirstate.endparentchange()
1498 1499 finally:
1499 1500 wlock.release()
1500 1501
1501 1502 if not partial:
1502 1503 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1503 1504 return stats
1504 1505
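For orientation, the following self-contained sketch shows one way the update() function defined above could be driven from Python; it assumes the script runs inside an existing Mercurial working copy and that the import paths match this (3.x-era) code base:

    from mercurial import hg, merge, ui

    repo = hg.repository(ui.ui(), '.')   # open the repository in the cwd
    node = repo['tip'].node()            # target revision: tip
    # plain working-copy update: no branch merge, no --clean style overwrite
    stats = merge.update(repo, node, branchmerge=False, force=False)
    print 'updated %d, merged %d, removed %d, unresolved %d' % stats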
1505 1506 def graft(repo, ctx, pctx, labels, keepparent=False):
1506 1507 """Do a graft-like merge.
1507 1508
1508 1509 This is a merge where the merge ancestor is chosen such that one
1509 1510 or more changesets are grafted onto the current changeset. In
1510 1511 addition to the merge, this fixes up the dirstate to include only
1511 1512 a single parent (if keepparent is False) and tries to duplicate any
1512 1513 renames/copies appropriately.
1513 1514
1514 1515 ctx - changeset to rebase
1515 1516 pctx - merge base, usually ctx.p1()
1516 1517 labels - merge labels eg ['local', 'graft']
1517 1518 keepparent - keep second parent if any
1518 1519
1519 1520 """
1520 1521 # If we're grafting a descendant onto an ancestor, be sure to pass
1521 1522 # mergeancestor=True to update. This does two things: 1) allows the merge if
1522 1523 # the destination is the same as the parent of the ctx (so we can use graft
1523 1524 # to copy commits), and 2) informs update that the incoming changes are
1524 1525 # newer than the destination so it doesn't prompt about "remote changed foo
1525 1526 # which local deleted".
1526 1527 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1527 1528
1528 1529 stats = update(repo, ctx.node(), True, True, pctx.node(),
1529 1530 mergeancestor=mergeancestor, labels=labels)
1530 1531
1531 1532 pother = nullid
1532 1533 parents = ctx.parents()
1533 1534 if keepparent and len(parents) == 2 and pctx in parents:
1534 1535 parents.remove(pctx)
1535 1536 pother = parents[0].node()
1536 1537
1537 1538 repo.dirstate.beginparentchange()
1538 1539 repo.setparents(repo['.'].node(), pother)
1539 1540 repo.dirstate.write(repo.currenttransaction())
1540 1541 # fix up dirstate for copies and renames
1541 1542 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1542 1543 repo.dirstate.endparentchange()
1543 1544 return stats