merge: use separate lists for each action type...
Mads Kiilerich
r21545:43eecb4e default
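The substance of the change, visible in both hunks below: the actions value produced by manifestmerge / calculateupdates is no longer one flat list of (file, type, args, msg) tuples but a dict keyed by the action-type character, with each value a list of (file, args, msg) tuples. Callers such as the largefiles overridecalculateupdates wrapper now read and append to per-type lists instead of filtering on the type field. A rough before/after sketch of the shape (file names, flags and messages are illustrative only, not taken from a real merge):

    # old shape: one flat list mixing every action type
    actions = [
        ('foo.txt', 'g', ('',), 'remote created'),
        ('bar.txt', 'r', None, 'other deleted'),
    ]
    removes = set(a[0] for a in actions if a[1] == 'r')

    # new shape: one list per action type, keyed by the type character
    actions = {
        'a': [], 'f': [], 'cd': [], 'dc': [], 'dm': [], 'dg': [],
        'm': [], 'dr': [], 'e': [], 'rd': [], 'k': [],
        'g': [('foo.txt', ('',), 'remote created')],
        'r': [('bar.txt', None, 'other deleted')],
    }
    removes = set(a[0] for a in actions['r'])

Consumers that used to dispatch on the type field now just index actions[m]; the largefiles override rebuilds and re-sorts the 'g' list rather than emitting a new combined list.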
@@ -1,1174 +1,1175 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 archival, merge, pathutil, revset
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22 import basestore
23 23
24 24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25 25
26 26 def installnormalfilesmatchfn(manifest):
27 27 '''installmatchfn with a matchfn that ignores all largefiles'''
28 28 def overridematch(ctx, pats=[], opts={}, globbed=False,
29 29 default='relpath'):
30 30 match = oldmatch(ctx, pats, opts, globbed, default)
31 31 m = copy.copy(match)
32 32 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 33 manifest)
34 34 m._files = filter(notlfile, m._files)
35 35 m._fmap = set(m._files)
36 36 m._always = False
37 37 origmatchfn = m.matchfn
38 38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
39 39 return m
40 40 oldmatch = installmatchfn(overridematch)
41 41
42 42 def installmatchfn(f):
43 43 '''monkey patch the scmutil module with a custom match function.
44 44 Warning: it is monkey patching the _module_ at runtime! Not thread safe!'''
45 45 oldmatch = scmutil.match
46 46 setattr(f, 'oldmatch', oldmatch)
47 47 scmutil.match = f
48 48 return oldmatch
49 49
50 50 def restorematchfn():
51 51 '''restores scmutil.match to what it was before installmatchfn
52 52 was called. no-op if scmutil.match is its original function.
53 53
54 54 Note that n calls to installmatchfn will require n calls to
55 55 restorematchfn to reverse'''
56 56 scmutil.match = getattr(scmutil.match, 'oldmatch')
57 57
58 58 def installmatchandpatsfn(f):
59 59 oldmatchandpats = scmutil.matchandpats
60 60 setattr(f, 'oldmatchandpats', oldmatchandpats)
61 61 scmutil.matchandpats = f
62 62 return oldmatchandpats
63 63
64 64 def restorematchandpatsfn():
65 65 '''restores scmutil.matchandpats to what it was before
66 66 installmatchandpatsfn was called. no-op if scmutil.matchandpats
67 67 is its original function.
68 68
69 69 Note that n calls to installmatchandpatsfn will require n calls
70 70 to restorematchandpatsfn to reverse'''
71 71 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
72 72 scmutil.matchandpats)
73 73
74 74 def addlargefiles(ui, repo, *pats, **opts):
75 75 large = opts.pop('large', None)
76 76 lfsize = lfutil.getminsize(
77 77 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
78 78
79 79 lfmatcher = None
80 80 if lfutil.islfilesrepo(repo):
81 81 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
82 82 if lfpats:
83 83 lfmatcher = match_.match(repo.root, '', list(lfpats))
84 84
85 85 lfnames = []
86 86 m = scmutil.match(repo[None], pats, opts)
87 87 m.bad = lambda x, y: None
88 88 wctx = repo[None]
89 89 for f in repo.walk(m):
90 90 exact = m.exact(f)
91 91 lfile = lfutil.standin(f) in wctx
92 92 nfile = f in wctx
93 93 exists = lfile or nfile
94 94
95 95 # Don't warn the user when they attempt to add a normal tracked file.
96 96 # The normal add code will do that for us.
97 97 if exact and exists:
98 98 if lfile:
99 99 ui.warn(_('%s already a largefile\n') % f)
100 100 continue
101 101
102 102 if (exact or not exists) and not lfutil.isstandin(f):
103 103 wfile = repo.wjoin(f)
104 104
105 105 # In case the file was removed previously, but not committed
106 106 # (issue3507)
107 107 if not os.path.exists(wfile):
108 108 continue
109 109
110 110 abovemin = (lfsize and
111 111 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
112 112 if large or abovemin or (lfmatcher and lfmatcher(f)):
113 113 lfnames.append(f)
114 114 if ui.verbose or not exact:
115 115 ui.status(_('adding %s as a largefile\n') % m.rel(f))
116 116
117 117 bad = []
118 118 standins = []
119 119
120 120 # Need to lock, otherwise there could be a race condition between
121 121 # when standins are created and added to the repo.
122 122 wlock = repo.wlock()
123 123 try:
124 124 if not opts.get('dry_run'):
125 125 lfdirstate = lfutil.openlfdirstate(ui, repo)
126 126 for f in lfnames:
127 127 standinname = lfutil.standin(f)
128 128 lfutil.writestandin(repo, standinname, hash='',
129 129 executable=lfutil.getexecutable(repo.wjoin(f)))
130 130 standins.append(standinname)
131 131 if lfdirstate[f] == 'r':
132 132 lfdirstate.normallookup(f)
133 133 else:
134 134 lfdirstate.add(f)
135 135 lfdirstate.write()
136 136 bad += [lfutil.splitstandin(f)
137 137 for f in repo[None].add(standins)
138 138 if f in m.files()]
139 139 finally:
140 140 wlock.release()
141 141 return bad
142 142
143 143 def removelargefiles(ui, repo, *pats, **opts):
144 144 after = opts.get('after')
145 145 if not pats and not after:
146 146 raise util.Abort(_('no files specified'))
147 147 m = scmutil.match(repo[None], pats, opts)
148 148 try:
149 149 repo.lfstatus = True
150 150 s = repo.status(match=m, clean=True)
151 151 finally:
152 152 repo.lfstatus = False
153 153 manifest = repo[None].manifest()
154 154 modified, added, deleted, clean = [[f for f in list
155 155 if lfutil.standin(f) in manifest]
156 156 for list in [s[0], s[1], s[3], s[6]]]
157 157
158 158 def warn(files, msg):
159 159 for f in files:
160 160 ui.warn(msg % m.rel(f))
161 161 return int(len(files) > 0)
162 162
163 163 result = 0
164 164
165 165 if after:
166 166 remove, forget = deleted, []
167 167 result = warn(modified + added + clean,
168 168 _('not removing %s: file still exists\n'))
169 169 else:
170 170 remove, forget = deleted + clean, []
171 171 result = warn(modified, _('not removing %s: file is modified (use -f'
172 172 ' to force removal)\n'))
173 173 result = warn(added, _('not removing %s: file has been marked for add'
174 174 ' (use forget to undo)\n')) or result
175 175
176 176 for f in sorted(remove + forget):
177 177 if ui.verbose or not m.exact(f):
178 178 ui.status(_('removing %s\n') % m.rel(f))
179 179
180 180 # Need to lock because standin files are deleted then removed from the
181 181 # repository and we could race in-between.
182 182 wlock = repo.wlock()
183 183 try:
184 184 lfdirstate = lfutil.openlfdirstate(ui, repo)
185 185 for f in remove:
186 186 if not after:
187 187 # If this is being called by addremove, notify the user that we
188 188 # are removing the file.
189 189 if getattr(repo, "_isaddremove", False):
190 190 ui.status(_('removing %s\n') % f)
191 191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
192 192 lfdirstate.remove(f)
193 193 lfdirstate.write()
194 194 forget = [lfutil.standin(f) for f in forget]
195 195 remove = [lfutil.standin(f) for f in remove]
196 196 repo[None].forget(forget)
197 197 # If this is being called by addremove, let the original addremove
198 198 # function handle this.
199 199 if not getattr(repo, "_isaddremove", False):
200 200 for f in remove:
201 201 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
202 202 repo[None].forget(remove)
203 203 finally:
204 204 wlock.release()
205 205
206 206 return result
207 207
208 208 # For overriding mercurial.hgweb.webcommands so that largefiles will
209 209 # appear at their right place in the manifests.
210 210 def decodepath(orig, path):
211 211 return lfutil.splitstandin(path) or path
212 212
213 213 # -- Wrappers: modify existing commands --------------------------------
214 214
215 215 # Add works by going through the files that the user wanted to add and
216 216 # checking if they should be added as largefiles. Then it makes a new
217 217 # matcher which matches only the normal files and runs the original
218 218 # version of add.
219 219 def overrideadd(orig, ui, repo, *pats, **opts):
220 220 normal = opts.pop('normal')
221 221 if normal:
222 222 if opts.get('large'):
223 223 raise util.Abort(_('--normal cannot be used with --large'))
224 224 return orig(ui, repo, *pats, **opts)
225 225 bad = addlargefiles(ui, repo, *pats, **opts)
226 226 installnormalfilesmatchfn(repo[None].manifest())
227 227 result = orig(ui, repo, *pats, **opts)
228 228 restorematchfn()
229 229
230 230 return (result == 1 or bad) and 1 or 0
231 231
232 232 def overrideremove(orig, ui, repo, *pats, **opts):
233 233 installnormalfilesmatchfn(repo[None].manifest())
234 234 result = orig(ui, repo, *pats, **opts)
235 235 restorematchfn()
236 236 return removelargefiles(ui, repo, *pats, **opts) or result
237 237
238 238 def overridestatusfn(orig, repo, rev2, **opts):
239 239 try:
240 240 repo._repo.lfstatus = True
241 241 return orig(repo, rev2, **opts)
242 242 finally:
243 243 repo._repo.lfstatus = False
244 244
245 245 def overridestatus(orig, ui, repo, *pats, **opts):
246 246 try:
247 247 repo.lfstatus = True
248 248 return orig(ui, repo, *pats, **opts)
249 249 finally:
250 250 repo.lfstatus = False
251 251
252 252 def overridedirty(orig, repo, ignoreupdate=False):
253 253 try:
254 254 repo._repo.lfstatus = True
255 255 return orig(repo, ignoreupdate)
256 256 finally:
257 257 repo._repo.lfstatus = False
258 258
259 259 def overridelog(orig, ui, repo, *pats, **opts):
260 260 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
261 261 default='relpath'):
262 262 """Matcher that merges root directory with .hglf, suitable for log.
263 263 It is still possible to match .hglf directly.
264 264 For any listed files run log on the standin too.
265 265 matchfn tries both the given filename and with .hglf stripped.
266 266 """
267 267 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
268 268 m, p = copy.copy(matchandpats)
269 269
270 270 pats = set(p)
271 271 # TODO: handling of patterns in both cases below
272 272 if m._cwd:
273 273 if os.path.isabs(m._cwd):
274 274 # TODO: handle largefile magic when invoked from other cwd
275 275 return matchandpats
276 276 back = (m._cwd.count('/') + 1) * '../'
277 277 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
278 278 else:
279 279 pats.update(lfutil.standin(f) for f in p)
280 280
281 281 for i in range(0, len(m._files)):
282 282 standin = lfutil.standin(m._files[i])
283 283 if standin in repo[ctx.node()]:
284 284 m._files[i] = standin
285 285 elif m._files[i] not in repo[ctx.node()]:
286 286 m._files.append(standin)
287 287 pats.add(standin)
288 288
289 289 m._fmap = set(m._files)
290 290 m._always = False
291 291 origmatchfn = m.matchfn
292 292 def lfmatchfn(f):
293 293 lf = lfutil.splitstandin(f)
294 294 if lf is not None and origmatchfn(lf):
295 295 return True
296 296 r = origmatchfn(f)
297 297 return r
298 298 m.matchfn = lfmatchfn
299 299
300 300 return m, pats
301 301
302 302 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
303 303 try:
304 304 repo.lfstatus = True
305 305 return orig(ui, repo, *pats, **opts)
306 306 finally:
307 307 repo.lfstatus = False
308 308 restorematchandpatsfn()
309 309
310 310 def overrideverify(orig, ui, repo, *pats, **opts):
311 311 large = opts.pop('large', False)
312 312 all = opts.pop('lfa', False)
313 313 contents = opts.pop('lfc', False)
314 314
315 315 result = orig(ui, repo, *pats, **opts)
316 316 if large or all or contents:
317 317 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
318 318 return result
319 319
320 320 def overridedebugstate(orig, ui, repo, *pats, **opts):
321 321 large = opts.pop('large', False)
322 322 if large:
323 323 class fakerepo(object):
324 324 dirstate = lfutil.openlfdirstate(ui, repo)
325 325 orig(ui, fakerepo, *pats, **opts)
326 326 else:
327 327 orig(ui, repo, *pats, **opts)
328 328
329 329 # Override needs to refresh standins so that update's normal merge
330 330 # will go through properly. Then the other update hook (overriding repo.update)
331 331 # will get the new files. Filemerge is also overridden so that the merge
332 332 # will merge standins correctly.
333 333 def overrideupdate(orig, ui, repo, *pats, **opts):
334 334 # Need to lock between the standins getting updated and their
335 335 # largefiles getting updated
336 336 wlock = repo.wlock()
337 337 try:
338 338 lfdirstate = lfutil.openlfdirstate(ui, repo)
339 339 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
340 340 [], False, False, False)
341 341 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
342 342
343 343 if opts['check']:
344 344 mod = len(modified) > 0
345 345 for lfile in unsure:
346 346 standin = lfutil.standin(lfile)
347 347 if repo['.'][standin].data().strip() != \
348 348 lfutil.hashfile(repo.wjoin(lfile)):
349 349 mod = True
350 350 else:
351 351 lfdirstate.normal(lfile)
352 352 lfdirstate.write()
353 353 if mod:
354 354 raise util.Abort(_('uncommitted changes'))
355 355 # XXX handle removed differently
356 356 if not opts['clean']:
357 357 for lfile in unsure + modified + added:
358 358 lfutil.updatestandin(repo, lfutil.standin(lfile))
359 359 return orig(ui, repo, *pats, **opts)
360 360 finally:
361 361 wlock.release()
362 362
363 363 # Before starting the manifest merge, merge.updates will call
364 364 # _checkunknown to check if there are any files in the merged-in
365 365 # changeset that collide with unknown files in the working copy.
366 366 #
367 367 # The largefiles are seen as unknown, so this prevents us from merging
368 368 # in a file 'foo' if we already have a largefile with the same name.
369 369 #
370 370 # The overridden function filters the unknown files by removing any
371 371 # largefiles. This makes the merge proceed and we can then handle this
372 372 # case further in the overridden manifestmerge function below.
373 373 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
374 374 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
375 375 return False
376 376 return origfn(repo, wctx, mctx, f)
377 377
378 378 # The manifest merge handles conflicts on the manifest level. We want
379 379 # to handle changes in largefile-ness of files at this level too.
380 380 #
381 381 # The strategy is to run the original manifestmerge and then process
382 382 # the action list it outputs. There are two cases we need to deal with:
383 383 #
384 384 # 1. Normal file in p1, largefile in p2. Here the largefile is
385 385 # detected via its standin file, which will enter the working copy
386 386 # with a "get" action. It is not "merge" since the standin is all
387 387 # Mercurial is concerned with at this level -- the link to the
388 388 # existing normal file is not relevant here.
389 389 #
390 390 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
391 391 # since the largefile will be present in the working copy and
392 392 # different from the normal file in p2. Mercurial therefore
393 393 # triggers a merge action.
394 394 #
395 395 # In both cases, we prompt the user and emit new actions to either
396 396 # remove the standin (if the normal file was kept) or to remove the
397 397 # normal file and get the standin (if the largefile was kept). The
398 398 # default prompt answer is to use the largefile version since it was
399 399 # presumably changed on purpose.
400 400 #
401 401 # Finally, the merge.applyupdates function will then take care of
402 402 # writing the files into the working copy and lfcommands.updatelfiles
403 403 # will update the largefiles.
404 404 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
405 405 partial, acceptremote, followcopies):
406 406 overwrite = force and not branchmerge
407 407 actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
408 408 acceptremote, followcopies)
409 409
410 410 if overwrite:
411 411 return actions
412 412
413 removes = set(a[0] for a in actions if a[1] == 'r')
414 processed = []
413 removes = set(a[0] for a in actions['r'])
415 414
416 for action in actions:
417 f, m, args, msg = action
418
415 newglist = []
416 for action in actions['g']:
417 f, args, msg = action
419 418 splitstandin = f and lfutil.splitstandin(f)
420 if (m == "g" and splitstandin is not None and
419 if (splitstandin is not None and
421 420 splitstandin in p1 and splitstandin not in removes):
422 421 # Case 1: normal file in the working copy, largefile in
423 422 # the second parent
424 423 lfile = splitstandin
425 424 standin = f
426 425 msg = _('remote turned local normal file %s into a largefile\n'
427 426 'use (l)argefile or keep (n)ormal file?'
428 427 '$$ &Largefile $$ &Normal file') % lfile
429 428 if repo.ui.promptchoice(msg, 0) == 0:
430 processed.append((lfile, "r", None, msg))
431 processed.append((standin, "g", (p2.flags(standin),), msg))
429 actions['r'].append((lfile, None, msg))
430 newglist.append((standin, (p2.flags(standin),), msg))
432 431 else:
433 processed.append((standin, "r", None, msg))
434 elif (m == "g" and
435 lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
432 actions['r'].append((standin, None, msg))
433 elif lfutil.standin(f) in p1 and lfutil.standin(f) not in removes:
436 434 # Case 2: largefile in the working copy, normal file in
437 435 # the second parent
438 436 standin = lfutil.standin(f)
439 437 lfile = f
440 438 msg = _('remote turned local largefile %s into a normal file\n'
441 439 'keep (l)argefile or use (n)ormal file?'
442 440 '$$ &Largefile $$ &Normal file') % lfile
443 441 if repo.ui.promptchoice(msg, 0) == 0:
444 processed.append((lfile, "r", None, msg))
442 actions['r'].append((lfile, None, msg))
445 443 else:
446 processed.append((standin, "r", None, msg))
447 processed.append((lfile, "g", (p2.flags(lfile),), msg))
444 actions['r'].append((standin, None, msg))
445 newglist.append((lfile, (p2.flags(lfile),), msg))
448 446 else:
449 processed.append(action)
447 newglist.append(action)
450 448
451 return processed
449 newglist.sort()
450 actions['g'] = newglist
451
452 return actions
452 453
453 454 # Override filemerge to prompt the user about how they wish to merge
454 455 # largefiles. This will handle identical edits without prompting the user.
455 456 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
456 457 if not lfutil.isstandin(orig):
457 458 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
458 459
459 460 ahash = fca.data().strip().lower()
460 461 dhash = fcd.data().strip().lower()
461 462 ohash = fco.data().strip().lower()
462 463 if (ohash != ahash and
463 464 ohash != dhash and
464 465 (dhash == ahash or
465 466 repo.ui.promptchoice(
466 467 _('largefile %s has a merge conflict\nancestor was %s\n'
467 468 'keep (l)ocal %s or\ntake (o)ther %s?'
468 469 '$$ &Local $$ &Other') %
469 470 (lfutil.splitstandin(orig), ahash, dhash, ohash),
470 471 0) == 1)):
471 472 repo.wwrite(fcd.path(), fco.data(), fco.flags())
472 473 return 0
473 474
474 475 # Copy first changes the matchers to match standins instead of
475 476 # largefiles. Then it overrides util.copyfile so that it
476 477 # checks whether the destination largefile already exists. It also keeps a
477 478 # list of copied files so that the largefiles can be copied and the
478 479 # dirstate updated.
479 480 def overridecopy(orig, ui, repo, pats, opts, rename=False):
480 481 # doesn't remove largefile on rename
481 482 if len(pats) < 2:
482 483 # this isn't legal, let the original function deal with it
483 484 return orig(ui, repo, pats, opts, rename)
484 485
485 486 def makestandin(relpath):
486 487 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
487 488 return os.path.join(repo.wjoin(lfutil.standin(path)))
488 489
489 490 fullpats = scmutil.expandpats(pats)
490 491 dest = fullpats[-1]
491 492
492 493 if os.path.isdir(dest):
493 494 if not os.path.isdir(makestandin(dest)):
494 495 os.makedirs(makestandin(dest))
495 496 # This could copy both lfiles and normal files in one command,
496 497 # but we don't want to do that. First replace their matcher to
497 498 # only match normal files and run it, then replace it to just
498 499 # match largefiles and run it again.
499 500 nonormalfiles = False
500 501 nolfiles = False
501 502 installnormalfilesmatchfn(repo[None].manifest())
502 503 try:
503 504 try:
504 505 result = orig(ui, repo, pats, opts, rename)
505 506 except util.Abort, e:
506 507 if str(e) != _('no files to copy'):
507 508 raise e
508 509 else:
509 510 nonormalfiles = True
510 511 result = 0
511 512 finally:
512 513 restorematchfn()
513 514
514 515 # The first rename can cause our current working directory to be removed.
515 516 # In that case there is nothing left to copy/rename so just quit.
516 517 try:
517 518 repo.getcwd()
518 519 except OSError:
519 520 return result
520 521
521 522 try:
522 523 try:
523 524 # When we call orig below it creates the standins, but we don't add
524 525 # them to the dirstate until later, so lock during that time.
525 526 wlock = repo.wlock()
526 527
527 528 manifest = repo[None].manifest()
528 529 def overridematch(ctx, pats=[], opts={}, globbed=False,
529 530 default='relpath'):
530 531 newpats = []
531 532 # The patterns were previously mangled to add the standin
532 533 # directory; we need to remove that now
533 534 for pat in pats:
534 535 if match_.patkind(pat) is None and lfutil.shortname in pat:
535 536 newpats.append(pat.replace(lfutil.shortname, ''))
536 537 else:
537 538 newpats.append(pat)
538 539 match = oldmatch(ctx, newpats, opts, globbed, default)
539 540 m = copy.copy(match)
540 541 lfile = lambda f: lfutil.standin(f) in manifest
541 542 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
542 543 m._fmap = set(m._files)
543 544 m._always = False
544 545 origmatchfn = m.matchfn
545 546 m.matchfn = lambda f: (lfutil.isstandin(f) and
546 547 (f in manifest) and
547 548 origmatchfn(lfutil.splitstandin(f)) or
548 549 None)
549 550 return m
550 551 oldmatch = installmatchfn(overridematch)
551 552 listpats = []
552 553 for pat in pats:
553 554 if match_.patkind(pat) is not None:
554 555 listpats.append(pat)
555 556 else:
556 557 listpats.append(makestandin(pat))
557 558
558 559 try:
559 560 origcopyfile = util.copyfile
560 561 copiedfiles = []
561 562 def overridecopyfile(src, dest):
562 563 if (lfutil.shortname in src and
563 564 dest.startswith(repo.wjoin(lfutil.shortname))):
564 565 destlfile = dest.replace(lfutil.shortname, '')
565 566 if not opts['force'] and os.path.exists(destlfile):
566 567 raise IOError('',
567 568 _('destination largefile already exists'))
568 569 copiedfiles.append((src, dest))
569 570 origcopyfile(src, dest)
570 571
571 572 util.copyfile = overridecopyfile
572 573 result += orig(ui, repo, listpats, opts, rename)
573 574 finally:
574 575 util.copyfile = origcopyfile
575 576
576 577 lfdirstate = lfutil.openlfdirstate(ui, repo)
577 578 for (src, dest) in copiedfiles:
578 579 if (lfutil.shortname in src and
579 580 dest.startswith(repo.wjoin(lfutil.shortname))):
580 581 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
581 582 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
582 583 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
583 584 if not os.path.isdir(destlfiledir):
584 585 os.makedirs(destlfiledir)
585 586 if rename:
586 587 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
587 588
588 589 # The file is gone, but this deletes any empty parent
589 590 # directories as a side-effect.
590 591 util.unlinkpath(repo.wjoin(srclfile), True)
591 592 lfdirstate.remove(srclfile)
592 593 else:
593 594 util.copyfile(repo.wjoin(srclfile),
594 595 repo.wjoin(destlfile))
595 596
596 597 lfdirstate.add(destlfile)
597 598 lfdirstate.write()
598 599 except util.Abort, e:
599 600 if str(e) != _('no files to copy'):
600 601 raise e
601 602 else:
602 603 nolfiles = True
603 604 finally:
604 605 restorematchfn()
605 606 wlock.release()
606 607
607 608 if nolfiles and nonormalfiles:
608 609 raise util.Abort(_('no files to copy'))
609 610
610 611 return result
611 612
612 613 # When the user calls revert, we have to be careful to not revert any
613 614 # changes to other largefiles accidentally. This means we have to keep
614 615 # track of the largefiles that are being reverted so we only pull down
615 616 # the necessary largefiles.
616 617 #
617 618 # Standins are only updated (to match the hash of largefiles) before
618 619 # commits. Update the standins then run the original revert, changing
619 620 # the matcher to hit standins instead of largefiles. Based on the
620 621 # resulting standins update the largefiles.
621 622 def overriderevert(orig, ui, repo, *pats, **opts):
622 623 # Because we put the standins in a bad state (by updating them)
623 624 # and then return them to a correct state we need to lock to
624 625 # prevent others from changing them in their incorrect state.
625 626 wlock = repo.wlock()
626 627 try:
627 628 lfdirstate = lfutil.openlfdirstate(ui, repo)
628 629 (modified, added, removed, missing, unknown, ignored, clean) = \
629 630 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
630 631 lfdirstate.write()
631 632 for lfile in modified:
632 633 lfutil.updatestandin(repo, lfutil.standin(lfile))
633 634 for lfile in missing:
634 635 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
635 636 os.unlink(repo.wjoin(lfutil.standin(lfile)))
636 637
637 638 oldstandins = lfutil.getstandinsstate(repo)
638 639
639 640 def overridematch(ctx, pats=[], opts={}, globbed=False,
640 641 default='relpath'):
641 642 match = oldmatch(ctx, pats, opts, globbed, default)
642 643 m = copy.copy(match)
643 644 def tostandin(f):
644 645 if lfutil.standin(f) in ctx:
645 646 return lfutil.standin(f)
646 647 elif lfutil.standin(f) in repo[None]:
647 648 return None
648 649 return f
649 650 m._files = [tostandin(f) for f in m._files]
650 651 m._files = [f for f in m._files if f is not None]
651 652 m._fmap = set(m._files)
652 653 m._always = False
653 654 origmatchfn = m.matchfn
654 655 def matchfn(f):
655 656 if lfutil.isstandin(f):
656 657 return (origmatchfn(lfutil.splitstandin(f)) and
657 658 (f in repo[None] or f in ctx))
658 659 return origmatchfn(f)
659 660 m.matchfn = matchfn
660 661 return m
661 662 oldmatch = installmatchfn(overridematch)
662 663 try:
663 664 orig(ui, repo, *pats, **opts)
664 665 finally:
665 666 restorematchfn()
666 667
667 668 newstandins = lfutil.getstandinsstate(repo)
668 669 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
669 670 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False)
670 671
671 672 finally:
672 673 wlock.release()
673 674
674 675 def hgupdaterepo(orig, repo, node, overwrite):
675 676 if not overwrite:
676 677 # Only call updatelfiles on the standins that have changed to save time
677 678 oldstandins = lfutil.getstandinsstate(repo)
678 679
679 680 result = orig(repo, node, overwrite)
680 681
681 682 filelist = None
682 683 if not overwrite:
683 684 newstandins = lfutil.getstandinsstate(repo)
684 685 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
685 686 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
686 687 return result
687 688
688 689 def hgmerge(orig, repo, node, force=None, remind=True):
689 690 result = orig(repo, node, force, remind)
690 691 lfcommands.updatelfiles(repo.ui, repo)
691 692 return result
692 693
693 694 # When we rebase a repository with remotely changed largefiles, we need to
694 695 # take some extra care so that the largefiles are correctly updated in the
695 696 # working copy
696 697 def overridepull(orig, ui, repo, source=None, **opts):
697 698 revsprepull = len(repo)
698 699 if not source:
699 700 source = 'default'
700 701 repo.lfpullsource = source
701 702 if opts.get('rebase', False):
702 703 repo._isrebasing = True
703 704 try:
704 705 if opts.get('update'):
705 706 del opts['update']
706 707 ui.debug('--update and --rebase are not compatible, ignoring '
707 708 'the update flag\n')
708 709 del opts['rebase']
709 710 origpostincoming = commands.postincoming
710 711 def _dummy(*args, **kwargs):
711 712 pass
712 713 commands.postincoming = _dummy
713 714 try:
714 715 result = commands.pull(ui, repo, source, **opts)
715 716 finally:
716 717 commands.postincoming = origpostincoming
717 718 revspostpull = len(repo)
718 719 if revspostpull > revsprepull:
719 720 result = result or rebase.rebase(ui, repo)
720 721 finally:
721 722 repo._isrebasing = False
722 723 else:
723 724 result = orig(ui, repo, source, **opts)
724 725 revspostpull = len(repo)
725 726 lfrevs = opts.get('lfrev', [])
726 727 if opts.get('all_largefiles'):
727 728 lfrevs.append('pulled()')
728 729 if lfrevs and revspostpull > revsprepull:
729 730 numcached = 0
730 731 repo.firstpulled = revsprepull # for pulled() revset expression
731 732 try:
732 733 for rev in scmutil.revrange(repo, lfrevs):
733 734 ui.note(_('pulling largefiles for revision %s\n') % rev)
734 735 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
735 736 numcached += len(cached)
736 737 finally:
737 738 del repo.firstpulled
738 739 ui.status(_("%d largefiles cached\n") % numcached)
739 740 return result
740 741
741 742 def pulledrevsetsymbol(repo, subset, x):
742 743 """``pulled()``
743 744 Changesets that have just been pulled.
744 745
745 746 Only available with largefiles from pull --lfrev expressions.
746 747
747 748 .. container:: verbose
748 749
749 750 Some examples:
750 751
751 752 - pull largefiles for all new changesets::
752 753
753 754 hg pull -lfrev "pulled()"
754 755
755 756 - pull largefiles for all new branch heads::
756 757
757 758 hg pull -lfrev "head(pulled()) and not closed()"
758 759
759 760 """
760 761
761 762 try:
762 763 firstpulled = repo.firstpulled
763 764 except AttributeError:
764 765 raise util.Abort(_("pulled() only available in --lfrev"))
765 766 return revset.baseset([r for r in subset if r >= firstpulled])
766 767
767 768 def overrideclone(orig, ui, source, dest=None, **opts):
768 769 d = dest
769 770 if d is None:
770 771 d = hg.defaultdest(source)
771 772 if opts.get('all_largefiles') and not hg.islocal(d):
772 773 raise util.Abort(_(
773 774 '--all-largefiles is incompatible with non-local destination %s') %
774 775 d)
775 776
776 777 return orig(ui, source, dest, **opts)
777 778
778 779 def hgclone(orig, ui, opts, *args, **kwargs):
779 780 result = orig(ui, opts, *args, **kwargs)
780 781
781 782 if result is not None:
782 783 sourcerepo, destrepo = result
783 784 repo = destrepo.local()
784 785
785 786 # Caching is implicitly limited to 'rev' option, since the dest repo was
786 787 # truncated at that point. The user may expect a download count with
787 788 # this option, so attempt whether or not this is a largefile repo.
788 789 if opts.get('all_largefiles'):
789 790 success, missing = lfcommands.downloadlfiles(ui, repo, None)
790 791
791 792 if missing != 0:
792 793 return None
793 794
794 795 return result
795 796
796 797 def overriderebase(orig, ui, repo, **opts):
797 798 repo._isrebasing = True
798 799 try:
799 800 return orig(ui, repo, **opts)
800 801 finally:
801 802 repo._isrebasing = False
802 803
803 804 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
804 805 prefix=None, mtime=None, subrepos=None):
805 806 # No need to lock because we are only reading history and
806 807 # largefile caches, neither of which are modified.
807 808 lfcommands.cachelfiles(repo.ui, repo, node)
808 809
809 810 if kind not in archival.archivers:
810 811 raise util.Abort(_("unknown archive type '%s'") % kind)
811 812
812 813 ctx = repo[node]
813 814
814 815 if kind == 'files':
815 816 if prefix:
816 817 raise util.Abort(
817 818 _('cannot give prefix when archiving to files'))
818 819 else:
819 820 prefix = archival.tidyprefix(dest, kind, prefix)
820 821
821 822 def write(name, mode, islink, getdata):
822 823 if matchfn and not matchfn(name):
823 824 return
824 825 data = getdata()
825 826 if decode:
826 827 data = repo.wwritedata(name, data)
827 828 archiver.addfile(prefix + name, mode, islink, data)
828 829
829 830 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
830 831
831 832 if repo.ui.configbool("ui", "archivemeta", True):
832 833 def metadata():
833 834 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
834 835 hex(repo.changelog.node(0)), hex(node), ctx.branch())
835 836
836 837 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
837 838 if repo.tagtype(t) == 'global')
838 839 if not tags:
839 840 repo.ui.pushbuffer()
840 841 opts = {'template': '{latesttag}\n{latesttagdistance}',
841 842 'style': '', 'patch': None, 'git': None}
842 843 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
843 844 ltags, dist = repo.ui.popbuffer().split('\n')
844 845 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
845 846 tags += 'latesttagdistance: %s\n' % dist
846 847
847 848 return base + tags
848 849
849 850 write('.hg_archival.txt', 0644, False, metadata)
850 851
851 852 for f in ctx:
852 853 ff = ctx.flags(f)
853 854 getdata = ctx[f].data
854 855 if lfutil.isstandin(f):
855 856 path = lfutil.findfile(repo, getdata().strip())
856 857 if path is None:
857 858 raise util.Abort(
858 859 _('largefile %s not found in repo store or system cache')
859 860 % lfutil.splitstandin(f))
860 861 f = lfutil.splitstandin(f)
861 862
862 863 def getdatafn():
863 864 fd = None
864 865 try:
865 866 fd = open(path, 'rb')
866 867 return fd.read()
867 868 finally:
868 869 if fd:
869 870 fd.close()
870 871
871 872 getdata = getdatafn
872 873 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
873 874
874 875 if subrepos:
875 876 for subpath in sorted(ctx.substate):
876 877 sub = ctx.sub(subpath)
877 878 submatch = match_.narrowmatcher(subpath, matchfn)
878 879 sub.archive(repo.ui, archiver, prefix, submatch)
879 880
880 881 archiver.done()
881 882
882 883 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
883 884 repo._get(repo._state + ('hg',))
884 885 rev = repo._state[1]
885 886 ctx = repo._repo[rev]
886 887
887 888 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
888 889
889 890 def write(name, mode, islink, getdata):
890 891 # At this point, the standin has been replaced with the largefile name,
891 892 # so the normal matcher works here without the lfutil variants.
892 893 if match and not match(f):
893 894 return
894 895 data = getdata()
895 896
896 897 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
897 898
898 899 for f in ctx:
899 900 ff = ctx.flags(f)
900 901 getdata = ctx[f].data
901 902 if lfutil.isstandin(f):
902 903 path = lfutil.findfile(repo._repo, getdata().strip())
903 904 if path is None:
904 905 raise util.Abort(
905 906 _('largefile %s not found in repo store or system cache')
906 907 % lfutil.splitstandin(f))
907 908 f = lfutil.splitstandin(f)
908 909
909 910 def getdatafn():
910 911 fd = None
911 912 try:
912 913 fd = open(os.path.join(prefix, path), 'rb')
913 914 return fd.read()
914 915 finally:
915 916 if fd:
916 917 fd.close()
917 918
918 919 getdata = getdatafn
919 920
920 921 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
921 922
922 923 for subpath in sorted(ctx.substate):
923 924 sub = ctx.sub(subpath)
924 925 submatch = match_.narrowmatcher(subpath, match)
925 926 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
926 927 submatch)
927 928
928 929 # If a largefile is modified, the change is not reflected in its
929 930 # standin until a commit. cmdutil.bailifchanged() raises an exception
930 931 # if the repo has uncommitted changes. Wrap it to also check if
931 932 # largefiles were changed. This is used by bisect and backout.
932 933 def overridebailifchanged(orig, repo):
933 934 orig(repo)
934 935 repo.lfstatus = True
935 936 modified, added, removed, deleted = repo.status()[:4]
936 937 repo.lfstatus = False
937 938 if modified or added or removed or deleted:
938 939 raise util.Abort(_('uncommitted changes'))
939 940
940 941 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
941 942 def overridefetch(orig, ui, repo, *pats, **opts):
942 943 repo.lfstatus = True
943 944 modified, added, removed, deleted = repo.status()[:4]
944 945 repo.lfstatus = False
945 946 if modified or added or removed or deleted:
946 947 raise util.Abort(_('uncommitted changes'))
947 948 return orig(ui, repo, *pats, **opts)
948 949
949 950 def overrideforget(orig, ui, repo, *pats, **opts):
950 951 installnormalfilesmatchfn(repo[None].manifest())
951 952 result = orig(ui, repo, *pats, **opts)
952 953 restorematchfn()
953 954 m = scmutil.match(repo[None], pats, opts)
954 955
955 956 try:
956 957 repo.lfstatus = True
957 958 s = repo.status(match=m, clean=True)
958 959 finally:
959 960 repo.lfstatus = False
960 961 forget = sorted(s[0] + s[1] + s[3] + s[6])
961 962 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
962 963
963 964 for f in forget:
964 965 if lfutil.standin(f) not in repo.dirstate and not \
965 966 os.path.isdir(m.rel(lfutil.standin(f))):
966 967 ui.warn(_('not removing %s: file is already untracked\n')
967 968 % m.rel(f))
968 969 result = 1
969 970
970 971 for f in forget:
971 972 if ui.verbose or not m.exact(f):
972 973 ui.status(_('removing %s\n') % m.rel(f))
973 974
974 975 # Need to lock because standin files are deleted then removed from the
975 976 # repository and we could race in-between.
976 977 wlock = repo.wlock()
977 978 try:
978 979 lfdirstate = lfutil.openlfdirstate(ui, repo)
979 980 for f in forget:
980 981 if lfdirstate[f] == 'a':
981 982 lfdirstate.drop(f)
982 983 else:
983 984 lfdirstate.remove(f)
984 985 lfdirstate.write()
985 986 standins = [lfutil.standin(f) for f in forget]
986 987 for f in standins:
987 988 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
988 989 repo[None].forget(standins)
989 990 finally:
990 991 wlock.release()
991 992
992 993 return result
993 994
994 995 def outgoinghook(ui, repo, other, opts, missing):
995 996 if opts.pop('large', None):
996 997 toupload = set()
997 998 lfutil.getlfilestoupload(repo, missing,
998 999 lambda fn, lfhash: toupload.add(fn))
999 1000 if not toupload:
1000 1001 ui.status(_('largefiles: no files to upload\n'))
1001 1002 else:
1002 1003 ui.status(_('largefiles to upload:\n'))
1003 1004 for file in sorted(toupload):
1004 1005 ui.status(lfutil.splitstandin(file) + '\n')
1005 1006 ui.status('\n')
1006 1007
1007 1008 def summaryremotehook(ui, repo, opts, changes):
1008 1009 largeopt = opts.get('large', False)
1009 1010 if changes is None:
1010 1011 if largeopt:
1011 1012 return (False, True) # only outgoing check is needed
1012 1013 else:
1013 1014 return (False, False)
1014 1015 elif largeopt:
1015 1016 url, branch, peer, outgoing = changes[1]
1016 1017 if peer is None:
1017 1018 # i18n: column positioning for "hg summary"
1018 1019 ui.status(_('largefiles: (no remote repo)\n'))
1019 1020 return
1020 1021
1021 1022 toupload = set()
1022 1023 lfutil.getlfilestoupload(repo, outgoing.missing,
1023 1024 lambda fn, lfhash: toupload.add(fn))
1024 1025 if not toupload:
1025 1026 # i18n: column positioning for "hg summary"
1026 1027 ui.status(_('largefiles: (no files to upload)\n'))
1027 1028 else:
1028 1029 # i18n: column positioning for "hg summary"
1029 1030 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1030 1031
1031 1032 def overridesummary(orig, ui, repo, *pats, **opts):
1032 1033 try:
1033 1034 repo.lfstatus = True
1034 1035 orig(ui, repo, *pats, **opts)
1035 1036 finally:
1036 1037 repo.lfstatus = False
1037 1038
1038 1039 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1039 1040 similarity=None):
1040 1041 if not lfutil.islfilesrepo(repo):
1041 1042 return orig(repo, pats, opts, dry_run, similarity)
1042 1043 # Get the list of missing largefiles so we can remove them
1043 1044 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1044 1045 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1045 1046 False, False)
1046 1047 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1047 1048
1048 1049 # Call into the normal remove code, but we want the removal of the standin
1049 1050 # to be handled by the original addremove. Monkey patching here makes sure
1050 1051 # we don't remove the standin in the largefiles code, preventing a very
1051 1052 # confused state later.
1052 1053 if missing:
1053 1054 m = [repo.wjoin(f) for f in missing]
1054 1055 repo._isaddremove = True
1055 1056 removelargefiles(repo.ui, repo, *m, **opts)
1056 1057 repo._isaddremove = False
1057 1058 # Call into the normal add code, and any files that *should* be added as
1058 1059 # largefiles will be
1059 1060 addlargefiles(repo.ui, repo, *pats, **opts)
1060 1061 # Now that we've handled largefiles, hand off to the original addremove
1061 1062 # function to take care of the rest. Make sure it doesn't do anything with
1062 1063 # largefiles by installing a matcher that will ignore them.
1063 1064 installnormalfilesmatchfn(repo[None].manifest())
1064 1065 result = orig(repo, pats, opts, dry_run, similarity)
1065 1066 restorematchfn()
1066 1067 return result
1067 1068
1068 1069 # Calling purge with --all will cause the largefiles to be deleted.
1069 1070 # Override repo.status to prevent this from happening.
1070 1071 def overridepurge(orig, ui, repo, *dirs, **opts):
1071 1072 # XXX large file status is buggy when used on repo proxy.
1072 1073 # XXX this needs to be investigated.
1073 1074 repo = repo.unfiltered()
1074 1075 oldstatus = repo.status
1075 1076 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1076 1077 clean=False, unknown=False, listsubrepos=False):
1077 1078 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1078 1079 listsubrepos)
1079 1080 lfdirstate = lfutil.openlfdirstate(ui, repo)
1080 1081 modified, added, removed, deleted, unknown, ignored, clean = r
1081 1082 unknown = [f for f in unknown if lfdirstate[f] == '?']
1082 1083 ignored = [f for f in ignored if lfdirstate[f] == '?']
1083 1084 return modified, added, removed, deleted, unknown, ignored, clean
1084 1085 repo.status = overridestatus
1085 1086 orig(ui, repo, *dirs, **opts)
1086 1087 repo.status = oldstatus
1087 1088
1088 1089 def overriderollback(orig, ui, repo, **opts):
1089 1090 result = orig(ui, repo, **opts)
1090 1091 merge.update(repo, node=None, branchmerge=False, force=True,
1091 1092 partial=lfutil.isstandin)
1092 1093 wlock = repo.wlock()
1093 1094 try:
1094 1095 lfdirstate = lfutil.openlfdirstate(ui, repo)
1095 1096 lfiles = lfutil.listlfiles(repo)
1096 1097 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1097 1098 for file in lfiles:
1098 1099 if file in oldlfiles:
1099 1100 lfdirstate.normallookup(file)
1100 1101 else:
1101 1102 lfdirstate.add(file)
1102 1103 lfdirstate.write()
1103 1104 finally:
1104 1105 wlock.release()
1105 1106 return result
1106 1107
1107 1108 def overridetransplant(orig, ui, repo, *revs, **opts):
1108 1109 try:
1109 1110 oldstandins = lfutil.getstandinsstate(repo)
1110 1111 repo._istransplanting = True
1111 1112 result = orig(ui, repo, *revs, **opts)
1112 1113 newstandins = lfutil.getstandinsstate(repo)
1113 1114 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1114 1115 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1115 1116 printmessage=True)
1116 1117 finally:
1117 1118 repo._istransplanting = False
1118 1119 return result
1119 1120
1120 1121 def overridecat(orig, ui, repo, file1, *pats, **opts):
1121 1122 ctx = scmutil.revsingle(repo, opts.get('rev'))
1122 1123 err = 1
1123 1124 notbad = set()
1124 1125 m = scmutil.match(ctx, (file1,) + pats, opts)
1125 1126 origmatchfn = m.matchfn
1126 1127 def lfmatchfn(f):
1127 1128 if origmatchfn(f):
1128 1129 return True
1129 1130 lf = lfutil.splitstandin(f)
1130 1131 if lf is None:
1131 1132 return False
1132 1133 notbad.add(lf)
1133 1134 return origmatchfn(lf)
1134 1135 m.matchfn = lfmatchfn
1135 1136 origbadfn = m.bad
1136 1137 def lfbadfn(f, msg):
1137 1138 if not f in notbad:
1138 1139 origbadfn(f, msg)
1139 1140 m.bad = lfbadfn
1140 1141 for f in ctx.walk(m):
1141 1142 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1142 1143 pathname=f)
1143 1144 lf = lfutil.splitstandin(f)
1144 1145 if lf is None or origmatchfn(f):
1145 1146 # duplicating unreachable code from commands.cat
1146 1147 data = ctx[f].data()
1147 1148 if opts.get('decode'):
1148 1149 data = repo.wwritedata(f, data)
1149 1150 fp.write(data)
1150 1151 else:
1151 1152 hash = lfutil.readstandin(repo, lf, ctx.rev())
1152 1153 if not lfutil.inusercache(repo.ui, hash):
1153 1154 store = basestore._openstore(repo)
1154 1155 success, missing = store.get([(lf, hash)])
1155 1156 if len(success) != 1:
1156 1157 raise util.Abort(
1157 1158 _('largefile %s is not in cache and could not be '
1158 1159 'downloaded') % lf)
1159 1160 path = lfutil.usercachepath(repo.ui, hash)
1160 1161 fpin = open(path, "rb")
1161 1162 for chunk in util.filechunkiter(fpin, 128 * 1024):
1162 1163 fp.write(chunk)
1163 1164 fpin.close()
1164 1165 fp.close()
1165 1166 err = 0
1166 1167 return err
1167 1168
1168 1169 def mercurialsinkbefore(orig, sink):
1169 1170 sink.repo._isconverting = True
1170 1171 orig(sink)
1171 1172
1172 1173 def mercurialsinkafter(orig, sink):
1173 1174 sink.repo._isconverting = False
1174 1175 orig(sink)
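Context for the merge.py hunk below: the v2 mergestate file it reads and writes frames each record as a one-byte type, a 4-byte big-endian length, and the payload (the '>sI%is' pack in _writerecordsv2 and the matching unpack loop in _readrecordsv2). A minimal standalone Python 2 sketch of that framing, with hypothetical helper names, just to make the round trip concrete:

    import struct

    def packrecord(rtype, data):
        # one-byte type, 4-byte big-endian length, then the payload
        return struct.pack('>sI%is' % len(data), rtype, len(data), data)

    def unpackrecords(blob):
        records, off = [], 0
        while off < len(blob):
            rtype = blob[off]
            length = struct.unpack('>I', blob[off + 1:off + 5])[0]
            records.append((rtype, blob[off + 5:off + 5 + length]))
            off += 5 + length
        return records

    # round trip: an 'L'ocal-node record plus an 'F'ile record ('\x00'-separated fields)
    raw = packrecord('L', '0123abcd') + packrecord('F', 'foo\x00u\x00deadbeef')
    assert unpackrecords(raw) == [('L', '0123abcd'), ('F', 'foo\x00u\x00deadbeef')]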
@@ -1,1189 +1,1156 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import struct
9 9
10 10 from node import nullid, nullrev, hex, bin
11 11 from i18n import _
12 12 from mercurial import obsolete
13 13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
14 14 import errno, os, shutil
15 15
16 16 _pack = struct.pack
17 17 _unpack = struct.unpack
18 18
19 19 def _droponode(data):
20 20 # used for compatibility for v1
21 21 bits = data.split("\0")
22 22 bits = bits[:-2] + bits[-1:]
23 23 return "\0".join(bits)
24 24
25 25 class mergestate(object):
26 26 '''track 3-way merge state of individual files
27 27
28 28 it is stored on disk when needed. Two files are used: one with an old
29 29 format, one with a new format. Both contain similar data, but the new
30 30 format can store new kinds of fields.
31 31
32 32 The current new format is a list of arbitrary records of the form:
33 33
34 34 [type][length][content]
35 35
36 36 Type is a single character, length is a 4-byte integer, content is an
37 37 arbitrary sequence of bytes of length `length`.
38 38
39 39 Type should be a letter. Capital letters are mandatory records; Mercurial
40 40 should abort if they are unknown. Lower case records can be safely ignored.
41 41
42 42 Currently known records:
43 43
44 44 L: the node of the "local" part of the merge (hexified version)
45 45 O: the node of the "other" part of the merge (hexified version)
46 46 F: an entry for a file to be merged
47 47 '''
48 48 statepathv1 = "merge/state"
49 49 statepathv2 = "merge/state2"
50 50
51 51 def __init__(self, repo):
52 52 self._repo = repo
53 53 self._dirty = False
54 54 self._read()
55 55
56 56 def reset(self, node=None, other=None):
57 57 self._state = {}
58 58 self._local = None
59 59 self._other = None
60 60 if node:
61 61 self._local = node
62 62 self._other = other
63 63 shutil.rmtree(self._repo.join("merge"), True)
64 64 self._dirty = False
65 65
66 66 def _read(self):
67 67 """Analyse each record content to restore a serialized state from disk
68 68
69 69 This function processes "record" entries produced by the de-serialization
70 70 of the on-disk file.
71 71 """
72 72 self._state = {}
73 73 self._local = None
74 74 self._other = None
75 75 records = self._readrecords()
76 76 for rtype, record in records:
77 77 if rtype == 'L':
78 78 self._local = bin(record)
79 79 elif rtype == 'O':
80 80 self._other = bin(record)
81 81 elif rtype == "F":
82 82 bits = record.split("\0")
83 83 self._state[bits[0]] = bits[1:]
84 84 elif not rtype.islower():
85 85 raise util.Abort(_('unsupported merge state record: %s')
86 86 % rtype)
87 87 self._dirty = False
88 88
89 89 def _readrecords(self):
90 90 """Read merge state from disk and return a list of record (TYPE, data)
91 91
92 92 We read data from both v1 and v2 files and decide which one to use.
93 93
94 94 V1 has been used by versions prior to 2.9.1 and contains less data than
95 95 v2. We read both versions and check whether any data in v2 contradicts
96 96 v1. If there is no contradiction we can safely assume that both v1
97 97 and v2 were written at the same time and use the extra data in v2. If
98 98 there is a contradiction we ignore the v2 content, as we assume an old
99 99 version of Mercurial has overwritten the mergestate file and left an
100 100 old v2 file around.
101 101
102 102 returns list of record [(TYPE, data), ...]"""
103 103 v1records = self._readrecordsv1()
104 104 v2records = self._readrecordsv2()
105 105 oldv2 = set() # old format version of v2 record
106 106 for rec in v2records:
107 107 if rec[0] == 'L':
108 108 oldv2.add(rec)
109 109 elif rec[0] == 'F':
110 110 # drop the onode data (not contained in v1)
111 111 oldv2.add(('F', _droponode(rec[1])))
112 112 for rec in v1records:
113 113 if rec not in oldv2:
114 114 # v1 file is newer than v2 file, use it
115 115 # we have to infer the "other" changeset of the merge
116 116 # we cannot do better than that with v1 of the format
117 117 mctx = self._repo[None].parents()[-1]
118 118 v1records.append(('O', mctx.hex()))
119 119 # add placeholder "other" file node information
120 120 # nobody is using it yet so we do not need to fetch the data
121 121 # if mctx was wrong `mctx[bits[-2]]` may fail.
122 122 for idx, r in enumerate(v1records):
123 123 if r[0] == 'F':
124 124 bits = r[1].split("\0")
125 125 bits.insert(-2, '')
126 126 v1records[idx] = (r[0], "\0".join(bits))
127 127 return v1records
128 128 else:
129 129 return v2records
130 130
131 131 def _readrecordsv1(self):
132 132 """read on disk merge state for version 1 file
133 133
134 134 returns list of record [(TYPE, data), ...]
135 135
136 136 Note: the "F" data from this file are one entry short
137 137 (no "other file node" entry)
138 138 """
139 139 records = []
140 140 try:
141 141 f = self._repo.opener(self.statepathv1)
142 142 for i, l in enumerate(f):
143 143 if i == 0:
144 144 records.append(('L', l[:-1]))
145 145 else:
146 146 records.append(('F', l[:-1]))
147 147 f.close()
148 148 except IOError, err:
149 149 if err.errno != errno.ENOENT:
150 150 raise
151 151 return records
152 152
153 153 def _readrecordsv2(self):
154 154 """read on disk merge state for version 2 file
155 155
156 156 returns list of record [(TYPE, data), ...]
157 157 """
158 158 records = []
159 159 try:
160 160 f = self._repo.opener(self.statepathv2)
161 161 data = f.read()
162 162 off = 0
163 163 end = len(data)
164 164 while off < end:
165 165 rtype = data[off]
166 166 off += 1
167 167 length = _unpack('>I', data[off:(off + 4)])[0]
168 168 off += 4
169 169 record = data[off:(off + length)]
170 170 off += length
171 171 records.append((rtype, record))
172 172 f.close()
173 173 except IOError, err:
174 174 if err.errno != errno.ENOENT:
175 175 raise
176 176 return records
177 177
178 178 def active(self):
179 179 """Whether mergestate is active.
180 180
181 181 Returns True if there appears to be mergestate. This is a rough proxy
182 182 for "is a merge in progress."
183 183 """
184 184 # Check local variables before looking at filesystem for performance
185 185 # reasons.
186 186 return bool(self._local) or bool(self._state) or \
187 187 self._repo.opener.exists(self.statepathv1) or \
188 188 self._repo.opener.exists(self.statepathv2)
189 189
190 190 def commit(self):
191 191 """Write current state on disk (if necessary)"""
192 192 if self._dirty:
193 193 records = []
194 194 records.append(("L", hex(self._local)))
195 195 records.append(("O", hex(self._other)))
196 196 for d, v in self._state.iteritems():
197 197 records.append(("F", "\0".join([d] + v)))
198 198 self._writerecords(records)
199 199 self._dirty = False
200 200
201 201 def _writerecords(self, records):
202 202 """Write current state on disk (both v1 and v2)"""
203 203 self._writerecordsv1(records)
204 204 self._writerecordsv2(records)
205 205
206 206 def _writerecordsv1(self, records):
207 207 """Write current state on disk in a version 1 file"""
208 208 f = self._repo.opener(self.statepathv1, "w")
209 209 irecords = iter(records)
210 210 lrecords = irecords.next()
211 211 assert lrecords[0] == 'L'
212 212 f.write(hex(self._local) + "\n")
213 213 for rtype, data in irecords:
214 214 if rtype == "F":
215 215 f.write("%s\n" % _droponode(data))
216 216 f.close()
217 217
218 218 def _writerecordsv2(self, records):
219 219 """Write current state on disk in a version 2 file"""
220 220 f = self._repo.opener(self.statepathv2, "w")
221 221 for key, data in records:
222 222 assert len(key) == 1
223 223 format = ">sI%is" % len(data)
224 224 f.write(_pack(format, key, len(data), data))
225 225 f.close()
226 226
227 227 def add(self, fcl, fco, fca, fd):
228 228 """add a new (potentially?) conflicting file the merge state
229 229 fcl: file context for local,
230 230 fco: file context for remote,
231 231 fca: file context for ancestors,
232 232 fd: file path of the resulting merge.
233 233
234 234 note: also write the local version to the `.hg/merge` directory.
235 235 """
236 236 hash = util.sha1(fcl.path()).hexdigest()
237 237 self._repo.opener.write("merge/" + hash, fcl.data())
238 238 self._state[fd] = ['u', hash, fcl.path(),
239 239 fca.path(), hex(fca.filenode()),
240 240 fco.path(), hex(fco.filenode()),
241 241 fcl.flags()]
242 242 self._dirty = True
243 243
244 244 def __contains__(self, dfile):
245 245 return dfile in self._state
246 246
247 247 def __getitem__(self, dfile):
248 248 return self._state[dfile][0]
249 249
250 250 def __iter__(self):
251 251 return iter(sorted(self._state))
252 252
253 253 def files(self):
254 254 return self._state.keys()
255 255
256 256 def mark(self, dfile, state):
257 257 self._state[dfile][0] = state
258 258 self._dirty = True
259 259
260 260 def unresolved(self):
261 261 """Obtain the paths of unresolved files."""
262 262
263 263 for f, entry in self._state.items():
264 264 if entry[0] == 'u':
265 265 yield f
266 266
267 267 def resolve(self, dfile, wctx, labels=None):
268 268 """rerun merge process for file path `dfile`"""
269 269 if self[dfile] == 'r':
270 270 return 0
271 271 stateentry = self._state[dfile]
272 272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
273 273 octx = self._repo[self._other]
274 274 fcd = wctx[dfile]
275 275 fco = octx[ofile]
276 276 fca = self._repo.filectx(afile, fileid=anode)
277 277 # "premerge" x flags
278 278 flo = fco.flags()
279 279 fla = fca.flags()
280 280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
281 281 if fca.node() == nullid:
282 282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
283 283 afile)
284 284 elif flags == fla:
285 285 flags = flo
286 286 # restore local
287 287 f = self._repo.opener("merge/" + hash)
288 288 self._repo.wwrite(dfile, f.read(), flags)
289 289 f.close()
290 290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
291 291 labels=labels)
292 292 if r is None:
293 293 # no real conflict
294 294 del self._state[dfile]
295 295 self._dirty = True
296 296 elif not r:
297 297 self.mark(dfile, 'r')
298 298 return r
299 299
300 300 def _checkunknownfile(repo, wctx, mctx, f):
301 301 return (not repo.dirstate._ignore(f)
302 302 and os.path.isfile(repo.wjoin(f))
303 303 and repo.wopener.audit.check(f)
304 304 and repo.dirstate.normalize(f) not in repo.dirstate
305 305 and mctx[f].cmp(wctx[f]))
306 306
307 307 def _checkunknown(repo, wctx, mctx):
308 308 "check for collisions between unknown files and files in mctx"
309 309
310 310 error = False
311 311 for f in mctx:
312 312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
313 313 error = True
314 314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
315 315 if error:
316 316 raise util.Abort(_("untracked files in working directory differ "
317 317 "from files in requested revision"))
318 318
319 319 def _forgetremoved(wctx, mctx, branchmerge):
320 320 """
321 321 Forget removed files
322 322
323 323 If we're jumping between revisions (as opposed to merging), and if
324 324 neither the working directory nor the target rev has the file,
325 325 then we need to remove it from the dirstate, to prevent the
326 326 dirstate from listing the file when it is no longer in the
327 327 manifest.
328 328
329 329 If we're merging, and the other revision has removed a file
330 330 that is not present in the working directory, we need to mark it
331 331 as removed.
332 332 """
333 333
334 actions = []
335 state = branchmerge and 'r' or 'f'
334 ractions = []
335 factions = xactions = []
336 if branchmerge:
337 xactions = ractions
336 338 for f in wctx.deleted():
337 339 if f not in mctx:
338 actions.append((f, state, None, "forget deleted"))
340 xactions.append((f, None, "forget deleted"))
339 341
340 342 if not branchmerge:
341 343 for f in wctx.removed():
342 344 if f not in mctx:
343 actions.append((f, "f", None, "forget removed"))
345 factions.append((f, None, "forget removed"))
344 346
345 return actions
347 return ractions, factions
346 348
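With this change `_forgetremoved` no longer tags each tuple with an action letter; it returns one list per action type ('r' and 'f') and the caller extends the matching lists in the actions dict. A small self-contained sketch of the shape the caller sees, with invented file names; `in_target` stands in for membership in mctx:

    def forgetremoved_shape(deleted, removed, in_target, branchmerge):
        # deleted/removed: files missing from the working dir; in_target: files in the target rev
        ractions = []           # will extend actions['r']
        factions = []           # will extend actions['f']
        xactions = ractions if branchmerge else factions
        for f in deleted:
            if f not in in_target:
                xactions.append((f, None, "forget deleted"))
        if not branchmerge:
            for f in removed:
                if f not in in_target:
                    factions.append((f, None, "forget removed"))
        return ractions, factions

    print(forgetremoved_shape(['gone.py'], ['old.py'], set(), branchmerge=False))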
347 349 def _checkcollision(repo, wmf, actions):
348 350 # build provisional merged manifest up
349 351 pmmf = set(wmf)
350 352
351 def addop(f, args):
352 pmmf.add(f)
353 def removeop(f, args):
354 pmmf.discard(f)
355 def nop(f, args):
356 pass
357
358 def renamemoveop(f, args):
359 f2, flags = args
360 pmmf.discard(f2)
361 pmmf.add(f)
362 def renamegetop(f, args):
363 f2, flags = args
364 pmmf.add(f)
365 def mergeop(f, args):
366 f1, f2, fa, move, anc = args
367 if move:
368 pmmf.discard(f1)
369 pmmf.add(f)
370
371 opmap = {
372 "a": addop,
373 "dm": renamemoveop,
374 "dg": renamegetop,
375 "dr": nop,
376 "e": nop,
377 "k": nop,
378 "f": addop, # untracked file should be kept in working directory
379 "g": addop,
380 "m": mergeop,
381 "r": removeop,
382 "rd": nop,
383 "cd": addop,
384 "dc": addop,
385 }
386 for f, m, args, msg in actions:
387 op = opmap.get(m)
388 assert op, m
389 op(f, args)
353 if actions:
354 # k, dr, e and rd are no-op
355 for m in 'a', 'f', 'g', 'cd', 'dc':
356 for f, args, msg in actions[m]:
357 pmmf.add(f)
358 for f, args, msg in actions['r']:
359 pmmf.discard(f)
360 for f, args, msg in actions['dm']:
361 f2, flags = args
362 pmmf.discard(f2)
363 pmmf.add(f)
364 for f, args, msg in actions['dg']:
365 f2, flags = args
366 pmmf.add(f)
367 for f, args, msg in actions['m']:
368 f1, f2, fa, move, anc = args
369 if move:
370 pmmf.discard(f1)
371 pmmf.add(f)
390 372
391 373 # check case-folding collision in provisional merged manifest
392 374 foldmap = {}
393 375 for f in sorted(pmmf):
394 376 fold = util.normcase(f)
395 377 if fold in foldmap:
396 378 raise util.Abort(_("case-folding collision between %s and %s")
397 379 % (f, foldmap[fold]))
398 380 foldmap[fold] = f
399 381
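`_checkcollision` now reads the provisional merged manifest straight out of the per-type action lists instead of dispatching on an action letter per tuple. The collision test itself is just "two distinct paths fold to the same case-normalized key"; a self-contained approximation, with `str.lower` standing in for Mercurial's platform-aware `util.normcase`:

    def check_casefold_collisions(paths, normcase=str.lower):
        # normcase is a stand-in for util.normcase (platform case folding)
        foldmap = {}
        for f in sorted(paths):
            fold = normcase(f)
            if fold in foldmap:
                raise ValueError("case-folding collision between %s and %s"
                                 % (f, foldmap[fold]))
            foldmap[fold] = f

    check_casefold_collisions({'README', 'src/a.py'})        # fine
    try:
        check_casefold_collisions({'Makefile', 'makefile'})  # collides on case-insensitive fs
    except ValueError as e:
        print(e)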
400 382 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
401 383 acceptremote, followcopies):
402 384 """
403 385 Merge p1 and p2 with ancestor pa and generate merge action list
404 386
405 387 branchmerge and force are as passed in to update
406 388 partial = function to filter file lists
407 389 acceptremote = accept the incoming changes without prompting
408 390 """
409 391
410 actions, copy, movewithdir = [], {}, {}
392 actions = dict((m, []) for m in 'a f g cd dc r dm dg m dr e rd k'.split())
393 copy, movewithdir = {}, {}
411 394
412 395 # manifests fetched in order are going to be faster, so prime the caches
413 396 [x.manifest() for x in
414 397 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
415 398
416 399 if followcopies:
417 400 ret = copies.mergecopies(repo, wctx, p2, pa)
418 401 copy, movewithdir, diverge, renamedelete = ret
419 402 for of, fl in diverge.iteritems():
420 actions.append((of, "dr", (fl,), "divergent renames"))
403 actions['dr'].append((of, (fl,), "divergent renames"))
421 404 for of, fl in renamedelete.iteritems():
422 actions.append((of, "rd", (fl,), "rename and delete"))
405 actions['rd'].append((of, (fl,), "rename and delete"))
423 406
424 407 repo.ui.note(_("resolving manifests\n"))
425 408 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
426 409 % (bool(branchmerge), bool(force), bool(partial)))
427 410 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
428 411
429 412 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
430 413 copied = set(copy.values())
431 414 copied.update(movewithdir.values())
432 415
433 416 if '.hgsubstate' in m1:
434 417 # check whether sub state is modified
435 418 for s in sorted(wctx.substate):
436 419 if wctx.sub(s).dirty():
437 420 m1['.hgsubstate'] += "+"
438 421 break
439 422
440 423 aborts = []
441 424 # Compare manifests
442 425 fdiff = dicthelpers.diff(m1, m2)
443 426 flagsdiff = m1.flagsdiff(m2)
444 427 diff12 = dicthelpers.join(fdiff, flagsdiff)
445 428
446 429 for f, (n12, fl12) in diff12.iteritems():
447 430 if n12:
448 431 n1, n2 = n12
449 432 else: # file contents didn't change, but flags did
450 433 n1 = n2 = m1.get(f, None)
451 434 if n1 is None:
452 435 # Since n1 == n2, the file isn't present in m2 either. This
453 436 # means that the file was removed or deleted locally and
454 437 # removed remotely, but that residual entries remain in flags.
455 438 # This can happen in manifests generated by workingctx.
456 439 continue
457 440 if fl12:
458 441 fl1, fl2 = fl12
459 442 else: # flags didn't change, file contents did
460 443 fl1 = fl2 = m1.flags(f)
461 444
462 445 if partial and not partial(f):
463 446 continue
464 447 if n1 and n2:
465 448 fa = f
466 449 a = ma.get(f, nullid)
467 450 if a == nullid:
468 451 fa = copy.get(f, f)
469 452 # Note: f as default is wrong - we can't really make a 3-way
470 453 # merge without an ancestor file.
471 454 fla = ma.flags(fa)
472 455 nol = 'l' not in fl1 + fl2 + fla
473 456 if n2 == a and fl2 == fla:
474 actions.append((f, "k", (), "keep")) # remote unchanged
457 actions['k'].append((f, (), "keep")) # remote unchanged
475 458 elif n1 == a and fl1 == fla: # local unchanged - use remote
476 459 if n1 == n2: # optimization: keep local content
477 actions.append((f, "e", (fl2,), "update permissions"))
460 actions['e'].append((f, (fl2,), "update permissions"))
478 461 else:
479 actions.append((f, "g", (fl2,), "remote is newer"))
462 actions['g'].append((f, (fl2,), "remote is newer"))
480 463 elif nol and n2 == a: # remote only changed 'x'
481 actions.append((f, "e", (fl2,), "update permissions"))
464 actions['e'].append((f, (fl2,), "update permissions"))
482 465 elif nol and n1 == a: # local only changed 'x'
483 actions.append((f, "g", (fl1,), "remote is newer"))
466 actions['g'].append((f, (fl1,), "remote is newer"))
484 467 else: # both changed something
485 actions.append((f, "m", (f, f, fa, False, pa.node()),
468 actions['m'].append((f, (f, f, fa, False, pa.node()),
486 469 "versions differ"))
487 470 elif f in copied: # files we'll deal with on m2 side
488 471 pass
489 472 elif n1 and f in movewithdir: # directory rename, move local
490 473 f2 = movewithdir[f]
491 actions.append((f2, "dm", (f, fl1),
474 actions['dm'].append((f2, (f, fl1),
492 475 "remote directory rename - move from " + f))
493 476 elif n1 and f in copy:
494 477 f2 = copy[f]
495 actions.append((f, "m", (f, f2, f2, False, pa.node()),
478 actions['m'].append((f, (f, f2, f2, False, pa.node()),
496 479 "local copied/moved from " + f2))
497 480 elif n1 and f in ma: # clean, a different, no remote
498 481 if n1 != ma[f]:
499 482 if acceptremote:
500 actions.append((f, "r", None, "remote delete"))
483 actions['r'].append((f, None, "remote delete"))
501 484 else:
502 actions.append((f, "cd", None, "prompt changed/deleted"))
485 actions['cd'].append((f, None, "prompt changed/deleted"))
503 486 elif n1[20:] == "a": # added, no remote
504 actions.append((f, "f", None, "remote deleted"))
487 actions['f'].append((f, None, "remote deleted"))
505 488 else:
506 actions.append((f, "r", None, "other deleted"))
489 actions['r'].append((f, None, "other deleted"))
507 490 elif n2 and f in movewithdir:
508 491 f2 = movewithdir[f]
509 actions.append((f2, "dg", (f, fl2),
492 actions['dg'].append((f2, (f, fl2),
510 493 "local directory rename - get from " + f))
511 494 elif n2 and f in copy:
512 495 f2 = copy[f]
513 496 if f2 in m2:
514 actions.append((f, "m", (f2, f, f2, False, pa.node()),
497 actions['m'].append((f, (f2, f, f2, False, pa.node()),
515 498 "remote copied from " + f2))
516 499 else:
517 actions.append((f, "m", (f2, f, f2, True, pa.node()),
500 actions['m'].append((f, (f2, f, f2, True, pa.node()),
518 501 "remote moved from " + f2))
519 502 elif n2 and f not in ma:
520 503 # local unknown, remote created: the logic is described by the
521 504 # following table:
522 505 #
523 506 # force branchmerge different | action
524 507 # n * n | get
525 508 # n * y | abort
526 509 # y n * | get
527 510 # y y n | get
528 511 # y y y | merge
529 512 #
530 513 # Checking whether the files are different is expensive, so we
531 514 # don't do that when we can avoid it.
532 515 if force and not branchmerge:
533 actions.append((f, "g", (fl2,), "remote created"))
516 actions['g'].append((f, (fl2,), "remote created"))
534 517 else:
535 518 different = _checkunknownfile(repo, wctx, p2, f)
536 519 if force and branchmerge and different:
537 520 # FIXME: This is wrong - f is not in ma ...
538 actions.append((f, "m", (f, f, f, False, pa.node()),
521 actions['m'].append((f, (f, f, f, False, pa.node()),
539 522 "remote differs from untracked local"))
540 523 elif not force and different:
541 524 aborts.append((f, "ud"))
542 525 else:
543 actions.append((f, "g", (fl2,), "remote created"))
526 actions['g'].append((f, (fl2,), "remote created"))
544 527 elif n2 and n2 != ma[f]:
545 528 different = _checkunknownfile(repo, wctx, p2, f)
546 529 if not force and different:
547 530 aborts.append((f, "ud"))
548 531 else:
549 532 # if different: old untracked f may be overwritten and lost
550 533 if acceptremote:
551 actions.append((f, "g", (m2.flags(f),),
534 actions['g'].append((f, (m2.flags(f),),
552 535 "remote recreating"))
553 536 else:
554 actions.append((f, "dc", (m2.flags(f),),
537 actions['dc'].append((f, (m2.flags(f),),
555 538 "prompt deleted/changed"))
556 539
557 540 for f, m in sorted(aborts):
558 541 if m == "ud":
559 542 repo.ui.warn(_("%s: untracked file differs\n") % f)
560 543 else: assert False, m
561 544 if aborts:
562 545 raise util.Abort(_("untracked files in working directory differ "
563 546 "from files in requested revision"))
564 547
565 548 if not util.checkcase(repo.path):
566 549 # check collision between files only in p2 for clean update
567 550 if (not branchmerge and
568 551 (force or not wctx.dirty(missing=True, branch=False))):
569 _checkcollision(repo, m2, [])
552 _checkcollision(repo, m2, None)
570 553 else:
571 554 _checkcollision(repo, m1, actions)
572 555
573 556 return actions
574 557
575 actionpriority = dict((m, p) for p, m in enumerate(
576 ['r', 'f', 'g', 'a', 'k', 'm', 'dm', 'dg', 'dr', 'cd', 'dc', 'rd', 'e']))
577
578 def actionkey(a):
579 return actionpriority[a[1]], a
580
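This is the heart of the changeset: `manifestmerge` used to return one flat list of (file, type, args, msg) tuples that later had to be ordered through the hand-maintained `actionpriority` table and `actionkey` helper removed above; it now returns a dict with one list per action type, so each consumer walks exactly the list it cares about and the only ordering left is the per-list `sort()` in `applyupdates`. A sketch of the two shapes side by side, with invented file names and arguments:

    # old shape: one list, the action letter carried inside each tuple
    old_actions = [
        ('b.txt', 'r', None, "other deleted"),
        ('a.txt', 'g', ('',), "remote created"),
    ]

    # new shape: one list per action type, the letter is the key
    new_actions = dict((m, []) for m in 'a f g cd dc r dm dg m dr e rd k'.split())
    new_actions['r'].append(('b.txt', None, "other deleted"))
    new_actions['g'].append(('a.txt', ('',), "remote created"))

    # consumers no longer filter by letter, they pick the list directly
    removed = len(new_actions['r'])
    for f, args, msg in new_actions['g']:
        print("getting", f, "-", msg)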
581 558 def batchremove(repo, actions):
582 559 """apply removes to the working directory
583 560
584 561 yields tuples for progress updates
585 562 """
586 563 verbose = repo.ui.verbose
587 564 unlink = util.unlinkpath
588 565 wjoin = repo.wjoin
589 566 audit = repo.wopener.audit
590 567 i = 0
591 for f, m, args, msg in actions:
568 for f, args, msg in actions:
592 569 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
593 570 if True:
594 571 if verbose:
595 572 repo.ui.note(_("removing %s\n") % f)
596 573 audit(f)
597 574 try:
598 575 unlink(wjoin(f), ignoremissing=True)
599 576 except OSError, inst:
600 577 repo.ui.warn(_("update failed to remove %s: %s!\n") %
601 578 (f, inst.strerror))
602 579 if i == 100:
603 580 yield i, f
604 581 i = 0
605 582 i += 1
606 583 if i > 0:
607 584 yield i, f
608 585
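`batchremove` (and `batchget` below) keep their shape under the new calling convention: they are generators that do the filesystem work and yield a (count, item) pair roughly every 100 files, so the worker/progress machinery can update the progress bar without a per-file callback. The pattern in isolation, with the real unlink/write replaced by a no-op `work` callable:

    def batched_progress(items, work=lambda f: None, chunk=100):
        i = 0
        for f in items:
            work(f)                  # the real code unlinks or writes the file here
            if i == chunk:
                yield i, f           # report a chunk of completed work
                i = 0
            i += 1
        if i > 0:
            yield i, f               # report the trailing partial chunk

    done = 0
    for count, last in batched_progress(['f%d' % n for n in range(250)]):
        done += count
        print("progress:", done, "last file:", last)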
609 586 def batchget(repo, mctx, actions):
610 587 """apply gets to the working directory
611 588
612 589 mctx is the context to get from
613 590
614 591 yields tuples for progress updates
615 592 """
616 593 verbose = repo.ui.verbose
617 594 fctx = mctx.filectx
618 595 wwrite = repo.wwrite
619 596 i = 0
620 for f, m, args, msg in actions:
597 for f, args, msg in actions:
621 598 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
622 599 if True:
623 600 if verbose:
624 601 repo.ui.note(_("getting %s\n") % f)
625 602 wwrite(f, fctx(f).data(), args[0])
626 603 if i == 100:
627 604 yield i, f
628 605 i = 0
629 606 i += 1
630 607 if i > 0:
631 608 yield i, f
632 609
633 610 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
634 611 """apply the merge action list to the working directory
635 612
636 613 wctx is the working copy context
637 614 mctx is the context to be merged into the working copy
638 615
639 616 Return a tuple of counts (updated, merged, removed, unresolved) that
640 617 describes how many files were affected by the update.
641 618 """
642 619
643 620 updated, merged, removed, unresolved = 0, 0, 0, 0
644 621 ms = mergestate(repo)
645 622 ms.reset(wctx.p1().node(), mctx.node())
646 623 moves = []
647 actions.sort(key=actionkey)
624 for m, l in actions.items():
625 l.sort()
648 626
649 627 # prescan for merges
650 for a in actions:
651 f, m, args, msg = a
652 if m == "m": # merge
628 for f, args, msg in actions['m']:
629 if True:
653 630 f1, f2, fa, move, anc = args
654 631 if f == '.hgsubstate': # merged internally
655 632 continue
656 633 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
657 634 fcl = wctx[f1]
658 635 fco = mctx[f2]
659 636 actx = repo[anc]
660 637 if fa in actx:
661 638 fca = actx[fa]
662 639 else:
663 640 fca = repo.filectx(f1, fileid=nullrev)
664 641 ms.add(fcl, fco, fca, f)
665 642 if f1 != f and move:
666 643 moves.append(f1)
667 644
668 645 audit = repo.wopener.audit
669 646 _updating = _('updating')
670 647 _files = _('files')
671 648 progress = repo.ui.progress
672 649
673 650 # remove renamed files after safely stored
674 651 for f in moves:
675 652 if os.path.lexists(repo.wjoin(f)):
676 653 repo.ui.debug("removing %s\n" % f)
677 654 audit(f)
678 655 util.unlinkpath(repo.wjoin(f))
679 656
680 numupdates = len([a for a in actions if a[1] != 'k'])
681 workeractions = [a for a in actions if a[1] in 'gr']
682 updateactions = [a for a in workeractions if a[1] == 'g']
683 updated = len(updateactions)
684 removeactions = [a for a in workeractions if a[1] == 'r']
685 removed = len(removeactions)
686 actions = [a for a in actions if a[1] not in 'gr']
657 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
687 658
688 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
689 if hgsub and hgsub[0] == 'r':
659 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
690 660 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
691 661
692 662 # remove in parallel (must come first)
693 663 z = 0
694 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), removeactions)
664 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
695 665 for i, item in prog:
696 666 z += i
697 667 progress(_updating, z, item=item, total=numupdates, unit=_files)
668 removed = len(actions['r'])
698 669
699 670 # get in parallel
700 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), updateactions)
671 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
701 672 for i, item in prog:
702 673 z += i
703 674 progress(_updating, z, item=item, total=numupdates, unit=_files)
675 updated = len(actions['g'])
704 676
705 if hgsub and hgsub[0] == 'g':
677 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
706 678 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
707 679
708 for f, m, args, msg in actions:
680 if True:
709 681
710 682 # forget (manifest only, just log it) (must come first)
711 if m == "f":
683 for f, args, msg in actions['f']:
712 684 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
713 685 z += 1
714 686 progress(_updating, z, item=f, total=numupdates, unit=_files)
715 687
716 688 # re-add (manifest only, just log it)
717 elif m == "a":
689 for f, args, msg in actions['a']:
718 690 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
719 691 z += 1
720 692 progress(_updating, z, item=f, total=numupdates, unit=_files)
721 693
722 694 # keep (noop, just log it)
723 elif m == "k":
695 for f, args, msg in actions['k']:
724 696 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
725 697 # no progress
726 698
727 699 # merge
728 elif m == "m":
700 for f, args, msg in actions['m']:
729 701 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
730 702 z += 1
731 703 progress(_updating, z, item=f, total=numupdates, unit=_files)
732 704 f1, f2, fa, move, anc = args
733 705 if f == '.hgsubstate': # subrepo states need updating
734 706 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
735 707 overwrite)
736 708 continue
737 709 audit(f)
738 710 r = ms.resolve(f, wctx, labels=labels)
739 711 if r is not None and r > 0:
740 712 unresolved += 1
741 713 else:
742 714 if r is None:
743 715 updated += 1
744 716 else:
745 717 merged += 1
746 718
747 719 # directory rename, move local
748 elif m == "dm":
720 for f, args, msg in actions['dm']:
749 721 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
750 722 z += 1
751 723 progress(_updating, z, item=f, total=numupdates, unit=_files)
752 724 f0, flags = args
753 725 repo.ui.note(_("moving %s to %s\n") % (f0, f))
754 726 audit(f)
755 727 repo.wwrite(f, wctx.filectx(f0).data(), flags)
756 728 util.unlinkpath(repo.wjoin(f0))
757 729 updated += 1
758 730
759 731 # local directory rename, get
760 elif m == "dg":
732 for f, args, msg in actions['dg']:
761 733 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
762 734 z += 1
763 735 progress(_updating, z, item=f, total=numupdates, unit=_files)
764 736 f0, flags = args
765 737 repo.ui.note(_("getting %s to %s\n") % (f0, f))
766 738 repo.wwrite(f, mctx.filectx(f0).data(), flags)
767 739 updated += 1
768 740
769 741 # divergent renames
770 elif m == "dr":
742 for f, args, msg in actions['dr']:
771 743 repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
772 744 z += 1
773 745 progress(_updating, z, item=f, total=numupdates, unit=_files)
774 746 fl, = args
775 747 repo.ui.warn(_("note: possible conflict - %s was renamed "
776 748 "multiple times to:\n") % f)
777 749 for nf in fl:
778 750 repo.ui.warn(" %s\n" % nf)
779 751
780 752 # rename and delete
781 elif m == "rd":
753 for f, args, msg in actions['rd']:
782 754 repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
783 755 z += 1
784 756 progress(_updating, z, item=f, total=numupdates, unit=_files)
785 757 fl, = args
786 758 repo.ui.warn(_("note: possible conflict - %s was deleted "
787 759 "and renamed to:\n") % f)
788 760 for nf in fl:
789 761 repo.ui.warn(" %s\n" % nf)
790 762
791 763 # exec
792 elif m == "e":
764 for f, args, msg in actions['e']:
793 765 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
794 766 z += 1
795 767 progress(_updating, z, item=f, total=numupdates, unit=_files)
796 768 flags, = args
797 769 audit(f)
798 770 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
799 771 updated += 1
800 772
801 773 ms.commit()
802 774 progress(_updating, None, total=numupdates, unit=_files)
803 775
804 776 return updated, merged, removed, unresolved
805 777
806 778 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
807 779 acceptremote, followcopies):
808 780 "Calculate the actions needed to merge mctx into wctx using ancestors"
809 781
810 782 if len(ancestors) == 1: # default
811 783 actions = manifestmerge(repo, wctx, mctx, ancestors[0],
812 784 branchmerge, force,
813 785 partial, acceptremote, followcopies)
814 786
815 787 else: # only when merge.preferancestor=* - experimentalish code
816 788 repo.ui.status(
817 789 _("note: merging %s and %s using bids from ancestors %s\n") %
818 790 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
819 791
820 792 # Call for bids
821 fbids = {} # mapping filename to list af action bids
793 fbids = {} # mapping filename to bids (action method to list of actions)
822 794 for ancestor in ancestors:
823 795 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
824 796 actions = manifestmerge(repo, wctx, mctx, ancestor,
825 797 branchmerge, force,
826 798 partial, acceptremote, followcopies)
827 for a in sorted(actions, key=lambda a: (a[1], a)):
828 f, m, args, msg = a
829 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
830 if f in fbids:
831 fbids[f].append(a)
832 else:
833 fbids[f] = [a]
799 for m, l in sorted(actions.items()):
800 for a in l:
801 f, args, msg = a
802 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
803 if f in fbids:
804 d = fbids[f]
805 if m in d:
806 d[m].append(a)
807 else:
808 d[m] = [a]
809 else:
810 fbids[f] = {m: [a]}
834 811
835 812 # Pick the best bid for each file
836 813 repo.ui.note(_('\nauction for merging merge bids\n'))
837 actions = []
838 for f, bidsl in sorted(fbids.items()):
814 actions = dict((m, []) for m in actions.keys())
815 for f, bids in sorted(fbids.items()):
816 # bids is a mapping from action method to list of actions
839 817 # Consensus?
840 a0 = bidsl[0]
841 if util.all(a == a0 for a in bidsl[1:]): # len(bidsl) is > 1
842 repo.ui.note(" %s: consensus for %s\n" % (f, a0[1]))
843 actions.append(a0)
844 continue
845 # Group bids by kind of action
846 bids = {}
847 for a in bidsl:
848 m = a[1]
849 if m in bids:
850 bids[m].append(a)
851 else:
852 bids[m] = [a]
818 if len(bids) == 1: # all bids are the same kind of method
819 m, l = bids.items()[0]
820 if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
821 repo.ui.note(" %s: consensus for %s\n" % (f, m))
822 actions[m].append(l[0])
823 continue
853 824 # If keep is an option, just do it.
854 825 if "k" in bids:
855 826 repo.ui.note(" %s: picking 'keep' action\n" % f)
856 actions.append(bids["k"][0])
827 actions['k'].append(bids["k"][0])
857 828 continue
858 # If all gets agree [how could they not?], just do it.
829 # If there are gets and they all agree [how could they not?], do it.
859 830 if "g" in bids:
860 831 ga0 = bids["g"][0]
861 832 if util.all(a == ga0 for a in bids["g"][1:]):
862 833 repo.ui.note(" %s: picking 'get' action\n" % f)
863 actions.append(ga0)
834 actions['g'].append(ga0)
864 835 continue
865 836 # TODO: Consider other simple actions such as mode changes
866 837 # Handle inefficient democrazy.
867 838 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
868 for _f, m, args, msg in bidsl:
869 repo.ui.note(' %s -> %s\n' % (msg, m))
839 for m, l in sorted(bids.items()):
840 for _f, args, msg in l:
841 repo.ui.note(' %s -> %s\n' % (msg, m))
870 842 # Pick random action. TODO: Instead, prompt user when resolving
871 a0 = bidsl[0]
843 m, l = bids.items()[0]
872 844 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
873 (f, a0[1]))
874 actions.append(a0)
845 (f, m))
846 actions[m].append(l[0])
875 847 continue
876 848 repo.ui.note(_('end of auction\n\n'))
877 849
878 # Filter out prompts.
879 newactions, prompts = [], []
880 for a in actions:
881 if a[1] in ("cd", "dc"):
882 prompts.append(a)
883 else:
884 newactions.append(a)
885 850 # Prompt and create actions. TODO: Move this towards resolve phase.
886 for f, m, args, msg in sorted(prompts):
887 if m == "cd":
851 if True:
852 for f, args, msg in actions['cd']:
888 853 if repo.ui.promptchoice(
889 854 _("local changed %s which remote deleted\n"
890 855 "use (c)hanged version or (d)elete?"
891 856 "$$ &Changed $$ &Delete") % f, 0):
892 newactions.append((f, "r", None, "prompt delete"))
857 actions['r'].append((f, None, "prompt delete"))
893 858 else:
894 newactions.append((f, "a", None, "prompt keep"))
895 elif m == "dc":
859 actions['a'].append((f, None, "prompt keep"))
860 del actions['cd'][:]
861
862 for f, args, msg in actions['dc']:
896 863 flags, = args
897 864 if repo.ui.promptchoice(
898 865 _("remote changed %s which local deleted\n"
899 866 "use (c)hanged version or leave (d)eleted?"
900 867 "$$ &Changed $$ &Deleted") % f, 0) == 0:
901 newactions.append((f, "g", (flags,), "prompt recreating"))
902 else: assert False, m
868 actions['g'].append((f, (flags,), "prompt recreating"))
869 del actions['dc'][:]
903 870
904 871 if wctx.rev() is None:
905 newactions += _forgetremoved(wctx, mctx, branchmerge)
872 ractions, factions = _forgetremoved(wctx, mctx, branchmerge)
873 actions['r'].extend(ractions)
874 actions['f'].extend(factions)
906 875
907 return newactions
876 return actions
908 877
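The multi-ancestor "auction" above now groups each file's bids by action type up front (a per-file dict of lists) instead of re-grouping a flat list inside the loop, so the consensus, 'keep', and 'get' preferences become direct dictionary lookups. A compact standalone sketch of those selection rules; the bids are invented, and ties fall back to an arbitrary bid just as the real code does before warning:

    def pick_bids(fbids):
        # fbids: {file: {action_type: [(file, args, msg), ...]}}
        chosen = {}
        for f, bids in sorted(fbids.items()):
            if len(bids) == 1:                     # every ancestor proposed the same type
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # ...and the same arguments
                    chosen[f] = (m, l[0])
                    continue
            if 'k' in bids:                        # keeping the file is always safe
                chosen[f] = ('k', bids['k'][0])
                continue
            if 'g' in bids and all(a == bids['g'][0] for a in bids['g'][1:]):
                chosen[f] = ('g', bids['g'][0])    # all 'get' bids agree
                continue
            m, l = sorted(bids.items())[0]         # ambiguous: pick one (the real code warns)
            chosen[f] = (m, l[0])
        return chosen

    fbids = {'a.txt': {'g': [('a.txt', ('',), 'remote is newer')],
                       'k': [('a.txt', (), 'keep')]}}
    print(pick_bids(fbids))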
909 878 def recordupdates(repo, actions, branchmerge):
910 879 "record merge actions to the dirstate"
911
912 for f, m, args, msg in actions:
913
880 if True:
914 881 # remove (must come first)
915 if m == "r": # remove
882 for f, args, msg in actions['r']:
916 883 if branchmerge:
917 884 repo.dirstate.remove(f)
918 885 else:
919 886 repo.dirstate.drop(f)
920 887
921 888 # forget (must come first)
922 elif m == "f":
889 for f, args, msg in actions['f']:
923 890 repo.dirstate.drop(f)
924 891
925 892 # re-add
926 elif m == "a":
893 for f, args, msg in actions['a']:
927 894 if not branchmerge:
928 895 repo.dirstate.add(f)
929 896
930 897 # exec change
931 elif m == "e":
898 for f, args, msg in actions['e']:
932 899 repo.dirstate.normallookup(f)
933 900
934 901 # keep
935 elif m == "k":
902 for f, args, msg in actions['k']:
936 903 pass
937 904
938 905 # get
939 elif m == "g":
906 for f, args, msg in actions['g']:
940 907 if branchmerge:
941 908 repo.dirstate.otherparent(f)
942 909 else:
943 910 repo.dirstate.normal(f)
944 911
945 912 # merge
946 elif m == "m":
913 for f, args, msg in actions['m']:
947 914 f1, f2, fa, move, anc = args
948 915 if branchmerge:
949 916 # We've done a branch merge, mark this file as merged
950 917 # so that we properly record the merger later
951 918 repo.dirstate.merge(f)
952 919 if f1 != f2: # copy/rename
953 920 if move:
954 921 repo.dirstate.remove(f1)
955 922 if f1 != f:
956 923 repo.dirstate.copy(f1, f)
957 924 else:
958 925 repo.dirstate.copy(f2, f)
959 926 else:
960 927 # We've update-merged a locally modified file, so
961 928 # we set the dirstate to emulate a normal checkout
962 929 # of that file some time in the past. Thus our
963 930 # merge will appear as a normal local file
964 931 # modification.
965 932 if f2 == f: # file not locally copied/moved
966 933 repo.dirstate.normallookup(f)
967 934 if move:
968 935 repo.dirstate.drop(f1)
969 936
970 937 # directory rename, move local
971 elif m == "dm":
938 for f, args, msg in actions['dm']:
972 939 f0, flag = args
973 940 if f0 not in repo.dirstate:
974 941 # untracked file moved
975 942 continue
976 943 if branchmerge:
977 944 repo.dirstate.add(f)
978 945 repo.dirstate.remove(f0)
979 946 repo.dirstate.copy(f0, f)
980 947 else:
981 948 repo.dirstate.normal(f)
982 949 repo.dirstate.drop(f0)
983 950
984 951 # directory rename, get
985 elif m == "dg":
952 for f, args, msg in actions['dg']:
986 953 f0, flag = args
987 954 if branchmerge:
988 955 repo.dirstate.add(f)
989 956 repo.dirstate.copy(f0, f)
990 957 else:
991 958 repo.dirstate.normal(f)
992 959
993 960 def update(repo, node, branchmerge, force, partial, ancestor=None,
994 961 mergeancestor=False, labels=None):
995 962 """
996 963 Perform a merge between the working directory and the given node
997 964
998 965 node = the node to update to, or None if unspecified
999 966 branchmerge = whether to merge between branches
1000 967 force = whether to force branch merging or file overwriting
1001 968 partial = a function to filter file lists (dirstate not updated)
1002 969 mergeancestor = whether it is merging with an ancestor. If true,
1003 970 we should accept the incoming changes for any prompts that occur.
1004 971 If false, merging with an ancestor (fast-forward) is only allowed
1005 972 between different named branches. This flag is used by rebase extension
1006 973 as a temporary fix and should be avoided in general.
1007 974
1008 975 The table below shows all the behaviors of the update command
1009 976 given the -c and -C or no options, whether the working directory
1010 977 is dirty, whether a revision is specified, and the relationship of
1011 978 the parent rev to the target rev (linear, on the same named
1012 979 branch, or on another named branch).
1013 980
1014 981 This logic is tested by test-update-branches.t.
1015 982
1016 983 -c -C dirty rev | linear same cross
1017 984 n n n n | ok (1) x
1018 985 n n n y | ok ok ok
1019 986 n n y n | merge (2) (2)
1020 987 n n y y | merge (3) (3)
1021 988 n y * * | --- discard ---
1022 989 y n y * | --- (4) ---
1023 990 y n n * | --- ok ---
1024 991 y y * * | --- (5) ---
1025 992
1026 993 x = can't happen
1027 994 * = don't-care
1028 995 1 = abort: not a linear update (merge or update --check to force update)
1029 996 2 = abort: uncommitted changes (commit and merge, or update --clean to
1030 997 discard changes)
1031 998 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1032 999 4 = abort: uncommitted changes (checked in commands.py)
1033 1000 5 = incompatible options (checked in commands.py)
1034 1001
1035 1002 Return the same tuple as applyupdates().
1036 1003 """
1037 1004
1038 1005 onode = node
1039 1006 wlock = repo.wlock()
1040 1007 try:
1041 1008 wc = repo[None]
1042 1009 pl = wc.parents()
1043 1010 p1 = pl[0]
1044 1011 pas = [None]
1045 1012 if ancestor:
1046 1013 pas = [repo[ancestor]]
1047 1014
1048 1015 if node is None:
1049 1016 # Here is where we should consider bookmarks, divergent bookmarks,
1050 1017 # foreground changesets (successors), and tip of current branch;
1051 1018 # but currently we are only checking the branch tips.
1052 1019 try:
1053 1020 node = repo.branchtip(wc.branch())
1054 1021 except error.RepoLookupError:
1055 1022 if wc.branch() == "default": # no default branch!
1056 1023 node = repo.lookup("tip") # update to tip
1057 1024 else:
1058 1025 raise util.Abort(_("branch %s not found") % wc.branch())
1059 1026
1060 1027 if p1.obsolete() and not p1.children():
1061 1028 # allow updating to successors
1062 1029 successors = obsolete.successorssets(repo, p1.node())
1063 1030
1064 1031 # behavior of certain cases is as follows,
1065 1032 #
1066 1033 # divergent changesets: update to highest rev, similar to what
1067 1034 # is currently done when there are more than one head
1068 1035 # (i.e. 'tip')
1069 1036 #
1070 1037 # replaced changesets: same as divergent except we know there
1071 1038 # is no conflict
1072 1039 #
1073 1040 # pruned changeset: no update is done; though, we could
1074 1041 # consider updating to the first non-obsolete parent,
1075 1042 # similar to what is currently done for 'hg prune'
1076 1043
1077 1044 if successors:
1078 1045 # flatten the list here handles both divergent (len > 1)
1079 1046 # and the usual case (len = 1)
1080 1047 successors = [n for sub in successors for n in sub]
1081 1048
1082 1049 # get the max revision for the given successors set,
1083 1050 # i.e. the 'tip' of a set
1084 1051 node = repo.revs("max(%ln)", successors)[0]
1085 1052 pas = [p1]
1086 1053
1087 1054 overwrite = force and not branchmerge
1088 1055
1089 1056 p2 = repo[node]
1090 1057 if pas[0] is None:
1091 1058 if repo.ui.config("merge", "preferancestor") == '*':
1092 1059 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1093 1060 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1094 1061 else:
1095 1062 pas = [p1.ancestor(p2, warn=True)]
1096 1063
1097 1064 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1098 1065
1099 1066 ### check phase
1100 1067 if not overwrite and len(pl) > 1:
1101 1068 raise util.Abort(_("outstanding uncommitted merges"))
1102 1069 if branchmerge:
1103 1070 if pas == [p2]:
1104 1071 raise util.Abort(_("merging with a working directory ancestor"
1105 1072 " has no effect"))
1106 1073 elif pas == [p1]:
1107 1074 if not mergeancestor and p1.branch() == p2.branch():
1108 1075 raise util.Abort(_("nothing to merge"),
1109 1076 hint=_("use 'hg update' "
1110 1077 "or check 'hg heads'"))
1111 1078 if not force and (wc.files() or wc.deleted()):
1112 1079 raise util.Abort(_("uncommitted changes"),
1113 1080 hint=_("use 'hg status' to list changes"))
1114 1081 for s in sorted(wc.substate):
1115 1082 if wc.sub(s).dirty():
1116 1083 raise util.Abort(_("uncommitted changes in "
1117 1084 "subrepository '%s'") % s)
1118 1085
1119 1086 elif not overwrite:
1120 1087 if p1 == p2: # no-op update
1121 1088 # call the hooks and exit early
1122 1089 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1123 1090 repo.hook('update', parent1=xp2, parent2='', error=0)
1124 1091 return 0, 0, 0, 0
1125 1092
1126 1093 if pas not in ([p1], [p2]): # nonlinear
1127 1094 dirty = wc.dirty(missing=True)
1128 1095 if dirty or onode is None:
1129 1096 # Branching is a bit strange to ensure we do the minimal
1130 1097 # amount of call to obsolete.background.
1131 1098 foreground = obsolete.foreground(repo, [p1.node()])
1132 1099 # note: the <node> variable contains a random identifier
1133 1100 if repo[node].node() in foreground:
1134 1101 pas = [p1] # allow updating to successors
1135 1102 elif dirty:
1136 1103 msg = _("uncommitted changes")
1137 1104 if onode is None:
1138 1105 hint = _("commit and merge, or update --clean to"
1139 1106 " discard changes")
1140 1107 else:
1141 1108 hint = _("commit or update --clean to discard"
1142 1109 " changes")
1143 1110 raise util.Abort(msg, hint=hint)
1144 1111 else: # node is none
1145 1112 msg = _("not a linear update")
1146 1113 hint = _("merge or update --check to force update")
1147 1114 raise util.Abort(msg, hint=hint)
1148 1115 else:
1149 1116 # Allow jumping branches if clean and specific rev given
1150 1117 pas = [p1]
1151 1118
1152 1119 followcopies = False
1153 1120 if overwrite:
1154 1121 pas = [wc]
1155 1122 elif pas == [p2]: # backwards
1156 1123 pas = [wc.p1()]
1157 1124 elif not branchmerge and not wc.dirty(missing=True):
1158 1125 pass
1159 1126 elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
1160 1127 followcopies = True
1161 1128
1162 1129 ### calculate phase
1163 1130 actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
1164 1131 partial, mergeancestor, followcopies)
1165 1132
1166 1133 ### apply phase
1167 1134 if not branchmerge: # just jump to the new rev
1168 1135 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1169 1136 if not partial:
1170 1137 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1171 1138 # note that we're in the middle of an update
1172 1139 repo.vfs.write('updatestate', p2.hex())
1173 1140
1174 1141 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1175 1142
1176 1143 if not partial:
1177 1144 repo.setparents(fp1, fp2)
1178 1145 recordupdates(repo, actions, branchmerge)
1179 1146 # update completed, clear state
1180 1147 util.unlink(repo.join('updatestate'))
1181 1148
1182 1149 if not branchmerge:
1183 1150 repo.dirstate.setbranch(p2.branch())
1184 1151 finally:
1185 1152 wlock.release()
1186 1153
1187 1154 if not partial:
1188 1155 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1189 1156 return stats
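The behaviour table in `update`'s docstring above can be read as a small decision function. The sketch below encodes just that table and nothing else; it is purely illustrative (the real checks live partly in commands.py and partly in the body of `update`), and the `relation` argument is an invented shorthand for the linear/same-branch/cross-branch column:

    def update_behavior(check, clean, dirty, rev_given, relation):
        # relation: 'linear', 'same' (same named branch), or 'cross' (other branch)
        # returns a short description of what 'hg update' would do, or why it aborts
        if check and clean:
            return "abort: incompatible options (5)"
        if clean:                      # -C
            return "discard local changes and update"
        if check:                      # -c
            return "abort: uncommitted changes (4)" if dirty else "ok"
        if not dirty:
            if not rev_given and relation == 'same':
                return "abort: not a linear update (1)"
            if not rev_given and relation == 'cross':
                return "x (can't happen)"
            return "ok"
        # dirty working directory, neither -c nor -C
        if relation == 'linear':
            return "merge uncommitted changes into the target"
        return ("abort: uncommitted changes (3)" if rev_given
                else "abort: uncommitted changes (2)")

    for case in [(False, False, False, False, 'linear'),
                 (False, False, True, True, 'cross'),
                 (False, True, True, False, 'same')]:
        print(case, '->', update_behavior(*case))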