merge: add labels parameter from merge.update to filemerge...
Durham Goode
r21524:47b97d9a default
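This changeset threads an optional labels argument from merge.update down to filemerge.filemerge (and to the largefiles overridefilemerge wrapper), so callers can supply their own conflict-marker labels instead of the previously hard-coded ['local', 'other']. The first hunk below leaves the largefiles overrides functionally unchanged apart from forwarding the new keyword; the second hunk is where filemerge.py picks up the parameter. As a rough, self-contained sketch of the defaulting logic the new filemerge code uses (pick_labels and format_label are illustrative helpers, not Mercurial API):

# Illustrative sketch of the label-defaulting behaviour added by this change.
# pick_labels and format_label are hypothetical helpers, not Mercurial API.

_defaultconflictlabels = ['local', 'other']

def format_label(label, description):
    # stand-in for filemerge._formatlabels: decorate a label with changeset info
    return '%s: %s' % (label, description)

def pick_labels(markerstyle, labels=None,
                local_desc='working copy', other_desc='merge rev'):
    """Mirror the flow in filemerge.filemerge():
    - ui.mergemarkers = 'basic' keeps the plain default labels,
    - otherwise missing labels fall back to the defaults,
    - and whatever labels remain are formatted into the conflict markers.
    """
    if markerstyle == 'basic':
        return _defaultconflictlabels
    if not labels:
        labels = _defaultconflictlabels
    return [format_label(labels[0], local_desc),
            format_label(labels[1], other_desc)]

print(pick_labels('detailed'))                                   # formatted defaults
print(pick_labels('detailed', ['working copy', 'destination']))  # formatted custom labels
print(pick_labels('basic', ['working copy', 'destination']))     # plain defaults
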
@@ -1,1174 +1,1174 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 archival, merge, pathutil, revset
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22 import basestore
23 23
24 24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25 25
26 26 def installnormalfilesmatchfn(manifest):
27 27 '''installmatchfn with a matchfn that ignores all largefiles'''
28 28 def overridematch(ctx, pats=[], opts={}, globbed=False,
29 29 default='relpath'):
30 30 match = oldmatch(ctx, pats, opts, globbed, default)
31 31 m = copy.copy(match)
32 32 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 33 manifest)
34 34 m._files = filter(notlfile, m._files)
35 35 m._fmap = set(m._files)
36 36 m._always = False
37 37 origmatchfn = m.matchfn
38 38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
39 39 return m
40 40 oldmatch = installmatchfn(overridematch)
41 41
42 42 def installmatchfn(f):
43 43 '''monkey patch the scmutil module with a custom match function.
44 44 Warning: it is monkey patching the _module_ at runtime! Not thread safe!'''
45 45 oldmatch = scmutil.match
46 46 setattr(f, 'oldmatch', oldmatch)
47 47 scmutil.match = f
48 48 return oldmatch
49 49
50 50 def restorematchfn():
51 51 '''restores scmutil.match to what it was before installmatchfn
52 52 was called. no-op if scmutil.match is its original function.
53 53
54 54 Note that n calls to installmatchfn will require n calls to
55 55 restore matchfn to reverse'''
56 56 scmutil.match = getattr(scmutil.match, 'oldmatch')
57 57
58 58 def installmatchandpatsfn(f):
59 59 oldmatchandpats = scmutil.matchandpats
60 60 setattr(f, 'oldmatchandpats', oldmatchandpats)
61 61 scmutil.matchandpats = f
62 62 return oldmatchandpats
63 63
64 64 def restorematchandpatsfn():
65 65 '''restores scmutil.matchandpats to what it was before
66 66 installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
67 67 is its original function.
68 68
69 69 Note that n calls to installnormalfilesmatchandpatsfn will require n calls
70 70 to restore matchfn to reverse'''
71 71 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
72 72 scmutil.matchandpats)
73 73
74 74 def addlargefiles(ui, repo, *pats, **opts):
75 75 large = opts.pop('large', None)
76 76 lfsize = lfutil.getminsize(
77 77 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
78 78
79 79 lfmatcher = None
80 80 if lfutil.islfilesrepo(repo):
81 81 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
82 82 if lfpats:
83 83 lfmatcher = match_.match(repo.root, '', list(lfpats))
84 84
85 85 lfnames = []
86 86 m = scmutil.match(repo[None], pats, opts)
87 87 m.bad = lambda x, y: None
88 88 wctx = repo[None]
89 89 for f in repo.walk(m):
90 90 exact = m.exact(f)
91 91 lfile = lfutil.standin(f) in wctx
92 92 nfile = f in wctx
93 93 exists = lfile or nfile
94 94
95 95 # Don't warn the user when they attempt to add a normal tracked file.
96 96 # The normal add code will do that for us.
97 97 if exact and exists:
98 98 if lfile:
99 99 ui.warn(_('%s already a largefile\n') % f)
100 100 continue
101 101
102 102 if (exact or not exists) and not lfutil.isstandin(f):
103 103 wfile = repo.wjoin(f)
104 104
105 105 # In case the file was removed previously, but not committed
106 106 # (issue3507)
107 107 if not os.path.exists(wfile):
108 108 continue
109 109
110 110 abovemin = (lfsize and
111 111 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
112 112 if large or abovemin or (lfmatcher and lfmatcher(f)):
113 113 lfnames.append(f)
114 114 if ui.verbose or not exact:
115 115 ui.status(_('adding %s as a largefile\n') % m.rel(f))
116 116
117 117 bad = []
118 118 standins = []
119 119
120 120 # Need to lock, otherwise there could be a race condition between
121 121 # when standins are created and added to the repo.
122 122 wlock = repo.wlock()
123 123 try:
124 124 if not opts.get('dry_run'):
125 125 lfdirstate = lfutil.openlfdirstate(ui, repo)
126 126 for f in lfnames:
127 127 standinname = lfutil.standin(f)
128 128 lfutil.writestandin(repo, standinname, hash='',
129 129 executable=lfutil.getexecutable(repo.wjoin(f)))
130 130 standins.append(standinname)
131 131 if lfdirstate[f] == 'r':
132 132 lfdirstate.normallookup(f)
133 133 else:
134 134 lfdirstate.add(f)
135 135 lfdirstate.write()
136 136 bad += [lfutil.splitstandin(f)
137 137 for f in repo[None].add(standins)
138 138 if f in m.files()]
139 139 finally:
140 140 wlock.release()
141 141 return bad
142 142
143 143 def removelargefiles(ui, repo, *pats, **opts):
144 144 after = opts.get('after')
145 145 if not pats and not after:
146 146 raise util.Abort(_('no files specified'))
147 147 m = scmutil.match(repo[None], pats, opts)
148 148 try:
149 149 repo.lfstatus = True
150 150 s = repo.status(match=m, clean=True)
151 151 finally:
152 152 repo.lfstatus = False
153 153 manifest = repo[None].manifest()
154 154 modified, added, deleted, clean = [[f for f in list
155 155 if lfutil.standin(f) in manifest]
156 156 for list in [s[0], s[1], s[3], s[6]]]
157 157
158 158 def warn(files, msg):
159 159 for f in files:
160 160 ui.warn(msg % m.rel(f))
161 161 return int(len(files) > 0)
162 162
163 163 result = 0
164 164
165 165 if after:
166 166 remove, forget = deleted, []
167 167 result = warn(modified + added + clean,
168 168 _('not removing %s: file still exists\n'))
169 169 else:
170 170 remove, forget = deleted + clean, []
171 171 result = warn(modified, _('not removing %s: file is modified (use -f'
172 172 ' to force removal)\n'))
173 173 result = warn(added, _('not removing %s: file has been marked for add'
174 174 ' (use forget to undo)\n')) or result
175 175
176 176 for f in sorted(remove + forget):
177 177 if ui.verbose or not m.exact(f):
178 178 ui.status(_('removing %s\n') % m.rel(f))
179 179
180 180 # Need to lock because standin files are deleted then removed from the
181 181 # repository and we could race in-between.
182 182 wlock = repo.wlock()
183 183 try:
184 184 lfdirstate = lfutil.openlfdirstate(ui, repo)
185 185 for f in remove:
186 186 if not after:
187 187 # If this is being called by addremove, notify the user that we
188 188 # are removing the file.
189 189 if getattr(repo, "_isaddremove", False):
190 190 ui.status(_('removing %s\n') % f)
191 191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
192 192 lfdirstate.remove(f)
193 193 lfdirstate.write()
194 194 forget = [lfutil.standin(f) for f in forget]
195 195 remove = [lfutil.standin(f) for f in remove]
196 196 repo[None].forget(forget)
197 197 # If this is being called by addremove, let the original addremove
198 198 # function handle this.
199 199 if not getattr(repo, "_isaddremove", False):
200 200 for f in remove:
201 201 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
202 202 repo[None].forget(remove)
203 203 finally:
204 204 wlock.release()
205 205
206 206 return result
207 207
208 208 # For overriding mercurial.hgweb.webcommands so that largefiles will
209 209 # appear at their right place in the manifests.
210 210 def decodepath(orig, path):
211 211 return lfutil.splitstandin(path) or path
212 212
213 213 # -- Wrappers: modify existing commands --------------------------------
214 214
215 215 # Add works by going through the files that the user wanted to add and
216 216 # checking if they should be added as largefiles. Then it makes a new
217 217 # matcher which matches only the normal files and runs the original
218 218 # version of add.
219 219 def overrideadd(orig, ui, repo, *pats, **opts):
220 220 normal = opts.pop('normal')
221 221 if normal:
222 222 if opts.get('large'):
223 223 raise util.Abort(_('--normal cannot be used with --large'))
224 224 return orig(ui, repo, *pats, **opts)
225 225 bad = addlargefiles(ui, repo, *pats, **opts)
226 226 installnormalfilesmatchfn(repo[None].manifest())
227 227 result = orig(ui, repo, *pats, **opts)
228 228 restorematchfn()
229 229
230 230 return (result == 1 or bad) and 1 or 0
231 231
232 232 def overrideremove(orig, ui, repo, *pats, **opts):
233 233 installnormalfilesmatchfn(repo[None].manifest())
234 234 result = orig(ui, repo, *pats, **opts)
235 235 restorematchfn()
236 236 return removelargefiles(ui, repo, *pats, **opts) or result
237 237
238 238 def overridestatusfn(orig, repo, rev2, **opts):
239 239 try:
240 240 repo._repo.lfstatus = True
241 241 return orig(repo, rev2, **opts)
242 242 finally:
243 243 repo._repo.lfstatus = False
244 244
245 245 def overridestatus(orig, ui, repo, *pats, **opts):
246 246 try:
247 247 repo.lfstatus = True
248 248 return orig(ui, repo, *pats, **opts)
249 249 finally:
250 250 repo.lfstatus = False
251 251
252 252 def overridedirty(orig, repo, ignoreupdate=False):
253 253 try:
254 254 repo._repo.lfstatus = True
255 255 return orig(repo, ignoreupdate)
256 256 finally:
257 257 repo._repo.lfstatus = False
258 258
259 259 def overridelog(orig, ui, repo, *pats, **opts):
260 260 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
261 261 default='relpath'):
262 262 """Matcher that merges root directory with .hglf, suitable for log.
263 263 It is still possible to match .hglf directly.
264 264 For any listed files run log on the standin too.
265 265 matchfn tries both the given filename and with .hglf stripped.
266 266 """
267 267 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
268 268 m, p = copy.copy(matchandpats)
269 269
270 270 pats = set(p)
271 271 # TODO: handling of patterns in both cases below
272 272 if m._cwd:
273 273 if os.path.isabs(m._cwd):
274 274 # TODO: handle largefile magic when invoked from other cwd
275 275 return matchandpats
276 276 back = (m._cwd.count('/') + 1) * '../'
277 277 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
278 278 else:
279 279 pats.update(lfutil.standin(f) for f in p)
280 280
281 281 for i in range(0, len(m._files)):
282 282 standin = lfutil.standin(m._files[i])
283 283 if standin in repo[ctx.node()]:
284 284 m._files[i] = standin
285 285 elif m._files[i] not in repo[ctx.node()]:
286 286 m._files.append(standin)
287 287 pats.add(standin)
288 288
289 289 m._fmap = set(m._files)
290 290 m._always = False
291 291 origmatchfn = m.matchfn
292 292 def lfmatchfn(f):
293 293 lf = lfutil.splitstandin(f)
294 294 if lf is not None and origmatchfn(lf):
295 295 return True
296 296 r = origmatchfn(f)
297 297 return r
298 298 m.matchfn = lfmatchfn
299 299
300 300 return m, pats
301 301
302 302 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
303 303 try:
304 304 repo.lfstatus = True
305 305 return orig(ui, repo, *pats, **opts)
306 306 finally:
307 307 repo.lfstatus = False
308 308 restorematchandpatsfn()
309 309
310 310 def overrideverify(orig, ui, repo, *pats, **opts):
311 311 large = opts.pop('large', False)
312 312 all = opts.pop('lfa', False)
313 313 contents = opts.pop('lfc', False)
314 314
315 315 result = orig(ui, repo, *pats, **opts)
316 316 if large or all or contents:
317 317 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
318 318 return result
319 319
320 320 def overridedebugstate(orig, ui, repo, *pats, **opts):
321 321 large = opts.pop('large', False)
322 322 if large:
323 323 class fakerepo(object):
324 324 dirstate = lfutil.openlfdirstate(ui, repo)
325 325 orig(ui, fakerepo, *pats, **opts)
326 326 else:
327 327 orig(ui, repo, *pats, **opts)
328 328
329 329 # Override needs to refresh standins so that update's normal merge
330 330 # will go through properly. Then the other update hook (overriding repo.update)
331 331 # will get the new files. Filemerge is also overridden so that the merge
332 332 # will merge standins correctly.
333 333 def overrideupdate(orig, ui, repo, *pats, **opts):
334 334 # Need to lock between the standins getting updated and their
335 335 # largefiles getting updated
336 336 wlock = repo.wlock()
337 337 try:
338 338 lfdirstate = lfutil.openlfdirstate(ui, repo)
339 339 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
340 340 [], False, False, False)
341 341 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
342 342
343 343 if opts['check']:
344 344 mod = len(modified) > 0
345 345 for lfile in unsure:
346 346 standin = lfutil.standin(lfile)
347 347 if repo['.'][standin].data().strip() != \
348 348 lfutil.hashfile(repo.wjoin(lfile)):
349 349 mod = True
350 350 else:
351 351 lfdirstate.normal(lfile)
352 352 lfdirstate.write()
353 353 if mod:
354 354 raise util.Abort(_('uncommitted changes'))
355 355 # XXX handle removed differently
356 356 if not opts['clean']:
357 357 for lfile in unsure + modified + added:
358 358 lfutil.updatestandin(repo, lfutil.standin(lfile))
359 359 return orig(ui, repo, *pats, **opts)
360 360 finally:
361 361 wlock.release()
362 362
363 363 # Before starting the manifest merge, merge.updates will call
364 364 # _checkunknown to check if there are any files in the merged-in
365 365 # changeset that collide with unknown files in the working copy.
366 366 #
367 367 # The largefiles are seen as unknown, so this prevents us from merging
368 368 # in a file 'foo' if we already have a largefile with the same name.
369 369 #
370 370 # The overridden function filters the unknown files by removing any
371 371 # largefiles. This makes the merge proceed and we can then handle this
372 372 # case further in the overridden manifestmerge function below.
373 373 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
374 374 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
375 375 return False
376 376 return origfn(repo, wctx, mctx, f)
377 377
378 378 # The manifest merge handles conflicts on the manifest level. We want
379 379 # to handle changes in largefile-ness of files at this level too.
380 380 #
381 381 # The strategy is to run the original manifestmerge and then process
382 382 # the action list it outputs. There are two cases we need to deal with:
383 383 #
384 384 # 1. Normal file in p1, largefile in p2. Here the largefile is
385 385 # detected via its standin file, which will enter the working copy
386 386 # with a "get" action. It is not "merge" since the standin is all
387 387 # Mercurial is concerned with at this level -- the link to the
388 388 # existing normal file is not relevant here.
389 389 #
390 390 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
391 391 # since the largefile will be present in the working copy and
392 392 # different from the normal file in p2. Mercurial therefore
393 393 # triggers a merge action.
394 394 #
395 395 # In both cases, we prompt the user and emit new actions to either
396 396 # remove the standin (if the normal file was kept) or to remove the
397 397 # normal file and get the standin (if the largefile was kept). The
398 398 # default prompt answer is to use the largefile version since it was
399 399 # presumably changed on purpose.
400 400 #
401 401 # Finally, the merge.applyupdates function will then take care of
402 402 # writing the files into the working copy and lfcommands.updatelfiles
403 403 # will update the largefiles.
404 404 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
405 405 partial, acceptremote, followcopies):
406 406 overwrite = force and not branchmerge
407 407 actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
408 408 acceptremote, followcopies)
409 409
410 410 if overwrite:
411 411 return actions
412 412
413 413 removes = set(a[0] for a in actions if a[1] == 'r')
414 414 processed = []
415 415
416 416 for action in actions:
417 417 f, m, args, msg = action
418 418
419 419 splitstandin = f and lfutil.splitstandin(f)
420 420 if (m == "g" and splitstandin is not None and
421 421 splitstandin in p1 and splitstandin not in removes):
422 422 # Case 1: normal file in the working copy, largefile in
423 423 # the second parent
424 424 lfile = splitstandin
425 425 standin = f
426 426 msg = _('remote turned local normal file %s into a largefile\n'
427 427 'use (l)argefile or keep (n)ormal file?'
428 428 '$$ &Largefile $$ &Normal file') % lfile
429 429 if repo.ui.promptchoice(msg, 0) == 0:
430 430 processed.append((lfile, "r", None, msg))
431 431 processed.append((standin, "g", (p2.flags(standin),), msg))
432 432 else:
433 433 processed.append((standin, "r", None, msg))
434 434 elif (m == "g" and
435 435 lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
436 436 # Case 2: largefile in the working copy, normal file in
437 437 # the second parent
438 438 standin = lfutil.standin(f)
439 439 lfile = f
440 440 msg = _('remote turned local largefile %s into a normal file\n'
441 441 'keep (l)argefile or use (n)ormal file?'
442 442 '$$ &Largefile $$ &Normal file') % lfile
443 443 if repo.ui.promptchoice(msg, 0) == 0:
444 444 processed.append((lfile, "r", None, msg))
445 445 else:
446 446 processed.append((standin, "r", None, msg))
447 447 processed.append((lfile, "g", (p2.flags(lfile),), msg))
448 448 else:
449 449 processed.append(action)
450 450
451 451 return processed
452 452
453 453 # Override filemerge to prompt the user about how they wish to merge
454 454 # largefiles. This will handle identical edits without prompting the user.
455 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
455 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
456 456 if not lfutil.isstandin(orig):
457 return origfn(repo, mynode, orig, fcd, fco, fca)
457 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
458 458
459 459 ahash = fca.data().strip().lower()
460 460 dhash = fcd.data().strip().lower()
461 461 ohash = fco.data().strip().lower()
462 462 if (ohash != ahash and
463 463 ohash != dhash and
464 464 (dhash == ahash or
465 465 repo.ui.promptchoice(
466 466 _('largefile %s has a merge conflict\nancestor was %s\n'
467 467 'keep (l)ocal %s or\ntake (o)ther %s?'
468 468 '$$ &Local $$ &Other') %
469 469 (lfutil.splitstandin(orig), ahash, dhash, ohash),
470 470 0) == 1)):
471 471 repo.wwrite(fcd.path(), fco.data(), fco.flags())
472 472 return 0
473 473
474 474 # Copy first changes the matchers to match standins instead of
475 475 # largefiles. Then it overrides util.copyfile in that function it
476 476 # checks if the destination largefile already exists. It also keeps a
477 477 # list of copied files so that the largefiles can be copied and the
478 478 # dirstate updated.
479 479 def overridecopy(orig, ui, repo, pats, opts, rename=False):
480 480 # doesn't remove largefile on rename
481 481 if len(pats) < 2:
482 482 # this isn't legal, let the original function deal with it
483 483 return orig(ui, repo, pats, opts, rename)
484 484
485 485 def makestandin(relpath):
486 486 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
487 487 return os.path.join(repo.wjoin(lfutil.standin(path)))
488 488
489 489 fullpats = scmutil.expandpats(pats)
490 490 dest = fullpats[-1]
491 491
492 492 if os.path.isdir(dest):
493 493 if not os.path.isdir(makestandin(dest)):
494 494 os.makedirs(makestandin(dest))
495 495 # This could copy both lfiles and normal files in one command,
496 496 # but we don't want to do that. First replace their matcher to
497 497 # only match normal files and run it, then replace it to just
498 498 # match largefiles and run it again.
499 499 nonormalfiles = False
500 500 nolfiles = False
501 501 installnormalfilesmatchfn(repo[None].manifest())
502 502 try:
503 503 try:
504 504 result = orig(ui, repo, pats, opts, rename)
505 505 except util.Abort, e:
506 506 if str(e) != _('no files to copy'):
507 507 raise e
508 508 else:
509 509 nonormalfiles = True
510 510 result = 0
511 511 finally:
512 512 restorematchfn()
513 513
514 514 # The first rename can cause our current working directory to be removed.
515 515 # In that case there is nothing left to copy/rename so just quit.
516 516 try:
517 517 repo.getcwd()
518 518 except OSError:
519 519 return result
520 520
521 521 try:
522 522 try:
523 523 # When we call orig below it creates the standins but we don't add
524 524 # them to the dir state until later so lock during that time.
525 525 wlock = repo.wlock()
526 526
527 527 manifest = repo[None].manifest()
528 528 def overridematch(ctx, pats=[], opts={}, globbed=False,
529 529 default='relpath'):
530 530 newpats = []
531 531 # The patterns were previously mangled to add the standin
532 532 # directory; we need to remove that now
533 533 for pat in pats:
534 534 if match_.patkind(pat) is None and lfutil.shortname in pat:
535 535 newpats.append(pat.replace(lfutil.shortname, ''))
536 536 else:
537 537 newpats.append(pat)
538 538 match = oldmatch(ctx, newpats, opts, globbed, default)
539 539 m = copy.copy(match)
540 540 lfile = lambda f: lfutil.standin(f) in manifest
541 541 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
542 542 m._fmap = set(m._files)
543 543 m._always = False
544 544 origmatchfn = m.matchfn
545 545 m.matchfn = lambda f: (lfutil.isstandin(f) and
546 546 (f in manifest) and
547 547 origmatchfn(lfutil.splitstandin(f)) or
548 548 None)
549 549 return m
550 550 oldmatch = installmatchfn(overridematch)
551 551 listpats = []
552 552 for pat in pats:
553 553 if match_.patkind(pat) is not None:
554 554 listpats.append(pat)
555 555 else:
556 556 listpats.append(makestandin(pat))
557 557
558 558 try:
559 559 origcopyfile = util.copyfile
560 560 copiedfiles = []
561 561 def overridecopyfile(src, dest):
562 562 if (lfutil.shortname in src and
563 563 dest.startswith(repo.wjoin(lfutil.shortname))):
564 564 destlfile = dest.replace(lfutil.shortname, '')
565 565 if not opts['force'] and os.path.exists(destlfile):
566 566 raise IOError('',
567 567 _('destination largefile already exists'))
568 568 copiedfiles.append((src, dest))
569 569 origcopyfile(src, dest)
570 570
571 571 util.copyfile = overridecopyfile
572 572 result += orig(ui, repo, listpats, opts, rename)
573 573 finally:
574 574 util.copyfile = origcopyfile
575 575
576 576 lfdirstate = lfutil.openlfdirstate(ui, repo)
577 577 for (src, dest) in copiedfiles:
578 578 if (lfutil.shortname in src and
579 579 dest.startswith(repo.wjoin(lfutil.shortname))):
580 580 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
581 581 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
582 582 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
583 583 if not os.path.isdir(destlfiledir):
584 584 os.makedirs(destlfiledir)
585 585 if rename:
586 586 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
587 587
588 588 # The file is gone, but this deletes any empty parent
589 589 # directories as a side-effect.
590 590 util.unlinkpath(repo.wjoin(srclfile), True)
591 591 lfdirstate.remove(srclfile)
592 592 else:
593 593 util.copyfile(repo.wjoin(srclfile),
594 594 repo.wjoin(destlfile))
595 595
596 596 lfdirstate.add(destlfile)
597 597 lfdirstate.write()
598 598 except util.Abort, e:
599 599 if str(e) != _('no files to copy'):
600 600 raise e
601 601 else:
602 602 nolfiles = True
603 603 finally:
604 604 restorematchfn()
605 605 wlock.release()
606 606
607 607 if nolfiles and nonormalfiles:
608 608 raise util.Abort(_('no files to copy'))
609 609
610 610 return result
611 611
612 612 # When the user calls revert, we have to be careful to not revert any
613 613 # changes to other largefiles accidentally. This means we have to keep
614 614 # track of the largefiles that are being reverted so we only pull down
615 615 # the necessary largefiles.
616 616 #
617 617 # Standins are only updated (to match the hash of largefiles) before
618 618 # commits. Update the standins then run the original revert, changing
619 619 # the matcher to hit standins instead of largefiles. Based on the
620 620 # resulting standins update the largefiles.
621 621 def overriderevert(orig, ui, repo, *pats, **opts):
622 622 # Because we put the standins in a bad state (by updating them)
623 623 # and then return them to a correct state we need to lock to
624 624 # prevent others from changing them in their incorrect state.
625 625 wlock = repo.wlock()
626 626 try:
627 627 lfdirstate = lfutil.openlfdirstate(ui, repo)
628 628 (modified, added, removed, missing, unknown, ignored, clean) = \
629 629 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
630 630 lfdirstate.write()
631 631 for lfile in modified:
632 632 lfutil.updatestandin(repo, lfutil.standin(lfile))
633 633 for lfile in missing:
634 634 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
635 635 os.unlink(repo.wjoin(lfutil.standin(lfile)))
636 636
637 637 oldstandins = lfutil.getstandinsstate(repo)
638 638
639 639 def overridematch(ctx, pats=[], opts={}, globbed=False,
640 640 default='relpath'):
641 641 match = oldmatch(ctx, pats, opts, globbed, default)
642 642 m = copy.copy(match)
643 643 def tostandin(f):
644 644 if lfutil.standin(f) in ctx:
645 645 return lfutil.standin(f)
646 646 elif lfutil.standin(f) in repo[None]:
647 647 return None
648 648 return f
649 649 m._files = [tostandin(f) for f in m._files]
650 650 m._files = [f for f in m._files if f is not None]
651 651 m._fmap = set(m._files)
652 652 m._always = False
653 653 origmatchfn = m.matchfn
654 654 def matchfn(f):
655 655 if lfutil.isstandin(f):
656 656 return (origmatchfn(lfutil.splitstandin(f)) and
657 657 (f in repo[None] or f in ctx))
658 658 return origmatchfn(f)
659 659 m.matchfn = matchfn
660 660 return m
661 661 oldmatch = installmatchfn(overridematch)
662 662 try:
663 663 orig(ui, repo, *pats, **opts)
664 664 finally:
665 665 restorematchfn()
666 666
667 667 newstandins = lfutil.getstandinsstate(repo)
668 668 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
669 669 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False)
670 670
671 671 finally:
672 672 wlock.release()
673 673
674 674 def hgupdaterepo(orig, repo, node, overwrite):
675 675 if not overwrite:
676 676 # Only call updatelfiles on the standins that have changed to save time
677 677 oldstandins = lfutil.getstandinsstate(repo)
678 678
679 679 result = orig(repo, node, overwrite)
680 680
681 681 filelist = None
682 682 if not overwrite:
683 683 newstandins = lfutil.getstandinsstate(repo)
684 684 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
685 685 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
686 686 return result
687 687
688 688 def hgmerge(orig, repo, node, force=None, remind=True):
689 689 result = orig(repo, node, force, remind)
690 690 lfcommands.updatelfiles(repo.ui, repo)
691 691 return result
692 692
693 693 # When we rebase a repository with remotely changed largefiles, we need to
694 694 # take some extra care so that the largefiles are correctly updated in the
695 695 # working copy
696 696 def overridepull(orig, ui, repo, source=None, **opts):
697 697 revsprepull = len(repo)
698 698 if not source:
699 699 source = 'default'
700 700 repo.lfpullsource = source
701 701 if opts.get('rebase', False):
702 702 repo._isrebasing = True
703 703 try:
704 704 if opts.get('update'):
705 705 del opts['update']
706 706 ui.debug('--update and --rebase are not compatible, ignoring '
707 707 'the update flag\n')
708 708 del opts['rebase']
709 709 origpostincoming = commands.postincoming
710 710 def _dummy(*args, **kwargs):
711 711 pass
712 712 commands.postincoming = _dummy
713 713 try:
714 714 result = commands.pull(ui, repo, source, **opts)
715 715 finally:
716 716 commands.postincoming = origpostincoming
717 717 revspostpull = len(repo)
718 718 if revspostpull > revsprepull:
719 719 result = result or rebase.rebase(ui, repo)
720 720 finally:
721 721 repo._isrebasing = False
722 722 else:
723 723 result = orig(ui, repo, source, **opts)
724 724 revspostpull = len(repo)
725 725 lfrevs = opts.get('lfrev', [])
726 726 if opts.get('all_largefiles'):
727 727 lfrevs.append('pulled()')
728 728 if lfrevs and revspostpull > revsprepull:
729 729 numcached = 0
730 730 repo.firstpulled = revsprepull # for pulled() revset expression
731 731 try:
732 732 for rev in scmutil.revrange(repo, lfrevs):
733 733 ui.note(_('pulling largefiles for revision %s\n') % rev)
734 734 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
735 735 numcached += len(cached)
736 736 finally:
737 737 del repo.firstpulled
738 738 ui.status(_("%d largefiles cached\n") % numcached)
739 739 return result
740 740
741 741 def pulledrevsetsymbol(repo, subset, x):
742 742 """``pulled()``
743 743 Changesets that have just been pulled.
744 744
745 745 Only available with largefiles from pull --lfrev expressions.
746 746
747 747 .. container:: verbose
748 748
749 749 Some examples:
750 750
751 751 - pull largefiles for all new changesets::
752 752
753 753 hg pull -lfrev "pulled()"
754 754
755 755 - pull largefiles for all new branch heads::
756 756
757 757 hg pull -lfrev "head(pulled()) and not closed()"
758 758
759 759 """
760 760
761 761 try:
762 762 firstpulled = repo.firstpulled
763 763 except AttributeError:
764 764 raise util.Abort(_("pulled() only available in --lfrev"))
765 765 return revset.baseset([r for r in subset if r >= firstpulled])
766 766
767 767 def overrideclone(orig, ui, source, dest=None, **opts):
768 768 d = dest
769 769 if d is None:
770 770 d = hg.defaultdest(source)
771 771 if opts.get('all_largefiles') and not hg.islocal(d):
772 772 raise util.Abort(_(
773 773 '--all-largefiles is incompatible with non-local destination %s') %
774 774 d)
775 775
776 776 return orig(ui, source, dest, **opts)
777 777
778 778 def hgclone(orig, ui, opts, *args, **kwargs):
779 779 result = orig(ui, opts, *args, **kwargs)
780 780
781 781 if result is not None:
782 782 sourcerepo, destrepo = result
783 783 repo = destrepo.local()
784 784
785 785 # Caching is implicitly limited to 'rev' option, since the dest repo was
786 786 # truncated at that point. The user may expect a download count with
787 787 # this option, so attempt whether or not this is a largefile repo.
788 788 if opts.get('all_largefiles'):
789 789 success, missing = lfcommands.downloadlfiles(ui, repo, None)
790 790
791 791 if missing != 0:
792 792 return None
793 793
794 794 return result
795 795
796 796 def overriderebase(orig, ui, repo, **opts):
797 797 repo._isrebasing = True
798 798 try:
799 799 return orig(ui, repo, **opts)
800 800 finally:
801 801 repo._isrebasing = False
802 802
803 803 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
804 804 prefix=None, mtime=None, subrepos=None):
805 805 # No need to lock because we are only reading history and
806 806 # largefile caches, neither of which are modified.
807 807 lfcommands.cachelfiles(repo.ui, repo, node)
808 808
809 809 if kind not in archival.archivers:
810 810 raise util.Abort(_("unknown archive type '%s'") % kind)
811 811
812 812 ctx = repo[node]
813 813
814 814 if kind == 'files':
815 815 if prefix:
816 816 raise util.Abort(
817 817 _('cannot give prefix when archiving to files'))
818 818 else:
819 819 prefix = archival.tidyprefix(dest, kind, prefix)
820 820
821 821 def write(name, mode, islink, getdata):
822 822 if matchfn and not matchfn(name):
823 823 return
824 824 data = getdata()
825 825 if decode:
826 826 data = repo.wwritedata(name, data)
827 827 archiver.addfile(prefix + name, mode, islink, data)
828 828
829 829 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
830 830
831 831 if repo.ui.configbool("ui", "archivemeta", True):
832 832 def metadata():
833 833 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
834 834 hex(repo.changelog.node(0)), hex(node), ctx.branch())
835 835
836 836 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
837 837 if repo.tagtype(t) == 'global')
838 838 if not tags:
839 839 repo.ui.pushbuffer()
840 840 opts = {'template': '{latesttag}\n{latesttagdistance}',
841 841 'style': '', 'patch': None, 'git': None}
842 842 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
843 843 ltags, dist = repo.ui.popbuffer().split('\n')
844 844 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
845 845 tags += 'latesttagdistance: %s\n' % dist
846 846
847 847 return base + tags
848 848
849 849 write('.hg_archival.txt', 0644, False, metadata)
850 850
851 851 for f in ctx:
852 852 ff = ctx.flags(f)
853 853 getdata = ctx[f].data
854 854 if lfutil.isstandin(f):
855 855 path = lfutil.findfile(repo, getdata().strip())
856 856 if path is None:
857 857 raise util.Abort(
858 858 _('largefile %s not found in repo store or system cache')
859 859 % lfutil.splitstandin(f))
860 860 f = lfutil.splitstandin(f)
861 861
862 862 def getdatafn():
863 863 fd = None
864 864 try:
865 865 fd = open(path, 'rb')
866 866 return fd.read()
867 867 finally:
868 868 if fd:
869 869 fd.close()
870 870
871 871 getdata = getdatafn
872 872 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
873 873
874 874 if subrepos:
875 875 for subpath in sorted(ctx.substate):
876 876 sub = ctx.sub(subpath)
877 877 submatch = match_.narrowmatcher(subpath, matchfn)
878 878 sub.archive(repo.ui, archiver, prefix, submatch)
879 879
880 880 archiver.done()
881 881
882 882 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
883 883 repo._get(repo._state + ('hg',))
884 884 rev = repo._state[1]
885 885 ctx = repo._repo[rev]
886 886
887 887 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
888 888
889 889 def write(name, mode, islink, getdata):
890 890 # At this point, the standin has been replaced with the largefile name,
891 891 # so the normal matcher works here without the lfutil variants.
892 892 if match and not match(f):
893 893 return
894 894 data = getdata()
895 895
896 896 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
897 897
898 898 for f in ctx:
899 899 ff = ctx.flags(f)
900 900 getdata = ctx[f].data
901 901 if lfutil.isstandin(f):
902 902 path = lfutil.findfile(repo._repo, getdata().strip())
903 903 if path is None:
904 904 raise util.Abort(
905 905 _('largefile %s not found in repo store or system cache')
906 906 % lfutil.splitstandin(f))
907 907 f = lfutil.splitstandin(f)
908 908
909 909 def getdatafn():
910 910 fd = None
911 911 try:
912 912 fd = open(os.path.join(prefix, path), 'rb')
913 913 return fd.read()
914 914 finally:
915 915 if fd:
916 916 fd.close()
917 917
918 918 getdata = getdatafn
919 919
920 920 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
921 921
922 922 for subpath in sorted(ctx.substate):
923 923 sub = ctx.sub(subpath)
924 924 submatch = match_.narrowmatcher(subpath, match)
925 925 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
926 926 submatch)
927 927
928 928 # If a largefile is modified, the change is not reflected in its
929 929 # standin until a commit. cmdutil.bailifchanged() raises an exception
930 930 # if the repo has uncommitted changes. Wrap it to also check if
931 931 # largefiles were changed. This is used by bisect and backout.
932 932 def overridebailifchanged(orig, repo):
933 933 orig(repo)
934 934 repo.lfstatus = True
935 935 modified, added, removed, deleted = repo.status()[:4]
936 936 repo.lfstatus = False
937 937 if modified or added or removed or deleted:
938 938 raise util.Abort(_('uncommitted changes'))
939 939
940 940 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
941 941 def overridefetch(orig, ui, repo, *pats, **opts):
942 942 repo.lfstatus = True
943 943 modified, added, removed, deleted = repo.status()[:4]
944 944 repo.lfstatus = False
945 945 if modified or added or removed or deleted:
946 946 raise util.Abort(_('uncommitted changes'))
947 947 return orig(ui, repo, *pats, **opts)
948 948
949 949 def overrideforget(orig, ui, repo, *pats, **opts):
950 950 installnormalfilesmatchfn(repo[None].manifest())
951 951 result = orig(ui, repo, *pats, **opts)
952 952 restorematchfn()
953 953 m = scmutil.match(repo[None], pats, opts)
954 954
955 955 try:
956 956 repo.lfstatus = True
957 957 s = repo.status(match=m, clean=True)
958 958 finally:
959 959 repo.lfstatus = False
960 960 forget = sorted(s[0] + s[1] + s[3] + s[6])
961 961 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
962 962
963 963 for f in forget:
964 964 if lfutil.standin(f) not in repo.dirstate and not \
965 965 os.path.isdir(m.rel(lfutil.standin(f))):
966 966 ui.warn(_('not removing %s: file is already untracked\n')
967 967 % m.rel(f))
968 968 result = 1
969 969
970 970 for f in forget:
971 971 if ui.verbose or not m.exact(f):
972 972 ui.status(_('removing %s\n') % m.rel(f))
973 973
974 974 # Need to lock because standin files are deleted then removed from the
975 975 # repository and we could race in-between.
976 976 wlock = repo.wlock()
977 977 try:
978 978 lfdirstate = lfutil.openlfdirstate(ui, repo)
979 979 for f in forget:
980 980 if lfdirstate[f] == 'a':
981 981 lfdirstate.drop(f)
982 982 else:
983 983 lfdirstate.remove(f)
984 984 lfdirstate.write()
985 985 standins = [lfutil.standin(f) for f in forget]
986 986 for f in standins:
987 987 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
988 988 repo[None].forget(standins)
989 989 finally:
990 990 wlock.release()
991 991
992 992 return result
993 993
994 994 def outgoinghook(ui, repo, other, opts, missing):
995 995 if opts.pop('large', None):
996 996 toupload = set()
997 997 lfutil.getlfilestoupload(repo, missing,
998 998 lambda fn, lfhash: toupload.add(fn))
999 999 if not toupload:
1000 1000 ui.status(_('largefiles: no files to upload\n'))
1001 1001 else:
1002 1002 ui.status(_('largefiles to upload:\n'))
1003 1003 for file in sorted(toupload):
1004 1004 ui.status(lfutil.splitstandin(file) + '\n')
1005 1005 ui.status('\n')
1006 1006
1007 1007 def summaryremotehook(ui, repo, opts, changes):
1008 1008 largeopt = opts.get('large', False)
1009 1009 if changes is None:
1010 1010 if largeopt:
1011 1011 return (False, True) # only outgoing check is needed
1012 1012 else:
1013 1013 return (False, False)
1014 1014 elif largeopt:
1015 1015 url, branch, peer, outgoing = changes[1]
1016 1016 if peer is None:
1017 1017 # i18n: column positioning for "hg summary"
1018 1018 ui.status(_('largefiles: (no remote repo)\n'))
1019 1019 return
1020 1020
1021 1021 toupload = set()
1022 1022 lfutil.getlfilestoupload(repo, outgoing.missing,
1023 1023 lambda fn, lfhash: toupload.add(fn))
1024 1024 if not toupload:
1025 1025 # i18n: column positioning for "hg summary"
1026 1026 ui.status(_('largefiles: (no files to upload)\n'))
1027 1027 else:
1028 1028 # i18n: column positioning for "hg summary"
1029 1029 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1030 1030
1031 1031 def overridesummary(orig, ui, repo, *pats, **opts):
1032 1032 try:
1033 1033 repo.lfstatus = True
1034 1034 orig(ui, repo, *pats, **opts)
1035 1035 finally:
1036 1036 repo.lfstatus = False
1037 1037
1038 1038 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1039 1039 similarity=None):
1040 1040 if not lfutil.islfilesrepo(repo):
1041 1041 return orig(repo, pats, opts, dry_run, similarity)
1042 1042 # Get the list of missing largefiles so we can remove them
1043 1043 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1044 1044 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1045 1045 False, False)
1046 1046 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1047 1047
1048 1048 # Call into the normal remove code, but we want the removal of the standin
1049 1049 # to be handled by the original addremove. Monkey patching here makes sure
1050 1050 # we don't remove the standin in the largefiles code, preventing a very
1051 1051 # confused state later.
1052 1052 if missing:
1053 1053 m = [repo.wjoin(f) for f in missing]
1054 1054 repo._isaddremove = True
1055 1055 removelargefiles(repo.ui, repo, *m, **opts)
1056 1056 repo._isaddremove = False
1057 1057 # Call into the normal add code, and any files that *should* be added as
1058 1058 # largefiles will be
1059 1059 addlargefiles(repo.ui, repo, *pats, **opts)
1060 1060 # Now that we've handled largefiles, hand off to the original addremove
1061 1061 # function to take care of the rest. Make sure it doesn't do anything with
1062 1062 # largefiles by installing a matcher that will ignore them.
1063 1063 installnormalfilesmatchfn(repo[None].manifest())
1064 1064 result = orig(repo, pats, opts, dry_run, similarity)
1065 1065 restorematchfn()
1066 1066 return result
1067 1067
1068 1068 # Calling purge with --all will cause the largefiles to be deleted.
1069 1069 # Override repo.status to prevent this from happening.
1070 1070 def overridepurge(orig, ui, repo, *dirs, **opts):
1071 1071 # XXX large file status is buggy when used on repo proxy.
1072 1072 # XXX this needs to be investigated.
1073 1073 repo = repo.unfiltered()
1074 1074 oldstatus = repo.status
1075 1075 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1076 1076 clean=False, unknown=False, listsubrepos=False):
1077 1077 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1078 1078 listsubrepos)
1079 1079 lfdirstate = lfutil.openlfdirstate(ui, repo)
1080 1080 modified, added, removed, deleted, unknown, ignored, clean = r
1081 1081 unknown = [f for f in unknown if lfdirstate[f] == '?']
1082 1082 ignored = [f for f in ignored if lfdirstate[f] == '?']
1083 1083 return modified, added, removed, deleted, unknown, ignored, clean
1084 1084 repo.status = overridestatus
1085 1085 orig(ui, repo, *dirs, **opts)
1086 1086 repo.status = oldstatus
1087 1087
1088 1088 def overriderollback(orig, ui, repo, **opts):
1089 1089 result = orig(ui, repo, **opts)
1090 1090 merge.update(repo, node=None, branchmerge=False, force=True,
1091 1091 partial=lfutil.isstandin)
1092 1092 wlock = repo.wlock()
1093 1093 try:
1094 1094 lfdirstate = lfutil.openlfdirstate(ui, repo)
1095 1095 lfiles = lfutil.listlfiles(repo)
1096 1096 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1097 1097 for file in lfiles:
1098 1098 if file in oldlfiles:
1099 1099 lfdirstate.normallookup(file)
1100 1100 else:
1101 1101 lfdirstate.add(file)
1102 1102 lfdirstate.write()
1103 1103 finally:
1104 1104 wlock.release()
1105 1105 return result
1106 1106
1107 1107 def overridetransplant(orig, ui, repo, *revs, **opts):
1108 1108 try:
1109 1109 oldstandins = lfutil.getstandinsstate(repo)
1110 1110 repo._istransplanting = True
1111 1111 result = orig(ui, repo, *revs, **opts)
1112 1112 newstandins = lfutil.getstandinsstate(repo)
1113 1113 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1114 1114 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1115 1115 printmessage=True)
1116 1116 finally:
1117 1117 repo._istransplanting = False
1118 1118 return result
1119 1119
1120 1120 def overridecat(orig, ui, repo, file1, *pats, **opts):
1121 1121 ctx = scmutil.revsingle(repo, opts.get('rev'))
1122 1122 err = 1
1123 1123 notbad = set()
1124 1124 m = scmutil.match(ctx, (file1,) + pats, opts)
1125 1125 origmatchfn = m.matchfn
1126 1126 def lfmatchfn(f):
1127 1127 if origmatchfn(f):
1128 1128 return True
1129 1129 lf = lfutil.splitstandin(f)
1130 1130 if lf is None:
1131 1131 return False
1132 1132 notbad.add(lf)
1133 1133 return origmatchfn(lf)
1134 1134 m.matchfn = lfmatchfn
1135 1135 origbadfn = m.bad
1136 1136 def lfbadfn(f, msg):
1137 1137 if not f in notbad:
1138 1138 origbadfn(f, msg)
1139 1139 m.bad = lfbadfn
1140 1140 for f in ctx.walk(m):
1141 1141 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1142 1142 pathname=f)
1143 1143 lf = lfutil.splitstandin(f)
1144 1144 if lf is None or origmatchfn(f):
1145 1145 # duplicating unreachable code from commands.cat
1146 1146 data = ctx[f].data()
1147 1147 if opts.get('decode'):
1148 1148 data = repo.wwritedata(f, data)
1149 1149 fp.write(data)
1150 1150 else:
1151 1151 hash = lfutil.readstandin(repo, lf, ctx.rev())
1152 1152 if not lfutil.inusercache(repo.ui, hash):
1153 1153 store = basestore._openstore(repo)
1154 1154 success, missing = store.get([(lf, hash)])
1155 1155 if len(success) != 1:
1156 1156 raise util.Abort(
1157 1157 _('largefile %s is not in cache and could not be '
1158 1158 'downloaded') % lf)
1159 1159 path = lfutil.usercachepath(repo.ui, hash)
1160 1160 fpin = open(path, "rb")
1161 1161 for chunk in util.filechunkiter(fpin, 128 * 1024):
1162 1162 fp.write(chunk)
1163 1163 fpin.close()
1164 1164 fp.close()
1165 1165 err = 0
1166 1166 return err
1167 1167
1168 1168 def mercurialsinkbefore(orig, sink):
1169 1169 sink.repo._isconverting = True
1170 1170 orig(sink)
1171 1171
1172 1172 def mercurialsinkafter(orig, sink):
1173 1173 sink.repo._isconverting = False
1174 1174 orig(sink)
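The hunk that follows is filemerge.py itself, where the new keyword is accepted and the hard-coded label list becomes the _defaultconflictlabels fallback. The overridefilemerge wrapper shown above follows the usual extension pattern for this kind of signature change: accept the new keyword with the same default and forward it to the wrapped function explicitly. A minimal, self-contained sketch of that pattern (corefilemerge and install_wrapper are illustrative stand-ins, not Mercurial's extensions.wrapfunction machinery):

# Sketch of the wrapper pattern used by overridefilemerge: when the wrapped
# core function grows a new keyword argument (here labels), the override
# accepts it with the same default and passes it through unchanged.

def corefilemerge(repo, mynode, orig, fcd, fco, fca, labels=None):
    # stand-in for filemerge.filemerge
    return 'core merge of %s with labels %r' % (orig, labels)

def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
    # largefiles special-cases standins (lfutil.isstandin(orig)); everything
    # else is delegated, with the new keyword forwarded explicitly
    if not orig.startswith('.hglf/'):    # illustrative stand-in for isstandin()
        return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
    return 'largefile-specific merge of %s' % orig

def install_wrapper(wrapper, wrapped):
    # minimal stand-in for extensions.wrapfunction: the wrapper receives the
    # original function as its first argument
    def wrappedcall(*args, **kwargs):
        return wrapper(wrapped, *args, **kwargs)
    return wrappedcall

filemerge = install_wrapper(overridefilemerge, corefilemerge)
print(filemerge('repo', 'node', 'foo.txt', None, None, None,
                labels=['working copy', 'merge rev']))
print(filemerge('repo', 'node', '.hglf/big.bin', None, None, None))
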
@@ -1,433 +1,437 b''
1 1 # filemerge.py - file-level merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import short
9 9 from i18n import _
10 10 import util, simplemerge, match, error, templater, templatekw
11 11 import os, tempfile, re, filecmp
12 12
13 13 def _toolstr(ui, tool, part, default=""):
14 14 return ui.config("merge-tools", tool + "." + part, default)
15 15
16 16 def _toolbool(ui, tool, part, default=False):
17 17 return ui.configbool("merge-tools", tool + "." + part, default)
18 18
19 19 def _toollist(ui, tool, part, default=[]):
20 20 return ui.configlist("merge-tools", tool + "." + part, default)
21 21
22 22 internals = {}
23 23
24 24 def internaltool(name, trymerge, onfailure=None):
25 25 '''return a decorator for populating internal merge tool table'''
26 26 def decorator(func):
27 27 fullname = 'internal:' + name
28 28 func.__doc__ = "``%s``\n" % fullname + func.__doc__.strip()
29 29 internals[fullname] = func
30 30 func.trymerge = trymerge
31 31 func.onfailure = onfailure
32 32 return func
33 33 return decorator
34 34
35 35 def _findtool(ui, tool):
36 36 if tool in internals:
37 37 return tool
38 38 for kn in ("regkey", "regkeyalt"):
39 39 k = _toolstr(ui, tool, kn)
40 40 if not k:
41 41 continue
42 42 p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
43 43 if p:
44 44 p = util.findexe(p + _toolstr(ui, tool, "regappend"))
45 45 if p:
46 46 return p
47 47 exe = _toolstr(ui, tool, "executable", tool)
48 48 return util.findexe(util.expandpath(exe))
49 49
50 50 def _picktool(repo, ui, path, binary, symlink):
51 51 def check(tool, pat, symlink, binary):
52 52 tmsg = tool
53 53 if pat:
54 54 tmsg += " specified for " + pat
55 55 if not _findtool(ui, tool):
56 56 if pat: # explicitly requested tool deserves a warning
57 57 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
58 58 else: # configured but non-existing tools are more silent
59 59 ui.note(_("couldn't find merge tool %s\n") % tmsg)
60 60 elif symlink and not _toolbool(ui, tool, "symlink"):
61 61 ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
62 62 elif binary and not _toolbool(ui, tool, "binary"):
63 63 ui.warn(_("tool %s can't handle binary\n") % tmsg)
64 64 elif not util.gui() and _toolbool(ui, tool, "gui"):
65 65 ui.warn(_("tool %s requires a GUI\n") % tmsg)
66 66 else:
67 67 return True
68 68 return False
69 69
70 70 # forcemerge comes from command line arguments, highest priority
71 71 force = ui.config('ui', 'forcemerge')
72 72 if force:
73 73 toolpath = _findtool(ui, force)
74 74 if toolpath:
75 75 return (force, util.shellquote(toolpath))
76 76 else:
77 77 # mimic HGMERGE if given tool not found
78 78 return (force, force)
79 79
80 80 # HGMERGE takes next precedence
81 81 hgmerge = os.environ.get("HGMERGE")
82 82 if hgmerge:
83 83 return (hgmerge, hgmerge)
84 84
85 85 # then patterns
86 86 for pat, tool in ui.configitems("merge-patterns"):
87 87 mf = match.match(repo.root, '', [pat])
88 88 if mf(path) and check(tool, pat, symlink, False):
89 89 toolpath = _findtool(ui, tool)
90 90 return (tool, util.shellquote(toolpath))
91 91
92 92 # then merge tools
93 93 tools = {}
94 94 for k, v in ui.configitems("merge-tools"):
95 95 t = k.split('.')[0]
96 96 if t not in tools:
97 97 tools[t] = int(_toolstr(ui, t, "priority", "0"))
98 98 names = tools.keys()
99 99 tools = sorted([(-p, t) for t, p in tools.items()])
100 100 uimerge = ui.config("ui", "merge")
101 101 if uimerge:
102 102 if uimerge not in names:
103 103 return (uimerge, uimerge)
104 104 tools.insert(0, (None, uimerge)) # highest priority
105 105 tools.append((None, "hgmerge")) # the old default, if found
106 106 for p, t in tools:
107 107 if check(t, None, symlink, binary):
108 108 toolpath = _findtool(ui, t)
109 109 return (t, util.shellquote(toolpath))
110 110
111 111 # internal merge or prompt as last resort
112 112 if symlink or binary:
113 113 return "internal:prompt", None
114 114 return "internal:merge", None
115 115
116 116 def _eoltype(data):
117 117 "Guess the EOL type of a file"
118 118 if '\0' in data: # binary
119 119 return None
120 120 if '\r\n' in data: # Windows
121 121 return '\r\n'
122 122 if '\r' in data: # Old Mac
123 123 return '\r'
124 124 if '\n' in data: # UNIX
125 125 return '\n'
126 126 return None # unknown
127 127
128 128 def _matcheol(file, origfile):
129 129 "Convert EOL markers in a file to match origfile"
130 130 tostyle = _eoltype(util.readfile(origfile))
131 131 if tostyle:
132 132 data = util.readfile(file)
133 133 style = _eoltype(data)
134 134 if style:
135 135 newdata = data.replace(style, tostyle)
136 136 if newdata != data:
137 137 util.writefile(file, newdata)
138 138
139 139 @internaltool('prompt', False)
140 140 def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf):
141 141 """Asks the user which of the local or the other version to keep as
142 142 the merged version."""
143 143 ui = repo.ui
144 144 fd = fcd.path()
145 145
146 146 if ui.promptchoice(_(" no tool found to merge %s\n"
147 147 "keep (l)ocal or take (o)ther?"
148 148 "$$ &Local $$ &Other") % fd, 0):
149 149 return _iother(repo, mynode, orig, fcd, fco, fca, toolconf)
150 150 else:
151 151 return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf)
152 152
153 153 @internaltool('local', False)
154 154 def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf):
155 155 """Uses the local version of files as the merged version."""
156 156 return 0
157 157
158 158 @internaltool('other', False)
159 159 def _iother(repo, mynode, orig, fcd, fco, fca, toolconf):
160 160 """Uses the other version of files as the merged version."""
161 161 repo.wwrite(fcd.path(), fco.data(), fco.flags())
162 162 return 0
163 163
164 164 @internaltool('fail', False)
165 165 def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
166 166 """
167 167 Rather than attempting to merge files that were modified on both
168 168 branches, it marks them as unresolved. The resolve command must be
169 169 used to resolve these conflicts."""
170 170 return 1
171 171
172 172 def _premerge(repo, toolconf, files, labels=None):
173 173 tool, toolpath, binary, symlink = toolconf
174 174 if symlink:
175 175 return 1
176 176 a, b, c, back = files
177 177
178 178 ui = repo.ui
179 179
180 180 # do we attempt to simplemerge first?
181 181 try:
182 182 premerge = _toolbool(ui, tool, "premerge", not binary)
183 183 except error.ConfigError:
184 184 premerge = _toolstr(ui, tool, "premerge").lower()
185 185 valid = 'keep'.split()
186 186 if premerge not in valid:
187 187 _valid = ', '.join(["'" + v + "'" for v in valid])
188 188 raise error.ConfigError(_("%s.premerge not valid "
189 189 "('%s' is neither boolean nor %s)") %
190 190 (tool, premerge, _valid))
191 191
192 192 if premerge:
193 193 r = simplemerge.simplemerge(ui, a, b, c, quiet=True, label=labels)
194 194 if not r:
195 195 ui.debug(" premerge successful\n")
196 196 return 0
197 197 if premerge != 'keep':
198 198 util.copyfile(back, a) # restore from backup and try again
199 199 return 1 # continue merging
200 200
201 201 @internaltool('merge', True,
202 202 _("merging %s incomplete! "
203 203 "(edit conflicts, then use 'hg resolve --mark')\n"))
204 204 def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
205 205 """
206 206 Uses the internal non-interactive simple merge algorithm for merging
207 207 files. It will fail if there are any conflicts and leave markers in
208 208 the partially merged file."""
209 209 tool, toolpath, binary, symlink = toolconf
210 210 if symlink:
211 211 repo.ui.warn(_('warning: internal:merge cannot merge symlinks '
212 212 'for %s\n') % fcd.path())
213 213 return False, 1
214 214 r = _premerge(repo, toolconf, files, labels=labels)
215 215 if r:
216 216 a, b, c, back = files
217 217
218 218 ui = repo.ui
219 219
220 220 r = simplemerge.simplemerge(ui, a, b, c, label=labels)
221 221 return True, r
222 222 return False, 0
223 223
224 224 @internaltool('dump', True)
225 225 def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
226 226 """
227 227 Creates three versions of the files to merge, containing the
228 228 contents of local, other and base. These files can then be used to
229 229 perform a merge manually. If the file to be merged is named
230 230 ``a.txt``, these files will accordingly be named ``a.txt.local``,
231 231 ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
232 232 same directory as ``a.txt``."""
233 233 r = _premerge(repo, toolconf, files, labels=labels)
234 234 if r:
235 235 a, b, c, back = files
236 236
237 237 fd = fcd.path()
238 238
239 239 util.copyfile(a, a + ".local")
240 240 repo.wwrite(fd + ".other", fco.data(), fco.flags())
241 241 repo.wwrite(fd + ".base", fca.data(), fca.flags())
242 242 return False, r
243 243
244 244 def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
245 245 r = _premerge(repo, toolconf, files, labels=labels)
246 246 if r:
247 247 tool, toolpath, binary, symlink = toolconf
248 248 a, b, c, back = files
249 249 out = ""
250 250 env = {'HG_FILE': fcd.path(),
251 251 'HG_MY_NODE': short(mynode),
252 252 'HG_OTHER_NODE': str(fco.changectx()),
253 253 'HG_BASE_NODE': str(fca.changectx()),
254 254 'HG_MY_ISLINK': 'l' in fcd.flags(),
255 255 'HG_OTHER_ISLINK': 'l' in fco.flags(),
256 256 'HG_BASE_ISLINK': 'l' in fca.flags(),
257 257 }
258 258
259 259 ui = repo.ui
260 260
261 261 args = _toolstr(ui, tool, "args", '$local $base $other')
262 262 if "$output" in args:
263 263 out, a = a, back # read input from backup, write to original
264 264 replace = {'local': a, 'base': b, 'other': c, 'output': out}
265 265 args = util.interpolate(r'\$', replace, args,
266 266 lambda s: util.shellquote(util.localpath(s)))
267 267 r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
268 268 out=ui.fout)
269 269 return True, r
270 270 return False, 0
271 271
272 272 def _formatconflictmarker(repo, ctx, template, label, pad):
273 273 """Applies the given template to the ctx, prefixed by the label.
274 274
275 275 Pad is the minimum width of the label prefix, so that multiple markers
276 276 can have aligned templated parts.
277 277 """
278 278 if ctx.node() is None:
279 279 ctx = ctx.p1()
280 280
281 281 props = templatekw.keywords.copy()
282 282 props['templ'] = template
283 283 props['ctx'] = ctx
284 284 props['repo'] = repo
285 285 templateresult = template('conflictmarker', **props)
286 286
287 287 label = ('%s:' % label).ljust(pad + 1)
288 288 mark = '%s %s' % (label, templater.stringify(templateresult))
289 289
290 290 # The <<< marks add 8 to the length, and '...' adds three, so max
291 291 # length of the actual marker is 69.
292 292 maxlength = 80 - 8 - 3
293 293 if len(mark) > maxlength:
294 294 mark = mark[:maxlength] + '...'
295 295 return mark
296 296
297 297 _defaultconflictmarker = ('{node|short} ' +
298 298 '{ifeq(tags, "tip", "", "{tags} ")}' +
299 299 '{if(bookmarks, "{bookmarks} ")}' +
300 300 '{ifeq(branch, "default", "", "{branch} ")}' +
301 301 '- {author|user}: "{desc|firstline}"')
302 302
303 _defaultconflictlabels = ['local', 'other']
304
303 305 def _formatlabels(repo, fcd, fco, labels):
304 306 """Formats the given labels using the conflict marker template.
305 307
306 308 Returns a list of formatted labels.
307 309 """
308 310 cd = fcd.changectx()
309 311 co = fco.changectx()
310 312
311 313 ui = repo.ui
312 314 template = ui.config('ui', 'mergemarkertemplate', _defaultconflictmarker)
313 315 template = templater.parsestring(template, quoted=False)
314 316 tmpl = templater.templater(None, cache={ 'conflictmarker' : template })
315 317
316 318 pad = max(len(labels[0]), len(labels[1]))
317 319
318 320 return [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
319 321 _formatconflictmarker(repo, co, tmpl, labels[1], pad)]
320 322
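A standalone sketch of the padding and truncation rules applied by _formatconflictmarker above (an 80-column budget minus 8 for the '<<<<<<< ' prefix and 3 for the trailing ellipsis); format_marker is a hypothetical simplification that takes a pre-rendered description instead of running the template engine:

def format_marker(label, description, pad):
    """Pad the label and clamp the marker so it fits on an 80-column line."""
    mark = "%s %s" % (("%s:" % label).ljust(pad + 1), description)
    maxlength = 80 - 8 - 3   # 8 for the '<<<<<<< ' prefix, 3 for the '...'
    if len(mark) > maxlength:
        mark = mark[:maxlength] + "..."
    return mark

labels = ["local", "other"]
pad = max(len(l) for l in labels)
print(format_marker(labels[0], 'abc12345 - alice: "fix frobnication"', pad))
print(format_marker(labels[1], 'def67890 - bob: "' + "x" * 80 + '"', pad))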
321 def filemerge(repo, mynode, orig, fcd, fco, fca):
323 def filemerge(repo, mynode, orig, fcd, fco, fca, labels=None):
322 324 """perform a 3-way merge in the working directory
323 325
324 326 mynode = parent node before merge
325 327 orig = original local filename before merge
326 328 fco = other file context
327 329 fca = ancestor file context
328 330 fcd = local file context for current/destination file
329 331 """
330 332
331 333 def temp(prefix, ctx):
332 334 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
333 335 (fd, name) = tempfile.mkstemp(prefix=pre)
334 336 data = repo.wwritedata(ctx.path(), ctx.data())
335 337 f = os.fdopen(fd, "wb")
336 338 f.write(data)
337 339 f.close()
338 340 return name
339 341
340 342 if not fco.cmp(fcd): # files identical?
341 343 return None
342 344
343 345 ui = repo.ui
344 346 fd = fcd.path()
345 347 binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
346 348 symlink = 'l' in fcd.flags() + fco.flags()
347 349 tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
348 350 ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
349 351 (tool, fd, binary, symlink))
350 352
351 353 if tool in internals:
352 354 func = internals[tool]
353 355 trymerge = func.trymerge
354 356 onfailure = func.onfailure
355 357 else:
356 358 func = _xmerge
357 359 trymerge = True
358 360 onfailure = _("merging %s failed!\n")
359 361
360 362 toolconf = tool, toolpath, binary, symlink
361 363
362 364 if not trymerge:
363 365 return func(repo, mynode, orig, fcd, fco, fca, toolconf)
364 366
365 367 a = repo.wjoin(fd)
366 368 b = temp("base", fca)
367 369 c = temp("other", fco)
368 370 back = a + ".orig"
369 371 util.copyfile(a, back)
370 372
371 373 if orig != fco.path():
372 374 ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
373 375 else:
374 376 ui.status(_("merging %s\n") % fd)
375 377
376 378 ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
377 379
378 380 markerstyle = ui.config('ui', 'mergemarkers', 'detailed')
379 labels = ['local', 'other']
380 381 if markerstyle == 'basic':
381 formattedlabels = labels
382 formattedlabels = _defaultconflictlabels
382 383 else:
384 if not labels:
385 labels = _defaultconflictlabels
386
383 387 formattedlabels = _formatlabels(repo, fcd, fco, labels)
384 388
385 389 needcheck, r = func(repo, mynode, orig, fcd, fco, fca, toolconf,
386 390 (a, b, c, back), labels=formattedlabels)
387 391 if not needcheck:
388 392 if r:
389 393 if onfailure:
390 394 ui.warn(onfailure % fd)
391 395 else:
392 396 util.unlink(back)
393 397
394 398 util.unlink(b)
395 399 util.unlink(c)
396 400 return r
397 401
398 402 if not r and (_toolbool(ui, tool, "checkconflicts") or
399 403 'conflicts' in _toollist(ui, tool, "check")):
400 404 if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
401 405 re.MULTILINE):
402 406 r = 1
403 407
404 408 checked = False
405 409 if 'prompt' in _toollist(ui, tool, "check"):
406 410 checked = True
407 411 if ui.promptchoice(_("was merge of '%s' successful (yn)?"
408 412 "$$ &Yes $$ &No") % fd, 1):
409 413 r = 1
410 414
411 415 if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
412 416 'changed' in _toollist(ui, tool, "check")):
413 417 if filecmp.cmp(a, back):
414 418 if ui.promptchoice(_(" output file %s appears unchanged\n"
415 419 "was merge successful (yn)?"
416 420 "$$ &Yes $$ &No") % fd, 1):
417 421 r = 1
418 422
419 423 if _toolbool(ui, tool, "fixeol"):
420 424 _matcheol(a, back)
421 425
422 426 if r:
423 427 if onfailure:
424 428 ui.warn(onfailure % fd)
425 429 else:
426 430 util.unlink(back)
427 431
428 432 util.unlink(b)
429 433 util.unlink(c)
430 434 return r
431 435
432 436 # tell hggettext to extract docstrings from these functions:
433 437 i18nfunctions = internals.values()
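To summarize the new labels handling in filemerge above: with ui.mergemarkers=basic the plain default labels are used and any caller-supplied labels are ignored, otherwise the caller's labels (or the defaults) are run through the marker template. A minimal sketch, with pick_labels and render as hypothetical stand-ins and the sample label strings purely illustrative:

_defaultconflictlabels = ['local', 'other']

def pick_labels(markerstyle, labels=None,
                render=lambda ls: ["%s (rendered)" % l for l in ls]):
    """Mirror filemerge's choice of conflict-marker labels (sketch only)."""
    if markerstyle == 'basic':
        return _defaultconflictlabels      # plain labels, template ignored
    if not labels:
        labels = _defaultconflictlabels    # caller passed nothing: fall back
    return render(labels)                  # stand-in for _formatlabels()

print(pick_labels('basic', ['working copy', 'merge rev']))
print(pick_labels('detailed', ['working copy', 'merge rev']))
print(pick_labels('detailed'))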
@@ -1,1188 +1,1189 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import struct
9 9
10 10 from node import nullid, nullrev, hex, bin
11 11 from i18n import _
12 12 from mercurial import obsolete
13 13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
14 14 import errno, os, shutil
15 15
16 16 _pack = struct.pack
17 17 _unpack = struct.unpack
18 18
19 19 def _droponode(data):
20 20 # used for compatibility for v1
21 21 bits = data.split("\0")
22 22 bits = bits[:-2] + bits[-1:]
23 23 return "\0".join(bits)
24 24
25 25 class mergestate(object):
26 26 '''track 3-way merge state of individual files
27 27
28 28 it is stored on disk when needed. Two files are used, one with an old
29 29 format, one with a new format. Both contain similar data, but the new
30 30 format can store new kinds of fields.
31 31
32 32 The current (new) format is a list of arbitrary records of the form:
33 33
34 34 [type][length][content]
35 35
36 36 Type is a single character, length is a 4-byte integer, and content is
37 37 an arbitrary sequence of bytes of length `length`.
38 38
39 39 Type should be a letter. Capital-letter records are mandatory: Mercurial
40 40 should abort if they are unknown. Lowercase records can be safely ignored.
41 41
42 42 Currently known records:
43 43
44 44 L: the node of the "local" part of the merge (hexified version)
45 45 O: the node of the "other" part of the merge (hexified version)
46 46 F: a file to be merged entry
47 47 '''
48 48 statepathv1 = "merge/state"
49 49 statepathv2 = "merge/state2"
50 50
51 51 def __init__(self, repo):
52 52 self._repo = repo
53 53 self._dirty = False
54 54 self._read()
55 55
56 56 def reset(self, node=None, other=None):
57 57 self._state = {}
58 58 self._local = None
59 59 self._other = None
60 60 if node:
61 61 self._local = node
62 62 self._other = other
63 63 shutil.rmtree(self._repo.join("merge"), True)
64 64 self._dirty = False
65 65
66 66 def _read(self):
67 67 """Analyse each record content to restore a serialized state from disk
68 68
69 69 This function processes the "record" entries produced by deserializing
70 70 the on-disk file.
71 71 """
72 72 self._state = {}
73 73 self._local = None
74 74 self._other = None
75 75 records = self._readrecords()
76 76 for rtype, record in records:
77 77 if rtype == 'L':
78 78 self._local = bin(record)
79 79 elif rtype == 'O':
80 80 self._other = bin(record)
81 81 elif rtype == "F":
82 82 bits = record.split("\0")
83 83 self._state[bits[0]] = bits[1:]
84 84 elif not rtype.islower():
85 85 raise util.Abort(_('unsupported merge state record: %s')
86 86 % rtype)
87 87 self._dirty = False
88 88
89 89 def _readrecords(self):
90 90 """Read merge state from disk and return a list of record (TYPE, data)
91 91
92 92 We read data from both v1 and v2 files and decide which one to use.
93 93
94 94 V1 has been used by versions prior to 2.9.1 and contains less data than
95 95 v2. We read both versions and check whether any data in v2 contradicts
96 96 v1. If there is no contradiction we can safely assume that both v1
97 97 and v2 were written at the same time and use the extra data in v2. If
98 98 there is a contradiction we ignore the v2 content, assuming an old version
99 99 of Mercurial overwrote the mergestate file and left a stale v2
100 100 file around.
101 101
102 102 returns a list of records [(TYPE, data), ...]"""
103 103 v1records = self._readrecordsv1()
104 104 v2records = self._readrecordsv2()
105 105 oldv2 = set() # old format version of v2 record
106 106 for rec in v2records:
107 107 if rec[0] == 'L':
108 108 oldv2.add(rec)
109 109 elif rec[0] == 'F':
110 110 # drop the onode data (not contained in v1)
111 111 oldv2.add(('F', _droponode(rec[1])))
112 112 for rec in v1records:
113 113 if rec not in oldv2:
114 114 # v1 file is newer than v2 file, use it
115 115 # we have to infer the "other" changeset of the merge
116 116 # we cannot do better than that with v1 of the format
117 117 mctx = self._repo[None].parents()[-1]
118 118 v1records.append(('O', mctx.hex()))
119 119 # add placeholder "other" file node information;
120 120 # nobody is using it yet so we do not need to fetch the data.
121 121 # if mctx was wrong, `mctx[bits[-2]]` may fail.
122 122 for idx, r in enumerate(v1records):
123 123 if r[0] == 'F':
124 124 bits = r[1].split("\0")
125 125 bits.insert(-2, '')
126 126 v1records[idx] = (r[0], "\0".join(bits))
127 127 return v1records
128 128 else:
129 129 return v2records
130 130
131 131 def _readrecordsv1(self):
132 132 """read on disk merge state for version 1 file
133 133
134 134 returns a list of records [(TYPE, data), ...]
135 135
136 136 Note: the "F" data from this file are one entry short
137 137 (no "other file node" entry)
138 138 """
139 139 records = []
140 140 try:
141 141 f = self._repo.opener(self.statepathv1)
142 142 for i, l in enumerate(f):
143 143 if i == 0:
144 144 records.append(('L', l[:-1]))
145 145 else:
146 146 records.append(('F', l[:-1]))
147 147 f.close()
148 148 except IOError, err:
149 149 if err.errno != errno.ENOENT:
150 150 raise
151 151 return records
152 152
153 153 def _readrecordsv2(self):
154 154 """read on disk merge state for version 2 file
155 155
156 156 returns a list of records [(TYPE, data), ...]
157 157 """
158 158 records = []
159 159 try:
160 160 f = self._repo.opener(self.statepathv2)
161 161 data = f.read()
162 162 off = 0
163 163 end = len(data)
164 164 while off < end:
165 165 rtype = data[off]
166 166 off += 1
167 167 length = _unpack('>I', data[off:(off + 4)])[0]
168 168 off += 4
169 169 record = data[off:(off + length)]
170 170 off += length
171 171 records.append((rtype, record))
172 172 f.close()
173 173 except IOError, err:
174 174 if err.errno != errno.ENOENT:
175 175 raise
176 176 return records
177 177
178 178 def active(self):
179 179 """Whether mergestate is active.
180 180
181 181 Returns True if there appears to be mergestate. This is a rough proxy
182 182 for "is a merge in progress."
183 183 """
184 184 # Check local variables before looking at filesystem for performance
185 185 # reasons.
186 186 return bool(self._local) or bool(self._state) or \
187 187 self._repo.opener.exists(self.statepathv1) or \
188 188 self._repo.opener.exists(self.statepathv2)
189 189
190 190 def commit(self):
191 191 """Write current state on disk (if necessary)"""
192 192 if self._dirty:
193 193 records = []
194 194 records.append(("L", hex(self._local)))
195 195 records.append(("O", hex(self._other)))
196 196 for d, v in self._state.iteritems():
197 197 records.append(("F", "\0".join([d] + v)))
198 198 self._writerecords(records)
199 199 self._dirty = False
200 200
201 201 def _writerecords(self, records):
202 202 """Write current state on disk (both v1 and v2)"""
203 203 self._writerecordsv1(records)
204 204 self._writerecordsv2(records)
205 205
206 206 def _writerecordsv1(self, records):
207 207 """Write current state on disk in a version 1 file"""
208 208 f = self._repo.opener(self.statepathv1, "w")
209 209 irecords = iter(records)
210 210 lrecords = irecords.next()
211 211 assert lrecords[0] == 'L'
212 212 f.write(hex(self._local) + "\n")
213 213 for rtype, data in irecords:
214 214 if rtype == "F":
215 215 f.write("%s\n" % _droponode(data))
216 216 f.close()
217 217
218 218 def _writerecordsv2(self, records):
219 219 """Write current state on disk in a version 2 file"""
220 220 f = self._repo.opener(self.statepathv2, "w")
221 221 for key, data in records:
222 222 assert len(key) == 1
223 223 format = ">sI%is" % len(data)
224 224 f.write(_pack(format, key, len(data), data))
225 225 f.close()
226 226
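The [type][length][content] framing described in the mergestate docstring can be exercised with a standalone round trip; pack_records and unpack_records are hypothetical helpers mirroring _writerecordsv2 and _readrecordsv2, written for byte strings:

import struct

def pack_records(records):
    """Serialize (type, data) pairs as [type][4-byte big-endian length][data]."""
    blob = b""
    for rtype, data in records:
        assert len(rtype) == 1
        blob += struct.pack(">sI%ds" % len(data), rtype, len(data), data)
    return blob

def unpack_records(blob):
    """Inverse of pack_records: walk the blob one record at a time."""
    records, off = [], 0
    while off < len(blob):
        rtype = blob[off:off + 1]
        off += 1
        length = struct.unpack(">I", blob[off:off + 4])[0]
        off += 4
        records.append((rtype, blob[off:off + length]))
        off += length
    return records

sample = [(b"L", b"deadbeef" * 5),                # local node, hexified
          (b"O", b"cafebabe" * 5),                # other node, hexified
          (b"F", b"a.txt\x00u\x00" + b"0" * 40)]  # one file entry
assert unpack_records(pack_records(sample)) == sample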
227 227 def add(self, fcl, fco, fca, fd):
228 228 """add a new (potentially?) conflicting file the merge state
229 229 fcl: file context for local,
230 230 fco: file context for remote,
231 231 fca: file context for ancestors,
232 232 fd: file path of the resulting merge.
233 233
234 234 note: also write the local version to the `.hg/merge` directory.
235 235 """
236 236 hash = util.sha1(fcl.path()).hexdigest()
237 237 self._repo.opener.write("merge/" + hash, fcl.data())
238 238 self._state[fd] = ['u', hash, fcl.path(),
239 239 fca.path(), hex(fca.filenode()),
240 240 fco.path(), hex(fco.filenode()),
241 241 fcl.flags()]
242 242 self._dirty = True
243 243
244 244 def __contains__(self, dfile):
245 245 return dfile in self._state
246 246
247 247 def __getitem__(self, dfile):
248 248 return self._state[dfile][0]
249 249
250 250 def __iter__(self):
251 251 return iter(sorted(self._state))
252 252
253 253 def files(self):
254 254 return self._state.keys()
255 255
256 256 def mark(self, dfile, state):
257 257 self._state[dfile][0] = state
258 258 self._dirty = True
259 259
260 260 def unresolved(self):
261 261 """Obtain the paths of unresolved files."""
262 262
263 263 for f, entry in self._state.items():
264 264 if entry[0] == 'u':
265 265 yield f
266 266
267 def resolve(self, dfile, wctx):
267 def resolve(self, dfile, wctx, labels=None):
268 268 """rerun merge process for file path `dfile`"""
269 269 if self[dfile] == 'r':
270 270 return 0
271 271 stateentry = self._state[dfile]
272 272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
273 273 octx = self._repo[self._other]
274 274 fcd = wctx[dfile]
275 275 fco = octx[ofile]
276 276 fca = self._repo.filectx(afile, fileid=anode)
277 277 # "premerge" x flags
278 278 flo = fco.flags()
279 279 fla = fca.flags()
280 280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
281 281 if fca.node() == nullid:
282 282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
283 283 afile)
284 284 elif flags == fla:
285 285 flags = flo
286 286 # restore local
287 287 f = self._repo.opener("merge/" + hash)
288 288 self._repo.wwrite(dfile, f.read(), flags)
289 289 f.close()
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
291 labels=labels)
291 292 if r is None:
292 293 # no real conflict
293 294 del self._state[dfile]
294 295 self._dirty = True
295 296 elif not r:
296 297 self.mark(dfile, 'r')
297 298 return r
298 299
299 300 def _checkunknownfile(repo, wctx, mctx, f):
300 301 return (not repo.dirstate._ignore(f)
301 302 and os.path.isfile(repo.wjoin(f))
302 303 and repo.wopener.audit.check(f)
303 304 and repo.dirstate.normalize(f) not in repo.dirstate
304 305 and mctx[f].cmp(wctx[f]))
305 306
306 307 def _checkunknown(repo, wctx, mctx):
307 308 "check for collisions between unknown files and files in mctx"
308 309
309 310 error = False
310 311 for f in mctx:
311 312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
312 313 error = True
313 314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
314 315 if error:
315 316 raise util.Abort(_("untracked files in working directory differ "
316 317 "from files in requested revision"))
317 318
318 319 def _forgetremoved(wctx, mctx, branchmerge):
319 320 """
320 321 Forget removed files
321 322
322 323 If we're jumping between revisions (as opposed to merging), and if
323 324 neither the working directory nor the target rev has the file,
324 325 then we need to remove it from the dirstate, to prevent the
325 326 dirstate from listing the file when it is no longer in the
326 327 manifest.
327 328
328 329 If we're merging, and the other revision has removed a file
329 330 that is not present in the working directory, we need to mark it
330 331 as removed.
331 332 """
332 333
333 334 actions = []
334 335 state = branchmerge and 'r' or 'f'
335 336 for f in wctx.deleted():
336 337 if f not in mctx:
337 338 actions.append((f, state, None, "forget deleted"))
338 339
339 340 if not branchmerge:
340 341 for f in wctx.removed():
341 342 if f not in mctx:
342 343 actions.append((f, "f", None, "forget removed"))
343 344
344 345 return actions
345 346
346 347 def _checkcollision(repo, wmf, actions):
347 348 # build provisional merged manifest up
348 349 pmmf = set(wmf)
349 350
350 351 def addop(f, args):
351 352 pmmf.add(f)
352 353 def removeop(f, args):
353 354 pmmf.discard(f)
354 355 def nop(f, args):
355 356 pass
356 357
357 358 def renamemoveop(f, args):
358 359 f2, flags = args
359 360 pmmf.discard(f2)
360 361 pmmf.add(f)
361 362 def renamegetop(f, args):
362 363 f2, flags = args
363 364 pmmf.add(f)
364 365 def mergeop(f, args):
365 366 f1, f2, fa, move, anc = args
366 367 if move:
367 368 pmmf.discard(f1)
368 369 pmmf.add(f)
369 370
370 371 opmap = {
371 372 "a": addop,
372 373 "dm": renamemoveop,
373 374 "dg": renamegetop,
374 375 "dr": nop,
375 376 "e": nop,
376 377 "k": nop,
377 378 "f": addop, # untracked file should be kept in working directory
378 379 "g": addop,
379 380 "m": mergeop,
380 381 "r": removeop,
381 382 "rd": nop,
382 383 "cd": addop,
383 384 "dc": addop,
384 385 }
385 386 for f, m, args, msg in actions:
386 387 op = opmap.get(m)
387 388 assert op, m
388 389 op(f, args)
389 390
390 391 # check case-folding collision in provisional merged manifest
391 392 foldmap = {}
392 393 for f in sorted(pmmf):
393 394 fold = util.normcase(f)
394 395 if fold in foldmap:
395 396 raise util.Abort(_("case-folding collision between %s and %s")
396 397 % (f, foldmap[fold]))
397 398 foldmap[fold] = f
398 399
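A standalone sketch of the case-folding collision check at the end of _checkcollision, with str.lower() standing in for util.normcase:

def check_casefold_collisions(paths):
    """Raise if two paths in the provisional manifest differ only by case."""
    foldmap = {}
    for f in sorted(paths):
        fold = f.lower()                 # util.normcase in Mercurial
        if fold in foldmap:
            raise ValueError("case-folding collision between %s and %s"
                             % (f, foldmap[fold]))
        foldmap[fold] = f

check_casefold_collisions({"a.txt", "b.txt"})        # fine
try:
    check_casefold_collisions({"README", "readme"})  # collides on case-insensitive FS
except ValueError as err:
    print(err)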
399 400 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
400 401 acceptremote, followcopies):
401 402 """
402 403 Merge p1 and p2 with ancestor pa and generate merge action list
403 404
404 405 branchmerge and force are as passed in to update
405 406 partial = function to filter file lists
406 407 acceptremote = accept the incoming changes without prompting
407 408 """
408 409
409 410 actions, copy, movewithdir = [], {}, {}
410 411
411 412 # manifests fetched in order are going to be faster, so prime the caches
412 413 [x.manifest() for x in
413 414 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
414 415
415 416 if followcopies:
416 417 ret = copies.mergecopies(repo, wctx, p2, pa)
417 418 copy, movewithdir, diverge, renamedelete = ret
418 419 for of, fl in diverge.iteritems():
419 420 actions.append((of, "dr", (fl,), "divergent renames"))
420 421 for of, fl in renamedelete.iteritems():
421 422 actions.append((of, "rd", (fl,), "rename and delete"))
422 423
423 424 repo.ui.note(_("resolving manifests\n"))
424 425 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
425 426 % (bool(branchmerge), bool(force), bool(partial)))
426 427 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
427 428
428 429 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
429 430 copied = set(copy.values())
430 431 copied.update(movewithdir.values())
431 432
432 433 if '.hgsubstate' in m1:
433 434 # check whether sub state is modified
434 435 for s in sorted(wctx.substate):
435 436 if wctx.sub(s).dirty():
436 437 m1['.hgsubstate'] += "+"
437 438 break
438 439
439 440 aborts = []
440 441 # Compare manifests
441 442 fdiff = dicthelpers.diff(m1, m2)
442 443 flagsdiff = m1.flagsdiff(m2)
443 444 diff12 = dicthelpers.join(fdiff, flagsdiff)
444 445
445 446 for f, (n12, fl12) in diff12.iteritems():
446 447 if n12:
447 448 n1, n2 = n12
448 449 else: # file contents didn't change, but flags did
449 450 n1 = n2 = m1.get(f, None)
450 451 if n1 is None:
451 452 # Since n1 == n2, the file isn't present in m2 either. This
452 453 # means that the file was removed or deleted locally and
453 454 # removed remotely, but that residual entries remain in flags.
454 455 # This can happen in manifests generated by workingctx.
455 456 continue
456 457 if fl12:
457 458 fl1, fl2 = fl12
458 459 else: # flags didn't change, file contents did
459 460 fl1 = fl2 = m1.flags(f)
460 461
461 462 if partial and not partial(f):
462 463 continue
463 464 if n1 and n2:
464 465 fa = f
465 466 a = ma.get(f, nullid)
466 467 if a == nullid:
467 468 fa = copy.get(f, f)
468 469 # Note: f as default is wrong - we can't really make a 3-way
469 470 # merge without an ancestor file.
470 471 fla = ma.flags(fa)
471 472 nol = 'l' not in fl1 + fl2 + fla
472 473 if n2 == a and fl2 == fla:
473 474 actions.append((f, "k", (), "keep")) # remote unchanged
474 475 elif n1 == a and fl1 == fla: # local unchanged - use remote
475 476 if n1 == n2: # optimization: keep local content
476 477 actions.append((f, "e", (fl2,), "update permissions"))
477 478 else:
478 479 actions.append((f, "g", (fl2,), "remote is newer"))
479 480 elif nol and n2 == a: # remote only changed 'x'
480 481 actions.append((f, "e", (fl2,), "update permissions"))
481 482 elif nol and n1 == a: # local only changed 'x'
482 483 actions.append((f, "g", (fl1,), "remote is newer"))
483 484 else: # both changed something
484 485 actions.append((f, "m", (f, f, fa, False, pa.node()),
485 486 "versions differ"))
486 487 elif f in copied: # files we'll deal with on m2 side
487 488 pass
488 489 elif n1 and f in movewithdir: # directory rename, move local
489 490 f2 = movewithdir[f]
490 491 actions.append((f2, "dm", (f, fl1),
491 492 "remote directory rename - move from " + f))
492 493 elif n1 and f in copy:
493 494 f2 = copy[f]
494 495 actions.append((f, "m", (f, f2, f2, False, pa.node()),
495 496 "local copied/moved from " + f2))
496 497 elif n1 and f in ma: # clean, a different, no remote
497 498 if n1 != ma[f]:
498 499 if acceptremote:
499 500 actions.append((f, "r", None, "remote delete"))
500 501 else:
501 502 actions.append((f, "cd", None, "prompt changed/deleted"))
502 503 elif n1[20:] == "a": # added, no remote
503 504 actions.append((f, "f", None, "remote deleted"))
504 505 else:
505 506 actions.append((f, "r", None, "other deleted"))
506 507 elif n2 and f in movewithdir:
507 508 f2 = movewithdir[f]
508 509 actions.append((f2, "dg", (f, fl2),
509 510 "local directory rename - get from " + f))
510 511 elif n2 and f in copy:
511 512 f2 = copy[f]
512 513 if f2 in m2:
513 514 actions.append((f, "m", (f2, f, f2, False, pa.node()),
514 515 "remote copied from " + f2))
515 516 else:
516 517 actions.append((f, "m", (f2, f, f2, True, pa.node()),
517 518 "remote moved from " + f2))
518 519 elif n2 and f not in ma:
519 520 # local unknown, remote created: the logic is described by the
520 521 # following table:
521 522 #
522 523 # force branchmerge different | action
523 524 # n * n | get
524 525 # n * y | abort
525 526 # y n * | get
526 527 # y y n | get
527 528 # y y y | merge
528 529 #
529 530 # Checking whether the files are different is expensive, so we
530 531 # don't do that when we can avoid it.
531 532 if force and not branchmerge:
532 533 actions.append((f, "g", (fl2,), "remote created"))
533 534 else:
534 535 different = _checkunknownfile(repo, wctx, p2, f)
535 536 if force and branchmerge and different:
536 537 # FIXME: This is wrong - f is not in ma ...
537 538 actions.append((f, "m", (f, f, f, False, pa.node()),
538 539 "remote differs from untracked local"))
539 540 elif not force and different:
540 541 aborts.append((f, "ud"))
541 542 else:
542 543 actions.append((f, "g", (fl2,), "remote created"))
543 544 elif n2 and n2 != ma[f]:
544 545 different = _checkunknownfile(repo, wctx, p2, f)
545 546 if not force and different:
546 547 aborts.append((f, "ud"))
547 548 else:
548 549 # if different: old untracked f may be overwritten and lost
549 550 if acceptremote:
550 551 actions.append((f, "g", (m2.flags(f),),
551 552 "remote recreating"))
552 553 else:
553 554 actions.append((f, "dc", (m2.flags(f),),
554 555 "prompt deleted/changed"))
555 556
556 557 for f, m in sorted(aborts):
557 558 if m == "ud":
558 559 repo.ui.warn(_("%s: untracked file differs\n") % f)
559 560 else: assert False, m
560 561 if aborts:
561 562 raise util.Abort(_("untracked files in working directory differ "
562 563 "from files in requested revision"))
563 564
564 565 if not util.checkcase(repo.path):
565 566 # check collision between files only in p2 for clean update
566 567 if (not branchmerge and
567 568 (force or not wctx.dirty(missing=True, branch=False))):
568 569 _checkcollision(repo, m2, [])
569 570 else:
570 571 _checkcollision(repo, m1, actions)
571 572
572 573 return actions
573 574
574 575 actionpriority = dict((m, p) for p, m in enumerate(
575 576 ['r', 'f', 'g', 'a', 'k', 'm', 'dm', 'dg', 'dr', 'cd', 'dc', 'rd', 'e']))
576 577
577 578 def actionkey(a):
578 579 return actionpriority[a[1]], a
579 580
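For illustration, the (file, action, args, message) tuples produced by manifestmerge can be ordered with the same priority table used by actionkey; the sample actions below are made up:

actionpriority = dict((m, p) for p, m in enumerate(
    ['r', 'f', 'g', 'a', 'k', 'm', 'dm', 'dg', 'dr', 'cd', 'dc', 'rd', 'e']))

def actionkey(a):
    # sort by action kind first (removes before gets before merges, ...),
    # then by the full tuple for a stable, deterministic order
    return actionpriority[a[1]], a

actions = [
    ("b.txt", "m", ("b.txt", "b.txt", "b.txt", False, None), "versions differ"),
    ("a.txt", "g", ("",), "remote is newer"),
    ("c.txt", "r", None, "other deleted"),
]
for f, m, args, msg in sorted(actions, key=actionkey):
    print(f, m, msg)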
580 581 def batchremove(repo, actions):
581 582 """apply removes to the working directory
582 583
583 584 yields tuples for progress updates
584 585 """
585 586 verbose = repo.ui.verbose
586 587 unlink = util.unlinkpath
587 588 wjoin = repo.wjoin
588 589 audit = repo.wopener.audit
589 590 i = 0
590 591 for f, m, args, msg in actions:
591 592 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
592 593 if True:
593 594 if verbose:
594 595 repo.ui.note(_("removing %s\n") % f)
595 596 audit(f)
596 597 try:
597 598 unlink(wjoin(f), ignoremissing=True)
598 599 except OSError, inst:
599 600 repo.ui.warn(_("update failed to remove %s: %s!\n") %
600 601 (f, inst.strerror))
601 602 if i == 100:
602 603 yield i, f
603 604 i = 0
604 605 i += 1
605 606 if i > 0:
606 607 yield i, f
607 608
608 609 def batchget(repo, mctx, actions):
609 610 """apply gets to the working directory
610 611
611 612 mctx is the context to get from
612 613
613 614 yields tuples for progress updates
614 615 """
615 616 verbose = repo.ui.verbose
616 617 fctx = mctx.filectx
617 618 wwrite = repo.wwrite
618 619 i = 0
619 620 for f, m, args, msg in actions:
620 621 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
621 622 if True:
622 623 if verbose:
623 624 repo.ui.note(_("getting %s\n") % f)
624 625 wwrite(f, fctx(f).data(), args[0])
625 626 if i == 100:
626 627 yield i, f
627 628 i = 0
628 629 i += 1
629 630 if i > 0:
630 631 yield i, f
631 632
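The remove and get loops above report progress in batches of roughly 100 files; a standalone generator sketch of that pattern, where batched_progress and the apply callback are hypothetical names:

def batched_progress(items, apply, batchsize=100):
    """Apply `apply` to each item, yielding (count, item) about once per batch."""
    i = 0
    for item in items:
        apply(item)
        if i == batchsize:
            yield i, item    # flush a full batch to the progress bar
            i = 0
        i += 1
    if i > 0:
        yield i, item        # report the final, possibly partial, batch

removed = []
for count, name in batched_progress(["f%d" % n for n in range(250)],
                                    removed.append, batchsize=100):
    print("removed %d files, last was %s" % (count, name))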
632 def applyupdates(repo, actions, wctx, mctx, overwrite):
633 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
633 634 """apply the merge action list to the working directory
634 635
635 636 wctx is the working copy context
636 637 mctx is the context to be merged into the working copy
637 638
638 639 Return a tuple of counts (updated, merged, removed, unresolved) that
639 640 describes how many files were affected by the update.
640 641 """
641 642
642 643 updated, merged, removed, unresolved = 0, 0, 0, 0
643 644 ms = mergestate(repo)
644 645 ms.reset(wctx.p1().node(), mctx.node())
645 646 moves = []
646 647 actions.sort(key=actionkey)
647 648
648 649 # prescan for merges
649 650 for a in actions:
650 651 f, m, args, msg = a
651 652 if m == "m": # merge
652 653 f1, f2, fa, move, anc = args
653 654 if f == '.hgsubstate': # merged internally
654 655 continue
655 656 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
656 657 fcl = wctx[f1]
657 658 fco = mctx[f2]
658 659 actx = repo[anc]
659 660 if fa in actx:
660 661 fca = actx[fa]
661 662 else:
662 663 fca = repo.filectx(f1, fileid=nullrev)
663 664 ms.add(fcl, fco, fca, f)
664 665 if f1 != f and move:
665 666 moves.append(f1)
666 667
667 668 audit = repo.wopener.audit
668 669 _updating = _('updating')
669 670 _files = _('files')
670 671 progress = repo.ui.progress
671 672
672 673 # remove renamed files after safely stored
673 674 for f in moves:
674 675 if os.path.lexists(repo.wjoin(f)):
675 676 repo.ui.debug("removing %s\n" % f)
676 677 audit(f)
677 678 util.unlinkpath(repo.wjoin(f))
678 679
679 680 numupdates = len([a for a in actions if a[1] != 'k'])
680 681 workeractions = [a for a in actions if a[1] in 'gr']
681 682 updateactions = [a for a in workeractions if a[1] == 'g']
682 683 updated = len(updateactions)
683 684 removeactions = [a for a in workeractions if a[1] == 'r']
684 685 removed = len(removeactions)
685 686 actions = [a for a in actions if a[1] not in 'gr']
686 687
687 688 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
688 689 if hgsub and hgsub[0] == 'r':
689 690 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
690 691
691 692 # remove in parallel (must come first)
692 693 z = 0
693 694 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), removeactions)
694 695 for i, item in prog:
695 696 z += i
696 697 progress(_updating, z, item=item, total=numupdates, unit=_files)
697 698
698 699 # get in parallel
699 700 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), updateactions)
700 701 for i, item in prog:
701 702 z += i
702 703 progress(_updating, z, item=item, total=numupdates, unit=_files)
703 704
704 705 if hgsub and hgsub[0] == 'g':
705 706 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
706 707
707 708 for f, m, args, msg in actions:
708 709
709 710 # forget (manifest only, just log it) (must come first)
710 711 if m == "f":
711 712 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
712 713 z += 1
713 714 progress(_updating, z, item=f, total=numupdates, unit=_files)
714 715
715 716 # re-add (manifest only, just log it)
716 717 elif m == "a":
717 718 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
718 719 z += 1
719 720 progress(_updating, z, item=f, total=numupdates, unit=_files)
720 721
721 722 # keep (noop, just log it)
722 723 elif m == "k":
723 724 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
724 725 # no progress
725 726
726 727 # merge
727 728 elif m == "m":
728 729 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
729 730 z += 1
730 731 progress(_updating, z, item=f, total=numupdates, unit=_files)
731 732 f1, f2, fa, move, anc = args
732 733 if f == '.hgsubstate': # subrepo states need updating
733 734 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
734 735 overwrite)
735 736 continue
736 737 audit(f)
737 r = ms.resolve(f, wctx)
738 r = ms.resolve(f, wctx, labels=labels)
738 739 if r is not None and r > 0:
739 740 unresolved += 1
740 741 else:
741 742 if r is None:
742 743 updated += 1
743 744 else:
744 745 merged += 1
745 746
746 747 # directory rename, move local
747 748 elif m == "dm":
748 749 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
749 750 z += 1
750 751 progress(_updating, z, item=f, total=numupdates, unit=_files)
751 752 f0, flags = args
752 753 repo.ui.note(_("moving %s to %s\n") % (f0, f))
753 754 audit(f)
754 755 repo.wwrite(f, wctx.filectx(f0).data(), flags)
755 756 util.unlinkpath(repo.wjoin(f0))
756 757 updated += 1
757 758
758 759 # local directory rename, get
759 760 elif m == "dg":
760 761 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
761 762 z += 1
762 763 progress(_updating, z, item=f, total=numupdates, unit=_files)
763 764 f0, flags = args
764 765 repo.ui.note(_("getting %s to %s\n") % (f0, f))
765 766 repo.wwrite(f, mctx.filectx(f0).data(), flags)
766 767 updated += 1
767 768
768 769 # divergent renames
769 770 elif m == "dr":
770 771 repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
771 772 z += 1
772 773 progress(_updating, z, item=f, total=numupdates, unit=_files)
773 774 fl, = args
774 775 repo.ui.warn(_("note: possible conflict - %s was renamed "
775 776 "multiple times to:\n") % f)
776 777 for nf in fl:
777 778 repo.ui.warn(" %s\n" % nf)
778 779
779 780 # rename and delete
780 781 elif m == "rd":
781 782 repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
782 783 z += 1
783 784 progress(_updating, z, item=f, total=numupdates, unit=_files)
784 785 fl, = args
785 786 repo.ui.warn(_("note: possible conflict - %s was deleted "
786 787 "and renamed to:\n") % f)
787 788 for nf in fl:
788 789 repo.ui.warn(" %s\n" % nf)
789 790
790 791 # exec
791 792 elif m == "e":
792 793 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
793 794 z += 1
794 795 progress(_updating, z, item=f, total=numupdates, unit=_files)
795 796 flags, = args
796 797 audit(f)
797 798 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
798 799 updated += 1
799 800
800 801 ms.commit()
801 802 progress(_updating, None, total=numupdates, unit=_files)
802 803
803 804 return updated, merged, removed, unresolved
804 805
805 806 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
806 807 acceptremote, followcopies):
807 808 "Calculate the actions needed to merge mctx into wctx using ancestors"
808 809
809 810 if len(ancestors) == 1: # default
810 811 actions = manifestmerge(repo, wctx, mctx, ancestors[0],
811 812 branchmerge, force,
812 813 partial, acceptremote, followcopies)
813 814
814 815 else: # only when merge.preferancestor=* - experimentalish code
815 816 repo.ui.status(
816 817 _("note: merging %s and %s using bids from ancestors %s\n") %
817 818 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
818 819
819 820 # Call for bids
820 821 fbids = {} # mapping filename to list of action bids
821 822 for ancestor in ancestors:
822 823 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
823 824 actions = manifestmerge(repo, wctx, mctx, ancestor,
824 825 branchmerge, force,
825 826 partial, acceptremote, followcopies)
826 827 for a in sorted(actions, key=lambda a: (a[1], a)):
827 828 f, m, args, msg = a
828 829 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
829 830 if f in fbids:
830 831 fbids[f].append(a)
831 832 else:
832 833 fbids[f] = [a]
833 834
834 835 # Pick the best bid for each file
835 836 repo.ui.note(_('\nauction for merging merge bids\n'))
836 837 actions = []
837 838 for f, bidsl in sorted(fbids.items()):
838 839 # Consensus?
839 840 a0 = bidsl[0]
840 841 if util.all(a == a0 for a in bidsl[1:]): # len(bidsl) is > 1
841 842 repo.ui.note(" %s: consensus for %s\n" % (f, a0[1]))
842 843 actions.append(a0)
843 844 continue
844 845 # Group bids by kind of action
845 846 bids = {}
846 847 for a in bidsl:
847 848 m = a[1]
848 849 if m in bids:
849 850 bids[m].append(a)
850 851 else:
851 852 bids[m] = [a]
852 853 # If keep is an option, just do it.
853 854 if "k" in bids:
854 855 repo.ui.note(" %s: picking 'keep' action\n" % f)
855 856 actions.append(bids["k"][0])
856 857 continue
857 858 # If all gets agree [how could they not?], just do it.
858 859 if "g" in bids:
859 860 ga0 = bids["g"][0]
860 861 if util.all(a == ga0 for a in bids["g"][1:]):
861 862 repo.ui.note(" %s: picking 'get' action\n" % f)
862 863 actions.append(ga0)
863 864 continue
864 865 # TODO: Consider other simple actions such as mode changes
865 866 # Handle inefficient democrazy.
866 867 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
867 868 for _f, m, args, msg in bidsl:
868 869 repo.ui.note(' %s -> %s\n' % (msg, m))
869 870 # Pick random action. TODO: Instead, prompt user when resolving
870 871 a0 = bidsl[0]
871 872 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
872 873 (f, a0[1]))
873 874 actions.append(a0)
874 875 continue
875 876 repo.ui.note(_('end of auction\n\n'))
876 877
877 878 # Filter out prompts.
878 879 newactions, prompts = [], []
879 880 for a in actions:
880 881 if a[1] in ("cd", "dc"):
881 882 prompts.append(a)
882 883 else:
883 884 newactions.append(a)
884 885 # Prompt and create actions. TODO: Move this towards resolve phase.
885 886 for f, m, args, msg in sorted(prompts):
886 887 if m == "cd":
887 888 if repo.ui.promptchoice(
888 889 _("local changed %s which remote deleted\n"
889 890 "use (c)hanged version or (d)elete?"
890 891 "$$ &Changed $$ &Delete") % f, 0):
891 892 newactions.append((f, "r", None, "prompt delete"))
892 893 else:
893 894 newactions.append((f, "a", None, "prompt keep"))
894 895 elif m == "dc":
895 896 flags, = args
896 897 if repo.ui.promptchoice(
897 898 _("remote changed %s which local deleted\n"
898 899 "use (c)hanged version or leave (d)eleted?"
899 900 "$$ &Changed $$ &Deleted") % f, 0) == 0:
900 901 newactions.append((f, "g", (flags,), "prompt recreating"))
901 902 else: assert False, m
902 903
903 904 if wctx.rev() is None:
904 905 newactions += _forgetremoved(wctx, mctx, branchmerge)
905 906
906 907 return newactions
907 908
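A compact standalone sketch of the bid auction calculateupdates runs when several ancestors are in play: unanimous bids win, otherwise a 'keep' bid wins, otherwise agreeing 'get' bids win, and anything else falls back to the first bid; pick_bid and the sample bids are hypothetical:

def pick_bid(bids):
    """Pick one action from per-ancestor bids for a single file (sketch)."""
    if all(b == bids[0] for b in bids[1:]):
        return bids[0]                                  # consensus
    bykind = {}
    for b in bids:
        bykind.setdefault(b[1], []).append(b)
    if "k" in bykind:                                   # keeping is always safe
        return bykind["k"][0]
    gets = bykind.get("g", [])
    if gets and all(b == gets[0] for b in gets[1:]):    # all gets agree
        return gets[0]
    return bids[0]                                      # ambiguous: pick one

print(pick_bid([("f", "k", (), "keep"), ("f", "g", ("",), "remote is newer")]))
print(pick_bid([("f", "g", ("",), "remote created"),
                ("f", "g", ("",), "remote created")]))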
908 909 def recordupdates(repo, actions, branchmerge):
909 910 "record merge actions to the dirstate"
910 911
911 912 for f, m, args, msg in actions:
912 913
913 914 # remove (must come first)
914 915 if m == "r": # remove
915 916 if branchmerge:
916 917 repo.dirstate.remove(f)
917 918 else:
918 919 repo.dirstate.drop(f)
919 920
920 921 # forget (must come first)
921 922 elif m == "f":
922 923 repo.dirstate.drop(f)
923 924
924 925 # re-add
925 926 elif m == "a":
926 927 if not branchmerge:
927 928 repo.dirstate.add(f)
928 929
929 930 # exec change
930 931 elif m == "e":
931 932 repo.dirstate.normallookup(f)
932 933
933 934 # keep
934 935 elif m == "k":
935 936 pass
936 937
937 938 # get
938 939 elif m == "g":
939 940 if branchmerge:
940 941 repo.dirstate.otherparent(f)
941 942 else:
942 943 repo.dirstate.normal(f)
943 944
944 945 # merge
945 946 elif m == "m":
946 947 f1, f2, fa, move, anc = args
947 948 if branchmerge:
948 949 # We've done a branch merge, mark this file as merged
949 950 # so that we properly record the merger later
950 951 repo.dirstate.merge(f)
951 952 if f1 != f2: # copy/rename
952 953 if move:
953 954 repo.dirstate.remove(f1)
954 955 if f1 != f:
955 956 repo.dirstate.copy(f1, f)
956 957 else:
957 958 repo.dirstate.copy(f2, f)
958 959 else:
959 960 # We've update-merged a locally modified file, so
960 961 # we set the dirstate to emulate a normal checkout
961 962 # of that file some time in the past. Thus our
962 963 # merge will appear as a normal local file
963 964 # modification.
964 965 if f2 == f: # file not locally copied/moved
965 966 repo.dirstate.normallookup(f)
966 967 if move:
967 968 repo.dirstate.drop(f1)
968 969
969 970 # directory rename, move local
970 971 elif m == "dm":
971 972 f0, flag = args
972 973 if f0 not in repo.dirstate:
973 974 # untracked file moved
974 975 continue
975 976 if branchmerge:
976 977 repo.dirstate.add(f)
977 978 repo.dirstate.remove(f0)
978 979 repo.dirstate.copy(f0, f)
979 980 else:
980 981 repo.dirstate.normal(f)
981 982 repo.dirstate.drop(f0)
982 983
983 984 # directory rename, get
984 985 elif m == "dg":
985 986 f0, flag = args
986 987 if branchmerge:
987 988 repo.dirstate.add(f)
988 989 repo.dirstate.copy(f0, f)
989 990 else:
990 991 repo.dirstate.normal(f)
991 992
992 993 def update(repo, node, branchmerge, force, partial, ancestor=None,
993 mergeancestor=False):
994 mergeancestor=False, labels=None):
994 995 """
995 996 Perform a merge between the working directory and the given node
996 997
997 998 node = the node to update to, or None if unspecified
998 999 branchmerge = whether to merge between branches
999 1000 force = whether to force branch merging or file overwriting
1000 1001 partial = a function to filter file lists (dirstate not updated)
1001 1002 mergeancestor = whether it is merging with an ancestor. If true,
1002 1003 we should accept the incoming changes for any prompts that occur.
1003 1004 If false, merging with an ancestor (fast-forward) is only allowed
1004 1005 between different named branches. This flag is used by the rebase
1005 1006 extension as a temporary fix and should be avoided in general.
1006 1007
1007 1008 The table below shows all the behaviors of the update command
1008 1009 given the -c and -C or no options, whether the working directory
1009 1010 is dirty, whether a revision is specified, and the relationship of
1010 1011 the parent rev to the target rev (linear, on the same named
1011 1012 branch, or on another named branch).
1012 1013
1013 1014 This logic is tested by test-update-branches.t.
1014 1015
1015 1016 -c -C dirty rev | linear same cross
1016 1017 n n n n | ok (1) x
1017 1018 n n n y | ok ok ok
1018 1019 n n y n | merge (2) (2)
1019 1020 n n y y | merge (3) (3)
1020 1021 n y * * | --- discard ---
1021 1022 y n y * | --- (4) ---
1022 1023 y n n * | --- ok ---
1023 1024 y y * * | --- (5) ---
1024 1025
1025 1026 x = can't happen
1026 1027 * = don't-care
1027 1028 1 = abort: not a linear update (merge or update --check to force update)
1028 1029 2 = abort: uncommitted changes (commit and merge, or update --clean to
1029 1030 discard changes)
1030 1031 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1031 1032 4 = abort: uncommitted changes (checked in commands.py)
1032 1033 5 = incompatible options (checked in commands.py)
1033 1034
1034 1035 Return the same tuple as applyupdates().
1035 1036 """
1036 1037
1037 1038 onode = node
1038 1039 wlock = repo.wlock()
1039 1040 try:
1040 1041 wc = repo[None]
1041 1042 pl = wc.parents()
1042 1043 p1 = pl[0]
1043 1044 pas = [None]
1044 1045 if ancestor:
1045 1046 pas = [repo[ancestor]]
1046 1047
1047 1048 if node is None:
1048 1049 # Here is where we should consider bookmarks, divergent bookmarks,
1049 1050 # foreground changesets (successors), and tip of current branch;
1050 1051 # but currently we are only checking the branch tips.
1051 1052 try:
1052 1053 node = repo.branchtip(wc.branch())
1053 1054 except error.RepoLookupError:
1054 1055 if wc.branch() == "default": # no default branch!
1055 1056 node = repo.lookup("tip") # update to tip
1056 1057 else:
1057 1058 raise util.Abort(_("branch %s not found") % wc.branch())
1058 1059
1059 1060 if p1.obsolete() and not p1.children():
1060 1061 # allow updating to successors
1061 1062 successors = obsolete.successorssets(repo, p1.node())
1062 1063
1063 1064 # behavior of certain cases is as follows,
1064 1065 #
1065 1066 # divergent changesets: update to highest rev, similar to what
1066 1067 # is currently done when there are more than one head
1067 1068 # (i.e. 'tip')
1068 1069 #
1069 1070 # replaced changesets: same as divergent except we know there
1070 1071 # is no conflict
1071 1072 #
1072 1073 # pruned changeset: no update is done; though, we could
1073 1074 # consider updating to the first non-obsolete parent,
1074 1075 # similar to what is current done for 'hg prune'
1075 1076
1076 1077 if successors:
1077 1078 # flatten the list here handles both divergent (len > 1)
1078 1079 # and the usual case (len = 1)
1079 1080 successors = [n for sub in successors for n in sub]
1080 1081
1081 1082 # get the max revision for the given successors set,
1082 1083 # i.e. the 'tip' of a set
1083 1084 node = repo.revs("max(%ln)", successors)[0]
1084 1085 pas = [p1]
1085 1086
1086 1087 overwrite = force and not branchmerge
1087 1088
1088 1089 p2 = repo[node]
1089 1090 if pas[0] is None:
1090 1091 if repo.ui.config("merge", "preferancestor") == '*':
1091 1092 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1092 1093 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1093 1094 else:
1094 1095 pas = [p1.ancestor(p2, warn=True)]
1095 1096
1096 1097 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1097 1098
1098 1099 ### check phase
1099 1100 if not overwrite and len(pl) > 1:
1100 1101 raise util.Abort(_("outstanding uncommitted merges"))
1101 1102 if branchmerge:
1102 1103 if pas == [p2]:
1103 1104 raise util.Abort(_("merging with a working directory ancestor"
1104 1105 " has no effect"))
1105 1106 elif pas == [p1]:
1106 1107 if not mergeancestor and p1.branch() == p2.branch():
1107 1108 raise util.Abort(_("nothing to merge"),
1108 1109 hint=_("use 'hg update' "
1109 1110 "or check 'hg heads'"))
1110 1111 if not force and (wc.files() or wc.deleted()):
1111 1112 raise util.Abort(_("uncommitted changes"),
1112 1113 hint=_("use 'hg status' to list changes"))
1113 1114 for s in sorted(wc.substate):
1114 1115 if wc.sub(s).dirty():
1115 1116 raise util.Abort(_("uncommitted changes in "
1116 1117 "subrepository '%s'") % s)
1117 1118
1118 1119 elif not overwrite:
1119 1120 if p1 == p2: # no-op update
1120 1121 # call the hooks and exit early
1121 1122 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1122 1123 repo.hook('update', parent1=xp2, parent2='', error=0)
1123 1124 return 0, 0, 0, 0
1124 1125
1125 1126 if pas not in ([p1], [p2]): # nonlinear
1126 1127 dirty = wc.dirty(missing=True)
1127 1128 if dirty or onode is None:
1128 1129 # Branching is a bit strange, to ensure we make the minimal
1129 1130 # number of calls to obsolete.background.
1130 1131 foreground = obsolete.foreground(repo, [p1.node()])
1131 1132 # note: the <node> variable contains a random identifier
1132 1133 if repo[node].node() in foreground:
1133 1134 pas = [p1] # allow updating to successors
1134 1135 elif dirty:
1135 1136 msg = _("uncommitted changes")
1136 1137 if onode is None:
1137 1138 hint = _("commit and merge, or update --clean to"
1138 1139 " discard changes")
1139 1140 else:
1140 1141 hint = _("commit or update --clean to discard"
1141 1142 " changes")
1142 1143 raise util.Abort(msg, hint=hint)
1143 1144 else: # node is none
1144 1145 msg = _("not a linear update")
1145 1146 hint = _("merge or update --check to force update")
1146 1147 raise util.Abort(msg, hint=hint)
1147 1148 else:
1148 1149 # Allow jumping branches if clean and specific rev given
1149 1150 pas = [p1]
1150 1151
1151 1152 followcopies = False
1152 1153 if overwrite:
1153 1154 pas = [wc]
1154 1155 elif pas == [p2]: # backwards
1155 1156 pas = [wc.p1()]
1156 1157 elif not branchmerge and not wc.dirty(missing=True):
1157 1158 pass
1158 1159 elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
1159 1160 followcopies = True
1160 1161
1161 1162 ### calculate phase
1162 1163 actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
1163 1164 partial, mergeancestor, followcopies)
1164 1165
1165 1166 ### apply phase
1166 1167 if not branchmerge: # just jump to the new rev
1167 1168 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1168 1169 if not partial:
1169 1170 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1170 1171 # note that we're in the middle of an update
1171 1172 repo.vfs.write('updatestate', p2.hex())
1172 1173
1173 stats = applyupdates(repo, actions, wc, p2, overwrite)
1174 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1174 1175
1175 1176 if not partial:
1176 1177 repo.setparents(fp1, fp2)
1177 1178 recordupdates(repo, actions, branchmerge)
1178 1179 # update completed, clear state
1179 1180 util.unlink(repo.join('updatestate'))
1180 1181
1181 1182 if not branchmerge:
1182 1183 repo.dirstate.setbranch(p2.branch())
1183 1184 finally:
1184 1185 wlock.release()
1185 1186
1186 1187 if not partial:
1187 1188 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1188 1189 return stats
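Finally, a hedged sketch of how a caller inside this Mercurial tree could thread conflict-marker labels through the parameter added by this change, from merge.update down through applyupdates and mergestate.resolve to filemerge.filemerge; update_with_labels is a hypothetical helper and the label strings are illustrative:

from mercurial import merge

def update_with_labels(repo, node, labels=None):
    """Run a branch merge, passing conflict-marker labels down the chain
    merge.update -> applyupdates -> mergestate.resolve -> filemerge.filemerge."""
    stats = merge.update(repo, node, branchmerge=True, force=False,
                         partial=None, labels=labels or ['local', 'other'])
    updated, merged, removed, unresolved = stats
    if unresolved:
        repo.ui.warn("%d files had unresolved conflicts\n" % unresolved)
    return stats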