merge: pass merge ancestor to calculateupdates as a list
Mads Kiilerich - r21081:ffd7b6ce default
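This changeset renames the ancestor argument of the largefiles wrapper around merge.calculateupdates from pa to pas and forwards it unchanged, so that the wrapper matches the new merge.calculateupdates signature, which takes its merge ancestors as a list. Only the rename is visible in the first hunk below; the list-wrapping implied by the commit message happens at the call sites. A hedged sketch of the calling convention (variable names are illustrative):

# before: a single ancestor changectx was threaded through
#   actions = merge.calculateupdates(repo, p1, p2, pa, branchmerge, force,
#                                    partial, acceptremote, followcopies)
# after: callers pass the ancestors as a list, e.g. [pa], and the largefiles
# wrapper simply forwards whatever list it receives
#   actions = merge.calculateupdates(repo, p1, p2, [pa], branchmerge, force,
#                                    partial, acceptremote, followcopies)
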
@@ -1,1164 +1,1164
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 archival, merge, pathutil, revset
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22 import basestore
23 23
24 24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25 25
26 26 def installnormalfilesmatchfn(manifest):
27 27 '''overrides scmutil.match so that the matcher it returns will ignore all
28 28 largefiles'''
29 29 oldmatch = None # for the closure
30 30 def overridematch(ctx, pats=[], opts={}, globbed=False,
31 31 default='relpath'):
32 32 match = oldmatch(ctx, pats, opts, globbed, default)
33 33 m = copy.copy(match)
34 34 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
35 35 manifest)
36 36 m._files = filter(notlfile, m._files)
37 37 m._fmap = set(m._files)
38 38 m._always = False
39 39 origmatchfn = m.matchfn
40 40 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
41 41 return m
42 42 oldmatch = installmatchfn(overridematch)
43 43
44 44 def installmatchfn(f):
45 45 oldmatch = scmutil.match
46 46 setattr(f, 'oldmatch', oldmatch)
47 47 scmutil.match = f
48 48 return oldmatch
49 49
50 50 def restorematchfn():
51 51 '''restores scmutil.match to what it was before installnormalfilesmatchfn
52 52 was called. no-op if scmutil.match is its original function.
53 53
54 54 Note that n calls to installnormalfilesmatchfn will require n calls to
55 55 restore matchfn to reverse'''
56 56 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
57 57
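The installmatchfn/restorematchfn pair above is used throughout the overrides below in a fixed pattern; a condensed sketch mirroring, e.g., overrideadd and overrideremove further down:

# installnormalfilesmatchfn(repo[None].manifest())  # scmutil.match now ignores largefiles
# result = orig(ui, repo, *pats, **opts)            # the wrapped command sees only normal files
# restorematchfn()                                  # reinstate the previous scmutil.match
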
58 58 def addlargefiles(ui, repo, *pats, **opts):
59 59 large = opts.pop('large', None)
60 60 lfsize = lfutil.getminsize(
61 61 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
62 62
63 63 lfmatcher = None
64 64 if lfutil.islfilesrepo(repo):
65 65 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
66 66 if lfpats:
67 67 lfmatcher = match_.match(repo.root, '', list(lfpats))
68 68
69 69 lfnames = []
70 70 m = scmutil.match(repo[None], pats, opts)
71 71 m.bad = lambda x, y: None
72 72 wctx = repo[None]
73 73 for f in repo.walk(m):
74 74 exact = m.exact(f)
75 75 lfile = lfutil.standin(f) in wctx
76 76 nfile = f in wctx
77 77 exists = lfile or nfile
78 78
79 79 # Don't warn the user when they attempt to add a normal tracked file.
80 80 # The normal add code will do that for us.
81 81 if exact and exists:
82 82 if lfile:
83 83 ui.warn(_('%s already a largefile\n') % f)
84 84 continue
85 85
86 86 if (exact or not exists) and not lfutil.isstandin(f):
87 87 wfile = repo.wjoin(f)
88 88
89 89 # In case the file was removed previously, but not committed
90 90 # (issue3507)
91 91 if not os.path.exists(wfile):
92 92 continue
93 93
94 94 abovemin = (lfsize and
95 95 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
96 96 if large or abovemin or (lfmatcher and lfmatcher(f)):
97 97 lfnames.append(f)
98 98 if ui.verbose or not exact:
99 99 ui.status(_('adding %s as a largefile\n') % m.rel(f))
100 100
101 101 bad = []
102 102 standins = []
103 103
104 104 # Need to lock, otherwise there could be a race condition between
105 105 # when standins are created and added to the repo.
106 106 wlock = repo.wlock()
107 107 try:
108 108 if not opts.get('dry_run'):
109 109 lfdirstate = lfutil.openlfdirstate(ui, repo)
110 110 for f in lfnames:
111 111 standinname = lfutil.standin(f)
112 112 lfutil.writestandin(repo, standinname, hash='',
113 113 executable=lfutil.getexecutable(repo.wjoin(f)))
114 114 standins.append(standinname)
115 115 if lfdirstate[f] == 'r':
116 116 lfdirstate.normallookup(f)
117 117 else:
118 118 lfdirstate.add(f)
119 119 lfdirstate.write()
120 120 bad += [lfutil.splitstandin(f)
121 121 for f in repo[None].add(standins)
122 122 if f in m.files()]
123 123 finally:
124 124 wlock.release()
125 125 return bad
126 126
127 127 def removelargefiles(ui, repo, *pats, **opts):
128 128 after = opts.get('after')
129 129 if not pats and not after:
130 130 raise util.Abort(_('no files specified'))
131 131 m = scmutil.match(repo[None], pats, opts)
132 132 try:
133 133 repo.lfstatus = True
134 134 s = repo.status(match=m, clean=True)
135 135 finally:
136 136 repo.lfstatus = False
137 137 manifest = repo[None].manifest()
138 138 modified, added, deleted, clean = [[f for f in list
139 139 if lfutil.standin(f) in manifest]
140 140 for list in [s[0], s[1], s[3], s[6]]]
141 141
142 142 def warn(files, msg):
143 143 for f in files:
144 144 ui.warn(msg % m.rel(f))
145 145 return int(len(files) > 0)
146 146
147 147 result = 0
148 148
149 149 if after:
150 150 remove, forget = deleted, []
151 151 result = warn(modified + added + clean,
152 152 _('not removing %s: file still exists\n'))
153 153 else:
154 154 remove, forget = deleted + clean, []
155 155 result = warn(modified, _('not removing %s: file is modified (use -f'
156 156 ' to force removal)\n'))
157 157 result = warn(added, _('not removing %s: file has been marked for add'
158 158 ' (use forget to undo)\n')) or result
159 159
160 160 for f in sorted(remove + forget):
161 161 if ui.verbose or not m.exact(f):
162 162 ui.status(_('removing %s\n') % m.rel(f))
163 163
164 164 # Need to lock because standin files are deleted then removed from the
165 165 # repository and we could race in-between.
166 166 wlock = repo.wlock()
167 167 try:
168 168 lfdirstate = lfutil.openlfdirstate(ui, repo)
169 169 for f in remove:
170 170 if not after:
171 171 # If this is being called by addremove, notify the user that we
172 172 # are removing the file.
173 173 if getattr(repo, "_isaddremove", False):
174 174 ui.status(_('removing %s\n') % f)
175 175 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
176 176 lfdirstate.remove(f)
177 177 lfdirstate.write()
178 178 forget = [lfutil.standin(f) for f in forget]
179 179 remove = [lfutil.standin(f) for f in remove]
180 180 repo[None].forget(forget)
181 181 # If this is being called by addremove, let the original addremove
182 182 # function handle this.
183 183 if not getattr(repo, "_isaddremove", False):
184 184 for f in remove:
185 185 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
186 186 repo[None].forget(remove)
187 187 finally:
188 188 wlock.release()
189 189
190 190 return result
191 191
192 192 # For overriding mercurial.hgweb.webcommands so that largefiles will
193 193 # appear at their right place in the manifests.
194 194 def decodepath(orig, path):
195 195 return lfutil.splitstandin(path) or path
196 196
197 197 # -- Wrappers: modify existing commands --------------------------------
198 198
199 199 # Add works by going through the files that the user wanted to add and
200 200 # checking if they should be added as largefiles. Then it makes a new
201 201 # matcher which matches only the normal files and runs the original
202 202 # version of add.
203 203 def overrideadd(orig, ui, repo, *pats, **opts):
204 204 normal = opts.pop('normal')
205 205 if normal:
206 206 if opts.get('large'):
207 207 raise util.Abort(_('--normal cannot be used with --large'))
208 208 return orig(ui, repo, *pats, **opts)
209 209 bad = addlargefiles(ui, repo, *pats, **opts)
210 210 installnormalfilesmatchfn(repo[None].manifest())
211 211 result = orig(ui, repo, *pats, **opts)
212 212 restorematchfn()
213 213
214 214 return (result == 1 or bad) and 1 or 0
215 215
216 216 def overrideremove(orig, ui, repo, *pats, **opts):
217 217 installnormalfilesmatchfn(repo[None].manifest())
218 218 result = orig(ui, repo, *pats, **opts)
219 219 restorematchfn()
220 220 return removelargefiles(ui, repo, *pats, **opts) or result
221 221
222 222 def overridestatusfn(orig, repo, rev2, **opts):
223 223 try:
224 224 repo._repo.lfstatus = True
225 225 return orig(repo, rev2, **opts)
226 226 finally:
227 227 repo._repo.lfstatus = False
228 228
229 229 def overridestatus(orig, ui, repo, *pats, **opts):
230 230 try:
231 231 repo.lfstatus = True
232 232 return orig(ui, repo, *pats, **opts)
233 233 finally:
234 234 repo.lfstatus = False
235 235
236 236 def overridedirty(orig, repo, ignoreupdate=False):
237 237 try:
238 238 repo._repo.lfstatus = True
239 239 return orig(repo, ignoreupdate)
240 240 finally:
241 241 repo._repo.lfstatus = False
242 242
243 243 def overridelog(orig, ui, repo, *pats, **opts):
244 244 def overridematch(ctx, pats=[], opts={}, globbed=False,
245 245 default='relpath'):
246 246 """Matcher that merges root directory with .hglf, suitable for log.
247 247 It is still possible to match .hglf directly.
248 248 For any listed files run log on the standin too.
249 249 matchfn tries both the given filename and with .hglf stripped.
250 250 """
251 251 match = oldmatch(ctx, pats, opts, globbed, default)
252 252 m = copy.copy(match)
253 253 for i in range(0, len(m._files)):
254 254 standin = lfutil.standin(m._files[i])
255 255 if standin in repo[ctx.node()]:
256 256 m._files[i] = standin
257 257 m._fmap = set(m._files)
258 258 m._always = False
259 259 origmatchfn = m.matchfn
260 260 def lfmatchfn(f):
261 261 lf = lfutil.splitstandin(f)
262 262 if lf is not None and origmatchfn(lf):
263 263 return True
264 264 r = origmatchfn(f)
265 265 return r
266 266 m.matchfn = lfmatchfn
267 267 return m
268 268 oldmatch = installmatchfn(overridematch)
269 269 try:
270 270 repo.lfstatus = True
271 271 return orig(ui, repo, *pats, **opts)
272 272 finally:
273 273 repo.lfstatus = False
274 274 restorematchfn()
275 275
276 276 def overrideverify(orig, ui, repo, *pats, **opts):
277 277 large = opts.pop('large', False)
278 278 all = opts.pop('lfa', False)
279 279 contents = opts.pop('lfc', False)
280 280
281 281 result = orig(ui, repo, *pats, **opts)
282 282 if large or all or contents:
283 283 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
284 284 return result
285 285
286 286 def overridedebugstate(orig, ui, repo, *pats, **opts):
287 287 large = opts.pop('large', False)
288 288 if large:
289 289 lfcommands.debugdirstate(ui, repo)
290 290 else:
291 291 orig(ui, repo, *pats, **opts)
292 292
293 293 # Override needs to refresh standins so that update's normal merge
294 294 # will go through properly. Then the other update hook (overriding repo.update)
295 295 # will get the new files. Filemerge is also overridden so that the merge
296 296 # will merge standins correctly.
297 297 def overrideupdate(orig, ui, repo, *pats, **opts):
298 298 lfdirstate = lfutil.openlfdirstate(ui, repo)
299 299 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
300 300 False, False)
301 301 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
302 302
303 303 # Need to lock between the standins getting updated and their
304 304 # largefiles getting updated
305 305 wlock = repo.wlock()
306 306 try:
307 307 if opts['check']:
308 308 mod = len(modified) > 0
309 309 for lfile in unsure:
310 310 standin = lfutil.standin(lfile)
311 311 if repo['.'][standin].data().strip() != \
312 312 lfutil.hashfile(repo.wjoin(lfile)):
313 313 mod = True
314 314 else:
315 315 lfdirstate.normal(lfile)
316 316 lfdirstate.write()
317 317 if mod:
318 318 raise util.Abort(_('uncommitted changes'))
319 319 # XXX handle removed differently
320 320 if not opts['clean']:
321 321 for lfile in unsure + modified + added:
322 322 lfutil.updatestandin(repo, lfutil.standin(lfile))
323 323 finally:
324 324 wlock.release()
325 325 return orig(ui, repo, *pats, **opts)
326 326
327 327 # Before starting the manifest merge, merge.updates will call
328 328 # _checkunknown to check if there are any files in the merged-in
329 329 # changeset that collide with unknown files in the working copy.
330 330 #
331 331 # The largefiles are seen as unknown, so this prevents us from merging
332 332 # in a file 'foo' if we already have a largefile with the same name.
333 333 #
334 334 # The overridden function filters the unknown files by removing any
335 335 # largefiles. This makes the merge proceed and we can then handle this
336 336 # case further in the overridden manifestmerge function below.
337 337 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
338 338 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
339 339 return False
340 340 return origfn(repo, wctx, mctx, f)
341 341
342 342 # The manifest merge handles conflicts on the manifest level. We want
343 343 # to handle changes in largefile-ness of files at this level too.
344 344 #
345 345 # The strategy is to run the original manifestmerge and then process
346 346 # the action list it outputs. There are two cases we need to deal with:
347 347 #
348 348 # 1. Normal file in p1, largefile in p2. Here the largefile is
349 349 # detected via its standin file, which will enter the working copy
350 350 # with a "get" action. It is not "merge" since the standin is all
351 351 # Mercurial is concerned with at this level -- the link to the
352 352 # existing normal file is not relevant here.
353 353 #
354 354 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
355 355 # since the largefile will be present in the working copy and
356 356 # different from the normal file in p2. Mercurial therefore
357 357 # triggers a merge action.
358 358 #
359 359 # In both cases, we prompt the user and emit new actions to either
360 360 # remove the standin (if the normal file was kept) or to remove the
361 361 # normal file and get the standin (if the largefile was kept). The
362 362 # default prompt answer is to use the largefile version since it was
363 363 # presumably changed on purpose.
364 364 #
365 365 # Finally, the merge.applyupdates function will then take care of
366 366 # writing the files into the working copy and lfcommands.updatelfiles
367 367 # will update the largefiles.
368 def overridecalculateupdates(origfn, repo, p1, p2, pa, branchmerge, force,
368 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
369 369 partial, acceptremote, followcopies):
370 370 overwrite = force and not branchmerge
371 actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
371 actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
372 372 acceptremote, followcopies)
373 373
374 374 if overwrite:
375 375 return actions
376 376
377 377 removes = set(a[0] for a in actions if a[1] == 'r')
378 378 processed = []
379 379
380 380 for action in actions:
381 381 f, m, args, msg = action
382 382
383 383 splitstandin = f and lfutil.splitstandin(f)
384 384 if (m == "g" and splitstandin is not None and
385 385 splitstandin in p1 and splitstandin not in removes):
386 386 # Case 1: normal file in the working copy, largefile in
387 387 # the second parent
388 388 lfile = splitstandin
389 389 standin = f
390 390 msg = _('remote turned local normal file %s into a largefile\n'
391 391 'use (l)argefile or keep (n)ormal file?'
392 392 '$$ &Largefile $$ &Normal file') % lfile
393 393 if repo.ui.promptchoice(msg, 0) == 0:
394 394 processed.append((lfile, "r", None, msg))
395 395 processed.append((standin, "g", (p2.flags(standin),), msg))
396 396 else:
397 397 processed.append((standin, "r", None, msg))
398 398 elif (m == "g" and
399 399 lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
400 400 # Case 2: largefile in the working copy, normal file in
401 401 # the second parent
402 402 standin = lfutil.standin(f)
403 403 lfile = f
404 404 msg = _('remote turned local largefile %s into a normal file\n'
405 405 'keep (l)argefile or use (n)ormal file?'
406 406 '$$ &Largefile $$ &Normal file') % lfile
407 407 if repo.ui.promptchoice(msg, 0) == 0:
408 408 processed.append((lfile, "r", None, msg))
409 409 else:
410 410 processed.append((standin, "r", None, msg))
411 411 processed.append((lfile, "g", (p2.flags(lfile),), msg))
412 412 else:
413 413 processed.append(action)
414 414
415 415 return processed
416 416
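Concretely, the two prompts above rewrite the original action into tuples of the form (file, action, args, msg). A hedged trace for a hypothetical largefile 'big.bin' with standin '.hglf/big.bin' (p2flags stands for the corresponding p2.flags(...) value):

# Case 1 (normal file local, largefile remote), answer "largefile":
#   ('big.bin',       'r', None,       msg)   # drop the normal file
#   ('.hglf/big.bin', 'g', (p2flags,), msg)   # fetch the standin from p2
# Case 1, answer "normal file":
#   ('.hglf/big.bin', 'r', None,       msg)   # discard the incoming standin
# Case 2 (largefile local, normal file remote), answer "largefile":
#   ('big.bin',       'r', None,       msg)   # skip the incoming normal file
# Case 2, answer "normal file":
#   ('.hglf/big.bin', 'r', None,       msg)   # drop the standin
#   ('big.bin',       'g', (p2flags,), msg)   # fetch the normal file from p2
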
417 417 # Override filemerge to prompt the user about how they wish to merge
418 418 # largefiles. This will handle identical edits without prompting the user.
419 419 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
420 420 if not lfutil.isstandin(orig):
421 421 return origfn(repo, mynode, orig, fcd, fco, fca)
422 422
423 423 ahash = fca.data().strip().lower()
424 424 dhash = fcd.data().strip().lower()
425 425 ohash = fco.data().strip().lower()
426 426 if (ohash != ahash and
427 427 ohash != dhash and
428 428 (dhash == ahash or
429 429 repo.ui.promptchoice(
430 430 _('largefile %s has a merge conflict\nancestor was %s\n'
431 431 'keep (l)ocal %s or\ntake (o)ther %s?'
432 432 '$$ &Local $$ &Other') %
433 433 (lfutil.splitstandin(orig), ahash, dhash, ohash),
434 434 0) == 1)):
435 435 repo.wwrite(fcd.path(), fco.data(), fco.flags())
436 436 return 0
437 437
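The hash comparison above amounts to a small decision table (ahash, dhash and ohash are the standin contents of ancestor, local and other):

# ohash == ahash                 -> other side unchanged: keep local, no prompt
# ohash == dhash                 -> both sides made the same change: keep local, no prompt
# dhash == ahash, ohash differs  -> only the other side changed: take other, no prompt
# all three differ               -> prompt; answering "(o)ther" writes the other
#                                   side's standin into the working copy via repo.wwrite
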
438 438 # Copy first changes the matchers to match standins instead of
439 439 # largefiles. Then it overrides util.copyfile; in that function it
440 440 # checks whether the destination largefile already exists. It also keeps a
441 441 # list of copied files so that the largefiles can be copied and the
442 442 # dirstate updated.
443 443 def overridecopy(orig, ui, repo, pats, opts, rename=False):
444 444 # doesn't remove largefile on rename
445 445 if len(pats) < 2:
446 446 # this isn't legal, let the original function deal with it
447 447 return orig(ui, repo, pats, opts, rename)
448 448
449 449 def makestandin(relpath):
450 450 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
451 451 return os.path.join(repo.wjoin(lfutil.standin(path)))
452 452
453 453 fullpats = scmutil.expandpats(pats)
454 454 dest = fullpats[-1]
455 455
456 456 if os.path.isdir(dest):
457 457 if not os.path.isdir(makestandin(dest)):
458 458 os.makedirs(makestandin(dest))
459 459 # This could copy both lfiles and normal files in one command,
460 460 # but we don't want to do that. First replace their matcher to
461 461 # only match normal files and run it, then replace it to just
462 462 # match largefiles and run it again.
463 463 nonormalfiles = False
464 464 nolfiles = False
465 465 try:
466 466 try:
467 467 installnormalfilesmatchfn(repo[None].manifest())
468 468 result = orig(ui, repo, pats, opts, rename)
469 469 except util.Abort, e:
470 470 if str(e) != _('no files to copy'):
471 471 raise e
472 472 else:
473 473 nonormalfiles = True
474 474 result = 0
475 475 finally:
476 476 restorematchfn()
477 477
478 478 # The first rename can cause our current working directory to be removed.
479 479 # In that case there is nothing left to copy/rename so just quit.
480 480 try:
481 481 repo.getcwd()
482 482 except OSError:
483 483 return result
484 484
485 485 try:
486 486 try:
487 487 # When we call orig below it creates the standins but we don't add
488 488 # them to the dir state until later so lock during that time.
489 489 wlock = repo.wlock()
490 490
491 491 manifest = repo[None].manifest()
492 492 oldmatch = None # for the closure
493 493 def overridematch(ctx, pats=[], opts={}, globbed=False,
494 494 default='relpath'):
495 495 newpats = []
496 496 # The patterns were previously mangled to add the standin
497 497 # directory; we need to remove that now
498 498 for pat in pats:
499 499 if match_.patkind(pat) is None and lfutil.shortname in pat:
500 500 newpats.append(pat.replace(lfutil.shortname, ''))
501 501 else:
502 502 newpats.append(pat)
503 503 match = oldmatch(ctx, newpats, opts, globbed, default)
504 504 m = copy.copy(match)
505 505 lfile = lambda f: lfutil.standin(f) in manifest
506 506 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
507 507 m._fmap = set(m._files)
508 508 m._always = False
509 509 origmatchfn = m.matchfn
510 510 m.matchfn = lambda f: (lfutil.isstandin(f) and
511 511 (f in manifest) and
512 512 origmatchfn(lfutil.splitstandin(f)) or
513 513 None)
514 514 return m
515 515 oldmatch = installmatchfn(overridematch)
516 516 listpats = []
517 517 for pat in pats:
518 518 if match_.patkind(pat) is not None:
519 519 listpats.append(pat)
520 520 else:
521 521 listpats.append(makestandin(pat))
522 522
523 523 try:
524 524 origcopyfile = util.copyfile
525 525 copiedfiles = []
526 526 def overridecopyfile(src, dest):
527 527 if (lfutil.shortname in src and
528 528 dest.startswith(repo.wjoin(lfutil.shortname))):
529 529 destlfile = dest.replace(lfutil.shortname, '')
530 530 if not opts['force'] and os.path.exists(destlfile):
531 531 raise IOError('',
532 532 _('destination largefile already exists'))
533 533 copiedfiles.append((src, dest))
534 534 origcopyfile(src, dest)
535 535
536 536 util.copyfile = overridecopyfile
537 537 result += orig(ui, repo, listpats, opts, rename)
538 538 finally:
539 539 util.copyfile = origcopyfile
540 540
541 541 lfdirstate = lfutil.openlfdirstate(ui, repo)
542 542 for (src, dest) in copiedfiles:
543 543 if (lfutil.shortname in src and
544 544 dest.startswith(repo.wjoin(lfutil.shortname))):
545 545 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
546 546 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
547 547 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
548 548 if not os.path.isdir(destlfiledir):
549 549 os.makedirs(destlfiledir)
550 550 if rename:
551 551 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
552 552 lfdirstate.remove(srclfile)
553 553 else:
554 554 util.copyfile(repo.wjoin(srclfile),
555 555 repo.wjoin(destlfile))
556 556
557 557 lfdirstate.add(destlfile)
558 558 lfdirstate.write()
559 559 except util.Abort, e:
560 560 if str(e) != _('no files to copy'):
561 561 raise e
562 562 else:
563 563 nolfiles = True
564 564 finally:
565 565 restorematchfn()
566 566 wlock.release()
567 567
568 568 if nolfiles and nonormalfiles:
569 569 raise util.Abort(_('no files to copy'))
570 570
571 571 return result
572 572
573 573 # When the user calls revert, we have to be careful to not revert any
574 574 # changes to other largefiles accidentally. This means we have to keep
575 575 # track of the largefiles that are being reverted so we only pull down
576 576 # the necessary largefiles.
577 577 #
578 578 # Standins are only updated (to match the hash of largefiles) before
579 579 # commits. Update the standins then run the original revert, changing
580 580 # the matcher to hit standins instead of largefiles. Based on the
581 581 # resulting standins update the largefiles. Then return the standins
582 582 # to their proper state
583 583 def overriderevert(orig, ui, repo, *pats, **opts):
584 584 # Because we put the standins in a bad state (by updating them)
585 585 # and then return them to a correct state we need to lock to
586 586 # prevent others from changing them in their incorrect state.
587 587 wlock = repo.wlock()
588 588 try:
589 589 lfdirstate = lfutil.openlfdirstate(ui, repo)
590 590 (modified, added, removed, missing, unknown, ignored, clean) = \
591 591 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
592 592 lfdirstate.write()
593 593 for lfile in modified:
594 594 lfutil.updatestandin(repo, lfutil.standin(lfile))
595 595 for lfile in missing:
596 596 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
597 597 os.unlink(repo.wjoin(lfutil.standin(lfile)))
598 598
599 599 try:
600 600 ctx = scmutil.revsingle(repo, opts.get('rev'))
601 601 oldmatch = None # for the closure
602 602 def overridematch(ctx, pats=[], opts={}, globbed=False,
603 603 default='relpath'):
604 604 match = oldmatch(ctx, pats, opts, globbed, default)
605 605 m = copy.copy(match)
606 606 def tostandin(f):
607 607 if lfutil.standin(f) in ctx:
608 608 return lfutil.standin(f)
609 609 elif lfutil.standin(f) in repo[None]:
610 610 return None
611 611 return f
612 612 m._files = [tostandin(f) for f in m._files]
613 613 m._files = [f for f in m._files if f is not None]
614 614 m._fmap = set(m._files)
615 615 m._always = False
616 616 origmatchfn = m.matchfn
617 617 def matchfn(f):
618 618 if lfutil.isstandin(f):
619 619 # We need to keep track of what largefiles are being
620 620 # matched so we know which ones to update later --
621 621 # otherwise we accidentally revert changes to other
622 622 # largefiles. This is repo-specific, so duckpunch the
623 623 # repo object to keep the list of largefiles for us
624 624 # later.
625 625 if origmatchfn(lfutil.splitstandin(f)) and \
626 626 (f in repo[None] or f in ctx):
627 627 lfileslist = getattr(repo, '_lfilestoupdate', [])
628 628 lfileslist.append(lfutil.splitstandin(f))
629 629 repo._lfilestoupdate = lfileslist
630 630 return True
631 631 else:
632 632 return False
633 633 return origmatchfn(f)
634 634 m.matchfn = matchfn
635 635 return m
636 636 oldmatch = installmatchfn(overridematch)
637 637 scmutil.match
638 638 matches = overridematch(repo[None], pats, opts)
639 639 orig(ui, repo, *pats, **opts)
640 640 finally:
641 641 restorematchfn()
642 642 lfileslist = getattr(repo, '_lfilestoupdate', [])
643 643 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
644 644 printmessage=False)
645 645
646 646 # empty out the largefiles list so we start fresh next time
647 647 repo._lfilestoupdate = []
648 648 for lfile in modified:
649 649 if lfile in lfileslist:
650 650 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
651 651 in repo['.']:
652 652 lfutil.writestandin(repo, lfutil.standin(lfile),
653 653 repo['.'][lfile].data().strip(),
654 654 'x' in repo['.'][lfile].flags())
655 655 lfdirstate = lfutil.openlfdirstate(ui, repo)
656 656 for lfile in added:
657 657 standin = lfutil.standin(lfile)
658 658 if standin not in ctx and (standin in matches or opts.get('all')):
659 659 if lfile in lfdirstate:
660 660 lfdirstate.drop(lfile)
661 661 util.unlinkpath(repo.wjoin(standin))
662 662 lfdirstate.write()
663 663 finally:
664 664 wlock.release()
665 665
666 666 def hgupdaterepo(orig, repo, node, overwrite):
667 667 if not overwrite:
668 668 # Only call updatelfiles on the standins that have changed to save time
669 669 oldstandins = lfutil.getstandinsstate(repo)
670 670
671 671 result = orig(repo, node, overwrite)
672 672
673 673 filelist = None
674 674 if not overwrite:
675 675 newstandins = lfutil.getstandinsstate(repo)
676 676 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
677 677 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
678 678 return result
679 679
680 680 def hgmerge(orig, repo, node, force=None, remind=True):
681 681 result = orig(repo, node, force, remind)
682 682 lfcommands.updatelfiles(repo.ui, repo)
683 683 return result
684 684
685 685 # When we rebase a repository with remotely changed largefiles, we need to
686 686 # take some extra care so that the largefiles are correctly updated in the
687 687 # working copy
688 688 def overridepull(orig, ui, repo, source=None, **opts):
689 689 revsprepull = len(repo)
690 690 if not source:
691 691 source = 'default'
692 692 repo.lfpullsource = source
693 693 if opts.get('rebase', False):
694 694 repo._isrebasing = True
695 695 try:
696 696 if opts.get('update'):
697 697 del opts['update']
698 698 ui.debug('--update and --rebase are not compatible, ignoring '
699 699 'the update flag\n')
700 700 del opts['rebase']
701 701 origpostincoming = commands.postincoming
702 702 def _dummy(*args, **kwargs):
703 703 pass
704 704 commands.postincoming = _dummy
705 705 try:
706 706 result = commands.pull(ui, repo, source, **opts)
707 707 finally:
708 708 commands.postincoming = origpostincoming
709 709 revspostpull = len(repo)
710 710 if revspostpull > revsprepull:
711 711 result = result or rebase.rebase(ui, repo)
712 712 finally:
713 713 repo._isrebasing = False
714 714 else:
715 715 result = orig(ui, repo, source, **opts)
716 716 revspostpull = len(repo)
717 717 lfrevs = opts.get('lfrev', [])
718 718 if opts.get('all_largefiles'):
719 719 lfrevs.append('pulled()')
720 720 if lfrevs and revspostpull > revsprepull:
721 721 numcached = 0
722 722 repo.firstpulled = revsprepull # for pulled() revset expression
723 723 try:
724 724 for rev in scmutil.revrange(repo, lfrevs):
725 725 ui.note(_('pulling largefiles for revision %s\n') % rev)
726 726 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
727 727 numcached += len(cached)
728 728 finally:
729 729 del repo.firstpulled
730 730 ui.status(_("%d largefiles cached\n") % numcached)
731 731 return result
732 732
733 733 def pulledrevsetsymbol(repo, subset, x):
734 734 """``pulled()``
735 735 Changesets that have just been pulled.
736 736
737 737 Only available with largefiles from pull --lfrev expressions.
738 738
739 739 .. container:: verbose
740 740
741 741 Some examples:
742 742
743 743 - pull largefiles for all new changesets::
744 744
745 745 hg pull --lfrev "pulled()"
746 746
747 747 - pull largefiles for all new branch heads::
748 748
749 749 hg pull --lfrev "head(pulled()) and not closed()"
750 750
751 751 """
752 752
753 753 try:
754 754 firstpulled = repo.firstpulled
755 755 except AttributeError:
756 756 raise util.Abort(_("pulled() only available in --lfrev"))
757 757 return revset.baseset([r for r in subset if r >= firstpulled])
758 758
759 759 def overrideclone(orig, ui, source, dest=None, **opts):
760 760 d = dest
761 761 if d is None:
762 762 d = hg.defaultdest(source)
763 763 if opts.get('all_largefiles') and not hg.islocal(d):
764 764 raise util.Abort(_(
765 765 '--all-largefiles is incompatible with non-local destination %s' %
766 766 d))
767 767
768 768 return orig(ui, source, dest, **opts)
769 769
770 770 def hgclone(orig, ui, opts, *args, **kwargs):
771 771 result = orig(ui, opts, *args, **kwargs)
772 772
773 773 if result is not None:
774 774 sourcerepo, destrepo = result
775 775 repo = destrepo.local()
776 776
777 777 # Caching is implicitly limited by the 'rev' option, since the dest repo was
778 778 # truncated at that point. The user may expect a download count with
779 779 # this option, so attempt the download whether or not this is a largefile repo.
780 780 if opts.get('all_largefiles'):
781 781 success, missing = lfcommands.downloadlfiles(ui, repo, None)
782 782
783 783 if missing != 0:
784 784 return None
785 785
786 786 return result
787 787
788 788 def overriderebase(orig, ui, repo, **opts):
789 789 repo._isrebasing = True
790 790 try:
791 791 return orig(ui, repo, **opts)
792 792 finally:
793 793 repo._isrebasing = False
794 794
795 795 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
796 796 prefix=None, mtime=None, subrepos=None):
797 797 # No need to lock because we are only reading history and
798 798 # largefile caches, neither of which are modified.
799 799 lfcommands.cachelfiles(repo.ui, repo, node)
800 800
801 801 if kind not in archival.archivers:
802 802 raise util.Abort(_("unknown archive type '%s'") % kind)
803 803
804 804 ctx = repo[node]
805 805
806 806 if kind == 'files':
807 807 if prefix:
808 808 raise util.Abort(
809 809 _('cannot give prefix when archiving to files'))
810 810 else:
811 811 prefix = archival.tidyprefix(dest, kind, prefix)
812 812
813 813 def write(name, mode, islink, getdata):
814 814 if matchfn and not matchfn(name):
815 815 return
816 816 data = getdata()
817 817 if decode:
818 818 data = repo.wwritedata(name, data)
819 819 archiver.addfile(prefix + name, mode, islink, data)
820 820
821 821 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
822 822
823 823 if repo.ui.configbool("ui", "archivemeta", True):
824 824 def metadata():
825 825 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
826 826 hex(repo.changelog.node(0)), hex(node), ctx.branch())
827 827
828 828 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
829 829 if repo.tagtype(t) == 'global')
830 830 if not tags:
831 831 repo.ui.pushbuffer()
832 832 opts = {'template': '{latesttag}\n{latesttagdistance}',
833 833 'style': '', 'patch': None, 'git': None}
834 834 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
835 835 ltags, dist = repo.ui.popbuffer().split('\n')
836 836 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
837 837 tags += 'latesttagdistance: %s\n' % dist
838 838
839 839 return base + tags
840 840
841 841 write('.hg_archival.txt', 0644, False, metadata)
842 842
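For orientation, the metadata() callback above yields an .hg_archival.txt of roughly this shape (hashes and tag values made up):

# repo: 1234567890abcdef1234567890abcdef12345678
# node: abcdef1234567890abcdef1234567890abcdef12
# branch: default
# latesttag: 2.9
# latesttagdistance: 42
#
# (when the changeset carries global tags, 'tag: <name>' lines appear instead of
# the latesttag/latesttagdistance pair)
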
843 843 for f in ctx:
844 844 ff = ctx.flags(f)
845 845 getdata = ctx[f].data
846 846 if lfutil.isstandin(f):
847 847 path = lfutil.findfile(repo, getdata().strip())
848 848 if path is None:
849 849 raise util.Abort(
850 850 _('largefile %s not found in repo store or system cache')
851 851 % lfutil.splitstandin(f))
852 852 f = lfutil.splitstandin(f)
853 853
854 854 def getdatafn():
855 855 fd = None
856 856 try:
857 857 fd = open(path, 'rb')
858 858 return fd.read()
859 859 finally:
860 860 if fd:
861 861 fd.close()
862 862
863 863 getdata = getdatafn
864 864 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
865 865
866 866 if subrepos:
867 867 for subpath in sorted(ctx.substate):
868 868 sub = ctx.sub(subpath)
869 869 submatch = match_.narrowmatcher(subpath, matchfn)
870 870 sub.archive(repo.ui, archiver, prefix, submatch)
871 871
872 872 archiver.done()
873 873
874 874 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
875 875 repo._get(repo._state + ('hg',))
876 876 rev = repo._state[1]
877 877 ctx = repo._repo[rev]
878 878
879 879 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
880 880
881 881 def write(name, mode, islink, getdata):
882 882 # At this point, the standin has been replaced with the largefile name,
883 883 # so the normal matcher works here without the lfutil variants.
884 884 if match and not match(f):
885 885 return
886 886 data = getdata()
887 887
888 888 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
889 889
890 890 for f in ctx:
891 891 ff = ctx.flags(f)
892 892 getdata = ctx[f].data
893 893 if lfutil.isstandin(f):
894 894 path = lfutil.findfile(repo._repo, getdata().strip())
895 895 if path is None:
896 896 raise util.Abort(
897 897 _('largefile %s not found in repo store or system cache')
898 898 % lfutil.splitstandin(f))
899 899 f = lfutil.splitstandin(f)
900 900
901 901 def getdatafn():
902 902 fd = None
903 903 try:
904 904 fd = open(os.path.join(prefix, path), 'rb')
905 905 return fd.read()
906 906 finally:
907 907 if fd:
908 908 fd.close()
909 909
910 910 getdata = getdatafn
911 911
912 912 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
913 913
914 914 for subpath in sorted(ctx.substate):
915 915 sub = ctx.sub(subpath)
916 916 submatch = match_.narrowmatcher(subpath, match)
917 917 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
918 918 submatch)
919 919
920 920 # If a largefile is modified, the change is not reflected in its
921 921 # standin until a commit. cmdutil.bailifchanged() raises an exception
922 922 # if the repo has uncommitted changes. Wrap it to also check if
923 923 # largefiles were changed. This is used by bisect and backout.
924 924 def overridebailifchanged(orig, repo):
925 925 orig(repo)
926 926 repo.lfstatus = True
927 927 modified, added, removed, deleted = repo.status()[:4]
928 928 repo.lfstatus = False
929 929 if modified or added or removed or deleted:
930 930 raise util.Abort(_('uncommitted changes'))
931 931
932 932 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
933 933 def overridefetch(orig, ui, repo, *pats, **opts):
934 934 repo.lfstatus = True
935 935 modified, added, removed, deleted = repo.status()[:4]
936 936 repo.lfstatus = False
937 937 if modified or added or removed or deleted:
938 938 raise util.Abort(_('uncommitted changes'))
939 939 return orig(ui, repo, *pats, **opts)
940 940
941 941 def overrideforget(orig, ui, repo, *pats, **opts):
942 942 installnormalfilesmatchfn(repo[None].manifest())
943 943 result = orig(ui, repo, *pats, **opts)
944 944 restorematchfn()
945 945 m = scmutil.match(repo[None], pats, opts)
946 946
947 947 try:
948 948 repo.lfstatus = True
949 949 s = repo.status(match=m, clean=True)
950 950 finally:
951 951 repo.lfstatus = False
952 952 forget = sorted(s[0] + s[1] + s[3] + s[6])
953 953 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
954 954
955 955 for f in forget:
956 956 if lfutil.standin(f) not in repo.dirstate and not \
957 957 os.path.isdir(m.rel(lfutil.standin(f))):
958 958 ui.warn(_('not removing %s: file is already untracked\n')
959 959 % m.rel(f))
960 960 result = 1
961 961
962 962 for f in forget:
963 963 if ui.verbose or not m.exact(f):
964 964 ui.status(_('removing %s\n') % m.rel(f))
965 965
966 966 # Need to lock because standin files are deleted then removed from the
967 967 # repository and we could race in-between.
968 968 wlock = repo.wlock()
969 969 try:
970 970 lfdirstate = lfutil.openlfdirstate(ui, repo)
971 971 for f in forget:
972 972 if lfdirstate[f] == 'a':
973 973 lfdirstate.drop(f)
974 974 else:
975 975 lfdirstate.remove(f)
976 976 lfdirstate.write()
977 977 standins = [lfutil.standin(f) for f in forget]
978 978 for f in standins:
979 979 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
980 980 repo[None].forget(standins)
981 981 finally:
982 982 wlock.release()
983 983
984 984 return result
985 985
986 986 def outgoinghook(ui, repo, other, opts, missing):
987 987 if opts.pop('large', None):
988 988 toupload = set()
989 989 lfutil.getlfilestoupload(repo, missing,
990 990 lambda fn, lfhash: toupload.add(fn))
991 991 if not toupload:
992 992 ui.status(_('largefiles: no files to upload\n'))
993 993 else:
994 994 ui.status(_('largefiles to upload:\n'))
995 995 for file in sorted(toupload):
996 996 ui.status(lfutil.splitstandin(file) + '\n')
997 997 ui.status('\n')
998 998
999 999 def summaryremotehook(ui, repo, opts, changes):
1000 1000 largeopt = opts.get('large', False)
1001 1001 if changes is None:
1002 1002 if largeopt:
1003 1003 return (False, True) # only outgoing check is needed
1004 1004 else:
1005 1005 return (False, False)
1006 1006 elif largeopt:
1007 1007 url, branch, peer, outgoing = changes[1]
1008 1008 if peer is None:
1009 1009 # i18n: column positioning for "hg summary"
1010 1010 ui.status(_('largefiles: (no remote repo)\n'))
1011 1011 return
1012 1012
1013 1013 toupload = set()
1014 1014 lfutil.getlfilestoupload(repo, outgoing.missing,
1015 1015 lambda fn, lfhash: toupload.add(fn))
1016 1016 if not toupload:
1017 1017 # i18n: column positioning for "hg summary"
1018 1018 ui.status(_('largefiles: (no files to upload)\n'))
1019 1019 else:
1020 1020 # i18n: column positioning for "hg summary"
1021 1021 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1022 1022
1023 1023 def overridesummary(orig, ui, repo, *pats, **opts):
1024 1024 try:
1025 1025 repo.lfstatus = True
1026 1026 orig(ui, repo, *pats, **opts)
1027 1027 finally:
1028 1028 repo.lfstatus = False
1029 1029
1030 1030 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1031 1031 similarity=None):
1032 1032 if not lfutil.islfilesrepo(repo):
1033 1033 return orig(repo, pats, opts, dry_run, similarity)
1034 1034 # Get the list of missing largefiles so we can remove them
1035 1035 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1036 1036 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1037 1037 False, False)
1038 1038 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1039 1039
1040 1040 # Call into the normal remove code, but leave the removal of the standins
1041 1041 # to the original addremove. Monkey patching here makes sure
1042 1042 # we don't remove the standins in the largefiles code, preventing a very
1043 1043 # confused state later.
1044 1044 if missing:
1045 1045 m = [repo.wjoin(f) for f in missing]
1046 1046 repo._isaddremove = True
1047 1047 removelargefiles(repo.ui, repo, *m, **opts)
1048 1048 repo._isaddremove = False
1049 1049 # Call into the normal add code, and any files that *should* be added as
1050 1050 # largefiles will be
1051 1051 addlargefiles(repo.ui, repo, *pats, **opts)
1052 1052 # Now that we've handled largefiles, hand off to the original addremove
1053 1053 # function to take care of the rest. Make sure it doesn't do anything with
1054 1054 # largefiles by installing a matcher that will ignore them.
1055 1055 installnormalfilesmatchfn(repo[None].manifest())
1056 1056 result = orig(repo, pats, opts, dry_run, similarity)
1057 1057 restorematchfn()
1058 1058 return result
1059 1059
1060 1060 # Calling purge with --all will cause the largefiles to be deleted.
1061 1061 # Override repo.status to prevent this from happening.
1062 1062 def overridepurge(orig, ui, repo, *dirs, **opts):
1063 1063 # XXX large file status is buggy when used on a repo proxy.
1064 1064 # XXX this needs to be investigated.
1065 1065 repo = repo.unfiltered()
1066 1066 oldstatus = repo.status
1067 1067 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1068 1068 clean=False, unknown=False, listsubrepos=False):
1069 1069 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1070 1070 listsubrepos)
1071 1071 lfdirstate = lfutil.openlfdirstate(ui, repo)
1072 1072 modified, added, removed, deleted, unknown, ignored, clean = r
1073 1073 unknown = [f for f in unknown if lfdirstate[f] == '?']
1074 1074 ignored = [f for f in ignored if lfdirstate[f] == '?']
1075 1075 return modified, added, removed, deleted, unknown, ignored, clean
1076 1076 repo.status = overridestatus
1077 1077 orig(ui, repo, *dirs, **opts)
1078 1078 repo.status = oldstatus
1079 1079
1080 1080 def overriderollback(orig, ui, repo, **opts):
1081 1081 result = orig(ui, repo, **opts)
1082 1082 merge.update(repo, node=None, branchmerge=False, force=True,
1083 1083 partial=lfutil.isstandin)
1084 1084 wlock = repo.wlock()
1085 1085 try:
1086 1086 lfdirstate = lfutil.openlfdirstate(ui, repo)
1087 1087 lfiles = lfutil.listlfiles(repo)
1088 1088 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1089 1089 for file in lfiles:
1090 1090 if file in oldlfiles:
1091 1091 lfdirstate.normallookup(file)
1092 1092 else:
1093 1093 lfdirstate.add(file)
1094 1094 lfdirstate.write()
1095 1095 finally:
1096 1096 wlock.release()
1097 1097 return result
1098 1098
1099 1099 def overridetransplant(orig, ui, repo, *revs, **opts):
1100 1100 try:
1101 1101 oldstandins = lfutil.getstandinsstate(repo)
1102 1102 repo._istransplanting = True
1103 1103 result = orig(ui, repo, *revs, **opts)
1104 1104 newstandins = lfutil.getstandinsstate(repo)
1105 1105 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1106 1106 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1107 1107 printmessage=True)
1108 1108 finally:
1109 1109 repo._istransplanting = False
1110 1110 return result
1111 1111
1112 1112 def overridecat(orig, ui, repo, file1, *pats, **opts):
1113 1113 ctx = scmutil.revsingle(repo, opts.get('rev'))
1114 1114 err = 1
1115 1115 notbad = set()
1116 1116 m = scmutil.match(ctx, (file1,) + pats, opts)
1117 1117 origmatchfn = m.matchfn
1118 1118 def lfmatchfn(f):
1119 1119 lf = lfutil.splitstandin(f)
1120 1120 if lf is None:
1121 1121 return origmatchfn(f)
1122 1122 notbad.add(lf)
1123 1123 return origmatchfn(lf)
1124 1124 m.matchfn = lfmatchfn
1125 1125 origbadfn = m.bad
1126 1126 def lfbadfn(f, msg):
1127 1127 if not f in notbad:
1128 1128 return origbadfn(f, msg)
1129 1129 m.bad = lfbadfn
1130 1130 for f in ctx.walk(m):
1131 1131 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1132 1132 pathname=f)
1133 1133 lf = lfutil.splitstandin(f)
1134 1134 if lf is None:
1135 1135 # duplicating unreachable code from commands.cat
1136 1136 data = ctx[f].data()
1137 1137 if opts.get('decode'):
1138 1138 data = repo.wwritedata(f, data)
1139 1139 fp.write(data)
1140 1140 else:
1141 1141 hash = lfutil.readstandin(repo, lf, ctx.rev())
1142 1142 if not lfutil.inusercache(repo.ui, hash):
1143 1143 store = basestore._openstore(repo)
1144 1144 success, missing = store.get([(lf, hash)])
1145 1145 if len(success) != 1:
1146 1146 raise util.Abort(
1147 1147 _('largefile %s is not in cache and could not be '
1148 1148 'downloaded') % lf)
1149 1149 path = lfutil.usercachepath(repo.ui, hash)
1150 1150 fpin = open(path, "rb")
1151 1151 for chunk in util.filechunkiter(fpin, 128 * 1024):
1152 1152 fp.write(chunk)
1153 1153 fpin.close()
1154 1154 fp.close()
1155 1155 err = 0
1156 1156 return err
1157 1157
1158 1158 def mercurialsinkbefore(orig, sink):
1159 1159 sink.repo._isconverting = True
1160 1160 orig(sink)
1161 1161
1162 1162 def mercurialsinkafter(orig, sink):
1163 1163 sink.repo._isconverting = False
1164 1164 orig(sink)
@@ -1,1019 +1,1021
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import struct
9 9
10 10 from node import nullid, nullrev, hex, bin
11 11 from i18n import _
12 12 from mercurial import obsolete
13 13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
14 14 import errno, os, shutil
15 15
16 16 _pack = struct.pack
17 17 _unpack = struct.unpack
18 18
19 19 def _droponode(data):
20 20 # used for compatibility for v1
21 21 bits = data.split("\0")
22 22 bits = bits[:-2] + bits[-1:]
23 23 return "\0".join(bits)
24 24
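_droponode strips the second-to-last NUL-separated field of an 'F' record payload: the "other" file node, which the v1 format does not store. A hedged example, with the field layout following mergestate.add below and values abbreviated:

#   "file\0u\0<hash>\0local\0ancestor\0<anode>\0other\0<onode>\0flags"
# becomes
#   "file\0u\0<hash>\0local\0ancestor\0<anode>\0other\0flags"
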
25 25 class mergestate(object):
26 26 '''track 3-way merge state of individual files
27 27
28 28 it is stored on disk when needed. Two files are used: one with an old
29 29 format, one with a new format. Both contain similar data, but the new
30 30 format can store new kinds of fields.
31 31
32 32 The current new format is a list of arbitrary records of the form:
33 33
34 34 [type][length][content]
35 35
36 36 Type is a single character, length is a 4-byte integer, and content is an
37 37 arbitrary sequence of bytes of length `length`.
38 38
39 39 Type should be a letter. Capital letters are mandatory records; Mercurial
40 40 should abort if they are unknown. Lower case records can be safely ignored.
41 41
42 42 Currently known records:
43 43
44 44 L: the node of the "local" part of the merge (hexified version)
45 45 O: the node of the "other" part of the merge (hexified version)
46 46 F: a file to be merged entry
47 47 '''
48 48 statepathv1 = "merge/state"
49 49 statepathv2 = "merge/state2"
50 50
51 51 def __init__(self, repo):
52 52 self._repo = repo
53 53 self._dirty = False
54 54 self._read()
55 55
56 56 def reset(self, node=None, other=None):
57 57 self._state = {}
58 58 if node:
59 59 self._local = node
60 60 self._other = other
61 61 shutil.rmtree(self._repo.join("merge"), True)
62 62 self._dirty = False
63 63
64 64 def _read(self):
65 65 """Analyse each record content to restore a serialized state from disk
66 66
67 67 This function processes "record" entries produced by the de-serialization
68 68 of the on-disk file.
69 69 """
70 70 self._state = {}
71 71 records = self._readrecords()
72 72 for rtype, record in records:
73 73 if rtype == 'L':
74 74 self._local = bin(record)
75 75 elif rtype == 'O':
76 76 self._other = bin(record)
77 77 elif rtype == "F":
78 78 bits = record.split("\0")
79 79 self._state[bits[0]] = bits[1:]
80 80 elif not rtype.islower():
81 81 raise util.Abort(_('unsupported merge state record: %s')
82 82 % rtype)
83 83 self._dirty = False
84 84
85 85 def _readrecords(self):
86 86 """Read merge state from disk and return a list of record (TYPE, data)
87 87
88 88 We read data from both v1 and v2 files and decide which one to use.
89 89
90 90 V1 has been used by versions prior to 2.9.1 and contains less data than
91 91 v2. We read both versions and check that no data in v2 contradicts
92 92 v1. If there is no contradiction we can safely assume that both v1
93 93 and v2 were written at the same time and use the extra data in v2. If
94 94 there is a contradiction we ignore the v2 content, as we assume an old version
95 95 of Mercurial has overwritten the mergestate file and left an old v2
96 96 file around.
97 97
98 98 returns list of record [(TYPE, data), ...]"""
99 99 v1records = self._readrecordsv1()
100 100 v2records = self._readrecordsv2()
101 101 oldv2 = set() # old format version of v2 record
102 102 for rec in v2records:
103 103 if rec[0] == 'L':
104 104 oldv2.add(rec)
105 105 elif rec[0] == 'F':
106 106 # drop the onode data (not contained in v1)
107 107 oldv2.add(('F', _droponode(rec[1])))
108 108 for rec in v1records:
109 109 if rec not in oldv2:
110 110 # v1 file is newer than v2 file, use it
111 111 # we have to infer the "other" changeset of the merge
112 112 # we cannot do better than that with v1 of the format
113 113 mctx = self._repo[None].parents()[-1]
114 114 v1records.append(('O', mctx.hex()))
115 115 # add placeholder "other" file node information;
116 116 # nobody is using it yet so we do not need to fetch the data.
117 117 # if mctx was wrong, `mctx[bits[-2]]` may fail.
118 118 for idx, r in enumerate(v1records):
119 119 if r[0] == 'F':
120 120 bits = r[1].split("\0")
121 121 bits.insert(-2, '')
122 122 v1records[idx] = (r[0], "\0".join(bits))
123 123 return v1records
124 124 else:
125 125 return v2records
126 126
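Restated, the reconciliation above works as follows (a hedged summary of the code, not additional behaviour):

# 1. reduce each v2 record to its v1 shape (drop the onode field from 'F' payloads)
# 2. if every v1 record appears in that reduced set, v1 and v2 agree, so the
#    richer v2 records are returned
# 3. otherwise v1 is newer (an older Mercurial rewrote it), so the v1 records are
#    used, after synthesizing the missing 'O' record and empty onode fields
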
127 127 def _readrecordsv1(self):
128 128 """read on disk merge state for version 1 file
129 129
130 130 returns list of record [(TYPE, data), ...]
131 131
132 132 Note: the "F" data from this file are one entry short
133 133 (no "other file node" entry)
134 134 """
135 135 records = []
136 136 try:
137 137 f = self._repo.opener(self.statepathv1)
138 138 for i, l in enumerate(f):
139 139 if i == 0:
140 140 records.append(('L', l[:-1]))
141 141 else:
142 142 records.append(('F', l[:-1]))
143 143 f.close()
144 144 except IOError, err:
145 145 if err.errno != errno.ENOENT:
146 146 raise
147 147 return records
148 148
149 149 def _readrecordsv2(self):
150 150 """read on disk merge state for version 2 file
151 151
152 152 returns list of record [(TYPE, data), ...]
153 153 """
154 154 records = []
155 155 try:
156 156 f = self._repo.opener(self.statepathv2)
157 157 data = f.read()
158 158 off = 0
159 159 end = len(data)
160 160 while off < end:
161 161 rtype = data[off]
162 162 off += 1
163 163 length = _unpack('>I', data[off:(off + 4)])[0]
164 164 off += 4
165 165 record = data[off:(off + length)]
166 166 off += length
167 167 records.append((rtype, record))
168 168 f.close()
169 169 except IOError, err:
170 170 if err.errno != errno.ENOENT:
171 171 raise
172 172 return records
173 173
174 174 def commit(self):
175 175 """Write current state on disk (if necessary)"""
176 176 if self._dirty:
177 177 records = []
178 178 records.append(("L", hex(self._local)))
179 179 records.append(("O", hex(self._other)))
180 180 for d, v in self._state.iteritems():
181 181 records.append(("F", "\0".join([d] + v)))
182 182 self._writerecords(records)
183 183 self._dirty = False
184 184
185 185 def _writerecords(self, records):
186 186 """Write current state on disk (both v1 and v2)"""
187 187 self._writerecordsv1(records)
188 188 self._writerecordsv2(records)
189 189
190 190 def _writerecordsv1(self, records):
191 191 """Write current state on disk in a version 1 file"""
192 192 f = self._repo.opener(self.statepathv1, "w")
193 193 irecords = iter(records)
194 194 lrecords = irecords.next()
195 195 assert lrecords[0] == 'L'
196 196 f.write(hex(self._local) + "\n")
197 197 for rtype, data in irecords:
198 198 if rtype == "F":
199 199 f.write("%s\n" % _droponode(data))
200 200 f.close()
201 201
202 202 def _writerecordsv2(self, records):
203 203 """Write current state on disk in a version 2 file"""
204 204 f = self._repo.opener(self.statepathv2, "w")
205 205 for key, data in records:
206 206 assert len(key) == 1
207 207 format = ">sI%is" % len(data)
208 208 f.write(_pack(format, key, len(data), data))
209 209 f.close()
210 210
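The v2 record layout written above can be exercised on its own. A minimal, self-contained sketch in plain Python 2, independent of Mercurial, using the same '>sI%is' packing as _writerecordsv2 and the same read loop as _readrecordsv2:

import struct

def packrecord(rtype, data):
    # [type][length][content]: 1-byte type, 4-byte big-endian length, payload
    return struct.pack(">sI%is" % len(data), rtype, len(data), data)

def unpackrecords(blob):
    records, off = [], 0
    while off < len(blob):
        rtype = blob[off]
        length = struct.unpack(">I", blob[off + 1:off + 5])[0]
        records.append((rtype, blob[off + 5:off + 5 + length]))
        off += 5 + length
    return records

records = unpackrecords(packrecord("L", "0123456789abcdef"))
# records == [('L', '0123456789abcdef')]
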
211 211 def add(self, fcl, fco, fca, fd):
212 212 """add a new (potentially?) conflicting file the merge state
213 213 fcl: file context for local,
214 214 fco: file context for remote,
215 215 fca: file context for ancestors,
216 216 fd: file path of the resulting merge.
217 217
218 218 note: also write the local version to the `.hg/merge` directory.
219 219 """
220 220 hash = util.sha1(fcl.path()).hexdigest()
221 221 self._repo.opener.write("merge/" + hash, fcl.data())
222 222 self._state[fd] = ['u', hash, fcl.path(),
223 223 fca.path(), hex(fca.filenode()),
224 224 fco.path(), hex(fco.filenode()),
225 225 fcl.flags()]
226 226 self._dirty = True
227 227
228 228 def __contains__(self, dfile):
229 229 return dfile in self._state
230 230
231 231 def __getitem__(self, dfile):
232 232 return self._state[dfile][0]
233 233
234 234 def __iter__(self):
235 235 l = self._state.keys()
236 236 l.sort()
237 237 for f in l:
238 238 yield f
239 239
240 240 def files(self):
241 241 return self._state.keys()
242 242
243 243 def mark(self, dfile, state):
244 244 self._state[dfile][0] = state
245 245 self._dirty = True
246 246
247 247 def resolve(self, dfile, wctx):
248 248 """rerun merge process for file path `dfile`"""
249 249 if self[dfile] == 'r':
250 250 return 0
251 251 stateentry = self._state[dfile]
252 252 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
253 253 octx = self._repo[self._other]
254 254 fcd = wctx[dfile]
255 255 fco = octx[ofile]
256 256 fca = self._repo.filectx(afile, fileid=anode)
257 257 # "premerge" x flags
258 258 flo = fco.flags()
259 259 fla = fca.flags()
260 260 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
261 261 if fca.node() == nullid:
262 262 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
263 263 afile)
264 264 elif flags == fla:
265 265 flags = flo
266 266 # restore local
267 267 f = self._repo.opener("merge/" + hash)
268 268 self._repo.wwrite(dfile, f.read(), flags)
269 269 f.close()
270 270 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
271 271 if r is None:
272 272 # no real conflict
273 273 del self._state[dfile]
274 274 self._dirty = True
275 275 elif not r:
276 276 self.mark(dfile, 'r')
277 277 return r
278 278
279 279 def _checkunknownfile(repo, wctx, mctx, f):
280 280 return (not repo.dirstate._ignore(f)
281 281 and os.path.isfile(repo.wjoin(f))
282 282 and repo.wopener.audit.check(f)
283 283 and repo.dirstate.normalize(f) not in repo.dirstate
284 284 and mctx[f].cmp(wctx[f]))
285 285
286 286 def _checkunknown(repo, wctx, mctx):
287 287 "check for collisions between unknown files and files in mctx"
288 288
289 289 error = False
290 290 for f in mctx:
291 291 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
292 292 error = True
293 293 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
294 294 if error:
295 295 raise util.Abort(_("untracked files in working directory differ "
296 296 "from files in requested revision"))
297 297
298 298 def _forgetremoved(wctx, mctx, branchmerge):
299 299 """
300 300 Forget removed files
301 301
302 302 If we're jumping between revisions (as opposed to merging), and if
303 303 neither the working directory nor the target rev has the file,
304 304 then we need to remove it from the dirstate, to prevent the
305 305 dirstate from listing the file when it is no longer in the
306 306 manifest.
307 307
308 308 If we're merging, and the other revision has removed a file
309 309 that is not present in the working directory, we need to mark it
310 310 as removed.
311 311 """
312 312
313 313 actions = []
314 314 state = branchmerge and 'r' or 'f'
315 315 for f in wctx.deleted():
316 316 if f not in mctx:
317 317 actions.append((f, state, None, "forget deleted"))
318 318
319 319 if not branchmerge:
320 320 for f in wctx.removed():
321 321 if f not in mctx:
322 322 actions.append((f, "f", None, "forget removed"))
323 323
324 324 return actions
325 325
326 326 def _checkcollision(repo, wmf, actions):
327 327 # build provisional merged manifest up
328 328 pmmf = set(wmf)
329 329
330 330 def addop(f, args):
331 331 pmmf.add(f)
332 332 def removeop(f, args):
333 333 pmmf.discard(f)
334 334 def nop(f, args):
335 335 pass
336 336
337 337 def renamemoveop(f, args):
338 338 f2, flags = args
339 339 pmmf.discard(f2)
340 340 pmmf.add(f)
341 341 def renamegetop(f, args):
342 342 f2, flags = args
343 343 pmmf.add(f)
344 344 def mergeop(f, args):
345 345 f1, f2, fa, move, anc = args
346 346 if move:
347 347 pmmf.discard(f1)
348 348 pmmf.add(f)
349 349
350 350 opmap = {
351 351 "a": addop,
352 352 "dm": renamemoveop,
353 353 "dg": renamegetop,
354 354 "dr": nop,
355 355 "e": nop,
356 356 "f": addop, # untracked file should be kept in working directory
357 357 "g": addop,
358 358 "m": mergeop,
359 359 "r": removeop,
360 360 "rd": nop,
361 361 "cd": addop,
362 362 "dc": addop,
363 363 }
364 364 for f, m, args, msg in actions:
365 365 op = opmap.get(m)
366 366 assert op, m
367 367 op(f, args)
368 368
369 369 # check case-folding collision in provisional merged manifest
370 370 foldmap = {}
371 371 for f in sorted(pmmf):
372 372 fold = util.normcase(f)
373 373 if fold in foldmap:
374 374 raise util.Abort(_("case-folding collision between %s and %s")
375 375 % (f, foldmap[fold]))
376 376 foldmap[fold] = f
377 377
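The fold-map check above, as a self-contained sketch; check_casefold_collisions is a hypothetical helper and str.lower() only stands in for util.normcase, which also applies platform-specific normalization:

def check_casefold_collisions(filenames):
    # map each case-folded name to the first spelling seen; a second
    # spelling with the same fold is reported as a collision
    foldmap = {}
    for f in sorted(filenames):
        fold = f.lower()          # util.normcase in Mercurial itself
        if fold in foldmap:
            raise ValueError("case-folding collision between %s and %s"
                             % (f, foldmap[fold]))
        foldmap[fold] = f

# check_casefold_collisions(['README.txt', 'readme.txt'])  # raises ValueError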
378 378 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
379 379 acceptremote, followcopies):
380 380 """
381 381 Merge p1 and p2 with ancestor pa and generate merge action list
382 382
383 383 branchmerge and force are as passed in to update
384 384 partial = function to filter file lists
385 385 acceptremote = accept the incoming changes without prompting
386 386 """
387 387
388 388 actions, copy, movewithdir = [], {}, {}
389 389
390 390 # manifests fetched in order are going to be faster, so prime the caches
391 391 [x.manifest() for x in
392 392 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
393 393
394 394 if followcopies:
395 395 ret = copies.mergecopies(repo, wctx, p2, pa)
396 396 copy, movewithdir, diverge, renamedelete = ret
397 397 for of, fl in diverge.iteritems():
398 398 actions.append((of, "dr", (fl,), "divergent renames"))
399 399 for of, fl in renamedelete.iteritems():
400 400 actions.append((of, "rd", (fl,), "rename and delete"))
401 401
402 402 repo.ui.note(_("resolving manifests\n"))
403 403 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
404 404 % (bool(branchmerge), bool(force), bool(partial)))
405 405 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
406 406
407 407 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
408 408 copied = set(copy.values())
409 409 copied.update(movewithdir.values())
410 410
411 411 if '.hgsubstate' in m1:
412 412 # check whether sub state is modified
413 413 for s in sorted(wctx.substate):
414 414 if wctx.sub(s).dirty():
415 415 m1['.hgsubstate'] += "+"
416 416 break
417 417
418 418 aborts = []
419 419 # Compare manifests
420 420 fdiff = dicthelpers.diff(m1, m2)
421 421 flagsdiff = m1.flagsdiff(m2)
422 422 diff12 = dicthelpers.join(fdiff, flagsdiff)
423 423
424 424 for f, (n12, fl12) in diff12.iteritems():
425 425 if n12:
426 426 n1, n2 = n12
427 427 else: # file contents didn't change, but flags did
428 428 n1 = n2 = m1.get(f, None)
429 429 if n1 is None:
430 430 # Since n1 == n2, the file isn't present in m2 either. This
431 431 # means that the file was removed or deleted locally and
432 432 # removed remotely, but that residual entries remain in flags.
433 433 # This can happen in manifests generated by workingctx.
434 434 continue
435 435 if fl12:
436 436 fl1, fl2 = fl12
437 437 else: # flags didn't change, file contents did
438 438 fl1 = fl2 = m1.flags(f)
439 439
440 440 if partial and not partial(f):
441 441 continue
442 442 if n1 and n2:
443 443 fa = f
444 444 a = ma.get(f, nullid)
445 445 if a == nullid:
446 446 fa = copy.get(f, f)
447 447 # Note: f as default is wrong - we can't really make a 3-way
448 448 # merge without an ancestor file.
449 449 fla = ma.flags(fa)
450 450 nol = 'l' not in fl1 + fl2 + fla
451 451 if n2 == a and fl2 == fla:
452 452 pass # remote unchanged - keep local
453 453 elif n1 == a and fl1 == fla: # local unchanged - use remote
454 454 if n1 == n2: # optimization: keep local content
455 455 actions.append((f, "e", (fl2,), "update permissions"))
456 456 else:
457 457 actions.append((f, "g", (fl2,), "remote is newer"))
458 458 elif nol and n2 == a: # remote only changed 'x'
459 459 actions.append((f, "e", (fl2,), "update permissions"))
460 460 elif nol and n1 == a: # local only changed 'x'
461 461 actions.append((f, "g", (fl1,), "remote is newer"))
462 462 else: # both changed something
463 463 actions.append((f, "m", (f, f, fa, False, pa.node()),
464 464 "versions differ"))
465 465 elif f in copied: # files we'll deal with on m2 side
466 466 pass
467 467 elif n1 and f in movewithdir: # directory rename, move local
468 468 f2 = movewithdir[f]
469 469 actions.append((f2, "dm", (f, fl1),
470 470 "remote directory rename - move from " + f))
471 471 elif n1 and f in copy:
472 472 f2 = copy[f]
473 473 actions.append((f, "m", (f, f2, f2, False, pa.node()),
474 474 "local copied/moved from " + f2))
475 475 elif n1 and f in ma: # clean, a different, no remote
476 476 if n1 != ma[f]:
477 477 if acceptremote:
478 478 actions.append((f, "r", None, "remote delete"))
479 479 else:
480 480 actions.append((f, "cd", None, "prompt changed/deleted"))
481 481 elif n1[20:] == "a": # added, no remote
482 482 actions.append((f, "f", None, "remote deleted"))
483 483 else:
484 484 actions.append((f, "r", None, "other deleted"))
485 485 elif n2 and f in movewithdir:
486 486 f2 = movewithdir[f]
487 487 actions.append((f2, "dg", (f, fl2),
488 488 "local directory rename - get from " + f))
489 489 elif n2 and f in copy:
490 490 f2 = copy[f]
491 491 if f2 in m2:
492 492 actions.append((f, "m", (f2, f, f2, False, pa.node()),
493 493 "remote copied from " + f2))
494 494 else:
495 495 actions.append((f, "m", (f2, f, f2, True, pa.node()),
496 496 "remote moved from " + f2))
497 497 elif n2 and f not in ma:
498 498 # local unknown, remote created: the logic is described by the
499 499 # following table:
500 500 #
501 501 # force branchmerge different | action
502 502 # n * n | get
503 503 # n * y | abort
504 504 # y n * | get
505 505 # y y n | get
506 506 # y y y | merge
507 507 #
508 508 # Checking whether the files are different is expensive, so we
509 509 # don't do that when we can avoid it.
510 510 if force and not branchmerge:
511 511 actions.append((f, "g", (fl2,), "remote created"))
512 512 else:
513 513 different = _checkunknownfile(repo, wctx, p2, f)
514 514 if force and branchmerge and different:
515 515 # FIXME: This is wrong - f is not in ma ...
516 516 actions.append((f, "m", (f, f, f, False, pa.node()),
517 517 "remote differs from untracked local"))
518 518 elif not force and different:
519 519 aborts.append((f, "ud"))
520 520 else:
521 521 actions.append((f, "g", (fl2,), "remote created"))
522 522 elif n2 and n2 != ma[f]:
523 523 different = _checkunknownfile(repo, wctx, p2, f)
524 524 if not force and different:
525 525 aborts.append((f, "ud"))
526 526 else:
527 527 # if different: old untracked f may be overwritten and lost
528 528 if acceptremote:
529 529 actions.append((f, "g", (m2.flags(f),),
530 530 "remote recreating"))
531 531 else:
532 532 actions.append((f, "dc", (m2.flags(f),),
533 533 "prompt deleted/changed"))
534 534
535 535 for f, m in sorted(aborts):
536 536 if m == "ud":
537 537 repo.ui.warn(_("%s: untracked file differs\n") % f)
538 538 else: assert False, m
539 539 if aborts:
540 540 raise util.Abort(_("untracked files in working directory differ "
541 541 "from files in requested revision"))
542 542
543 543 if not util.checkcase(repo.path):
544 544 # check collision between files only in p2 for clean update
545 545 if (not branchmerge and
546 546 (force or not wctx.dirty(missing=True, branch=False))):
547 547 _checkcollision(repo, m2, [])
548 548 else:
549 549 _checkcollision(repo, m1, actions)
550 550
551 551 return actions
552 552
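The force/branchmerge/different table for "local unknown, remote created" inside manifestmerge above reads naturally as a small decision function; unknown_created_action is an illustrative sketch, not a real merge.py helper:

def unknown_created_action(force, branchmerge, different):
    # 'different' is the (expensive) result of _checkunknownfile: the
    # untracked local file's content differs from the remote version
    if force and not branchmerge:
        return 'g'        # get, without ever comparing contents
    if not force and different:
        return 'abort'    # refuse to clobber a differing untracked file
    if force and branchmerge and different:
        return 'm'        # merge the remote file with the untracked local one
    return 'g'            # identical, or not different: safe to get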
553 553 def actionkey(a):
554 554 return a[1] in "rf" and -1 or 0, a
555 555
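actionkey makes applyupdates sort removals and forgets ahead of everything else, presumably so old files are gone before new content with conflicting names (or case folds) is written; a small sketch of the effect:

# hypothetical action list in the (file, code, args, message) format used above
actions = [('b.txt', 'g', ('',), 'remote created'),
           ('a.txt', 'r', None, 'other deleted'),
           ('c.txt', 'm', None, 'versions differ')]
actions.sort(key=actionkey)   # same as key=lambda a: (-1 if a[1] in 'rf' else 0, a)
# -> the 'r' entry for a.txt sorts first, then b.txt ('g') and c.txt ('m')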
556 556 def getremove(repo, mctx, overwrite, args):
557 557 """apply usually-non-interactive updates to the working directory
558 558
559 559 mctx is the context to be merged into the working copy
560 560
561 561 yields tuples for progress updates
562 562 """
563 563 verbose = repo.ui.verbose
564 564 unlink = util.unlinkpath
565 565 wjoin = repo.wjoin
566 566 fctx = mctx.filectx
567 567 wwrite = repo.wwrite
568 568 audit = repo.wopener.audit
569 569 i = 0
570 570 for arg in args:
571 571 f = arg[0]
572 572 if arg[1] == 'r':
573 573 if verbose:
574 574 repo.ui.note(_("removing %s\n") % f)
575 575 audit(f)
576 576 try:
577 577 unlink(wjoin(f), ignoremissing=True)
578 578 except OSError, inst:
579 579 repo.ui.warn(_("update failed to remove %s: %s!\n") %
580 580 (f, inst.strerror))
581 581 else:
582 582 if verbose:
583 583 repo.ui.note(_("getting %s\n") % f)
584 584 wwrite(f, fctx(f).data(), arg[2][0])
585 585 if i == 100:
586 586 yield i, f
587 587 i = 0
588 588 i += 1
589 589 if i > 0:
590 590 yield i, f
591 591
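getremove reports progress in coarse batches rather than once per file so the progress bar stays cheap; the same pattern in isolation, with batched_progress as a hypothetical standalone generator:

def batched_progress(items, batch=100):
    # yield (count, last-item) roughly every `batch` items, plus a final
    # partial batch; the yielded counts always sum to len(items)
    i = 0
    item = None
    for item in items:
        if i == batch:
            yield i, item
            i = 0
        i += 1
    if i > 0:
        yield i, item

# sum(n for n, _ in batched_progress(range(250))) == 250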
592 592 def applyupdates(repo, actions, wctx, mctx, overwrite):
593 593 """apply the merge action list to the working directory
594 594
595 595 wctx is the working copy context
596 596 mctx is the context to be merged into the working copy
597 597
598 598 Return a tuple of counts (updated, merged, removed, unresolved) that
599 599 describes how many files were affected by the update.
600 600 """
601 601
602 602 updated, merged, removed, unresolved = 0, 0, 0, 0
603 603 ms = mergestate(repo)
604 604 ms.reset(wctx.p1().node(), mctx.node())
605 605 moves = []
606 606 actions.sort(key=actionkey)
607 607
608 608 # prescan for merges
609 609 for a in actions:
610 610 f, m, args, msg = a
611 611 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
612 612 if m == "m": # merge
613 613 f1, f2, fa, move, anc = args
614 614 if f == '.hgsubstate': # merged internally
615 615 continue
616 616 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
617 617 fcl = wctx[f1]
618 618 fco = mctx[f2]
619 619 actx = repo[anc]
620 620 if fa in actx:
621 621 fca = actx[fa]
622 622 else:
623 623 fca = repo.filectx(f1, fileid=nullrev)
624 624 ms.add(fcl, fco, fca, f)
625 625 if f1 != f and move:
626 626 moves.append(f1)
627 627
628 628 audit = repo.wopener.audit
629 629
630 630 # remove renamed files after safely stored
631 631 for f in moves:
632 632 if os.path.lexists(repo.wjoin(f)):
633 633 repo.ui.debug("removing %s\n" % f)
634 634 audit(f)
635 635 util.unlinkpath(repo.wjoin(f))
636 636
637 637 numupdates = len(actions)
638 638 workeractions = [a for a in actions if a[1] in 'gr']
639 639 updateactions = [a for a in workeractions if a[1] == 'g']
640 640 updated = len(updateactions)
641 641 removeactions = [a for a in workeractions if a[1] == 'r']
642 642 removed = len(removeactions)
643 643 actions = [a for a in actions if a[1] not in 'gr']
644 644
645 645 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
646 646 if hgsub and hgsub[0] == 'r':
647 647 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
648 648
649 649 z = 0
650 650 prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
651 651 removeactions)
652 652 for i, item in prog:
653 653 z += i
654 654 repo.ui.progress(_('updating'), z, item=item, total=numupdates,
655 655 unit=_('files'))
656 656 prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
657 657 updateactions)
658 658 for i, item in prog:
659 659 z += i
660 660 repo.ui.progress(_('updating'), z, item=item, total=numupdates,
661 661 unit=_('files'))
662 662
663 663 if hgsub and hgsub[0] == 'g':
664 664 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
665 665
666 666 _updating = _('updating')
667 667 _files = _('files')
668 668 progress = repo.ui.progress
669 669
670 670 for i, a in enumerate(actions):
671 671 f, m, args, msg = a
672 672 progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
673 673 if m == "m": # merge
674 674 f1, f2, fa, move, anc = args
675 675 if f == '.hgsubstate': # subrepo states need updating
676 676 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
677 677 overwrite)
678 678 continue
679 679 audit(f)
680 680 r = ms.resolve(f, wctx)
681 681 if r is not None and r > 0:
682 682 unresolved += 1
683 683 else:
684 684 if r is None:
685 685 updated += 1
686 686 else:
687 687 merged += 1
688 688 elif m == "dm": # directory rename, move local
689 689 f0, flags = args
690 690 repo.ui.note(_("moving %s to %s\n") % (f0, f))
691 691 audit(f)
692 692 repo.wwrite(f, wctx.filectx(f0).data(), flags)
693 693 util.unlinkpath(repo.wjoin(f0))
694 694 updated += 1
695 695 elif m == "dg": # local directory rename, get
696 696 f0, flags = args
697 697 repo.ui.note(_("getting %s to %s\n") % (f0, f))
698 698 repo.wwrite(f, mctx.filectx(f0).data(), flags)
699 699 updated += 1
700 700 elif m == "dr": # divergent renames
701 701 fl, = args
702 702 repo.ui.warn(_("note: possible conflict - %s was renamed "
703 703 "multiple times to:\n") % f)
704 704 for nf in fl:
705 705 repo.ui.warn(" %s\n" % nf)
706 706 elif m == "rd": # rename and delete
707 707 fl, = args
708 708 repo.ui.warn(_("note: possible conflict - %s was deleted "
709 709 "and renamed to:\n") % f)
710 710 for nf in fl:
711 711 repo.ui.warn(" %s\n" % nf)
712 712 elif m == "e": # exec
713 713 flags, = args
714 714 audit(f)
715 715 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
716 716 updated += 1
717 717 ms.commit()
718 718 progress(_updating, None, total=numupdates, unit=_files)
719 719
720 720 return updated, merged, removed, unresolved
721 721
722 def calculateupdates(repo, wctx, mctx, ancestor, branchmerge, force, partial,
722 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
723 723 acceptremote, followcopies):
724 "Calculate the actions needed to merge mctx into wctx using ancestor"
724 "Calculate the actions needed to merge mctx into wctx using ancestors"
725
726 ancestor = ancestors[0]
725 727
726 728 actions = manifestmerge(repo, wctx, mctx,
727 729 ancestor,
728 730 branchmerge, force,
729 731 partial, acceptremote, followcopies)
730 732
731 733 # Filter out prompts.
732 734 newactions, prompts = [], []
733 735 for a in actions:
734 736 if a[1] in ("cd", "dc"):
735 737 prompts.append(a)
736 738 else:
737 739 newactions.append(a)
738 740 # Prompt and create actions. TODO: Move this towards resolve phase.
739 741 for f, m, args, msg in sorted(prompts):
740 742 if m == "cd":
741 743 if repo.ui.promptchoice(
742 744 _("local changed %s which remote deleted\n"
743 745 "use (c)hanged version or (d)elete?"
744 746 "$$ &Changed $$ &Delete") % f, 0):
745 747 newactions.append((f, "r", None, "prompt delete"))
746 748 else:
747 749 newactions.append((f, "a", None, "prompt keep"))
748 750 elif m == "dc":
749 751 flags, = args
750 752 if repo.ui.promptchoice(
751 753 _("remote changed %s which local deleted\n"
752 754 "use (c)hanged version or leave (d)eleted?"
753 755 "$$ &Changed $$ &Deleted") % f, 0) == 0:
754 756 newactions.append((f, "g", (flags,), "prompt recreating"))
755 757 else: assert False, m
756 758
757 759 if wctx.rev() is None:
758 760 newactions += _forgetremoved(wctx, mctx, branchmerge)
759 761
760 762 return newactions
761 763
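With this change the ancestor is passed in as a list, although only the first entry is consulted so far; a sketch of the new calling convention, assuming repo, wc, p2, pa and the remaining flags are set up as in update() below:

# sketch only: pa is assumed to be a single ancestor changectx
actions = calculateupdates(repo, wc, p2, [pa],
                           branchmerge, force, partial,
                           mergeancestor, followcopies)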
762 764 def recordupdates(repo, actions, branchmerge):
763 765 "record merge actions to the dirstate"
764 766
765 767 for a in actions:
766 768 f, m, args, msg = a
767 769 if m == "r": # remove
768 770 if branchmerge:
769 771 repo.dirstate.remove(f)
770 772 else:
771 773 repo.dirstate.drop(f)
772 774 elif m == "a": # re-add
773 775 if not branchmerge:
774 776 repo.dirstate.add(f)
775 777 elif m == "f": # forget
776 778 repo.dirstate.drop(f)
777 779 elif m == "e": # exec change
778 780 repo.dirstate.normallookup(f)
779 781 elif m == "g": # get
780 782 if branchmerge:
781 783 repo.dirstate.otherparent(f)
782 784 else:
783 785 repo.dirstate.normal(f)
784 786 elif m == "m": # merge
785 787 f1, f2, fa, move, anc = args
786 788 if branchmerge:
787 789 # We've done a branch merge, mark this file as merged
788 790 # so that we properly record the merger later
789 791 repo.dirstate.merge(f)
790 792 if f1 != f2: # copy/rename
791 793 if move:
792 794 repo.dirstate.remove(f1)
793 795 if f1 != f:
794 796 repo.dirstate.copy(f1, f)
795 797 else:
796 798 repo.dirstate.copy(f2, f)
797 799 else:
798 800 # We've update-merged a locally modified file, so
799 801 # we set the dirstate to emulate a normal checkout
800 802 # of that file some time in the past. Thus our
801 803 # merge will appear as a normal local file
802 804 # modification.
803 805 if f2 == f: # file not locally copied/moved
804 806 repo.dirstate.normallookup(f)
805 807 if move:
806 808 repo.dirstate.drop(f1)
807 809 elif m == "dm": # directory rename, move local
808 810 f0, flag = args
809 811 if f0 not in repo.dirstate:
810 812 # untracked file moved
811 813 continue
812 814 if branchmerge:
813 815 repo.dirstate.add(f)
814 816 repo.dirstate.remove(f0)
815 817 repo.dirstate.copy(f0, f)
816 818 else:
817 819 repo.dirstate.normal(f)
818 820 repo.dirstate.drop(f0)
819 821 elif m == "dg": # directory rename, get
820 822 f0, flag = args
821 823 if branchmerge:
822 824 repo.dirstate.add(f)
823 825 repo.dirstate.copy(f0, f)
824 826 else:
825 827 repo.dirstate.normal(f)
826 828
827 829 def update(repo, node, branchmerge, force, partial, ancestor=None,
828 830 mergeancestor=False):
829 831 """
830 832 Perform a merge between the working directory and the given node
831 833
832 834 node = the node to update to, or None if unspecified
833 835 branchmerge = whether to merge between branches
834 836 force = whether to force branch merging or file overwriting
835 837 partial = a function to filter file lists (dirstate not updated)
836 838 mergeancestor = whether it is merging with an ancestor. If true,
837 839 we should accept the incoming changes for any prompts that occur.
838 840 If false, merging with an ancestor (fast-forward) is only allowed
839 841 between different named branches. This flag is used by rebase extension
840 842 as a temporary fix and should be avoided in general.
841 843
842 844 The table below shows all the behaviors of the update command
843 845 given the -c and -C or no options, whether the working directory
844 846 is dirty, whether a revision is specified, and the relationship of
845 847 the parent rev to the target rev (linear, on the same named
846 848 branch, or on another named branch).
847 849
848 850 This logic is tested by test-update-branches.t.
849 851
850 852 -c -C dirty rev | linear same cross
851 853 n n n n | ok (1) x
852 854 n n n y | ok ok ok
853 855 n n y n | merge (2) (2)
854 856 n n y y | merge (3) (3)
855 857 n y * * | --- discard ---
856 858 y n y * | --- (4) ---
857 859 y n n * | --- ok ---
858 860 y y * * | --- (5) ---
859 861
860 862 x = can't happen
861 863 * = don't-care
862 864 1 = abort: not a linear update (merge or update --check to force update)
863 865 2 = abort: uncommitted changes (commit and merge, or update --clean to
864 866 discard changes)
865 867 3 = abort: uncommitted changes (commit or update --clean to discard changes)
866 868 4 = abort: uncommitted changes (checked in commands.py)
867 869 5 = incompatible options (checked in commands.py)
868 870
869 871 Return the same tuple as applyupdates().
870 872 """
871 873
872 874 onode = node
873 875 wlock = repo.wlock()
874 876 try:
875 877 wc = repo[None]
876 878 pl = wc.parents()
877 879 p1 = pl[0]
878 pa = None
880 pas = [None]
879 881 if ancestor:
880 pa = repo[ancestor]
882 pas = [repo[ancestor]]
881 883
882 884 if node is None:
883 885 # Here is where we should consider bookmarks, divergent bookmarks,
884 886 # foreground changesets (successors), and tip of current branch;
885 887 # but currently we are only checking the branch tips.
886 888 try:
887 889 node = repo.branchtip(wc.branch())
888 890 except error.RepoLookupError:
889 891 if wc.branch() == "default": # no default branch!
890 892 node = repo.lookup("tip") # update to tip
891 893 else:
892 894 raise util.Abort(_("branch %s not found") % wc.branch())
893 895
894 896 if p1.obsolete() and not p1.children():
895 897 # allow updating to successors
896 898 successors = obsolete.successorssets(repo, p1.node())
897 899
898 900 # behavior of certain cases is as follows,
899 901 #
900 902 # divergent changesets: update to highest rev, similar to what
901 903 # is currently done when there are more than one head
902 904 # (i.e. 'tip')
903 905 #
904 906 # replaced changesets: same as divergent except we know there
905 907 # is no conflict
906 908 #
907 909 # pruned changeset: no update is done; though, we could
908 910 # consider updating to the first non-obsolete parent,
909 911 # similar to what is current done for 'hg prune'
910 912
911 913 if successors:
912 914 # flatten the list here handles both divergent (len > 1)
913 915 # and the usual case (len = 1)
914 916 successors = [n for sub in successors for n in sub]
915 917
916 918 # get the max revision for the given successors set,
917 919 # i.e. the 'tip' of a set
918 920 node = repo.revs("max(%ln)", successors)[0]
919 pa = p1
921 pas = [p1]
920 922
921 923 overwrite = force and not branchmerge
922 924
923 925 p2 = repo[node]
924 if pa is None:
925 pa = p1.ancestor(p2)
926 if pas[0] is None:
927 pas = [p1.ancestor(p2)]
926 928
927 929 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
928 930
929 931 ### check phase
930 932 if not overwrite and len(pl) > 1:
931 933 raise util.Abort(_("outstanding uncommitted merges"))
932 934 if branchmerge:
933 if pa == p2:
935 if pas == [p2]:
934 936 raise util.Abort(_("merging with a working directory ancestor"
935 937 " has no effect"))
936 elif pa == p1:
938 elif pas == [p1]:
937 939 if not mergeancestor and p1.branch() == p2.branch():
938 940 raise util.Abort(_("nothing to merge"),
939 941 hint=_("use 'hg update' "
940 942 "or check 'hg heads'"))
941 943 if not force and (wc.files() or wc.deleted()):
942 944 raise util.Abort(_("uncommitted changes"),
943 945 hint=_("use 'hg status' to list changes"))
944 946 for s in sorted(wc.substate):
945 947 if wc.sub(s).dirty():
946 948 raise util.Abort(_("uncommitted changes in "
947 949 "subrepository '%s'") % s)
948 950
949 951 elif not overwrite:
950 952 if p1 == p2: # no-op update
951 953 # call the hooks and exit early
952 954 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
953 955 repo.hook('update', parent1=xp2, parent2='', error=0)
954 956 return 0, 0, 0, 0
955 957
956 if pa not in (p1, p2): # nonlinear
958 if pas not in ([p1], [p2]): # nonlinear
957 959 dirty = wc.dirty(missing=True)
958 960 if dirty or onode is None:
959 961 # Branching is a bit strange to ensure we do the minimal
960 962 # amount of call to obsolete.background.
961 963 foreground = obsolete.foreground(repo, [p1.node()])
962 964 # note: the <node> variable contains a random identifier
963 965 if repo[node].node() in foreground:
964 pa = p1 # allow updating to successors
966 pas = [p1] # allow updating to successors
965 967 elif dirty:
966 968 msg = _("uncommitted changes")
967 969 if onode is None:
968 970 hint = _("commit and merge, or update --clean to"
969 971 " discard changes")
970 972 else:
971 973 hint = _("commit or update --clean to discard"
972 974 " changes")
973 975 raise util.Abort(msg, hint=hint)
974 976 else: # node is none
975 977 msg = _("not a linear update")
976 978 hint = _("merge or update --check to force update")
977 979 raise util.Abort(msg, hint=hint)
978 980 else:
979 981 # Allow jumping branches if clean and specific rev given
980 pa = p1
982 pas = [p1]
981 983
982 984 followcopies = False
983 985 if overwrite:
984 pa = wc
985 elif pa == p2: # backwards
986 pa = wc.p1()
986 pas = [wc]
987 elif pas == [p2]: # backwards
988 pas = [wc.p1()]
987 989 elif not branchmerge and not wc.dirty(missing=True):
988 990 pass
989 elif pa and repo.ui.configbool("merge", "followcopies", True):
991 elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
990 992 followcopies = True
991 993
992 994 ### calculate phase
993 actions = calculateupdates(repo, wc, p2, pa, branchmerge, force,
995 actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
994 996 partial, mergeancestor, followcopies)
995 997
996 998 ### apply phase
997 999 if not branchmerge: # just jump to the new rev
998 1000 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
999 1001 if not partial:
1000 1002 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1001 1003 # note that we're in the middle of an update
1002 1004 repo.vfs.write('updatestate', p2.hex())
1003 1005
1004 1006 stats = applyupdates(repo, actions, wc, p2, overwrite)
1005 1007
1006 1008 if not partial:
1007 1009 repo.setparents(fp1, fp2)
1008 1010 recordupdates(repo, actions, branchmerge)
1009 1011 # update completed, clear state
1010 1012 util.unlink(repo.join('updatestate'))
1011 1013
1012 1014 if not branchmerge:
1013 1015 repo.dirstate.setbranch(p2.branch())
1014 1016 finally:
1015 1017 wlock.release()
1016 1018
1017 1019 if not partial:
1018 1020 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1019 1021 return stats
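The four counts returned here become the familiar summary line printed by the command layer; showstats_sketch below is illustrative only, the real formatting lives in the callers (hg.py):

def showstats_sketch(stats):
    # roughly how hg's command layer reports the tuple returned here
    return ("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved" % stats)

# showstats_sketch((3, 1, 0, 0))
# -> '3 files updated, 1 files merged, 0 files removed, 0 files unresolved'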