copies: add matcher parameter to copy logic...
Durham Goode
r24782:4906dc0e default
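This changeset threads an optional matcher through Mercurial's copy-detection code (mercurial/copies.py, the second hunk below) so that callers can limit copy tracing to the files they actually care about, and it teaches the largefiles extension's overrides module (the first hunk below) to accept and forward the new keyword in its copiespathcopies wrapper. As a rough illustration of the resulting API, the sketch below shows how a caller might restrict pathcopies() to a subtree; the narrowpathcopies helper and the 'path:some/dir' pattern are invented for the example, while pathcopies(x, y, match=None) itself comes from this patch.

    from mercurial import copies, scmutil

    def narrowpathcopies(repo, rev1, rev2, pats):
        # Build a matcher from user-supplied patterns, e.g. ['path:some/dir'].
        ctx1, ctx2 = repo[rev1], repo[rev2]
        m = scmutil.match(ctx2, pats)
        # With the new keyword, only copies whose destination matches 'm'
        # need to be traced, so unrelated files are never walked.
        return copies.pathcopies(ctx1, ctx2, match=m)
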
@@ -1,1373 +1,1373
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 15 archival, pathutil, revset
16 16 from mercurial.i18n import _
17 17
18 18 import lfutil
19 19 import lfcommands
20 20 import basestore
21 21
22 22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 23
24 24 def composelargefilematcher(match, manifest):
25 25 '''create a matcher that matches only the largefiles in the original
26 26 matcher'''
27 27 m = copy.copy(match)
28 28 lfile = lambda f: lfutil.standin(f) in manifest
29 29 m._files = filter(lfile, m._files)
30 30 m._fmap = set(m._files)
31 31 m._always = False
32 32 origmatchfn = m.matchfn
33 33 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
34 34 return m
35 35
36 36 def composenormalfilematcher(match, manifest, exclude=None):
37 37 excluded = set()
38 38 if exclude is not None:
39 39 excluded.update(exclude)
40 40
41 41 m = copy.copy(match)
42 42 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
43 43 manifest or f in excluded)
44 44 m._files = filter(notlfile, m._files)
45 45 m._fmap = set(m._files)
46 46 m._always = False
47 47 origmatchfn = m.matchfn
48 48 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
49 49 return m
50 50
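Most of the helpers in this module reason in terms of "standins", the small placeholder files that largefiles keeps under .hglf/ in place of the real file contents. A quick, self-contained illustration of that mapping, assuming the standard largefiles layout and an environment where Mercurial and the extension are importable:

    from hgext.largefiles import lfutil

    print lfutil.standin('data/big.bin')             # '.hglf/data/big.bin'
    print lfutil.splitstandin('.hglf/data/big.bin')  # 'data/big.bin'
    print lfutil.splitstandin('data/big.bin')        # None: not a standin
    print lfutil.isstandin('.hglf/data/big.bin')     # True
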
51 51 def installnormalfilesmatchfn(manifest):
52 52 '''installmatchfn with a matchfn that ignores all largefiles'''
53 53 def overridematch(ctx, pats=[], opts={}, globbed=False,
54 54 default='relpath'):
55 55 match = oldmatch(ctx, pats, opts, globbed, default)
56 56 return composenormalfilematcher(match, manifest)
57 57 oldmatch = installmatchfn(overridematch)
58 58
59 59 def installmatchfn(f):
60 60 '''monkey patch the scmutil module with a custom match function.
61 61 Warning: it monkey patches the _module_ at runtime! Not thread safe!'''
62 62 oldmatch = scmutil.match
63 63 setattr(f, 'oldmatch', oldmatch)
64 64 scmutil.match = f
65 65 return oldmatch
66 66
67 67 def restorematchfn():
68 68 '''restores scmutil.match to what it was before installmatchfn
69 69 was called. no-op if scmutil.match is its original function.
70 70
71 71 Note that n calls to installmatchfn will require n calls to
72 72 restore the original matchfn.'''
73 73 scmutil.match = getattr(scmutil.match, 'oldmatch')
74 74
75 75 def installmatchandpatsfn(f):
76 76 oldmatchandpats = scmutil.matchandpats
77 77 setattr(f, 'oldmatchandpats', oldmatchandpats)
78 78 scmutil.matchandpats = f
79 79 return oldmatchandpats
80 80
81 81 def restorematchandpatsfn():
82 82 '''restores scmutil.matchandpats to what it was before
83 83 installmatchandpatsfn was called. No-op if scmutil.matchandpats
84 84 is its original function.
85 85
86 86 Note that n calls to installmatchandpatsfn will require n calls
87 87 to restore the original matchfn.'''
88 88 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
89 89 scmutil.matchandpats)
90 90
91 91 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
92 92 large = opts.get('large')
93 93 lfsize = lfutil.getminsize(
94 94 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
95 95
96 96 lfmatcher = None
97 97 if lfutil.islfilesrepo(repo):
98 98 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
99 99 if lfpats:
100 100 lfmatcher = match_.match(repo.root, '', list(lfpats))
101 101
102 102 lfnames = []
103 103 m = copy.copy(matcher)
104 104 m.bad = lambda x, y: None
105 105 wctx = repo[None]
106 106 for f in repo.walk(m):
107 107 exact = m.exact(f)
108 108 lfile = lfutil.standin(f) in wctx
109 109 nfile = f in wctx
110 110 exists = lfile or nfile
111 111
112 112 # addremove in core gets fancy with the name, add doesn't
113 113 if isaddremove:
114 114 name = m.uipath(f)
115 115 else:
116 116 name = m.rel(f)
117 117
118 118 # Don't warn the user when they attempt to add a normal tracked file.
119 119 # The normal add code will do that for us.
120 120 if exact and exists:
121 121 if lfile:
122 122 ui.warn(_('%s already a largefile\n') % name)
123 123 continue
124 124
125 125 if (exact or not exists) and not lfutil.isstandin(f):
126 126 # In case the file was removed previously, but not committed
127 127 # (issue3507)
128 128 if not repo.wvfs.exists(f):
129 129 continue
130 130
131 131 abovemin = (lfsize and
132 132 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
133 133 if large or abovemin or (lfmatcher and lfmatcher(f)):
134 134 lfnames.append(f)
135 135 if ui.verbose or not exact:
136 136 ui.status(_('adding %s as a largefile\n') % name)
137 137
138 138 bad = []
139 139
140 140 # Need to lock, otherwise there could be a race condition between
141 141 # when standins are created and added to the repo.
142 142 wlock = repo.wlock()
143 143 try:
144 144 if not opts.get('dry_run'):
145 145 standins = []
146 146 lfdirstate = lfutil.openlfdirstate(ui, repo)
147 147 for f in lfnames:
148 148 standinname = lfutil.standin(f)
149 149 lfutil.writestandin(repo, standinname, hash='',
150 150 executable=lfutil.getexecutable(repo.wjoin(f)))
151 151 standins.append(standinname)
152 152 if lfdirstate[f] == 'r':
153 153 lfdirstate.normallookup(f)
154 154 else:
155 155 lfdirstate.add(f)
156 156 lfdirstate.write()
157 157 bad += [lfutil.splitstandin(f)
158 158 for f in repo[None].add(standins)
159 159 if f in m.files()]
160 160
161 161 added = [f for f in lfnames if f not in bad]
162 162 finally:
163 163 wlock.release()
164 164 return added, bad
165 165
166 166 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
167 167 after = opts.get('after')
168 168 m = composelargefilematcher(matcher, repo[None].manifest())
169 169 try:
170 170 repo.lfstatus = True
171 171 s = repo.status(match=m, clean=not isaddremove)
172 172 finally:
173 173 repo.lfstatus = False
174 174 manifest = repo[None].manifest()
175 175 modified, added, deleted, clean = [[f for f in list
176 176 if lfutil.standin(f) in manifest]
177 177 for list in (s.modified, s.added,
178 178 s.deleted, s.clean)]
179 179
180 180 def warn(files, msg):
181 181 for f in files:
182 182 ui.warn(msg % m.rel(f))
183 183 return int(len(files) > 0)
184 184
185 185 result = 0
186 186
187 187 if after:
188 188 remove = deleted
189 189 result = warn(modified + added + clean,
190 190 _('not removing %s: file still exists\n'))
191 191 else:
192 192 remove = deleted + clean
193 193 result = warn(modified, _('not removing %s: file is modified (use -f'
194 194 ' to force removal)\n'))
195 195 result = warn(added, _('not removing %s: file has been marked for add'
196 196 ' (use forget to undo)\n')) or result
197 197
198 198 # Need to lock because standin files are deleted then removed from the
199 199 # repository and we could race in-between.
200 200 wlock = repo.wlock()
201 201 try:
202 202 lfdirstate = lfutil.openlfdirstate(ui, repo)
203 203 for f in sorted(remove):
204 204 if ui.verbose or not m.exact(f):
205 205 # addremove in core gets fancy with the name, remove doesn't
206 206 if isaddremove:
207 207 name = m.uipath(f)
208 208 else:
209 209 name = m.rel(f)
210 210 ui.status(_('removing %s\n') % name)
211 211
212 212 if not opts.get('dry_run'):
213 213 if not after:
214 214 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
215 215
216 216 if opts.get('dry_run'):
217 217 return result
218 218
219 219 remove = [lfutil.standin(f) for f in remove]
220 220 # If this is being called by addremove, let the original addremove
221 221 # function handle this.
222 222 if not isaddremove:
223 223 for f in remove:
224 224 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
225 225 repo[None].forget(remove)
226 226
227 227 for f in remove:
228 228 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
229 229 False)
230 230
231 231 lfdirstate.write()
232 232 finally:
233 233 wlock.release()
234 234
235 235 return result
236 236
237 237 # For overriding mercurial.hgweb.webcommands so that largefiles will
238 238 # appear at their right place in the manifests.
239 239 def decodepath(orig, path):
240 240 return lfutil.splitstandin(path) or path
241 241
242 242 # -- Wrappers: modify existing commands --------------------------------
243 243
244 244 def overrideadd(orig, ui, repo, *pats, **opts):
245 245 if opts.get('normal') and opts.get('large'):
246 246 raise util.Abort(_('--normal cannot be used with --large'))
247 247 return orig(ui, repo, *pats, **opts)
248 248
249 249 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
250 250 # The --normal flag short circuits this override
251 251 if opts.get('normal'):
252 252 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
253 253
254 254 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
255 255 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
256 256 ladded)
257 257 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
258 258
259 259 bad.extend(f for f in lbad)
260 260 return bad
261 261
262 262 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
263 263 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
264 264 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
265 265 return removelargefiles(ui, repo, False, matcher, after=after,
266 266 force=force) or result
267 267
268 268 def overridestatusfn(orig, repo, rev2, **opts):
269 269 try:
270 270 repo._repo.lfstatus = True
271 271 return orig(repo, rev2, **opts)
272 272 finally:
273 273 repo._repo.lfstatus = False
274 274
275 275 def overridestatus(orig, ui, repo, *pats, **opts):
276 276 try:
277 277 repo.lfstatus = True
278 278 return orig(ui, repo, *pats, **opts)
279 279 finally:
280 280 repo.lfstatus = False
281 281
282 282 def overridedirty(orig, repo, ignoreupdate=False):
283 283 try:
284 284 repo._repo.lfstatus = True
285 285 return orig(repo, ignoreupdate)
286 286 finally:
287 287 repo._repo.lfstatus = False
288 288
289 289 def overridelog(orig, ui, repo, *pats, **opts):
290 290 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
291 291 default='relpath'):
292 292 """Matcher that merges root directory with .hglf, suitable for log.
293 293 It is still possible to match .hglf directly.
294 294 For any listed files run log on the standin too.
295 295 matchfn tries both the given filename as-is and with .hglf stripped.
296 296 """
297 297 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
298 298 m, p = copy.copy(matchandpats)
299 299
300 300 if m.always():
301 301 # We want to match everything anyway, so there's no benefit trying
302 302 # to add standins.
303 303 return matchandpats
304 304
305 305 pats = set(p)
306 306
307 307 def fixpats(pat, tostandin=lfutil.standin):
308 308 kindpat = match_._patsplit(pat, None)
309 309
310 310 if kindpat[0] is not None:
311 311 return kindpat[0] + ':' + tostandin(kindpat[1])
312 312 return tostandin(kindpat[1])
313 313
314 314 if m._cwd:
315 315 hglf = lfutil.shortname
316 316 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
317 317
318 318 def tostandin(f):
319 319 # The file may already be a standin, so truncate the back
320 320 # prefix and test before mangling it. This avoids turning
321 321 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
322 322 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
323 323 return f
324 324
325 325 # An absolute path is from outside the repo, so truncate the
326 326 # path to the root before building the standin. Otherwise cwd
327 327 # is somewhere in the repo, relative to root, and needs to be
328 328 # prepended before building the standin.
329 329 if os.path.isabs(m._cwd):
330 330 f = f[len(back):]
331 331 else:
332 332 f = m._cwd + '/' + f
333 333 return back + lfutil.standin(f)
334 334
335 335 pats.update(fixpats(f, tostandin) for f in p)
336 336 else:
337 337 def tostandin(f):
338 338 if lfutil.splitstandin(f):
339 339 return f
340 340 return lfutil.standin(f)
341 341 pats.update(fixpats(f, tostandin) for f in p)
342 342
343 343 for i in range(0, len(m._files)):
344 344 # Don't add '.hglf' to m.files, since that is already covered by '.'
345 345 if m._files[i] == '.':
346 346 continue
347 347 standin = lfutil.standin(m._files[i])
348 348 # If the "standin" is a directory, append instead of replace to
349 349 # support naming a directory on the command line with only
350 350 # largefiles. The original directory is kept to support normal
351 351 # files.
352 352 if standin in repo[ctx.node()]:
353 353 m._files[i] = standin
354 354 elif m._files[i] not in repo[ctx.node()] \
355 355 and repo.wvfs.isdir(standin):
356 356 m._files.append(standin)
357 357
358 358 m._fmap = set(m._files)
359 359 m._always = False
360 360 origmatchfn = m.matchfn
361 361 def lfmatchfn(f):
362 362 lf = lfutil.splitstandin(f)
363 363 if lf is not None and origmatchfn(lf):
364 364 return True
365 365 r = origmatchfn(f)
366 366 return r
367 367 m.matchfn = lfmatchfn
368 368
369 369 ui.debug('updated patterns: %s\n' % sorted(pats))
370 370 return m, pats
371 371
372 372 # For hg log --patch, the match object is used in two different senses:
373 373 # (1) to determine what revisions should be printed out, and
374 374 # (2) to determine what files to print out diffs for.
375 375 # The magic matchandpats override should be used for case (1) but not for
376 376 # case (2).
377 377 def overridemakelogfilematcher(repo, pats, opts):
378 378 wctx = repo[None]
379 379 match, pats = oldmatchandpats(wctx, pats, opts)
380 380 return lambda rev: match
381 381
382 382 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
383 383 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
384 384 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
385 385
386 386 try:
387 387 return orig(ui, repo, *pats, **opts)
388 388 finally:
389 389 restorematchandpatsfn()
390 390 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
391 391
392 392 def overrideverify(orig, ui, repo, *pats, **opts):
393 393 large = opts.pop('large', False)
394 394 all = opts.pop('lfa', False)
395 395 contents = opts.pop('lfc', False)
396 396
397 397 result = orig(ui, repo, *pats, **opts)
398 398 if large or all or contents:
399 399 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
400 400 return result
401 401
402 402 def overridedebugstate(orig, ui, repo, *pats, **opts):
403 403 large = opts.pop('large', False)
404 404 if large:
405 405 class fakerepo(object):
406 406 dirstate = lfutil.openlfdirstate(ui, repo)
407 407 orig(ui, fakerepo, *pats, **opts)
408 408 else:
409 409 orig(ui, repo, *pats, **opts)
410 410
411 411 # Before starting the manifest merge, merge.updates will call
412 412 # _checkunknownfile to check if there are any files in the merged-in
413 413 # changeset that collide with unknown files in the working copy.
414 414 #
415 415 # The largefiles are seen as unknown, so this prevents us from merging
416 416 # in a file 'foo' if we already have a largefile with the same name.
417 417 #
418 418 # The overridden function filters the unknown files by removing any
419 419 # largefiles. This makes the merge proceed and we can then handle this
420 420 # case further in the overridden calculateupdates function below.
421 421 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
422 422 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
423 423 return False
424 424 return origfn(repo, wctx, mctx, f, f2)
425 425
426 426 # The manifest merge handles conflicts on the manifest level. We want
427 427 # to handle changes in largefile-ness of files at this level too.
428 428 #
429 429 # The strategy is to run the original calculateupdates and then process
430 430 # the action list it outputs. There are two cases we need to deal with:
431 431 #
432 432 # 1. Normal file in p1, largefile in p2. Here the largefile is
433 433 # detected via its standin file, which will enter the working copy
434 434 # with a "get" action. It is not "merge" since the standin is all
435 435 # Mercurial is concerned with at this level -- the link to the
436 436 # existing normal file is not relevant here.
437 437 #
438 438 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
439 439 # since the largefile will be present in the working copy and
440 440 # different from the normal file in p2. Mercurial therefore
441 441 # triggers a merge action.
442 442 #
443 443 # In both cases, we prompt the user and emit new actions to either
444 444 # remove the standin (if the normal file was kept) or to remove the
445 445 # normal file and get the standin (if the largefile was kept). The
446 446 # default prompt answer is to use the largefile version since it was
447 447 # presumably changed on purpose.
448 448 #
449 449 # Finally, the merge.applyupdates function will then take care of
450 450 # writing the files into the working copy and lfcommands.updatelfiles
451 451 # will update the largefiles.
452 452 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
453 453 partial, acceptremote, followcopies):
454 454 overwrite = force and not branchmerge
455 455 actions, diverge, renamedelete = origfn(
456 456 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
457 457 followcopies)
458 458
459 459 if overwrite:
460 460 return actions, diverge, renamedelete
461 461
462 462 # Convert to dictionary with filename as key and action as value.
463 463 lfiles = set()
464 464 for f in actions:
465 465 splitstandin = f and lfutil.splitstandin(f)
466 466 if splitstandin in p1:
467 467 lfiles.add(splitstandin)
468 468 elif lfutil.standin(f) in p1:
469 469 lfiles.add(f)
470 470
471 471 for lfile in lfiles:
472 472 standin = lfutil.standin(lfile)
473 473 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
474 474 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
475 475 if sm in ('g', 'dc') and lm != 'r':
476 476 # Case 1: normal file in the working copy, largefile in
477 477 # the second parent
478 478 usermsg = _('remote turned local normal file %s into a largefile\n'
479 479 'use (l)argefile or keep (n)ormal file?'
480 480 '$$ &Largefile $$ &Normal file') % lfile
481 481 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
482 482 actions[lfile] = ('r', None, 'replaced by standin')
483 483 actions[standin] = ('g', sargs, 'replaces standin')
484 484 else: # keep local normal file
485 485 actions[lfile] = ('k', None, 'replaces standin')
486 486 if branchmerge:
487 487 actions[standin] = ('k', None, 'replaced by non-standin')
488 488 else:
489 489 actions[standin] = ('r', None, 'replaced by non-standin')
490 490 elif lm in ('g', 'dc') and sm != 'r':
491 491 # Case 2: largefile in the working copy, normal file in
492 492 # the second parent
493 493 usermsg = _('remote turned local largefile %s into a normal file\n'
494 494 'keep (l)argefile or use (n)ormal file?'
495 495 '$$ &Largefile $$ &Normal file') % lfile
496 496 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
497 497 if branchmerge:
498 498 # largefile can be restored from standin safely
499 499 actions[lfile] = ('k', None, 'replaced by standin')
500 500 actions[standin] = ('k', None, 'replaces standin')
501 501 else:
502 502 # "lfile" should be marked as "removed" without
503 503 # removal of itself
504 504 actions[lfile] = ('lfmr', None,
505 505 'forget non-standin largefile')
506 506
507 507 # linear-merge should treat this largefile as 're-added'
508 508 actions[standin] = ('a', None, 'keep standin')
509 509 else: # pick remote normal file
510 510 actions[lfile] = ('g', largs, 'replaces standin')
511 511 actions[standin] = ('r', None, 'replaced by non-standin')
512 512
513 513 return actions, diverge, renamedelete
514 514
515 515 def mergerecordupdates(orig, repo, actions, branchmerge):
516 516 if 'lfmr' in actions:
517 517 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
518 518 for lfile, args, msg in actions['lfmr']:
519 519 # this should be executed before 'orig', to execute 'remove'
520 520 # before all other actions
521 521 repo.dirstate.remove(lfile)
522 522 # make sure lfile doesn't get synclfdirstate'd as normal
523 523 lfdirstate.add(lfile)
524 524 lfdirstate.write()
525 525
526 526 return orig(repo, actions, branchmerge)
527 527
528 528
529 529 # Override filemerge to prompt the user about how they wish to merge
530 530 # largefiles. This will handle identical edits without prompting the user.
531 531 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
532 532 if not lfutil.isstandin(orig):
533 533 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
534 534
535 535 ahash = fca.data().strip().lower()
536 536 dhash = fcd.data().strip().lower()
537 537 ohash = fco.data().strip().lower()
538 538 if (ohash != ahash and
539 539 ohash != dhash and
540 540 (dhash == ahash or
541 541 repo.ui.promptchoice(
542 542 _('largefile %s has a merge conflict\nancestor was %s\n'
543 543 'keep (l)ocal %s or\ntake (o)ther %s?'
544 544 '$$ &Local $$ &Other') %
545 545 (lfutil.splitstandin(orig), ahash, dhash, ohash),
546 546 0) == 1)):
547 547 repo.wwrite(fcd.path(), fco.data(), fco.flags())
548 548 return 0
549 549
550 def copiespathcopies(orig, ctx1, ctx2):
551 copies = orig(ctx1, ctx2)
550 def copiespathcopies(orig, ctx1, ctx2, match=None):
551 copies = orig(ctx1, ctx2, match=match)
552 552 updated = {}
553 553
554 554 for k, v in copies.iteritems():
555 555 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
556 556
557 557 return updated
558 558
559 559 # Copy first changes the matchers to match standins instead of
560 560 # largefiles. Then it overrides util.copyfile in that function it
561 561 # checks if the destination largefile already exists. It also keeps a
562 562 # list of copied files so that the largefiles can be copied and the
563 563 # dirstate updated.
564 564 def overridecopy(orig, ui, repo, pats, opts, rename=False):
565 565 # doesn't remove largefile on rename
566 566 if len(pats) < 2:
567 567 # this isn't legal, let the original function deal with it
568 568 return orig(ui, repo, pats, opts, rename)
569 569
570 570 # This could copy both lfiles and normal files in one command,
571 571 # but we don't want to do that. First replace their matcher to
572 572 # only match normal files and run it, then replace it to just
573 573 # match largefiles and run it again.
574 574 nonormalfiles = False
575 575 nolfiles = False
576 576 installnormalfilesmatchfn(repo[None].manifest())
577 577 try:
578 578 try:
579 579 result = orig(ui, repo, pats, opts, rename)
580 580 except util.Abort, e:
581 581 if str(e) != _('no files to copy'):
582 582 raise e
583 583 else:
584 584 nonormalfiles = True
585 585 result = 0
586 586 finally:
587 587 restorematchfn()
588 588
589 589 # The first rename can cause our current working directory to be removed.
590 590 # In that case there is nothing left to copy/rename so just quit.
591 591 try:
592 592 repo.getcwd()
593 593 except OSError:
594 594 return result
595 595
596 596 def makestandin(relpath):
597 597 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
598 598 return os.path.join(repo.wjoin(lfutil.standin(path)))
599 599
600 600 fullpats = scmutil.expandpats(pats)
601 601 dest = fullpats[-1]
602 602
603 603 if os.path.isdir(dest):
604 604 if not os.path.isdir(makestandin(dest)):
605 605 os.makedirs(makestandin(dest))
606 606
607 607 try:
608 608 try:
609 609 # When we call orig below it creates the standins but we don't add
610 610 # them to the dirstate until later, so lock during that time.
611 611 wlock = repo.wlock()
612 612
613 613 manifest = repo[None].manifest()
614 614 def overridematch(ctx, pats=[], opts={}, globbed=False,
615 615 default='relpath'):
616 616 newpats = []
617 617 # The patterns were previously mangled to add the standin
618 618 # directory; we need to remove that now
619 619 for pat in pats:
620 620 if match_.patkind(pat) is None and lfutil.shortname in pat:
621 621 newpats.append(pat.replace(lfutil.shortname, ''))
622 622 else:
623 623 newpats.append(pat)
624 624 match = oldmatch(ctx, newpats, opts, globbed, default)
625 625 m = copy.copy(match)
626 626 lfile = lambda f: lfutil.standin(f) in manifest
627 627 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
628 628 m._fmap = set(m._files)
629 629 origmatchfn = m.matchfn
630 630 m.matchfn = lambda f: (lfutil.isstandin(f) and
631 631 (f in manifest) and
632 632 origmatchfn(lfutil.splitstandin(f)) or
633 633 None)
634 634 return m
635 635 oldmatch = installmatchfn(overridematch)
636 636 listpats = []
637 637 for pat in pats:
638 638 if match_.patkind(pat) is not None:
639 639 listpats.append(pat)
640 640 else:
641 641 listpats.append(makestandin(pat))
642 642
643 643 try:
644 644 origcopyfile = util.copyfile
645 645 copiedfiles = []
646 646 def overridecopyfile(src, dest):
647 647 if (lfutil.shortname in src and
648 648 dest.startswith(repo.wjoin(lfutil.shortname))):
649 649 destlfile = dest.replace(lfutil.shortname, '')
650 650 if not opts['force'] and os.path.exists(destlfile):
651 651 raise IOError('',
652 652 _('destination largefile already exists'))
653 653 copiedfiles.append((src, dest))
654 654 origcopyfile(src, dest)
655 655
656 656 util.copyfile = overridecopyfile
657 657 result += orig(ui, repo, listpats, opts, rename)
658 658 finally:
659 659 util.copyfile = origcopyfile
660 660
661 661 lfdirstate = lfutil.openlfdirstate(ui, repo)
662 662 for (src, dest) in copiedfiles:
663 663 if (lfutil.shortname in src and
664 664 dest.startswith(repo.wjoin(lfutil.shortname))):
665 665 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
666 666 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
667 667 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
668 668 if not os.path.isdir(destlfiledir):
669 669 os.makedirs(destlfiledir)
670 670 if rename:
671 671 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
672 672
673 673 # The file is gone, but this deletes any empty parent
674 674 # directories as a side-effect.
675 675 util.unlinkpath(repo.wjoin(srclfile), True)
676 676 lfdirstate.remove(srclfile)
677 677 else:
678 678 util.copyfile(repo.wjoin(srclfile),
679 679 repo.wjoin(destlfile))
680 680
681 681 lfdirstate.add(destlfile)
682 682 lfdirstate.write()
683 683 except util.Abort, e:
684 684 if str(e) != _('no files to copy'):
685 685 raise e
686 686 else:
687 687 nolfiles = True
688 688 finally:
689 689 restorematchfn()
690 690 wlock.release()
691 691
692 692 if nolfiles and nonormalfiles:
693 693 raise util.Abort(_('no files to copy'))
694 694
695 695 return result
696 696
697 697 # When the user calls revert, we have to be careful to not revert any
698 698 # changes to other largefiles accidentally. This means we have to keep
699 699 # track of the largefiles that are being reverted so we only pull down
700 700 # the necessary largefiles.
701 701 #
702 702 # Standins are only updated (to match the hash of largefiles) before
703 703 # commits. Update the standins then run the original revert, changing
704 704 # the matcher to hit standins instead of largefiles. Based on the
705 705 # resulting standins update the largefiles.
706 706 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
707 707 # Because we put the standins in a bad state (by updating them)
708 708 # and then return them to a correct state we need to lock to
709 709 # prevent others from changing them in their incorrect state.
710 710 wlock = repo.wlock()
711 711 try:
712 712 lfdirstate = lfutil.openlfdirstate(ui, repo)
713 713 s = lfutil.lfdirstatestatus(lfdirstate, repo)
714 714 lfdirstate.write()
715 715 for lfile in s.modified:
716 716 lfutil.updatestandin(repo, lfutil.standin(lfile))
717 717 for lfile in s.deleted:
718 718 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
719 719 os.unlink(repo.wjoin(lfutil.standin(lfile)))
720 720
721 721 oldstandins = lfutil.getstandinsstate(repo)
722 722
723 723 def overridematch(mctx, pats=[], opts={}, globbed=False,
724 724 default='relpath'):
725 725 match = oldmatch(mctx, pats, opts, globbed, default)
726 726 m = copy.copy(match)
727 727
728 728 # revert supports recursing into subrepos, and though largefiles
729 729 # currently doesn't work correctly in that case, this match is
730 730 # called, so the lfdirstate above may not be the correct one for
731 731 # this invocation of match.
732 732 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
733 733 False)
734 734
735 735 def tostandin(f):
736 736 standin = lfutil.standin(f)
737 737 if standin in ctx or standin in mctx:
738 738 return standin
739 739 elif standin in repo[None] or lfdirstate[f] == 'r':
740 740 return None
741 741 return f
742 742 m._files = [tostandin(f) for f in m._files]
743 743 m._files = [f for f in m._files if f is not None]
744 744 m._fmap = set(m._files)
745 745 origmatchfn = m.matchfn
746 746 def matchfn(f):
747 747 if lfutil.isstandin(f):
748 748 return (origmatchfn(lfutil.splitstandin(f)) and
749 749 (f in ctx or f in mctx))
750 750 return origmatchfn(f)
751 751 m.matchfn = matchfn
752 752 return m
753 753 oldmatch = installmatchfn(overridematch)
754 754 try:
755 755 orig(ui, repo, ctx, parents, *pats, **opts)
756 756 finally:
757 757 restorematchfn()
758 758
759 759 newstandins = lfutil.getstandinsstate(repo)
760 760 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
761 761 # lfdirstate should be 'normallookup'-ed for updated files,
762 762 # because reverting doesn't touch dirstate for 'normal' files
763 763 # when target revision is explicitly specified: in such case,
764 764 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
765 765 # of target (standin) file.
766 766 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
767 767 normallookup=True)
768 768
769 769 finally:
770 770 wlock.release()
771 771
772 772 # after pulling changesets, we need to take some extra care to get
773 773 # largefiles updated remotely
774 774 def overridepull(orig, ui, repo, source=None, **opts):
775 775 revsprepull = len(repo)
776 776 if not source:
777 777 source = 'default'
778 778 repo.lfpullsource = source
779 779 result = orig(ui, repo, source, **opts)
780 780 revspostpull = len(repo)
781 781 lfrevs = opts.get('lfrev', [])
782 782 if opts.get('all_largefiles'):
783 783 lfrevs.append('pulled()')
784 784 if lfrevs and revspostpull > revsprepull:
785 785 numcached = 0
786 786 repo.firstpulled = revsprepull # for pulled() revset expression
787 787 try:
788 788 for rev in scmutil.revrange(repo, lfrevs):
789 789 ui.note(_('pulling largefiles for revision %s\n') % rev)
790 790 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
791 791 numcached += len(cached)
792 792 finally:
793 793 del repo.firstpulled
794 794 ui.status(_("%d largefiles cached\n") % numcached)
795 795 return result
796 796
797 797 def pulledrevsetsymbol(repo, subset, x):
798 798 """``pulled()``
799 799 Changesets that have just been pulled.
800 800
801 801 Only available with largefiles from pull --lfrev expressions.
802 802
803 803 .. container:: verbose
804 804
805 805 Some examples:
806 806
807 807 - pull largefiles for all new changesets::
808 808
809 809 hg pull --lfrev "pulled()"
810 810
811 811 - pull largefiles for all new branch heads::
812 812
813 813 hg pull --lfrev "head(pulled()) and not closed()"
814 814
815 815 """
816 816
817 817 try:
818 818 firstpulled = repo.firstpulled
819 819 except AttributeError:
820 820 raise util.Abort(_("pulled() only available in --lfrev"))
821 821 return revset.baseset([r for r in subset if r >= firstpulled])
822 822
823 823 def overrideclone(orig, ui, source, dest=None, **opts):
824 824 d = dest
825 825 if d is None:
826 826 d = hg.defaultdest(source)
827 827 if opts.get('all_largefiles') and not hg.islocal(d):
828 828 raise util.Abort(_(
829 829 '--all-largefiles is incompatible with non-local destination %s') %
830 830 d)
831 831
832 832 return orig(ui, source, dest, **opts)
833 833
834 834 def hgclone(orig, ui, opts, *args, **kwargs):
835 835 result = orig(ui, opts, *args, **kwargs)
836 836
837 837 if result is not None:
838 838 sourcerepo, destrepo = result
839 839 repo = destrepo.local()
840 840
841 841 # If largefiles is required for this repo, permanently enable it locally
842 842 if 'largefiles' in repo.requirements:
843 843 fp = repo.vfs('hgrc', 'a', text=True)
844 844 try:
845 845 fp.write('\n[extensions]\nlargefiles=\n')
846 846 finally:
847 847 fp.close()
848 848
849 849 # Caching is implicitly limited to 'rev' option, since the dest repo was
850 850 # truncated at that point. The user may expect a download count with
851 851 # this option, so attempt the download whether or not this is a largefile repo.
852 852 if opts.get('all_largefiles'):
853 853 success, missing = lfcommands.downloadlfiles(ui, repo, None)
854 854
855 855 if missing != 0:
856 856 return None
857 857
858 858 return result
859 859
860 860 def overriderebase(orig, ui, repo, **opts):
861 861 if not util.safehasattr(repo, '_largefilesenabled'):
862 862 return orig(ui, repo, **opts)
863 863
864 864 resuming = opts.get('continue')
865 865 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
866 866 repo._lfstatuswriters.append(lambda *msg, **opts: None)
867 867 try:
868 868 return orig(ui, repo, **opts)
869 869 finally:
870 870 repo._lfstatuswriters.pop()
871 871 repo._lfcommithooks.pop()
872 872
873 873 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
874 874 prefix='', mtime=None, subrepos=None):
875 875 # No need to lock because we are only reading history and
876 876 # largefile caches, neither of which are modified.
877 877 lfcommands.cachelfiles(repo.ui, repo, node)
878 878
879 879 if kind not in archival.archivers:
880 880 raise util.Abort(_("unknown archive type '%s'") % kind)
881 881
882 882 ctx = repo[node]
883 883
884 884 if kind == 'files':
885 885 if prefix:
886 886 raise util.Abort(
887 887 _('cannot give prefix when archiving to files'))
888 888 else:
889 889 prefix = archival.tidyprefix(dest, kind, prefix)
890 890
891 891 def write(name, mode, islink, getdata):
892 892 if matchfn and not matchfn(name):
893 893 return
894 894 data = getdata()
895 895 if decode:
896 896 data = repo.wwritedata(name, data)
897 897 archiver.addfile(prefix + name, mode, islink, data)
898 898
899 899 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
900 900
901 901 if repo.ui.configbool("ui", "archivemeta", True):
902 902 write('.hg_archival.txt', 0644, False,
903 903 lambda: archival.buildmetadata(ctx))
904 904
905 905 for f in ctx:
906 906 ff = ctx.flags(f)
907 907 getdata = ctx[f].data
908 908 if lfutil.isstandin(f):
909 909 path = lfutil.findfile(repo, getdata().strip())
910 910 if path is None:
911 911 raise util.Abort(
912 912 _('largefile %s not found in repo store or system cache')
913 913 % lfutil.splitstandin(f))
914 914 f = lfutil.splitstandin(f)
915 915
916 916 def getdatafn():
917 917 fd = None
918 918 try:
919 919 fd = open(path, 'rb')
920 920 return fd.read()
921 921 finally:
922 922 if fd:
923 923 fd.close()
924 924
925 925 getdata = getdatafn
926 926 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
927 927
928 928 if subrepos:
929 929 for subpath in sorted(ctx.substate):
930 930 sub = ctx.sub(subpath)
931 931 submatch = match_.narrowmatcher(subpath, matchfn)
932 932 sub.archive(archiver, prefix, submatch)
933 933
934 934 archiver.done()
935 935
936 936 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
937 937 repo._get(repo._state + ('hg',))
938 938 rev = repo._state[1]
939 939 ctx = repo._repo[rev]
940 940
941 941 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
942 942
943 943 def write(name, mode, islink, getdata):
944 944 # At this point, the standin has been replaced with the largefile name,
945 945 # so the normal matcher works here without the lfutil variants.
946 946 if match and not match(f):
947 947 return
948 948 data = getdata()
949 949
950 950 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
951 951
952 952 for f in ctx:
953 953 ff = ctx.flags(f)
954 954 getdata = ctx[f].data
955 955 if lfutil.isstandin(f):
956 956 path = lfutil.findfile(repo._repo, getdata().strip())
957 957 if path is None:
958 958 raise util.Abort(
959 959 _('largefile %s not found in repo store or system cache')
960 960 % lfutil.splitstandin(f))
961 961 f = lfutil.splitstandin(f)
962 962
963 963 def getdatafn():
964 964 fd = None
965 965 try:
966 966 fd = open(os.path.join(prefix, path), 'rb')
967 967 return fd.read()
968 968 finally:
969 969 if fd:
970 970 fd.close()
971 971
972 972 getdata = getdatafn
973 973
974 974 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
975 975
976 976 for subpath in sorted(ctx.substate):
977 977 sub = ctx.sub(subpath)
978 978 submatch = match_.narrowmatcher(subpath, match)
979 979 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
980 980
981 981 # If a largefile is modified, the change is not reflected in its
982 982 # standin until a commit. cmdutil.bailifchanged() raises an exception
983 983 # if the repo has uncommitted changes. Wrap it to also check if
984 984 # largefiles were changed. This is used by bisect, backout and fetch.
985 985 def overridebailifchanged(orig, repo, *args, **kwargs):
986 986 orig(repo, *args, **kwargs)
987 987 repo.lfstatus = True
988 988 s = repo.status()
989 989 repo.lfstatus = False
990 990 if s.modified or s.added or s.removed or s.deleted:
991 991 raise util.Abort(_('uncommitted changes'))
992 992
993 993 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
994 994 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
995 995 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
996 996 m = composelargefilematcher(match, repo[None].manifest())
997 997
998 998 try:
999 999 repo.lfstatus = True
1000 1000 s = repo.status(match=m, clean=True)
1001 1001 finally:
1002 1002 repo.lfstatus = False
1003 1003 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1004 1004 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1005 1005
1006 1006 for f in forget:
1007 1007 if lfutil.standin(f) not in repo.dirstate and not \
1008 1008 repo.wvfs.isdir(lfutil.standin(f)):
1009 1009 ui.warn(_('not removing %s: file is already untracked\n')
1010 1010 % m.rel(f))
1011 1011 bad.append(f)
1012 1012
1013 1013 for f in forget:
1014 1014 if ui.verbose or not m.exact(f):
1015 1015 ui.status(_('removing %s\n') % m.rel(f))
1016 1016
1017 1017 # Need to lock because standin files are deleted then removed from the
1018 1018 # repository and we could race in-between.
1019 1019 wlock = repo.wlock()
1020 1020 try:
1021 1021 lfdirstate = lfutil.openlfdirstate(ui, repo)
1022 1022 for f in forget:
1023 1023 if lfdirstate[f] == 'a':
1024 1024 lfdirstate.drop(f)
1025 1025 else:
1026 1026 lfdirstate.remove(f)
1027 1027 lfdirstate.write()
1028 1028 standins = [lfutil.standin(f) for f in forget]
1029 1029 for f in standins:
1030 1030 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1031 1031 rejected = repo[None].forget(standins)
1032 1032 finally:
1033 1033 wlock.release()
1034 1034
1035 1035 bad.extend(f for f in rejected if f in m.files())
1036 1036 forgot.extend(f for f in forget if f not in rejected)
1037 1037 return bad, forgot
1038 1038
1039 1039 def _getoutgoings(repo, other, missing, addfunc):
1040 1040 """get pairs of filename and largefile hash in outgoing revisions
1041 1041 in 'missing'.
1042 1042
1043 1043 largefiles already existing on 'other' repository are ignored.
1044 1044
1045 1045 'addfunc' is invoked with each unique pair of filename and
1046 1046 largefile hash value.
1047 1047 """
1048 1048 knowns = set()
1049 1049 lfhashes = set()
1050 1050 def dedup(fn, lfhash):
1051 1051 k = (fn, lfhash)
1052 1052 if k not in knowns:
1053 1053 knowns.add(k)
1054 1054 lfhashes.add(lfhash)
1055 1055 lfutil.getlfilestoupload(repo, missing, dedup)
1056 1056 if lfhashes:
1057 1057 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1058 1058 for fn, lfhash in knowns:
1059 1059 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1060 1060 addfunc(fn, lfhash)
1061 1061
1062 1062 def outgoinghook(ui, repo, other, opts, missing):
1063 1063 if opts.pop('large', None):
1064 1064 lfhashes = set()
1065 1065 if ui.debugflag:
1066 1066 toupload = {}
1067 1067 def addfunc(fn, lfhash):
1068 1068 if fn not in toupload:
1069 1069 toupload[fn] = []
1070 1070 toupload[fn].append(lfhash)
1071 1071 lfhashes.add(lfhash)
1072 1072 def showhashes(fn):
1073 1073 for lfhash in sorted(toupload[fn]):
1074 1074 ui.debug(' %s\n' % (lfhash))
1075 1075 else:
1076 1076 toupload = set()
1077 1077 def addfunc(fn, lfhash):
1078 1078 toupload.add(fn)
1079 1079 lfhashes.add(lfhash)
1080 1080 def showhashes(fn):
1081 1081 pass
1082 1082 _getoutgoings(repo, other, missing, addfunc)
1083 1083
1084 1084 if not toupload:
1085 1085 ui.status(_('largefiles: no files to upload\n'))
1086 1086 else:
1087 1087 ui.status(_('largefiles to upload (%d entities):\n')
1088 1088 % (len(lfhashes)))
1089 1089 for file in sorted(toupload):
1090 1090 ui.status(lfutil.splitstandin(file) + '\n')
1091 1091 showhashes(file)
1092 1092 ui.status('\n')
1093 1093
1094 1094 def summaryremotehook(ui, repo, opts, changes):
1095 1095 largeopt = opts.get('large', False)
1096 1096 if changes is None:
1097 1097 if largeopt:
1098 1098 return (False, True) # only outgoing check is needed
1099 1099 else:
1100 1100 return (False, False)
1101 1101 elif largeopt:
1102 1102 url, branch, peer, outgoing = changes[1]
1103 1103 if peer is None:
1104 1104 # i18n: column positioning for "hg summary"
1105 1105 ui.status(_('largefiles: (no remote repo)\n'))
1106 1106 return
1107 1107
1108 1108 toupload = set()
1109 1109 lfhashes = set()
1110 1110 def addfunc(fn, lfhash):
1111 1111 toupload.add(fn)
1112 1112 lfhashes.add(lfhash)
1113 1113 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1114 1114
1115 1115 if not toupload:
1116 1116 # i18n: column positioning for "hg summary"
1117 1117 ui.status(_('largefiles: (no files to upload)\n'))
1118 1118 else:
1119 1119 # i18n: column positioning for "hg summary"
1120 1120 ui.status(_('largefiles: %d entities for %d files to upload\n')
1121 1121 % (len(lfhashes), len(toupload)))
1122 1122
1123 1123 def overridesummary(orig, ui, repo, *pats, **opts):
1124 1124 try:
1125 1125 repo.lfstatus = True
1126 1126 orig(ui, repo, *pats, **opts)
1127 1127 finally:
1128 1128 repo.lfstatus = False
1129 1129
1130 1130 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1131 1131 similarity=None):
1132 1132 if not lfutil.islfilesrepo(repo):
1133 1133 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1134 1134 # Get the list of missing largefiles so we can remove them
1135 1135 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1136 1136 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1137 1137 False, False, False)
1138 1138
1139 1139 # Call into the normal remove code, but leave removal of the standin to the
1140 1140 # original addremove function. Monkey patching here makes sure
1141 1141 # we don't remove the standin in the largefiles code, preventing a very
1142 1142 # confused state later.
1143 1143 if s.deleted:
1144 1144 m = copy.copy(matcher)
1145 1145
1146 1146 # The m._files and m._map attributes are not changed to the deleted list
1147 1147 # because that affects the m.exact() test, which in turn governs whether
1148 1148 # or not the file name is printed, and how. Simply limit the original
1149 1149 # matches to those in the deleted status list.
1150 1150 matchfn = m.matchfn
1151 1151 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1152 1152
1153 1153 removelargefiles(repo.ui, repo, True, m, **opts)
1154 1154 # Call into the normal add code, and any files that *should* be added as
1155 1155 # largefiles will be
1156 1156 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1157 1157 # Now that we've handled largefiles, hand off to the original addremove
1158 1158 # function to take care of the rest. Make sure it doesn't do anything with
1159 1159 # largefiles by passing a matcher that will ignore them.
1160 1160 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1161 1161 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1162 1162
1163 1163 # Calling purge with --all will cause the largefiles to be deleted.
1164 1164 # Override repo.status to prevent this from happening.
1165 1165 def overridepurge(orig, ui, repo, *dirs, **opts):
1166 1166 # XXX Monkey patching a repoview will not work. The assigned attribute will
1167 1167 # be set on the unfiltered repo, but we will only lookup attributes in the
1168 1168 # unfiltered repo if the lookup in the repoview object itself fails. As the
1169 1169 # monkey patched method exists on the repoview class the lookup will not
1170 1170 # fail. As a result, the original version will shadow the monkey patched
1171 1171 # one, defeating the monkey patch.
1172 1172 #
1173 1173 # As a workaround we use an unfiltered repo here. We should do something
1174 1174 # cleaner instead.
1175 1175 repo = repo.unfiltered()
1176 1176 oldstatus = repo.status
1177 1177 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1178 1178 clean=False, unknown=False, listsubrepos=False):
1179 1179 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1180 1180 listsubrepos)
1181 1181 lfdirstate = lfutil.openlfdirstate(ui, repo)
1182 1182 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1183 1183 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1184 1184 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1185 1185 unknown, ignored, r.clean)
1186 1186 repo.status = overridestatus
1187 1187 orig(ui, repo, *dirs, **opts)
1188 1188 repo.status = oldstatus
1189 1189 def overriderollback(orig, ui, repo, **opts):
1190 1190 wlock = repo.wlock()
1191 1191 try:
1192 1192 before = repo.dirstate.parents()
1193 1193 orphans = set(f for f in repo.dirstate
1194 1194 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1195 1195 result = orig(ui, repo, **opts)
1196 1196 after = repo.dirstate.parents()
1197 1197 if before == after:
1198 1198 return result # no need to restore standins
1199 1199
1200 1200 pctx = repo['.']
1201 1201 for f in repo.dirstate:
1202 1202 if lfutil.isstandin(f):
1203 1203 orphans.discard(f)
1204 1204 if repo.dirstate[f] == 'r':
1205 1205 repo.wvfs.unlinkpath(f, ignoremissing=True)
1206 1206 elif f in pctx:
1207 1207 fctx = pctx[f]
1208 1208 repo.wwrite(f, fctx.data(), fctx.flags())
1209 1209 else:
1210 1210 # content of standin is not so important in 'a',
1211 1211 # 'm' or 'n' (coming from the 2nd parent) cases
1212 1212 lfutil.writestandin(repo, f, '', False)
1213 1213 for standin in orphans:
1214 1214 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1215 1215
1216 1216 lfdirstate = lfutil.openlfdirstate(ui, repo)
1217 1217 orphans = set(lfdirstate)
1218 1218 lfiles = lfutil.listlfiles(repo)
1219 1219 for file in lfiles:
1220 1220 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1221 1221 orphans.discard(file)
1222 1222 for lfile in orphans:
1223 1223 lfdirstate.drop(lfile)
1224 1224 lfdirstate.write()
1225 1225 finally:
1226 1226 wlock.release()
1227 1227 return result
1228 1228
1229 1229 def overridetransplant(orig, ui, repo, *revs, **opts):
1230 1230 resuming = opts.get('continue')
1231 1231 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1232 1232 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1233 1233 try:
1234 1234 result = orig(ui, repo, *revs, **opts)
1235 1235 finally:
1236 1236 repo._lfstatuswriters.pop()
1237 1237 repo._lfcommithooks.pop()
1238 1238 return result
1239 1239
1240 1240 def overridecat(orig, ui, repo, file1, *pats, **opts):
1241 1241 ctx = scmutil.revsingle(repo, opts.get('rev'))
1242 1242 err = 1
1243 1243 notbad = set()
1244 1244 m = scmutil.match(ctx, (file1,) + pats, opts)
1245 1245 origmatchfn = m.matchfn
1246 1246 def lfmatchfn(f):
1247 1247 if origmatchfn(f):
1248 1248 return True
1249 1249 lf = lfutil.splitstandin(f)
1250 1250 if lf is None:
1251 1251 return False
1252 1252 notbad.add(lf)
1253 1253 return origmatchfn(lf)
1254 1254 m.matchfn = lfmatchfn
1255 1255 origbadfn = m.bad
1256 1256 def lfbadfn(f, msg):
1257 1257 if not f in notbad:
1258 1258 origbadfn(f, msg)
1259 1259 m.bad = lfbadfn
1260 1260
1261 1261 origvisitdirfn = m.visitdir
1262 1262 def lfvisitdirfn(dir):
1263 1263 if dir == lfutil.shortname:
1264 1264 return True
1265 1265 ret = origvisitdirfn(dir)
1266 1266 if ret:
1267 1267 return ret
1268 1268 lf = lfutil.splitstandin(dir)
1269 1269 if lf is None:
1270 1270 return False
1271 1271 return origvisitdirfn(lf)
1272 1272 m.visitdir = lfvisitdirfn
1273 1273
1274 1274 for f in ctx.walk(m):
1275 1275 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1276 1276 pathname=f)
1277 1277 lf = lfutil.splitstandin(f)
1278 1278 if lf is None or origmatchfn(f):
1279 1279 # duplicating unreachable code from commands.cat
1280 1280 data = ctx[f].data()
1281 1281 if opts.get('decode'):
1282 1282 data = repo.wwritedata(f, data)
1283 1283 fp.write(data)
1284 1284 else:
1285 1285 hash = lfutil.readstandin(repo, lf, ctx.rev())
1286 1286 if not lfutil.inusercache(repo.ui, hash):
1287 1287 store = basestore._openstore(repo)
1288 1288 success, missing = store.get([(lf, hash)])
1289 1289 if len(success) != 1:
1290 1290 raise util.Abort(
1291 1291 _('largefile %s is not in cache and could not be '
1292 1292 'downloaded') % lf)
1293 1293 path = lfutil.usercachepath(repo.ui, hash)
1294 1294 fpin = open(path, "rb")
1295 1295 for chunk in util.filechunkiter(fpin, 128 * 1024):
1296 1296 fp.write(chunk)
1297 1297 fpin.close()
1298 1298 fp.close()
1299 1299 err = 0
1300 1300 return err
1301 1301
1302 1302 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1303 1303 *args, **kwargs):
1304 1304 wlock = repo.wlock()
1305 1305 try:
1306 1306 # branch | | |
1307 1307 # merge | force | partial | action
1308 1308 # -------+-------+---------+--------------
1309 1309 # x | x | x | linear-merge
1310 1310 # o | x | x | branch-merge
1311 1311 # x | o | x | overwrite (as clean update)
1312 1312 # o | o | x | force-branch-merge (*1)
1313 1313 # x | x | o | (*)
1314 1314 # o | x | o | (*)
1315 1315 # x | o | o | overwrite (as revert)
1316 1316 # o | o | o | (*)
1317 1317 #
1318 1318 # (*) don't care
1319 1319 # (*1) deprecated, but used internally (e.g.: "rebase --collapse")
1320 1320
1321 1321 linearmerge = not branchmerge and not force and not partial
1322 1322
1323 1323 if linearmerge or (branchmerge and force and not partial):
1324 1324 # update standins for linear-merge or force-branch-merge,
1325 1325 # because largefiles in the working directory may be modified
1326 1326 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1327 1327 unsure, s = lfdirstate.status(match_.always(repo.root,
1328 1328 repo.getcwd()),
1329 1329 [], False, False, False)
1330 1330 pctx = repo['.']
1331 1331 for lfile in unsure + s.modified:
1332 1332 lfileabs = repo.wvfs.join(lfile)
1333 1333 if not os.path.exists(lfileabs):
1334 1334 continue
1335 1335 lfhash = lfutil.hashrepofile(repo, lfile)
1336 1336 standin = lfutil.standin(lfile)
1337 1337 lfutil.writestandin(repo, standin, lfhash,
1338 1338 lfutil.getexecutable(lfileabs))
1339 1339 if (standin in pctx and
1340 1340 lfhash == lfutil.readstandin(repo, lfile, '.')):
1341 1341 lfdirstate.normal(lfile)
1342 1342 for lfile in s.added:
1343 1343 lfutil.updatestandin(repo, lfutil.standin(lfile))
1344 1344 lfdirstate.write()
1345 1345
1346 1346 if linearmerge:
1347 1347 # Only call updatelfiles on the standins that have changed
1348 1348 # to save time
1349 1349 oldstandins = lfutil.getstandinsstate(repo)
1350 1350
1351 1351 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1352 1352
1353 1353 filelist = None
1354 1354 if linearmerge:
1355 1355 newstandins = lfutil.getstandinsstate(repo)
1356 1356 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1357 1357
1358 1358 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1359 1359 normallookup=partial, checked=linearmerge)
1360 1360
1361 1361 return result
1362 1362 finally:
1363 1363 wlock.release()
1364 1364
1365 1365 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1366 1366 result = orig(repo, files, *args, **kwargs)
1367 1367
1368 1368 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1369 1369 if filelist:
1370 1370 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1371 1371 printmessage=False, normallookup=True)
1372 1372
1373 1373 return result
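The copiespathcopies wrapper changed earlier in this hunk now takes the matcher and passes it straight through to the wrapped function before translating standin names back to largefile names. As a hedged sketch of how such an override is typically hooked up, the snippet below uses extensions.wrapfunction; the uisetup hook shown here is illustrative (largefiles does its wrapping in its own setup code, which is not part of this diff), and the override body simply mirrors the function above.

    from mercurial import copies, extensions
    from hgext.largefiles import lfutil

    def copiespathcopies(orig, ctx1, ctx2, match=None):
        # Forward the matcher, then map standin paths (.hglf/...) back to
        # the largefile names they stand in for.
        result = orig(ctx1, ctx2, match=match)
        return dict((lfutil.splitstandin(k) or k,
                     lfutil.splitstandin(v) or v)
                    for k, v in result.iteritems())

    def uisetup(ui):
        extensions.wrapfunction(copies, 'pathcopies', copiespathcopies)

Keeping the keyword optional (match=None) preserves compatibility with callers that predate this change.
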
@@ -1,517 +1,523
1 1 # copies.py - copy detection for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import util
9 9 import heapq
10 10
11 11 def _dirname(f):
12 12 s = f.rfind("/")
13 13 if s == -1:
14 14 return ""
15 15 return f[:s]
16 16
17 17 def _findlimit(repo, a, b):
18 18 """
19 19 Find the last revision that needs to be checked to ensure that a full
20 20 transitive closure for file copies can be properly calculated.
21 21 Generally, this means finding the earliest revision number that's an
22 22 ancestor of a or b but not both, except when a or b is a direct descendant
23 23 of the other, in which case we can return the minimum revnum of a and b.
24 24 None if no such revision exists.
25 25 """
26 26
27 27 # basic idea:
28 28 # - mark a and b with different sides
29 29 # - if a parent's children are all on the same side, the parent is
30 30 # on that side, otherwise it is on no side
31 31 # - walk the graph in topological order with the help of a heap;
32 32 # - add unseen parents to side map
33 33 # - clear side of any parent that has children on different sides
34 34 # - track number of interesting revs that might still be on a side
35 35 # - track the lowest interesting rev seen
36 36 # - quit when interesting revs is zero
37 37
38 38 cl = repo.changelog
39 39 working = len(cl) # pseudo rev for the working directory
40 40 if a is None:
41 41 a = working
42 42 if b is None:
43 43 b = working
44 44
45 45 side = {a: -1, b: 1}
46 46 visit = [-a, -b]
47 47 heapq.heapify(visit)
48 48 interesting = len(visit)
49 49 hascommonancestor = False
50 50 limit = working
51 51
52 52 while interesting:
53 53 r = -heapq.heappop(visit)
54 54 if r == working:
55 55 parents = [cl.rev(p) for p in repo.dirstate.parents()]
56 56 else:
57 57 parents = cl.parentrevs(r)
58 58 for p in parents:
59 59 if p < 0:
60 60 continue
61 61 if p not in side:
62 62 # first time we see p; add it to visit
63 63 side[p] = side[r]
64 64 if side[p]:
65 65 interesting += 1
66 66 heapq.heappush(visit, -p)
67 67 elif side[p] and side[p] != side[r]:
68 68 # p was interesting but now we know better
69 69 side[p] = 0
70 70 interesting -= 1
71 71 hascommonancestor = True
72 72 if side[r]:
73 73 limit = r # lowest rev visited
74 74 interesting -= 1
75 75
76 76 if not hascommonancestor:
77 77 return None
78 78
79 79 # Consider the following flow (see test-commit-amend.t under issue4405):
80 80 # 1/ File 'a0' committed
81 81 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
82 82 # 3/ Move back to first commit
83 83 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
84 84 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
85 85 #
86 86 # During the amend in step five, we will be in this state:
87 87 #
88 88 # @ 3 temporary amend commit for a1-amend
89 89 # |
90 90 # o 2 a1-amend
91 91 # |
92 92 # | o 1 a1
93 93 # |/
94 94 # o 0 a0
95 95 #
96 96 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
97 97 # yet the filelog has the copy information in rev 1 and we will not look
98 98 # back far enough unless we also look at a and b as candidates.
99 99 # This only occurs when a is a descendant of b or vice-versa.
100 100 return min(limit, a, b)
101 101
102 102 def _chain(src, dst, a, b):
103 103 '''chain two sets of copies a->b'''
104 104 t = a.copy()
105 105 for k, v in b.iteritems():
106 106 if v in t:
107 107 # found a chain
108 108 if t[v] != k:
109 109 # file wasn't renamed back to itself
110 110 t[k] = t[v]
111 111 if v not in dst:
112 112 # chain was a rename, not a copy
113 113 del t[v]
114 114 if v in src:
115 115 # file is a copy of an existing file
116 116 t[k] = v
117 117
118 118 # remove criss-crossed copies
119 119 for k, v in t.items():
120 120 if k in src and v in dst:
121 121 del t[k]
122 122
123 123 return t
124 124
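A quick worked example of the chaining rules, restated over plain sets and dicts (purely illustrative; the real function is handed changectx objects for src and dst):

def chain_example():
    src = set(['x'])              # files present in the starting context
    dst = set(['y2'])             # files present in the final context
    a = {'y1': 'x'}               # first leg: y1 was copied from x
    b = {'y2': 'y1'}              # second leg: y2 was copied from y1
    t = a.copy()
    for k, v in b.items():
        if v in t:
            if t[v] != k:         # not renamed back to itself
                t[k] = t[v]       # collapse the chain: y2 <- x
            if v not in dst:
                del t[v]          # y1 no longer exists, so that leg was a rename
        if v in src:
            t[k] = v
    for k, v in list(t.items()):
        if k in src and v in dst:
            del t[k]              # drop criss-crossed copies
    return t

assert chain_example() == {'y2': 'x'}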
125 125 def _tracefile(fctx, am, limit=-1):
126 126 '''return file context that is the ancestor of fctx present in ancestor
127 127 manifest am, stopping after the first ancestor lower than limit'''
128 128
129 129 for f in fctx.ancestors():
130 130 if am.get(f.path(), None) == f.filenode():
131 131 return f
132 132 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
133 133 return None
134 134
135 135 def _dirstatecopies(d):
136 136 ds = d._repo.dirstate
137 137 c = ds.copies().copy()
138 138 for k in c.keys():
139 139 if ds[k] not in 'anm':
140 140 del c[k]
141 141 return c
142 142
143 def _computeforwardmissing(a, b):
143 def _computeforwardmissing(a, b, match=None):
144 144 """Computes which files are in b but not a.
145 145 This is its own function so extensions can easily wrap this call to see what
146 146 files _forwardcopies is about to process.
147 147 """
148 return b.manifest().filesnotin(a.manifest())
148 ma = a.manifest()
149 mb = b.manifest()
150 if match:
151 ma = ma.matches(match)
152 mb = mb.matches(match)
153 return mb.filesnotin(ma)
149 154
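The point of the new match parameter is to shrink both manifests before the set difference. A toy illustration with plain sets standing in for manifests (in the real code a matcher object and manifest.matches() do the narrowing):

ma = set(['README', 'src/a.py', 'src/b.py'])
mb = set(['README', 'src/a.py', 'src/b.py', 'src/c.py', 'docs/new.txt'])
accept = lambda f: f.startswith('src/')      # stands in for match(f)
narrowed_a = set(f for f in ma if accept(f))
narrowed_b = set(f for f in mb if accept(f))
# only the matched part of "b minus a" is left for copy tracing
assert sorted(narrowed_b - narrowed_a) == ['src/c.py']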
150 def _forwardcopies(a, b):
155 def _forwardcopies(a, b, match=None):
151 156 '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
152 157
153 158 # check for working copy
154 159 w = None
155 160 if b.rev() is None:
156 161 w = b
157 162 b = w.p1()
158 163 if a == b:
159 164 # short-circuit to avoid issues with merge states
160 165 return _dirstatecopies(w)
161 166
162 167 # files might have to be traced back to the fctx parent of the last
163 168 # one-side-only changeset, but not further back than that
164 169 limit = _findlimit(a._repo, a.rev(), b.rev())
165 170 if limit is None:
166 171 limit = -1
167 172 am = a.manifest()
168 173
169 174 # find where new files came from
170 175 # we currently don't try to find where old files went, too expensive
171 176 # this means we can miss a case like 'hg rm b; hg cp a b'
172 177 cm = {}
173 missing = _computeforwardmissing(a, b)
178 missing = _computeforwardmissing(a, b, match=match)
174 179 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
175 180 for f in missing:
176 181 fctx = b[f]
177 182 fctx._ancestrycontext = ancestrycontext
178 183 ofctx = _tracefile(fctx, am, limit)
179 184 if ofctx:
180 185 cm[f] = ofctx.path()
181 186
182 187 # combine copies from dirstate if necessary
183 188 if w is not None:
184 189 cm = _chain(a, w, cm, _dirstatecopies(w))
185 190
186 191 return cm
187 192
188 193 def _backwardrenames(a, b):
189 194 # Even though we're not taking copies into account, 1:n rename situations
190 195 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
191 196 # arbitrarily pick one of the renames.
192 197 f = _forwardcopies(b, a)
193 198 r = {}
194 199 for k, v in sorted(f.iteritems()):
195 200 # remove copies
196 201 if v in a:
197 202 continue
198 203 r[v] = k
199 204 return r
200 205
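To see the 1:n arbitration mentioned in the comment above, here is the inversion step on literal data (hypothetical file names; descendant_files stands in for membership in context a):

f = {'copy1': 'orig', 'copy2': 'orig'}       # forward copies from the ancestor
descendant_files = set(['copy1', 'copy2'])   # 'orig' no longer exists there
r = {}
for k, v in sorted(f.items()):
    if v in descendant_files:
        continue                 # source still present: a copy, not a rename
    r[v] = k                     # invert; the later sorted key wins the 1:n tie
assert r == {'orig': 'copy2'}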
201 def pathcopies(x, y):
206 def pathcopies(x, y, match=None):
202 207 '''find {dst@y: src@x} copy mapping for directed compare'''
203 208 if x == y or not x or not y:
204 209 return {}
205 210 a = y.ancestor(x)
206 211 if a == x:
207 return _forwardcopies(x, y)
212 return _forwardcopies(x, y, match=match)
208 213 if a == y:
209 214 return _backwardrenames(x, y)
210 return _chain(x, y, _backwardrenames(x, a), _forwardcopies(a, y))
215 return _chain(x, y, _backwardrenames(x, a),
216 _forwardcopies(a, y, match=match))
211 217
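A hedged usage sketch of the new keyword argument (the helper name and revisions are hypothetical): a caller that only cares about part of the tree can build a matcher with scmutil.match() and pass it through, so _computeforwardmissing() never walks the rest of the manifest.

from mercurial import copies, scmutil

def copiesunder(repo, oldrev, newrev, patterns):
    x, y = repo[oldrev], repo[newrev]
    m = scmutil.match(y, patterns)           # e.g. patterns = ['path:src']
    # {dst@newrev: src@oldrev}, restricted to files accepted by the matcher
    return copies.pathcopies(x, y, match=m)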
212 218 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2):
213 219 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
214 220 and c2. This is its own function so extensions can easily wrap this call
215 221 to see what files mergecopies is about to process.
216 222
217 223 Even though c1 and c2 are not used in this function, they are useful in
218 224     other extensions that need to read the file nodes of the changed files.
219 225 """
220 226 u1 = sorted(addedinm1 - addedinm2)
221 227 u2 = sorted(addedinm2 - addedinm1)
222 228
223 229 if u1:
224 230 repo.ui.debug(" unmatched files in local:\n %s\n"
225 231 % "\n ".join(u1))
226 232 if u2:
227 233 repo.ui.debug(" unmatched files in other:\n %s\n"
228 234 % "\n ".join(u2))
229 235 return u1, u2
230 236
231 237 def mergecopies(repo, c1, c2, ca):
232 238 """
233 239 Find moves and copies between context c1 and c2 that are relevant
234 240 for merging.
235 241
236 242 Returns four dicts: "copy", "movewithdir", "diverge", and
237 243 "renamedelete".
238 244
239 245 "copy" is a mapping from destination name -> source name,
240 246 where source is in c1 and destination is in c2 or vice-versa.
241 247
242 248 "movewithdir" is a mapping from source name -> destination name,
243 249     where the file at source, present in one context but not the other,
244 250 needs to be moved to destination by the merge process, because the
245 251 other context moved the directory it is in.
246 252
247 253 "diverge" is a mapping of source name -> list of destination names
248 254 for divergent renames.
249 255
250 256 "renamedelete" is a mapping of source name -> list of destination
251 257 names for files deleted in c1 that were renamed in c2 or vice-versa.
252 258 """
253 259 # avoid silly behavior for update from empty dir
254 260 if not c1 or not c2 or c1 == c2:
255 261 return {}, {}, {}, {}
256 262
257 263 # avoid silly behavior for parent -> working dir
258 264 if c2.node() is None and c1.node() == repo.dirstate.p1():
259 265 return repo.dirstate.copies(), {}, {}, {}
260 266
261 267 limit = _findlimit(repo, c1.rev(), c2.rev())
262 268 if limit is None:
263 269 # no common ancestor, no copies
264 270 return {}, {}, {}, {}
265 271 m1 = c1.manifest()
266 272 m2 = c2.manifest()
267 273 ma = ca.manifest()
268 274
269 275
270 276 def setupctx(ctx):
271 277 """return a 'makectx' function suitable for checkcopies usage from ctx
272 278
273 279         We have to re-create the 'filectx' builder for each 'checkcopies'
274 280         call to ensure the linkrev adjustment is properly set up for
275 281         each. Linkrev adjustment is important to avoid bugs in rename
276 282 detection. Moreover, having a proper '_ancestrycontext' setup ensures
277 283 the performance impact of this adjustment is kept limited. Without it,
278 284 each file could do a full dag traversal making the time complexity of
279 285 the operation explode (see issue4537).
280 286
281 287 This function exists here mostly to limit the impact on stable. Feel
282 288 free to refactor on default.
283 289 """
284 290 rev = ctx.rev()
285 291 ac = getattr(ctx, '_ancestrycontext', None)
286 292 if ac is None:
287 293 revs = [rev]
288 294 if rev is None:
289 295 revs = [p.rev() for p in ctx.parents()]
290 296 ac = ctx._repo.changelog.ancestors(revs, inclusive=True)
291 297 ctx._ancestrycontext = ac
292 298 def makectx(f, n):
293 299 if len(n) != 20: # in a working context?
294 300 if c1.rev() is None:
295 301 return c1.filectx(f)
296 302 return c2.filectx(f)
297 303 fctx = repo.filectx(f, fileid=n)
298 304         # setup only needed for a filectx not created from a changectx
299 305 fctx._ancestrycontext = ac
300 306 fctx._descendantrev = rev
301 307 return fctx
302 308 return util.lrucachefunc(makectx)
303 309
304 310 copy = {}
305 311 movewithdir = {}
306 312 fullcopy = {}
307 313 diverge = {}
308 314
309 315 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
310 316
311 317 addedinm1 = m1.filesnotin(ma)
312 318 addedinm2 = m2.filesnotin(ma)
313 319 u1, u2 = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
314 320
315 321 for f in u1:
316 322 ctx = setupctx(c1)
317 323 checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)
318 324
319 325 for f in u2:
320 326 ctx = setupctx(c2)
321 327 checkcopies(ctx, f, m2, m1, ca, limit, diverge, copy, fullcopy)
322 328
323 329 renamedelete = {}
324 330 renamedelete2 = set()
325 331 diverge2 = set()
326 332 for of, fl in diverge.items():
327 333 if len(fl) == 1 or of in c1 or of in c2:
328 334 del diverge[of] # not actually divergent, or not a rename
329 335 if of not in c1 and of not in c2:
330 336 # renamed on one side, deleted on the other side, but filter
331 337 # out files that have been renamed and then deleted
332 338 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
333 339 renamedelete2.update(fl) # reverse map for below
334 340 else:
335 341 diverge2.update(fl) # reverse map for below
336 342
337 343 bothnew = sorted(addedinm1 & addedinm2)
338 344 if bothnew:
339 345 repo.ui.debug(" unmatched files new in both:\n %s\n"
340 346 % "\n ".join(bothnew))
341 347 bothdiverge, _copy, _fullcopy = {}, {}, {}
342 348 for f in bothnew:
343 349 ctx = setupctx(c1)
344 350 checkcopies(ctx, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
345 351 ctx = setupctx(c2)
346 352 checkcopies(ctx, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
347 353 for of, fl in bothdiverge.items():
348 354 if len(fl) == 2 and fl[0] == fl[1]:
349 355 copy[fl[0]] = of # not actually divergent, just matching renames
350 356
351 357 if fullcopy and repo.ui.debugflag:
352 358 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
353 359 "% = renamed and deleted):\n")
354 360 for f in sorted(fullcopy):
355 361 note = ""
356 362 if f in copy:
357 363 note += "*"
358 364 if f in diverge2:
359 365 note += "!"
360 366 if f in renamedelete2:
361 367 note += "%"
362 368 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
363 369 note))
364 370 del diverge2
365 371
366 372 if not fullcopy:
367 373 return copy, movewithdir, diverge, renamedelete
368 374
369 375 repo.ui.debug(" checking for directory renames\n")
370 376
371 377 # generate a directory move map
372 378 d1, d2 = c1.dirs(), c2.dirs()
373 379 d1.addpath('/')
374 380 d2.addpath('/')
375 381 invalid = set()
376 382 dirmove = {}
377 383
378 384 # examine each file copy for a potential directory move, which is
379 385 # when all the files in a directory are moved to a new directory
380 386 for dst, src in fullcopy.iteritems():
381 387 dsrc, ddst = _dirname(src), _dirname(dst)
382 388 if dsrc in invalid:
383 389 # already seen to be uninteresting
384 390 continue
385 391 elif dsrc in d1 and ddst in d1:
386 392 # directory wasn't entirely moved locally
387 393 invalid.add(dsrc)
388 394 elif dsrc in d2 and ddst in d2:
389 395 # directory wasn't entirely moved remotely
390 396 invalid.add(dsrc)
391 397 elif dsrc in dirmove and dirmove[dsrc] != ddst:
392 398 # files from the same directory moved to two different places
393 399 invalid.add(dsrc)
394 400 else:
395 401 # looks good so far
396 402 dirmove[dsrc + "/"] = ddst + "/"
397 403
398 404 for i in invalid:
399 405 if i in dirmove:
400 406 del dirmove[i]
401 407 del d1, d2, invalid
402 408
403 409 if not dirmove:
404 410 return copy, movewithdir, diverge, renamedelete
405 411
406 412 for d in dirmove:
407 413 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
408 414 (d, dirmove[d]))
409 415
410 416 # check unaccounted nonoverlapping files against directory moves
411 417 for f in u1 + u2:
412 418 if f not in fullcopy:
413 419 for d in dirmove:
414 420 if f.startswith(d):
415 421 # new file added in a directory that was moved, move it
416 422 df = dirmove[d] + f[len(d):]
417 423 if df not in copy:
418 424 movewithdir[f] = df
419 425 repo.ui.debug((" pending file src: '%s' -> "
420 426 "dst: '%s'\n") % (f, df))
421 427 break
422 428
423 429 return copy, movewithdir, diverge, renamedelete
424 430
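To make the four return values concrete, here is a hand-written (not generated) example of what mergecopies could return for a small merge; all file names are hypothetical.

# local (c1) copied a -> b; the other side (c2) moved directory d/ to e/,
# renamed x differently than local did, and renamed f that local deleted.
copy         = {'b': 'a'}                  # plain copy/rename to merge
movewithdir  = {'d/new.txt': 'e/new.txt'}  # file added inside a moved directory
diverge      = {'x': ['x1', 'x2']}         # x renamed two different ways
renamedelete = {'f': ['g']}                # deleted locally, renamed remotely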
425 431 def checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy):
426 432 """
427 433 check possible copies of f from m1 to m2
428 434
429 435 ctx = function accepting (filename, node) that returns a filectx.
430 436 f = the filename to check
431 437 m1 = the source manifest
432 438 m2 = the destination manifest
433 439 ca = the changectx of the common ancestor
434 440 limit = the rev number to not search beyond
435 441 diverge = record all diverges in this dict
436 442 copy = record all non-divergent copies in this dict
437 443 fullcopy = record all copies in this dict
438 444 """
439 445
440 446 ma = ca.manifest()
441 447
442 448 def _related(f1, f2, limit):
443 449 # Walk back to common ancestor to see if the two files originate
444 450         # from the same file. Since a workingfilectx's rev() is None, it messes
445 451 # up the integer comparison logic, hence the pre-step check for
446 452 # None (f1 and f2 can only be workingfilectx's initially).
447 453
448 454 if f1 == f2:
449 455 return f1 # a match
450 456
451 457 g1, g2 = f1.ancestors(), f2.ancestors()
452 458 try:
453 459 f1r, f2r = f1.rev(), f2.rev()
454 460
455 461 if f1r is None:
456 462 f1 = g1.next()
457 463 if f2r is None:
458 464 f2 = g2.next()
459 465
460 466 while True:
461 467 f1r, f2r = f1.rev(), f2.rev()
462 468 if f1r > f2r:
463 469 f1 = g1.next()
464 470 elif f2r > f1r:
465 471 f2 = g2.next()
466 472 elif f1 == f2:
467 473 return f1 # a match
468 474 elif f1r == f2r or f1r < limit or f2r < limit:
469 475 return False # copy no longer relevant
470 476 except StopIteration:
471 477 return False
472 478
473 479 of = None
474 480 seen = set([f])
475 481 for oc in ctx(f, m1[f]).ancestors():
476 482 ocr = oc.rev()
477 483 of = oc.path()
478 484 if of in seen:
479 485 # check limit late - grab last rename before
480 486 if ocr < limit:
481 487 break
482 488 continue
483 489 seen.add(of)
484 490
485 491 fullcopy[f] = of # remember for dir rename detection
486 492 if of not in m2:
487 493 continue # no match, keep looking
488 494 if m2[of] == ma.get(of):
489 495 break # no merge needed, quit early
490 496 c2 = ctx(of, m2[of])
491 497 cr = _related(oc, c2, ca.rev())
492 498 if cr and (of == f or of == c2.path()): # non-divergent
493 499 copy[f] = of
494 500 of = None
495 501 break
496 502
497 503 if of in ma:
498 504 diverge.setdefault(of, []).append(f)
499 505
500 506 def duplicatecopies(repo, rev, fromrev, skiprev=None):
501 507 '''reproduce copies from fromrev to rev in the dirstate
502 508
503 509 If skiprev is specified, it's a revision that should be used to
504 510 filter copy records. Any copies that occur between fromrev and
505 511 skiprev will not be duplicated, even if they appear in the set of
506 512 copies between fromrev and rev.
507 513 '''
508 514 exclude = {}
509 515 if skiprev is not None:
510 516 exclude = pathcopies(repo[fromrev], repo[skiprev])
511 517 for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
512 518 # copies.pathcopies returns backward renames, so dst might not
513 519 # actually be in the dirstate
514 520 if dst in exclude:
515 521 continue
516 522 if repo.dirstate[dst] in "nma":
517 523 repo.dirstate.copy(src, dst)
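A hedged sketch of how a graft-like caller might use this (the helper name is hypothetical; graft itself lives elsewhere): after merging the grafted changeset into the working directory, its copy records are replayed into the dirstate so the new commit keeps them.

from mercurial import copies

def replaygraftcopies(repo, srcrev):
    srcctx = repo[srcrev]
    # reproduce the copies the grafted changeset made relative to its parent;
    # skiprev=... could be passed to filter out records already on this side
    copies.duplicatecopies(repo, srcrev, srcctx.p1().rev())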