##// END OF EJS Templates
largefiles: avoid redundant standin() invocations...
FUJIWARA Katsunori -
r31618:8228bc8f default
parent child Browse files
Show More
@@ -1,576 +1,577 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10 from __future__ import absolute_import
11 11
12 12 import errno
13 13 import hashlib
14 14 import os
15 15 import shutil
16 16
17 17 from mercurial.i18n import _
18 18
19 19 from mercurial import (
20 20 cmdutil,
21 21 commands,
22 22 context,
23 23 error,
24 24 hg,
25 25 lock,
26 26 match as matchmod,
27 27 node,
28 28 scmutil,
29 29 util,
30 30 )
31 31
32 32 from ..convert import (
33 33 convcmd,
34 34 filemap,
35 35 )
36 36
37 37 from . import (
38 38 lfutil,
39 39 storefactory
40 40 )
41 41
# convenience alias for releasing multiple locks at once
release = lock.release

# -- Commands ----------------------------------------------------------

# command table, populated by the @command decorator below and exported
# to the extension loader
cmdtable = {}
command = cmdutil.command(cmdtable)
48 48
@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # --size only matters when converting *to* a largefiles repo
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        # maps source nodes to the corresponding nodes in the destination
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted to.
            # Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = matchmod.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revisions'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # clean up working-copy leftovers: the standin directory and
            # the original largefiles that were written there during
            # conversion
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            # source of the convert machinery that replaces standins by the
            # real largefile contents
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            # temporarily monkey-patch the convert extension's converter
            # class so it uses our largefile-aware source
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest)
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
191 191
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    """Convert one source changeset ctx into rdst.

    Classifies each changed file as largefile or normal (updating the
    lfiles/normalfiles caches shared across calls), writes/refreshes
    standins for largefiles, then commits the converted changeset and
    records the new node in revmap.
    """
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # symlinks cannot be largefiles
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            # compute the standin name once and reuse it below
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink')
                                          % f)

                # largefile was modified, update standins
                m = hashlib.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # callback used by memctx: return the converted content of f
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(), 'x' in fctx.flags(),
                                      renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
269 270
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit dstfiles into rdst as a copy of ctx's metadata.

    Copies newly committed largefiles into the store, moves the working
    parents forward and records the source-node -> destination-tip
    mapping in revmap.
    """
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
277 278
def _getchangedfiles(ctx, parents):
    """Return the set of file names touched by changeset ctx.

    For merges (no null parent) the set also includes files present in
    a parent but absent from ctx, and files whose content differs from
    at least one parent.
    """
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # files that disappeared relative to either parent
        changed |= (set(mp1) | set(mp2)) - set(mc)
        # files differing from at least one parent
        changed.update(f for f in mc
                       if mc[f] != mp1.get(f, None)
                       or mc[f] != mp2.get(f, None))
    return changed
290 291
291 292 # Convert src parents to dst parents
292 293 def _convertparents(ctx, revmap):
293 294 parents = []
294 295 for p in ctx.parents():
295 296 parents.append(revmap[p.node()])
296 297 while len(parents) < 2:
297 298 parents.append(node.nullid)
298 299 return parents
299 300
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Return a memfilectx for normal (non-largefile) file f in ctx.

    Returns None when f is not in ctx (removed/renamed).  .hgtags
    content is rewritten through revmap so tag lines reference the
    converted nodes.
    """
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), renamed)
315 316
316 317 # Remap tag data using a revision map
317 318 def _converttags(ui, revmap, data):
318 319 newdata = []
319 320 for line in data.splitlines():
320 321 try:
321 322 id, name = line.split(' ', 1)
322 323 except ValueError:
323 324 ui.warn(_('skipping incorrectly formatted tag %s\n')
324 325 % line)
325 326 continue
326 327 try:
327 328 newid = node.bin(id)
328 329 except TypeError:
329 330 ui.warn(_('skipping incorrectly formatted id %s\n')
330 331 % id)
331 332 continue
332 333 try:
333 334 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
334 335 name))
335 336 except KeyError:
336 337 ui.warn(_('no mapping for id %s\n') % id)
337 338 continue
338 339 return ''.join(newdata)
339 340
340 341 def _islfile(file, ctx, matcher, size):
341 342 '''Return true if file should be considered a largefile, i.e.
342 343 matcher matches it or it is larger than size.'''
343 344 # never store special .hg* files as largefiles
344 345 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
345 346 return False
346 347 if matcher and matcher(file):
347 348 return True
348 349 try:
349 350 return ctx.filectx(file).size() >= size * 1024 * 1024
350 351 except error.LookupError:
351 352 return False
352 353
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store

    ``files`` is an iterable of largefile hashes; hashes the remote
    store already has are skipped.  Aborts when a needed largefile is
    not available locally.
    '''
    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    # materialize as a list (not a lazy filter object) so the len()
    # calls below keep working on Python 3 as well
    files = [h for h in files if not retval[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
378 379
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository.'''
    revs = repo.revs('all()') if all else ['.']
    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
392 393
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        # only fetch the requested subset
        lfiles = set(lfiles) & set(filelist)
    toget = []

    for lfile in lfiles:
        try:
            # the standin's content is the expected hash of the largefile
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        except IOError as err:
            if err.errno == errno.ENOENT:
                continue # node must be None and standin wasn't found in wctx
            raise
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = storefactory.openstore(repo)
        ret = store.get(toget)
        return ret

    # nothing to fetch: everything was already cached
    return ([], [])
421 422
def downloadlfiles(ui, repo, rev=None):
    """Cache the largefiles referenced by the given revisions.

    Walks the changesets selected by ``rev`` (all of them when ``rev``
    is None) and caches their largefiles via cachelfiles().  Returns a
    (totalsuccess, totalmissing) pair of counts.
    """
    matchfn = scmutil.match(repo[None],
                            [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        # no per-changeset preparation needed; walkchangerevs requires
        # a callable
        pass
    totalsuccess = 0
    totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
439 440
def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".

    ``filelist`` restricts the update to the given largefile names;
    ``normallookup`` is forwarded to lfutil.synclfdirstate() for each
    processed file.
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        # first pass: decide per largefile what to do, and record the
        # expected hash of files that must be (re)fetched
        update = {}
        updated, removed = 0, 0
        wvfs = repo.wvfs
        for lfile in lfiles:
            rellfile = lfile
            rellfileorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(rellfile)),
                start=repo.root)
            relstandin = lfutil.standin(lfile)
            relstandinorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(relstandin)),
                start=repo.root)
            if wvfs.exists(relstandin):
                if (wvfs.exists(relstandinorig) and
                        wvfs.exists(rellfile)):
                    # preserve the largefile alongside the standin's .orig
                    shutil.copyfile(wvfs.join(rellfile),
                                    wvfs.join(rellfileorig))
                    wvfs.unlinkpath(relstandinorig)
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]: # not switched to normal file
                        wvfs.unlinkpath(rellfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (wvfs.exists(rellfile) and
                        repo.dirstate.normalize(lfile) not in repo[None]):
                    wvfs.unlinkpath(rellfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        # second pass: materialize the files recorded above and sync
        # permissions / dirstate
        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the exec mode of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            rellfile = lfile
            relstandin = lfutil.standin(lfile)
            if wvfs.exists(relstandin):
                # exec is decided by the users permissions using mask 0o100
                standinexec = wvfs.stat(relstandin).st_mode & 0o100
                st = wvfs.stat(rellfile)
                mode = st.st_mode
                if standinexec != mode & 0o100:
                    # first remove all X bits, then shift all R bits to X
                    mode &= ~0o111
                    if standinexec:
                        mode |= (mode >> 2) & 0o111 & ~util.umask
                    wvfs.chmod(rellfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
                         removed))
538 539
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # remember where to pull largefiles from for the store machinery
    repo.lfpullsource = source

    revs = opts.get('rev', [])
    if not revs:
        raise error.Abort(_('no revisions specified'))
    revs = scmutil.revrange(repo, revs)

    numcached = 0
    for rev in revs:
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        (cached, missing) = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_("%d largefiles cached\n") % numcached)
@@ -1,670 +1,670 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 15 import platform
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 encoding,
23 23 error,
24 24 httpconnection,
25 25 match as matchmod,
26 26 node,
27 27 pycompat,
28 28 scmutil,
29 29 util,
30 30 vfs as vfsmod,
31 31 )
32 32
# directory (relative to the repo root) where standins live
shortname = '.hglf'
shortnameslash = shortname + '/'
# name used for the config section, the store directory and requirements
longname = 'largefiles'
36 36
37 37 # -- Private worker functions ------------------------------------------
38 38
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum largefile size in megabytes, as a float.

    ``opt`` (the command-line --size value) takes precedence; otherwise,
    when ``assumelfiles`` is true, fall back to the ``largefiles.minsize``
    config value (defaulting to ``default``).  Aborts when the value is
    not numeric or no size could be determined.
    """
    size = opt
    if assumelfiles and not size:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % size)
    if size is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return size
52 52
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy.

    Parent directories of ``dest`` are created as needed.
    """
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, 'rb') as srcf:
            with util.atomictempfile(dest) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        # carry over the source's permission bits to the copy
        os.chmod(dest, os.stat(src).st_mode)
65 65
def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    return os.path.join(_usercachedir(ui), hash)
72 72
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.

    The ``largefiles.usercache`` config value wins; otherwise a
    platform-conventional per-user cache directory is derived from the
    environment.  Aborts when no location can be determined.
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if pycompat.osname == 'nt':
        # Windows: prefer the local (non-roaming) application data dir
        appdata = encoding.environ.get('LOCALAPPDATA',\
                        encoding.environ.get('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif pycompat.osname == 'posix':
        # follow the XDG base directory spec, falling back to ~/.cache
        path = encoding.environ.get('XDG_CACHE_HOME')
        if path:
            return os.path.join(path, longname)
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n')
                          % pycompat.osname)
    raise error.Abort(_('unknown %s usercache location') % longname)
98 98
def inusercache(ui, hash):
    """Report whether the per-user cache already holds largefile ``hash``."""
    return os.path.exists(usercachepath(ui, hash))
102 102
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        # populate the repo store from the user cache so later lookups
        # hit the store directly
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
117 117
class largefilesdirstate(dirstate.dirstate):
    """dirstate subclass used for tracking largefiles.

    Every path is passed through unixpath() before reaching the base
    class (presumably normalizing to slash-separated form -- confirm
    against unixpath's definition), and ignore handling is disabled.
    """
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        # (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
140 140
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When ``create`` is true and no largefiles dirstate exists yet, one
    is populated from the standins currently tracked by the repo
    dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
166 166
def lfdirstatestatus(lfdirstate, repo):
    """Resolve 'unsure' entries of the largefiles dirstate against '.'.

    Files whose working-copy content no longer matches the standin
    recorded in the '.' changeset are moved to modified; matching files
    are marked clean (and normalized in lfdirstate).  Returns the
    updated status object.
    """
    wctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            # no standin in '.': treat as modified below
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
183 183
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset

    The returned names are the largefile names (standin prefix
    stripped), limited by ``matcher`` when given.
    '''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
195 195
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store.

    ``forcelocal`` restricts the check to this repo's own store even
    when the repo is shared (see storepath()).
    '''
    return os.path.exists(storepath(repo, hash, forcelocal))
199 199
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.

    For shared repositories the store lives in the share source unless
    ``forcelocal`` is set.
    '''
    if forcelocal or not repo.shared():
        return repo.vfs.join(longname, hash)
    return repo.vfs.reljoin(repo.sharedpath, longname, hash)
206 206
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primary = storepath(repo, hash, False)
    if instore(repo, hash):
        return (primary, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)
    return (primary, False)
223 223
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(
                util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # cached data is corrupt: drop the partial working-copy file
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
247 247
def copytostore(repo, revorctx, file, uploaded=False):
    """Copy largefile ``file`` into the store, per its standin in revorctx.

    No-op when the store already has the hash; warns when the working
    copy does not contain the file either.  (``uploaded`` is not used
    in this function's body.)
    """
    wvfs = repo.wvfs
    hash = readstandin(repo, file, revorctx)
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
258 258
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    manifest = ctx.manifest()
    for standinfile in ctx.files():
        lfile = splitstandin(standinfile)
        # skip non-standins, and standins removed in this revision
        if lfile is not None and standinfile in manifest:
            copytostore(repo, ctx, lfile)
267 267
def copytostoreabsolute(repo, file, hash):
    """Copy the file at absolute path ``file`` into the store as ``hash``.

    Hardlinks from the user cache when possible; otherwise copies
    atomically with the store's create mode, then links the result back
    into the user cache.
    """
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(storepath(repo, hash),
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
279 279
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)
285 285
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # translate rmatcher's file patterns into the standin namespace
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
305 305
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # the standin must match AND its largefile must satisfy rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn
    return smatcher
317 317
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Callers needing an absolute path must wrap this in repo.wjoin();
    # e.g. addlargefiles requires the repo-relative form for
    # repo[None].add(). Join with '/' because that is what dirstate
    # always uses, even on Windows — normalize any platform separator
    # in the input first.
    normalized = util.pconvert(filename)
    return shortnameslash + normalized
329 329
def isstandin(filename):
    '''Report whether ``filename`` names a big file standin. The name
    must be in Mercurial's internal form (slash-separated).'''
    prefix = shortnameslash
    return filename.startswith(prefix)
334 334
def splitstandin(filename):
    '''Return the largefile name for ``filename`` if it lives under the
    standin directory, else None.'''
    # Split on / because that's what dirstate always uses, even on Windows;
    # convert any local separator first in case the name came from an
    # external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) != 2 or parts[0] != shortname:
        return None
    return parts[1]
344 344
def updatestandin(repo, standin):
    '''Re-hash the working-directory largefile for ``standin`` and rewrite
    the standin file accordingly; abort if the largefile is missing.'''
    lfile = splitstandin(standin)
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_('%s: file not found!') % lfile)
    abspath = repo.wjoin(lfile)
    writestandin(repo, standin, hashfile(abspath), getexecutable(abspath))
354 354
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
359 359
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
363 363
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    # sha1() with no argument: sha1('') raises TypeError on Python 3
    # (str seed) and the empty seed was a no-op on Python 2 anyway.
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
372 372
def hashfile(file):
    '''Return the hex SHA-1 digest of the content of ``file``, or the
    empty string when the path does not exist.'''
    if not os.path.exists(file):
        return ''
    # seedless sha1(): sha1('') breaks on Python 3 and added nothing on
    # Python 2.
    hasher = hashlib.sha1()
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd):
            hasher.update(data)
    return hasher.hexdigest()
381 381
def getexecutable(filename):
    '''Return a truthy value iff ``filename`` carries the executable bit
    for user, group *and* other. Note the result is a bitmask value used
    for its truthiness, not a bool.'''
    st_mode = os.stat(filename).st_mode
    return ((st_mode & stat.S_IXUSR) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXOTH))
387 387
def urljoin(first, second, *arg):
    '''Join URL components, guaranteeing exactly one slash between each
    adjacent pair.'''
    def _glue(left, right):
        left = left if left.endswith('/') else left + '/'
        right = right[1:] if right.startswith('/') else right
        return left + right

    result = _glue(first, second)
    for piece in arg:
        result = _glue(result, piece)
    return result
400 400
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
408 408
def httpsendfile(ui, filename):
    # Thin wrapper: open filename in binary-read mode wrapped for HTTP
    # upload via httpconnection.httpsendfile.
    return httpconnection.httpsendfile(ui, filename, 'rb')
411 411
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
415 415
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if 'largefiles' in repo.requirements:
        # requirement is set once a largefile has been committed; confirm
        # a standin actually exists in the store
        if any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True

    # fall back: any entry at all in the largefiles dirstate
    return any(openlfdirstate(repo.ui, repo, False))
423 423
class storeprotonotcapable(Exception):
    '''Raised when no store supports the required protocol;
    ``storetypes`` records the store types that were considered.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
427 427
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked by
    the dirstate; hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            expectedhash = readstandin(repo, lfile)
        except IOError:
            expectedhash = None
        standins.append((lfile, expectedhash))
    return standins
439 439
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    # Mirror the dirstate status of lfile's standin into the largefiles
    # dirstate so both stay consistent after a commit or similar event.
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # standin untracked: treat as unknown
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
462 462
def markcommitted(orig, ctx, node):
    # Wrapper run after a commit: sync the largefiles dirstate for every
    # standin touched by the commit and copy the largefiles to the store.
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
491 491
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (name, hash) standin entry differs
    between oldstandins and newstandins, without duplicates.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    # dedup with an auxiliary set: the original `f[0] not in filelist`
    # membership test on a list was O(n) per entry
    filelist = []
    seen = set()
    for lfile, _hash in changedstandins:
        if lfile not in seen:
            seen.add(lfile)
            filelist.append(lfile)
    return filelist
499 499
def getlfilestoupload(repo, missing, addfunc):
    # For every changeset in "missing", call addfunc(standin, hash) for
    # each largefile standin the changeset touches.
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        # look the changeset up with largefile status disabled, restoring
        # the previous flag afterwards
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # merge: ctx.files() omits files coming from the second
            # parent, so compare the manifest against both parents
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
531 531
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                # compute the standin path once per largefile
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            # originally matched names: exclude plain largefile names
            return f not in lfiles
        else:
            # otherwise accept only the standins refreshed above
            return f in standins

    match.matchfn = matchfn

    return match
633 633
class automatedcommithook(object):
    '''Stateful hook that refreshes standins only for the first commit
    after resuming automated committing.

    While automated committing (rebase, transplant and so on) is in
    progress, standins should already be updated before each commit, so
    refreshing them every time would be wasted work.  But the first
    commit after resuming (e.g. ``rebase --continue``) must refresh
    them, because largefiles may have been modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # only the very first commit after resuming refreshes standins
        self.resuming = False
        return updatestandinsbymatch(repo, match)
654 654
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    With ``forcibly`` left as ``None`` the "default" writer — the last
    element of ``repo._lfstatuswriters`` — is returned.  A true
    ``forcibly`` always writes via ``ui.status``; a false one returns a
    silent no-op.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
@@ -1,1451 +1,1452 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial import (
18 18 archival,
19 19 cmdutil,
20 20 error,
21 21 hg,
22 22 match as matchmod,
23 23 pathutil,
24 24 registrar,
25 25 scmutil,
26 26 smartset,
27 27 util,
28 28 )
29 29
30 30 from . import (
31 31 lfcommands,
32 32 lfutil,
33 33 storefactory,
34 34 )
35 35
36 36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
37 37
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)

    def islargefile(f):
        return lfutil.standin(f) in manifest

    m._files = filter(islargefile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn

    def newmatchfn(f):
        return islargefile(f) and origmatchfn(f)

    m.matchfn = newmatchfn
    return m
49 49
def composenormalfilematcher(match, manifest, exclude=None):
    '''create a matcher that matches only non-largefiles from the original
    matcher, optionally also dropping the names in ``exclude``'''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)

    def notlfile(f):
        return not (lfutil.isstandin(f) or lfutil.standin(f) in manifest
                    or f in excluded)

    m._files = filter(notlfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn

    def newmatchfn(f):
        return notlfile(f) and origmatchfn(f)

    m.matchfn = newmatchfn
    return m
64 64
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        if opts is None:
            opts = {}
        # delegate to the saved scmutil.match, then strip largefiles
        basematch = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(basematch, manifest)
    # bound after the def, but before overridematch can ever run
    oldmatch = installmatchfn(overridematch)
74 74
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    oldmatch = scmutil.match
    # remember the previous function on the new one so it can be restored
    f.oldmatch = oldmatch
    scmutil.match = f
    return oldmatch
82 82
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Supply a default so this really is a no-op (instead of raising
    # AttributeError) when scmutil.match was never wrapped — matching
    # both the docstring and restorematchandpatsfn().
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
90 90
def installmatchandpatsfn(f):
    '''Monkey patch scmutil.matchandpats with ``f``, remembering the
    previous function on ``f`` so it can be restored later.'''
    previous = scmutil.matchandpats
    f.oldmatchandpats = previous
    scmutil.matchandpats = f
    return previous
96 96
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    current = scmutil.matchandpats
    scmutil.matchandpats = getattr(current, 'oldmatchandpats', current)
106 106
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Add matching files as largefiles: write empty standins, update the
    largefiles dirstate, and add the standins to the repo.  Returns the
    pair (added, bad) of largefile names.'''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    # optional pattern-based largefile selection from configuration
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = matchmod.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

    added = [f for f in lfnames if f not in bad]
    return added, bad
178 178
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Remove matching largefiles: warn about files that cannot be
    removed, unlink the working copies and their standins, and sync the
    largefiles dirstate.  Returns a non-zero int when any warning was
    issued.'''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only status entries that really are tracked largefiles
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        # emit msg per file; return 1 if anything was warned about
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if opts.get('dry_run'):
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()

    return result
246 246
247 247 # For overriding mercurial.hgweb.webcommands so that largefiles will
248 248 # appear at their right place in the manifests.
def decodepath(orig, path):
    # map a standin path back to its largefile name; leave others alone
    lfpath = lfutil.splitstandin(path)
    return lfpath or path
251 251
252 252 # -- Wrappers: modify existing commands --------------------------------
253 253
def overrideadd(orig, ui, repo, *pats, **opts):
    '''Reject the contradictory --normal/--large combination, then
    delegate to the wrapped add.'''
    wantsnormal = opts.get('normal')
    if wantsnormal and opts.get('large'):
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
258 258
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    '''Add largefiles first, then run the wrapped add on the remaining
    normal files; return the combined list of bad names.'''
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
    bad.extend(lbad)
    return bad
271 271
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    '''Run the wrapped remove on normal files, then remove the matching
    largefiles as well.'''
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    lfresult = removelargefiles(ui, repo, False, matcher, after=after,
                                force=force)
    return lfresult or result
277 277
def overridestatusfn(orig, repo, rev2, **opts):
    '''Run the wrapped status with largefile-aware status enabled,
    always restoring the flag afterwards.'''
    repo._repo.lfstatus = True
    try:
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
284 284
def overridestatus(orig, ui, repo, *pats, **opts):
    '''Run the wrapped status command with repo.lfstatus enabled,
    restoring it afterwards.'''
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
291 291
def overridedirty(orig, repo, ignoreupdate=False):
    '''Run the wrapped dirty check with largefile-aware status enabled,
    restoring the flag afterwards.'''
    repo._repo.lfstatus = True
    try:
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
298 298
def overridelog(orig, ui, repo, *pats, **opts):
    # Wrapped log: temporarily install a matchandpats that also matches
    # standins, run the original log, then restore everything.
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
                             default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # rewrite one user pattern so it addresses the standin instead
            if pat.startswith('set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # match either the standin's largefile name or the name itself
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
407 407
def overrideverify(orig, ui, repo, *pats, **opts):
    '''Run the wrapped verify, then optionally verify largefiles too
    (--large / --lfa / --lfc).'''
    checklarge = opts.pop('large', False)
    allrevs = opts.pop('lfa', False)
    checkcontents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if checklarge or allrevs or checkcontents:
        result = result or lfcommands.verifylfiles(ui, repo, allrevs,
                                                   checkcontents)
    return result
417 417
def overridedebugstate(orig, ui, repo, *pats, **opts):
    '''With --large, run the wrapped debugstate against the largefiles
    dirstate; otherwise pass through unchanged.'''
    if opts.pop('large', False):
        class fakerepo(object):
            # substitute the largefiles dirstate for the normal one
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
426 426
427 427 # Before starting the manifest merge, merge.updates will call
428 428 # _checkunknownfile to check if there are any files in the merged-in
429 429 # changeset that collide with unknown files in the working copy.
430 430 #
431 431 # The largefiles are seen as unknown, so this prevents us from merging
432 432 # in a file 'foo' if we already have a largefile with the same name.
433 433 #
434 434 # The overridden function filters the unknown files by removing any
435 435 # largefiles. This makes the merge proceed and we can then handle this
436 436 # case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    # a tracked largefile is not an "unknown file" collision
    fstandin = lfutil.standin(repo.dirstate.normalize(f))
    if fstandin in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
441 441
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             acceptremote, *args, **kwargs):
    """Wrap merge.calculateupdates to reconcile largefile/normal conflicts.

    Runs the original calculation, then rewrites the actions for any file
    that is a largefile on one side and a normal file on the other,
    prompting the user for which version to keep.
    """
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)

    if overwrite:
        # plain overwrite (e.g. clean update): nothing to reconcile
        return actions, diverge, renamedelete

    # Collect largefile names involved on either side: the action names
    # a standin whose largefile is in p1, or a file whose standin is in p1.
    lfiles = set()
    for f in actions:
        splitstandin = lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                # downgrade 'deleted/changed' to a plain 'get' of p2's standin
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
535 535
def mergerecordupdates(orig, repo, actions, branchmerge):
    """Wrap merge.recordupdates to handle the 'lfmr' action.

    'lfmr' entries are emitted by overridecalculateupdates: the largefile
    must be dropped from the repo dirstate (kept on disk) and re-added to
    the largefiles dirstate so it isn't synced back as a normal file.
    """
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)
548 548
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=None):
    """filemerge wrapper that merges standins by comparing largefile hashes.

    Non-standins (and merges where one side is absent) fall through to the
    original merge. Standin contents are the largefile hashes, so equal
    hashes mean identical largefiles and need no prompt.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=labels)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    # Take the other side when it differs from both ancestor and local,
    # and either local is unchanged from the ancestor or the user says so.
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
             (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
571 571
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Translate standin names in a copies mapping back to largefiles.

    Every key and value of the original pathcopies result is run through
    splitstandin; non-standin paths pass through unchanged.
    """
    def _unlift(name):
        # splitstandin() yields None for non-standin paths
        return lfutil.splitstandin(name) or name

    copies = orig(ctx1, ctx2, match=match)
    return dict((_unlift(k), _unlift(v))
                for k, v in copies.iteritems())
580 580
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """copy/rename wrapper that handles largefiles alongside normal files.

    Runs the original command twice: once restricted to normal files and
    once restricted to largefiles (patterns rewritten to standins), then
    copies/renames the largefile bodies and updates the lf dirstate.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except error.Abort as e:
        # "no files to copy" from the normal-files pass is not fatal:
        # there may still be largefiles to copy below
        if str(e) != _('no files to copy'):
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # absolute working-copy path of the standin for 'relpath'
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            # restrict the match to files whose standins are tracked
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (lfile is not None and
                        (f in manifest) and
                        origmatchfn(lfile) or
                        None)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            # Record every copy util.copyfile performs so the largefile
            # bodies can be copied/renamed to match afterwards.
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest):
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                # map the copied standin paths back to largefile paths
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # only abort when *neither* pass found anything to copy
    if nolfiles and nonormalfiles:
        raise error.Abort(_('no files to copy'))

    return result
721 721
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    """revert wrapper: sync standins, revert them, then update largefiles."""
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            # compute the standin path once per deleted largefile
            fstandin = lfutil.standin(lfile)
            if (repo.wvfs.exists(fstandin)):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            def tostandin(f):
                # rewrite each matched file to its standin where relevant
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    return standin
                elif standin in repo[None] or lfdirstate[f] == 'r':
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return (origmatchfn(lfile) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)
795 796
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    """pull wrapper that caches largefiles for revisions named by --lfrev
    (or all newly pulled revisions when --all-largefiles is given)."""
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # the pulled() marker is only valid while we iterate
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
820 821
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        opargs = kwargs.setdefault('opargs', {})
        # resolve the revset now, while the local repo is at hand;
        # exchangepushoperation below attaches it to the pushoperation
        opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)
828 829
def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    # strip our extension-specific keyword before delegating, then stash
    # the value on the resulting pushoperation for the push code to use
    revs = None
    if 'lfrevs' in kwargs:
        revs = kwargs.pop('lfrevs')
    op = orig(*args, **kwargs)
    op.lfrevs = revs
    return op
835 836
revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is set temporarily by overridepull while it
    # resolves --lfrev expressions; outside that window this aborts.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_("pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
863 864
def overrideclone(orig, ui, source, dest=None, **opts):
    """clone wrapper: --all-largefiles needs a local destination.

    Largefiles can only be downloaded into a repository we can write
    to directly, so bail out early otherwise.
    """
    destpath = dest
    if destpath is None:
        destpath = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise error.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            destpath)

    return orig(ui, source, dest, **opts)
874 875
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone: enable largefiles in the new repo's hgrc when the
    source requires it, and honor --all-largefiles by downloading all
    largefiles after the clone."""
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # If largefiles is required for this repo, permanently enable it locally
        if 'largefiles' in repo.requirements:
            with repo.vfs('hgrc', 'a', text=True) as fp:
                fp.write('\n[extensions]\nlargefiles=\n')

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            # a missing largefile is reported as a failed clone
            if missing != 0:
                return None

    return result
903 904
def overriderebase(orig, ui, repo, **opts):
    """rebase wrapper: commit largefiles automatically and silence the
    per-file largefile status output for the duration of the rebase."""
    if not util.safehasattr(repo, '_largefilesenabled'):
        # not a largefiles-enabled repo: nothing to do
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        # pop in reverse order of the appends above
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
916 917
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """archive command wrapper: enable lfstatus during archiving.

    The flag is set on the unfiltered repo because overridearchive
    below also consults the unfiltered attribute.
    """
    unfiltered = repo.unfiltered()
    unfiltered.lfstatus = True
    try:
        return orig(ui, unfiltered, dest, **opts)
    finally:
        unfiltered.lfstatus = False
924 925
def hgwebarchive(orig, web, req, tmpl):
    """hgweb archive wrapper: enable lfstatus for the request duration."""
    web.repo.lfstatus = True

    try:
        return orig(web, req, tmpl)
    finally:
        web.repo.lfstatus = False
932 933
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix='', mtime=None, subrepos=None):
    """archive wrapper that writes real largefile contents in place of
    standins; only active while lfstatus is set (see the wrappers above)."""
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # honor the caller's matcher, then apply decode filters
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if node is not None:
                # archiving a revision: resolve the largefile body from the
                # store/cache by the hash stored in the standin
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfile)
            else:
                # working-copy archive: read the largefile itself
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, matchfn)
            sub._repo.lfstatus = True
            sub.archive(archiver, prefix, submatch)

    archiver.done()
1000 1001
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Subrepo archive wrapper mirroring overridearchive for hg subrepos."""
    if not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # NOTE(review): this tests 'f' (the loop variable below, captured by
        # closure), not 'name'; the only call site passes name == f, so
        # behavior matches -- confirm before refactoring.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                # resolve the largefile body from the store/cache by hash
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfile)
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
1049 1050
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    """bailifchanged wrapper that also aborts on dirty largefiles.

    Delegates to the original check first, then re-runs status with
    largefile tracking enabled and aborts if anything changed.
    """
    orig(repo, *args, **kwargs)
    repo.lfstatus = True
    wcstatus = repo.status()
    repo.lfstatus = False
    dirty = (wcstatus.modified or wcstatus.added or
             wcstatus.removed or wcstatus.deleted)
    if dirty:
        raise error.Abort(_('uncommitted changes'))
1061 1062
def postcommitstatus(orig, repo, *args, **kwargs):
    """Run the post-commit status check with largefile status enabled."""
    repo.lfstatus = True
    try:
        return orig(repo, *args, **kwargs)
    finally:
        repo.lfstatus = False
1068 1069
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    """forget wrapper: forget normal files via the original code and
    largefiles by dropping them from the lf dirstate and forgetting
    their standins; returns the combined (bad, forgot) lists."""
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # only largefiles whose standins are actually tracked can be forgotten
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        # compute the standin path once per file
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1111 1112
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        # record each (filename, hash) pair only once
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        # a single batched existence query against the remote store
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]: # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)
1134 1135
def outgoinghook(ui, repo, other, opts, missing):
    """outgoing hook: with --large, list the largefiles to be uploaded."""
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # debug output additionally lists every hash per filename
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1166 1167
def summaryremotehook(ui, repo, opts, changes):
    """summary --remote hook: report outgoing largefile upload counts."""
    largeopt = opts.get('large', False)
    if changes is None:
        # first invocation: report which remote checks we need
        # (incoming, outgoing)
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1195 1196
def overridesummary(orig, ui, repo, *pats, **opts):
    """summary wrapper: include largefiles in the working-copy status."""
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1202 1203
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    """addremove wrapper: route large files through the largefiles add and
    remove code, then run the original addremove on normal files only."""
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1237 1238
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """purge wrapper: hide largefiles from status so purge won't delete them."""
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # only report files the largefiles dirstate has never heard of
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    """rollback wrapper: restore standins and the largefiles dirstate to
    match the rolled-back repository dirstate."""
    with repo.wlock():
        before = repo.dirstate.parents()
        # standins tracked before the rollback; any no longer tracked
        # afterwards are orphans and will be deleted below
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # rewrite the standin from the new working parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # resync the largefiles dirstate and drop entries for files
        # that are no longer listed
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result
1300 1301
def overridetransplant(orig, ui, repo, *revs, **opts):
    '''Wrap transplant so that the commits it creates go through the
    largefiles commit hook machinery without per-file status output.
    '''
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # Suppress largefiles status messages for the duration of the command.
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1311 1312
def overridecat(orig, ui, repo, file1, *pats, **opts):
    '''Wrap cat so that catting a largefile (named either directly or via
    its standin) writes the real largefile contents, fetching them from
    the store into the user cache first if necessary.

    Returns 0 if at least one file was written, 1 otherwise.
    '''
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    # Largefile names matched only through their standins; m.bad() must
    # stay silent for these even though they are not in the manifest.
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        # Match either the file itself or, when f is a standin, the
        # largefile it stands in for.
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if not f in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        # Always descend into the standin directory.
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        # A directory under the standin tree is visited if the matcher
        # would visit the corresponding largefile directory.
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get('decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # f is a standin matched only via its largefile name:
                # emit the largefile contents, downloading them into the
                # user cache first if they are not there yet.
                hash = lfutil.readstandin(repo, lf, ctx)
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _('largefile %s is not in cache and could not be '
                              'downloaded') % lf)
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, "rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1371 1372
def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    '''Wrap merge.update to keep largefiles and their standins in sync
    across an update or merge.

    Before delegating to the original update, standins of modified or
    unsure largefiles are refreshed so the update machinery sees their
    true state; afterwards, largefiles whose standins changed are updated
    in the working directory, and unchanged ones are re-marked clean so
    they are not needlessly rehashed later.
    '''
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(matchmod.always(repo.root,
                                                    repo.getcwd()),
                                      [], False, True, False)
        oldclean = set(s.clean)
        pctx = repo['.']
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            # Rewrite the standin from the working-directory largefile so
            # the update sees its current hash/executable bit.
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, pctx)):
                # Hash matches the parent: the largefile is actually clean.
                oldclean.add(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result
1438 1439
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    '''Wrap scmutil.marktouched so that touched standins also refresh the
    corresponding largefiles in the working directory.
    '''
    result = orig(repo, files, *args, **kwargs)

    # Translate any touched standins into their largefile names.
    largefiles = [lf for lf in map(lfutil.splitstandin, files)
                  if lf is not None]
    if largefiles:
        lfcommands.updatelfiles(repo.ui, repo, filelist=largefiles,
                                printmessage=False, normallookup=True)

    return result
General Comments 0
You need to be logged in to leave comments. Login now