##// END OF EJS Templates
largefiles: omit redundant isstandin() before splitstandin()...
FUJIWARA Katsunori -
r31613:5c1d3f1b default
parent child Browse files
Show More
@@ -1,576 +1,576 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10 from __future__ import absolute_import
11 11
12 12 import errno
13 13 import hashlib
14 14 import os
15 15 import shutil
16 16
17 17 from mercurial.i18n import _
18 18
19 19 from mercurial import (
20 20 cmdutil,
21 21 commands,
22 22 context,
23 23 error,
24 24 hg,
25 25 lock,
26 26 match as matchmod,
27 27 node,
28 28 scmutil,
29 29 util,
30 30 )
31 31
32 32 from ..convert import (
33 33 convcmd,
34 34 filemap,
35 35 )
36 36
37 37 from . import (
38 38 lfutil,
39 39 storefactory
40 40 )
41 41
42 42 release = lock.release
43 43
44 44 # -- Commands ----------------------------------------------------------
45 45
46 46 cmdtable = {}
47 47 command = cmdutil.command(cmdtable)
48 48
49 49 @command('lfconvert',
50 50 [('s', 'size', '',
51 51 _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
52 52 ('', 'to-normal', False,
53 53 _('convert from a largefiles repo to a normal repo')),
54 54 ],
55 55 _('hg lfconvert SOURCE DEST [FILE ...]'),
56 56 norepo=True,
57 57 inferrepo=True)
58 58 def lfconvert(ui, src, dest, *pats, **opts):
59 59 '''convert a normal repository to a largefiles repository
60 60
61 61 Convert repository SOURCE to a new repository DEST, identical to
62 62 SOURCE except that certain files will be converted as largefiles:
63 63 specifically, any file that matches any PATTERN *or* whose size is
64 64 above the minimum size threshold is converted as a largefile. The
65 65 size used to determine whether or not to track a file as a
66 66 largefile is the size of the first version of the file. The
67 67 minimum size can be specified either with --size or in
68 68 configuration as ``largefiles.size``.
69 69
70 70 After running this command you will need to make sure that
71 71 largefiles is enabled anywhere you intend to push the new
72 72 repository.
73 73
74 74 Use --to-normal to convert largefiles back to normal files; after
75 75 this, the DEST repository can be used without largefiles at all.'''
76 76
77 77 if opts['to_normal']:
78 78 tolfile = False
79 79 else:
80 80 tolfile = True
81 81 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
82 82
83 83 if not hg.islocal(src):
84 84 raise error.Abort(_('%s is not a local Mercurial repo') % src)
85 85 if not hg.islocal(dest):
86 86 raise error.Abort(_('%s is not a local Mercurial repo') % dest)
87 87
88 88 rsrc = hg.repository(ui, src)
89 89 ui.status(_('initializing destination %s\n') % dest)
90 90 rdst = hg.repository(ui, dest, create=True)
91 91
92 92 success = False
93 93 dstwlock = dstlock = None
94 94 try:
95 95 # Get a list of all changesets in the source. The easy way to do this
96 96 # is to simply walk the changelog, using changelog.nodesbetween().
97 97 # Take a look at mercurial/revlog.py:639 for more details.
98 98 # Use a generator instead of a list to decrease memory usage
99 99 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
100 100 rsrc.heads())[0])
101 101 revmap = {node.nullid: node.nullid}
102 102 if tolfile:
103 103 # Lock destination to prevent modification while it is being converted to.
104 104 # Don't need to lock src because we are just reading from its
105 105 # history which can't change.
106 106 dstwlock = rdst.wlock()
107 107 dstlock = rdst.lock()
108 108
109 109 lfiles = set()
110 110 normalfiles = set()
111 111 if not pats:
112 112 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
113 113 if pats:
114 114 matcher = matchmod.match(rsrc.root, '', list(pats))
115 115 else:
116 116 matcher = None
117 117
118 118 lfiletohash = {}
119 119 for ctx in ctxs:
120 120 ui.progress(_('converting revisions'), ctx.rev(),
121 121 unit=_('revisions'), total=rsrc['tip'].rev())
122 122 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
123 123 lfiles, normalfiles, matcher, size, lfiletohash)
124 124 ui.progress(_('converting revisions'), None)
125 125
126 126 if rdst.wvfs.exists(lfutil.shortname):
127 127 rdst.wvfs.rmtree(lfutil.shortname)
128 128
129 129 for f in lfiletohash.keys():
130 130 if rdst.wvfs.isfile(f):
131 131 rdst.wvfs.unlink(f)
132 132 try:
133 133 rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
134 134 except OSError:
135 135 pass
136 136
137 137 # If there were any files converted to largefiles, add largefiles
138 138 # to the destination repository's requirements.
139 139 if lfiles:
140 140 rdst.requirements.add('largefiles')
141 141 rdst._writerequirements()
142 142 else:
143 143 class lfsource(filemap.filemap_source):
144 144 def __init__(self, ui, source):
145 145 super(lfsource, self).__init__(ui, source, None)
146 146 self.filemapper.rename[lfutil.shortname] = '.'
147 147
148 148 def getfile(self, name, rev):
149 149 realname, realrev = rev
150 150 f = super(lfsource, self).getfile(name, rev)
151 151
152 152 if (not realname.startswith(lfutil.shortnameslash)
153 153 or f[0] is None):
154 154 return f
155 155
156 156 # Substitute in the largefile data for the hash
157 157 hash = f[0].strip()
158 158 path = lfutil.findfile(rsrc, hash)
159 159
160 160 if path is None:
161 161 raise error.Abort(_("missing largefile for '%s' in %s")
162 162 % (realname, realrev))
163 163 return util.readfile(path), f[1]
164 164
165 165 class converter(convcmd.converter):
166 166 def __init__(self, ui, source, dest, revmapfile, opts):
167 167 src = lfsource(ui, source)
168 168
169 169 super(converter, self).__init__(ui, src, dest, revmapfile,
170 170 opts)
171 171
172 172 found, missing = downloadlfiles(ui, rsrc)
173 173 if missing != 0:
174 174 raise error.Abort(_("all largefiles must be present locally"))
175 175
176 176 orig = convcmd.converter
177 177 convcmd.converter = converter
178 178
179 179 try:
180 180 convcmd.convert(ui, src, dest)
181 181 finally:
182 182 convcmd.converter = orig
183 183 success = True
184 184 finally:
185 185 if tolfile:
186 186 rdst.dirstate.clear()
187 187 release(dstlock, dstwlock)
188 188 if not success:
189 189 # we failed, remove the new directory
190 190 shutil.rmtree(rdst.root)
191 191
192 192 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
193 193 matcher, size, lfiletohash):
194 194 # Convert src parents to dst parents
195 195 parents = _convertparents(ctx, revmap)
196 196
197 197 # Generate list of changed files
198 198 files = _getchangedfiles(ctx, parents)
199 199
200 200 dstfiles = []
201 201 for f in files:
202 202 if f not in lfiles and f not in normalfiles:
203 203 islfile = _islfile(f, ctx, matcher, size)
204 204 # If this file was renamed or copied then copy
205 205 # the largefile-ness of its predecessor
206 206 if f in ctx.manifest():
207 207 fctx = ctx.filectx(f)
208 208 renamed = fctx.renamed()
209 209 renamedlfile = renamed and renamed[0] in lfiles
210 210 islfile |= renamedlfile
211 211 if 'l' in fctx.flags():
212 212 if renamedlfile:
213 213 raise error.Abort(
214 214 _('renamed/copied largefile %s becomes symlink')
215 215 % f)
216 216 islfile = False
217 217 if islfile:
218 218 lfiles.add(f)
219 219 else:
220 220 normalfiles.add(f)
221 221
222 222 if f in lfiles:
223 223 dstfiles.append(lfutil.standin(f))
224 224 # largefile in manifest if it has not been removed/renamed
225 225 if f in ctx.manifest():
226 226 fctx = ctx.filectx(f)
227 227 if 'l' in fctx.flags():
228 228 renamed = fctx.renamed()
229 229 if renamed and renamed[0] in lfiles:
230 230 raise error.Abort(_('largefile %s becomes symlink') % f)
231 231
232 232 # largefile was modified, update standins
233 233 m = hashlib.sha1('')
234 234 m.update(ctx[f].data())
235 235 hash = m.hexdigest()
236 236 if f not in lfiletohash or lfiletohash[f] != hash:
237 237 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
238 238 executable = 'x' in ctx[f].flags()
239 239 lfutil.writestandin(rdst, lfutil.standin(f), hash,
240 240 executable)
241 241 lfiletohash[f] = hash
242 242 else:
243 243 # normal file
244 244 dstfiles.append(f)
245 245
246 246 def getfilectx(repo, memctx, f):
247 if lfutil.isstandin(f):
247 srcfname = lfutil.splitstandin(f)
248 if srcfname is not None:
248 249 # if the file isn't in the manifest then it was removed
249 250 # or renamed, return None to indicate this
250 srcfname = lfutil.splitstandin(f)
251 251 try:
252 252 fctx = ctx.filectx(srcfname)
253 253 except error.LookupError:
254 254 return None
255 255 renamed = fctx.renamed()
256 256 if renamed:
257 257 # standin is always a largefile because largefile-ness
258 258 # doesn't change after rename or copy
259 259 renamed = lfutil.standin(renamed[0])
260 260
261 261 return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
262 262 'l' in fctx.flags(), 'x' in fctx.flags(),
263 263 renamed)
264 264 else:
265 265 return _getnormalcontext(repo, ctx, f, revmap)
266 266
267 267 # Commit
268 268 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
269 269
270 270 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
271 271 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
272 272 getfilectx, ctx.user(), ctx.date(), ctx.extra())
273 273 ret = rdst.commitctx(mctx)
274 274 lfutil.copyalltostore(rdst, ret)
275 275 rdst.setparents(ret)
276 276 revmap[ctx.node()] = rdst.changelog.tip()
277 277
278 278 # Generate list of changed files
279 279 def _getchangedfiles(ctx, parents):
280 280 files = set(ctx.files())
281 281 if node.nullid not in parents:
282 282 mc = ctx.manifest()
283 283 mp1 = ctx.parents()[0].manifest()
284 284 mp2 = ctx.parents()[1].manifest()
285 285 files |= (set(mp1) | set(mp2)) - set(mc)
286 286 for f in mc:
287 287 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
288 288 files.add(f)
289 289 return files
290 290
291 291 # Convert src parents to dst parents
292 292 def _convertparents(ctx, revmap):
293 293 parents = []
294 294 for p in ctx.parents():
295 295 parents.append(revmap[p.node()])
296 296 while len(parents) < 2:
297 297 parents.append(node.nullid)
298 298 return parents
299 299
300 300 # Get memfilectx for a normal file
301 301 def _getnormalcontext(repo, ctx, f, revmap):
302 302 try:
303 303 fctx = ctx.filectx(f)
304 304 except error.LookupError:
305 305 return None
306 306 renamed = fctx.renamed()
307 307 if renamed:
308 308 renamed = renamed[0]
309 309
310 310 data = fctx.data()
311 311 if f == '.hgtags':
312 312 data = _converttags (repo.ui, revmap, data)
313 313 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
314 314 'x' in fctx.flags(), renamed)
315 315
316 316 # Remap tag data using a revision map
317 317 def _converttags(ui, revmap, data):
318 318 newdata = []
319 319 for line in data.splitlines():
320 320 try:
321 321 id, name = line.split(' ', 1)
322 322 except ValueError:
323 323 ui.warn(_('skipping incorrectly formatted tag %s\n')
324 324 % line)
325 325 continue
326 326 try:
327 327 newid = node.bin(id)
328 328 except TypeError:
329 329 ui.warn(_('skipping incorrectly formatted id %s\n')
330 330 % id)
331 331 continue
332 332 try:
333 333 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
334 334 name))
335 335 except KeyError:
336 336 ui.warn(_('no mapping for id %s\n') % id)
337 337 continue
338 338 return ''.join(newdata)
339 339
340 340 def _islfile(file, ctx, matcher, size):
341 341 '''Return true if file should be considered a largefile, i.e.
342 342 matcher matches it or it is larger than size.'''
343 343 # never store special .hg* files as largefiles
344 344 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
345 345 return False
346 346 if matcher and matcher(file):
347 347 return True
348 348 try:
349 349 return ctx.filectx(file).size() >= size * 1024 * 1024
350 350 except error.LookupError:
351 351 return False
352 352
353 353 def uploadlfiles(ui, rsrc, rdst, files):
354 354 '''upload largefiles to the central store'''
355 355
356 356 if not files:
357 357 return
358 358
359 359 store = storefactory.openstore(rsrc, rdst, put=True)
360 360
361 361 at = 0
362 362 ui.debug("sending statlfile command for %d largefiles\n" % len(files))
363 363 retval = store.exists(files)
364 364 files = filter(lambda h: not retval[h], files)
365 365 ui.debug("%d largefiles need to be uploaded\n" % len(files))
366 366
367 367 for hash in files:
368 368 ui.progress(_('uploading largefiles'), at, unit=_('files'),
369 369 total=len(files))
370 370 source = lfutil.findfile(rsrc, hash)
371 371 if not source:
372 372 raise error.Abort(_('largefile %s missing from store'
373 373 ' (needs to be uploaded)') % hash)
374 374 # XXX check for errors here
375 375 store.put(source, hash)
376 376 at += 1
377 377 ui.progress(_('uploading largefiles'), None)
378 378
379 379 def verifylfiles(ui, repo, all=False, contents=False):
380 380 '''Verify that every largefile revision in the current changeset
381 381 exists in the central store. With --contents, also verify that
382 382 the contents of each local largefile file revision are correct (SHA-1 hash
383 383 matches the revision ID). With --all, check every changeset in
384 384 this repository.'''
385 385 if all:
386 386 revs = repo.revs('all()')
387 387 else:
388 388 revs = ['.']
389 389
390 390 store = storefactory.openstore(repo)
391 391 return store.verify(revs, contents=contents)
392 392
393 393 def cachelfiles(ui, repo, node, filelist=None):
394 394 '''cachelfiles ensures that all largefiles needed by the specified revision
395 395 are present in the repository's largefile cache.
396 396
397 397 returns a tuple (cached, missing). cached is the list of files downloaded
398 398 by this operation; missing is the list of files that were needed but could
399 399 not be found.'''
400 400 lfiles = lfutil.listlfiles(repo, node)
401 401 if filelist:
402 402 lfiles = set(lfiles) & set(filelist)
403 403 toget = []
404 404
405 405 for lfile in lfiles:
406 406 try:
407 407 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
408 408 except IOError as err:
409 409 if err.errno == errno.ENOENT:
410 410 continue # node must be None and standin wasn't found in wctx
411 411 raise
412 412 if not lfutil.findfile(repo, expectedhash):
413 413 toget.append((lfile, expectedhash))
414 414
415 415 if toget:
416 416 store = storefactory.openstore(repo)
417 417 ret = store.get(toget)
418 418 return ret
419 419
420 420 return ([], [])
421 421
422 422 def downloadlfiles(ui, repo, rev=None):
423 423 matchfn = scmutil.match(repo[None],
424 424 [repo.wjoin(lfutil.shortname)], {})
425 425 def prepare(ctx, fns):
426 426 pass
427 427 totalsuccess = 0
428 428 totalmissing = 0
429 429 if rev != []: # walkchangerevs on empty list would return all revs
430 430 for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
431 431 prepare):
432 432 success, missing = cachelfiles(ui, repo, ctx.node())
433 433 totalsuccess += len(success)
434 434 totalmissing += len(missing)
435 435 ui.status(_("%d additional largefiles cached\n") % totalsuccess)
436 436 if totalmissing > 0:
437 437 ui.status(_("%d largefiles failed to download\n") % totalmissing)
438 438 return totalsuccess, totalmissing
439 439
440 440 def updatelfiles(ui, repo, filelist=None, printmessage=None,
441 441 normallookup=False):
442 442 '''Update largefiles according to standins in the working directory
443 443
444 444 If ``printmessage`` is other than ``None``, it means "print (or
445 445 ignore, for false) message forcibly".
446 446 '''
447 447 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
448 448 with repo.wlock():
449 449 lfdirstate = lfutil.openlfdirstate(ui, repo)
450 450 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
451 451
452 452 if filelist is not None:
453 453 filelist = set(filelist)
454 454 lfiles = [f for f in lfiles if f in filelist]
455 455
456 456 update = {}
457 457 updated, removed = 0, 0
458 458 wvfs = repo.wvfs
459 459 for lfile in lfiles:
460 460 rellfile = lfile
461 461 rellfileorig = os.path.relpath(
462 462 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
463 463 start=repo.root)
464 464 relstandin = lfutil.standin(lfile)
465 465 relstandinorig = os.path.relpath(
466 466 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
467 467 start=repo.root)
468 468 if wvfs.exists(relstandin):
469 469 if (wvfs.exists(relstandinorig) and
470 470 wvfs.exists(rellfile)):
471 471 shutil.copyfile(wvfs.join(rellfile),
472 472 wvfs.join(rellfileorig))
473 473 wvfs.unlinkpath(relstandinorig)
474 474 expecthash = lfutil.readstandin(repo, lfile)
475 475 if expecthash != '':
476 476 if lfile not in repo[None]: # not switched to normal file
477 477 wvfs.unlinkpath(rellfile, ignoremissing=True)
478 478 # use normallookup() to allocate an entry in largefiles
479 479 # dirstate to prevent lfilesrepo.status() from reporting
480 480 # missing files as removed.
481 481 lfdirstate.normallookup(lfile)
482 482 update[lfile] = expecthash
483 483 else:
484 484 # Remove lfiles for which the standin is deleted, unless the
485 485 # lfile is added to the repository again. This happens when a
486 486 # largefile is converted back to a normal file: the standin
487 487 # disappears, but a new (normal) file appears as the lfile.
488 488 if (wvfs.exists(rellfile) and
489 489 repo.dirstate.normalize(lfile) not in repo[None]):
490 490 wvfs.unlinkpath(rellfile)
491 491 removed += 1
492 492
493 493 # largefile processing might be slow and be interrupted - be prepared
494 494 lfdirstate.write()
495 495
496 496 if lfiles:
497 497 statuswriter(_('getting changed largefiles\n'))
498 498 cachelfiles(ui, repo, None, lfiles)
499 499
500 500 for lfile in lfiles:
501 501 update1 = 0
502 502
503 503 expecthash = update.get(lfile)
504 504 if expecthash:
505 505 if not lfutil.copyfromcache(repo, expecthash, lfile):
506 506 # failed ... but already removed and set to normallookup
507 507 continue
508 508 # Synchronize largefile dirstate to the last modified
509 509 # time of the file
510 510 lfdirstate.normal(lfile)
511 511 update1 = 1
512 512
513 513 # copy the exec mode of largefile standin from the repository's
514 514 # dirstate to its state in the lfdirstate.
515 515 rellfile = lfile
516 516 relstandin = lfutil.standin(lfile)
517 517 if wvfs.exists(relstandin):
518 518 # exec is decided by the user's permissions using mask 0o100
519 519 standinexec = wvfs.stat(relstandin).st_mode & 0o100
520 520 st = wvfs.stat(rellfile)
521 521 mode = st.st_mode
522 522 if standinexec != mode & 0o100:
523 523 # first remove all X bits, then shift all R bits to X
524 524 mode &= ~0o111
525 525 if standinexec:
526 526 mode |= (mode >> 2) & 0o111 & ~util.umask
527 527 wvfs.chmod(rellfile, mode)
528 528 update1 = 1
529 529
530 530 updated += update1
531 531
532 532 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
533 533
534 534 lfdirstate.write()
535 535 if lfiles:
536 536 statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
537 537 removed))
538 538
539 539 @command('lfpull',
540 540 [('r', 'rev', [], _('pull largefiles for these revisions'))
541 541 ] + commands.remoteopts,
542 542 _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
543 543 def lfpull(ui, repo, source="default", **opts):
544 544 """pull largefiles for the specified revisions from the specified source
545 545
546 546 Pull largefiles that are referenced from local changesets but missing
547 547 locally, pulling from a remote repository to the local cache.
548 548
549 549 If SOURCE is omitted, the 'default' path will be used.
550 550 See :hg:`help urls` for more information.
551 551
552 552 .. container:: verbose
553 553
554 554 Some examples:
555 555
556 556 - pull largefiles for all branch heads::
557 557
558 558 hg lfpull -r "head() and not closed()"
559 559
560 560 - pull largefiles on the default branch::
561 561
562 562 hg lfpull -r "branch(default)"
563 563 """
564 564 repo.lfpullsource = source
565 565
566 566 revs = opts.get('rev', [])
567 567 if not revs:
568 568 raise error.Abort(_('no revisions specified'))
569 569 revs = scmutil.revrange(repo, revs)
570 570
571 571 numcached = 0
572 572 for rev in revs:
573 573 ui.note(_('pulling largefiles for revision %s\n') % rev)
574 574 (cached, missing) = cachelfiles(ui, repo, rev)
575 575 numcached += len(cached)
576 576 ui.status(_("%d largefiles cached\n") % numcached)
@@ -1,667 +1,667 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 15 import platform
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 encoding,
23 23 error,
24 24 httpconnection,
25 25 match as matchmod,
26 26 node,
27 27 pycompat,
28 28 scmutil,
29 29 util,
30 30 vfs as vfsmod,
31 31 )
32 32
33 33 shortname = '.hglf'
34 34 shortnameslash = shortname + '/'
35 35 longname = 'largefiles'
36 36
37 37 # -- Private worker functions ------------------------------------------
38 38
39 39 def getminsize(ui, assumelfiles, opt, default=10):
40 40 lfsize = opt
41 41 if not lfsize and assumelfiles:
42 42 lfsize = ui.config(longname, 'minsize', default=default)
43 43 if lfsize:
44 44 try:
45 45 lfsize = float(lfsize)
46 46 except ValueError:
47 47 raise error.Abort(_('largefiles: size must be number (not %s)\n')
48 48 % lfsize)
49 49 if lfsize is None:
50 50 raise error.Abort(_('minimum size for largefiles must be specified'))
51 51 return lfsize
52 52
53 53 def link(src, dest):
54 54 """Try to create hardlink - if that fails, efficiently make a copy."""
55 55 util.makedirs(os.path.dirname(dest))
56 56 try:
57 57 util.oslink(src, dest)
58 58 except OSError:
59 59 # if hardlinks fail, fallback on atomic copy
60 60 with open(src, 'rb') as srcf:
61 61 with util.atomictempfile(dest) as dstf:
62 62 for chunk in util.filechunkiter(srcf):
63 63 dstf.write(chunk)
64 64 os.chmod(dest, os.stat(src).st_mode)
65 65
66 66 def usercachepath(ui, hash):
67 67 '''Return the correct location in the "global" largefiles cache for a file
68 68 with the given hash.
69 69 This cache is used for sharing of largefiles across repositories - both
70 70 to preserve download bandwidth and storage space.'''
71 71 return os.path.join(_usercachedir(ui), hash)
72 72
73 73 def _usercachedir(ui):
74 74 '''Return the location of the "global" largefiles cache.'''
75 75 path = ui.configpath(longname, 'usercache', None)
76 76 if path:
77 77 return path
78 78 if pycompat.osname == 'nt':
79 79 appdata = encoding.environ.get('LOCALAPPDATA',\
80 80 encoding.environ.get('APPDATA'))
81 81 if appdata:
82 82 return os.path.join(appdata, longname)
83 83 elif platform.system() == 'Darwin':
84 84 home = encoding.environ.get('HOME')
85 85 if home:
86 86 return os.path.join(home, 'Library', 'Caches', longname)
87 87 elif pycompat.osname == 'posix':
88 88 path = encoding.environ.get('XDG_CACHE_HOME')
89 89 if path:
90 90 return os.path.join(path, longname)
91 91 home = encoding.environ.get('HOME')
92 92 if home:
93 93 return os.path.join(home, '.cache', longname)
94 94 else:
95 95 raise error.Abort(_('unknown operating system: %s\n')
96 96 % pycompat.osname)
97 97 raise error.Abort(_('unknown %s usercache location') % longname)
98 98
99 99 def inusercache(ui, hash):
100 100 path = usercachepath(ui, hash)
101 101 return os.path.exists(path)
102 102
103 103 def findfile(repo, hash):
104 104 '''Return store path of the largefile with the specified hash.
105 105 As a side effect, the file might be linked from user cache.
106 106 Return None if the file can't be found locally.'''
107 107 path, exists = findstorepath(repo, hash)
108 108 if exists:
109 109 repo.ui.note(_('found %s in store\n') % hash)
110 110 return path
111 111 elif inusercache(repo.ui, hash):
112 112 repo.ui.note(_('found %s in system cache\n') % hash)
113 113 path = storepath(repo, hash)
114 114 link(usercachepath(repo.ui, hash), path)
115 115 return path
116 116 return None
117 117
118 118 class largefilesdirstate(dirstate.dirstate):
119 119 def __getitem__(self, key):
120 120 return super(largefilesdirstate, self).__getitem__(unixpath(key))
121 121 def normal(self, f):
122 122 return super(largefilesdirstate, self).normal(unixpath(f))
123 123 def remove(self, f):
124 124 return super(largefilesdirstate, self).remove(unixpath(f))
125 125 def add(self, f):
126 126 return super(largefilesdirstate, self).add(unixpath(f))
127 127 def drop(self, f):
128 128 return super(largefilesdirstate, self).drop(unixpath(f))
129 129 def forget(self, f):
130 130 return super(largefilesdirstate, self).forget(unixpath(f))
131 131 def normallookup(self, f):
132 132 return super(largefilesdirstate, self).normallookup(unixpath(f))
133 133 def _ignore(self, f):
134 134 return False
135 135 def write(self, tr=False):
136 136 # (1) disable PENDING mode always
137 137 # (lfdirstate isn't yet managed as a part of the transaction)
138 138 # (2) avoid develwarn 'use dirstate.write with ....'
139 139 super(largefilesdirstate, self).write(None)
140 140
141 141 def openlfdirstate(ui, repo, create=True):
142 142 '''
143 143 Return a dirstate object that tracks largefiles: i.e. its root is
144 144 the repo root, but it is saved in .hg/largefiles/dirstate.
145 145 '''
146 146 vfs = repo.vfs
147 147 lfstoredir = longname
148 148 opener = vfsmod.vfs(vfs.join(lfstoredir))
149 149 lfdirstate = largefilesdirstate(opener, ui, repo.root,
150 150 repo.dirstate._validate)
151 151
152 152 # If the largefiles dirstate does not exist, populate and create
153 153 # it. This ensures that we create it on the first meaningful
154 154 # largefiles operation in a new clone.
155 155 if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
156 156 matcher = getstandinmatcher(repo)
157 157 standins = repo.dirstate.walk(matcher, [], False, False)
158 158
159 159 if len(standins) > 0:
160 160 vfs.makedirs(lfstoredir)
161 161
162 162 for standin in standins:
163 163 lfile = splitstandin(standin)
164 164 lfdirstate.normallookup(lfile)
165 165 return lfdirstate
166 166
167 167 def lfdirstatestatus(lfdirstate, repo):
168 168 wctx = repo['.']
169 169 match = matchmod.always(repo.root, repo.getcwd())
170 170 unsure, s = lfdirstate.status(match, [], False, False, False)
171 171 modified, clean = s.modified, s.clean
172 172 for lfile in unsure:
173 173 try:
174 174 fctx = wctx[standin(lfile)]
175 175 except LookupError:
176 176 fctx = None
177 177 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
178 178 modified.append(lfile)
179 179 else:
180 180 clean.append(lfile)
181 181 lfdirstate.normal(lfile)
182 182 return s
183 183
184 184 def listlfiles(repo, rev=None, matcher=None):
185 185 '''return a list of largefiles in the working copy or the
186 186 specified changeset'''
187 187
188 188 if matcher is None:
189 189 matcher = getstandinmatcher(repo)
190 190
191 191 # ignore unknown files in working directory
192 192 return [splitstandin(f)
193 193 for f in repo[rev].walk(matcher)
194 194 if rev is not None or repo.dirstate[f] != '?']
195 195
196 196 def instore(repo, hash, forcelocal=False):
197 197 '''Return true if a largefile with the given hash exists in the store'''
198 198 return os.path.exists(storepath(repo, hash, forcelocal))
199 199
200 200 def storepath(repo, hash, forcelocal=False):
201 201 '''Return the correct location in the repository largefiles store for a
202 202 file with the given hash.'''
203 203 if not forcelocal and repo.shared():
204 204 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
205 205 return repo.vfs.join(longname, hash)
206 206
207 207 def findstorepath(repo, hash):
208 208 '''Search through the local store path(s) to find the file for the given
209 209 hash. If the file is not found, its path in the primary store is returned.
210 210 The return value is a tuple of (path, exists(path)).
211 211 '''
212 212 # For shared repos, the primary store is in the share source. But for
213 213 # backward compatibility, force a lookup in the local store if it wasn't
214 214 # found in the share source.
215 215 path = storepath(repo, hash, False)
216 216
217 217 if instore(repo, hash):
218 218 return (path, True)
219 219 elif repo.shared() and instore(repo, hash, True):
220 220 return storepath(repo, hash, True), True
221 221
222 222 return (path, False)
223 223
224 224 def copyfromcache(repo, hash, filename):
225 225 '''Copy the specified largefile from the repo or system cache to
226 226 filename in the repository. Return true on success or false if the
227 227 file was not found in either cache (which should not happen:
228 228 this is meant to be called only after ensuring that the needed
229 229 largefile exists in the cache).'''
230 230 wvfs = repo.wvfs
231 231 path = findfile(repo, hash)
232 232 if path is None:
233 233 return False
234 234 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
235 235 # The write may fail before the file is fully written, but we
236 236 # don't use atomic writes in the working copy.
237 237 with open(path, 'rb') as srcfd:
238 238 with wvfs(filename, 'wb') as destfd:
239 239 gothash = copyandhash(
240 240 util.filechunkiter(srcfd), destfd)
241 241 if gothash != hash:
242 242 repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
243 243 % (filename, path, gothash))
244 244 wvfs.unlink(filename)
245 245 return False
246 246 return True
247 247
248 248 def copytostore(repo, rev, file, uploaded=False):
249 249 wvfs = repo.wvfs
250 250 hash = readstandin(repo, file, rev)
251 251 if instore(repo, hash):
252 252 return
253 253 if wvfs.exists(file):
254 254 copytostoreabsolute(repo, wvfs.join(file), hash)
255 255 else:
256 256 repo.ui.warn(_("%s: largefile %s not available from local store\n") %
257 257 (file, hash))
258 258
259 259 def copyalltostore(repo, node):
260 260 '''Copy all largefiles in a given revision to the store'''
261 261
262 262 ctx = repo[node]
263 263 for filename in ctx.files():
264 if isstandin(filename) and filename in ctx.manifest():
265 realfile = splitstandin(filename)
264 realfile = splitstandin(filename)
265 if realfile is not None and filename in ctx.manifest():
266 266 copytostore(repo, ctx.node(), realfile)
267 267
268 268 def copytostoreabsolute(repo, file, hash):
269 269 if inusercache(repo.ui, hash):
270 270 link(usercachepath(repo.ui, hash), storepath(repo, hash))
271 271 else:
272 272 util.makedirs(os.path.dirname(storepath(repo, hash)))
273 273 with open(file, 'rb') as srcf:
274 274 with util.atomictempfile(storepath(repo, hash),
275 275 createmode=repo.store.createmode) as dstf:
276 276 for chunk in util.filechunkiter(srcf):
277 277 dstf.write(chunk)
278 278 linktousercache(repo, hash)
279 279
280 280 def linktousercache(repo, hash):
281 281 '''Link / copy the largefile with the specified hash from the store
282 282 to the cache.'''
283 283 path = usercachepath(repo.ui, hash)
284 284 link(storepath(repo, hash), path)
285 285
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # translate the user's patterns into patterns rooted at .hglf/
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
305 305
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # f must both be a standin and, stripped of the .hglf/ prefix,
        # match the user's original patterns
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
317 317
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Callers needing an absolute path must wrap this with repo.wjoin();
    # addlargefiles, for instance, wants it repo-relative for repo[None].add().
    # The '/' separator matches what dirstate uses on every platform
    # (including Windows), so normalize externally-supplied names first.
    normalized = util.pconvert(filename)
    return shortnameslash + normalized
329 329
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortnameslash
    return filename[:len(prefix)] == prefix
334 334
def splitstandin(filename):
    '''Return the largefile name for the given standin path, or None when
    the path is not a standin.

    Uses '/' because that is what dirstate always uses, even on Windows;
    any local separators are converted first in case the name came from an
    external source such as the command line.
    '''
    head, sep, tail = util.pconvert(filename).partition('/')
    if sep and head == shortname:
        return tail
    return None
344 344
def updatestandin(repo, standin):
    '''Re-read the largefile named by ``standin`` from the working
    directory and rewrite the standin with its current hash and
    executable bit.

    Raises Abort when the largefile itself is missing from the working
    directory.
    '''
    # Resolve the largefile name once; the original called splitstandin()
    # three times (join, existence check, error message) for the same value.
    lfile = splitstandin(standin)
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % lfile)
353 353
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # NOTE(review): propagates the filectx lookup error when the standin
    # does not exist at that node (getstandinsstate catches IOError) --
    # callers must be prepared for that.
    return repo[node][standin(filename)].data().strip()
358 358
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # flags argument: 'x' marks the standin executable, '' otherwise
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
362 362
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = hashlib.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
371 371
def hashrepofile(repo, file):
    '''Return the hex SHA-1 of repo-relative ``file`` in the working
    directory, or '' when the file does not exist (see hashfile).'''
    return hashfile(repo.wjoin(file))
374 374
def hashfile(file):
    '''Return the hex SHA-1 digest of the given file's contents, or the
    empty string when the file cannot be opened (e.g. it does not exist).
    '''
    # EAFP: open directly instead of os.path.exists() + open(), which
    # raced if the file disappeared between the check and the read.
    # os.path.exists() also returned False on stat errors, so mapping any
    # open failure to '' preserves the original's effective behavior.
    try:
        fd = open(file, 'rb')
    except EnvironmentError:
        return ''
    with fd:
        hasher = hashlib.sha1('')
        for data in util.filechunkiter(fd):
            hasher.update(data)
        return hasher.hexdigest()
383 383
def getexecutable(filename):
    '''Return truthy iff ``filename`` is executable by user, group AND
    other (all three x permission bits set).'''
    # NOTE(review): relies on the `stat` module being imported at the top
    # of this file (import block not visible in this chunk) -- confirm.
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
389 389
def urljoin(first, second, *arg):
    '''Join URL fragments so that exactly one '/' separates each pair.

    A missing trailing slash on the left part is added; a leading slash
    on the right part is dropped.  Equivalent to folding the two-argument
    join over ``first``, ``second`` and any extra fragments in order.
    '''
    def _join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = first
    for piece in (second,) + arg:
        url = _join(url, piece)
    return url
402 402
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    # stream in chunks so arbitrarily large inputs do not load into memory
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
410 410
def httpsendfile(ui, filename):
    '''Wrap ``filename`` for streaming upload over HTTP.'''
    # NOTE(review): `httpconnection` is presumably mercurial's
    # httpconnection module imported at file top (not visible here) --
    # confirm.
    return httpconnection.httpsendfile(ui, filename, 'rb')
413 413
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses '..'/'.' segments; pconvert forces '/' separators
    return util.pconvert(os.path.normpath(path))
417 417
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    # Fast path: requirement flag plus at least one stored standin file.
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # Otherwise fall back to checking for any entry in the lfdirstate.
    return any(openlfdirstate(repo.ui, repo, False))
425 425
class storeprotonotcapable(Exception):
    '''Raised when no store supports any of the required protocols.

    ``storetypes`` carries the protocol names that were attempted.
    '''
    def __init__(self, storetypes):
        self.storetypes = storetypes
429 429
def getstandinsstate(repo):
    '''Return [(lfile, hash), ...] for every standin tracked in dirstate.

    ``hash`` is None when the standin cannot be read (e.g. deleted from
    the working directory).
    '''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
441 441
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Propagate the dirstate status of ``lfile``'s standin into the
    largefiles dirstate.

    ``normallookup`` forces a 'normallookup' (re-check on next status)
    even when the standin is clean.
    '''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        # dirstate tuple layout: (state, mode, size, mtime)
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
464 464
def markcommitted(orig, ctx, node):
    '''Post-commit hook: sync largefile dirstate entries for committed
    files and copy their largefiles into the local store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        # splitstandin() returns None for non-standins, filtering and
        # name-splitting in a single call
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
488 488
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the names of largefiles whose (name, hash) entries differ
    between the two standin lists, deduplicated in first-seen order.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    # Track seen names in a set: the original tested `f[0] not in filelist`,
    # an O(n) list scan per entry that made the whole loop quadratic.
    filelist = []
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
496 496
def getlfilestoupload(repo, missing, addfunc):
    '''For every outgoing revision in ``missing``, call
    ``addfunc(standin, hash)`` for each standin changed in it.'''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        # read the changectx with largefile decoration disabled so we see
        # the raw standins
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, ctx.files() is unreliable: include anything that
            # differs from either parent manifest
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
528 528
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if repo.wvfs.exists(standin(lfile)):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        # skip standins whose largefile is marked removed
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            # largefile names themselves must not match; their standins do
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
630 630
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        # True only until the first __call__ after a resume
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match
651 651
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    # _largefilesenabled guards against repos where the extension never
    # installed _lfstatuswriters
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,1441 +1,1451 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial import (
18 18 archival,
19 19 cmdutil,
20 20 error,
21 21 hg,
22 22 match as matchmod,
23 23 pathutil,
24 24 registrar,
25 25 scmutil,
26 26 smartset,
27 27 util,
28 28 )
29 29
30 30 from . import (
31 31 lfcommands,
32 32 lfutil,
33 33 storefactory,
34 34 )
35 35
36 36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
37 37
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    # a file is "large" iff its standin is tracked in the manifest
    lfile = lambda f: lfutil.standin(f) in manifest
    # NOTE(review): relies on Python 2 filter() returning a list -- confirm
    # if this code is ever run under Python 3.
    m._files = filter(lfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m
49 49
def composenormalfilematcher(match, manifest, exclude=None):
    '''Create a matcher that rejects standins, largefiles, and anything in
    ``exclude``, passing only "normal" files through to ``match``.'''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
            manifest or f in excluded)
    # NOTE(review): relies on Python 2 filter() returning a list -- confirm
    # if this code is ever run under Python 3.
    m._files = filter(notlfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m
64 64
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        if opts is None:
            opts = {}
        # oldmatch is bound by the installmatchfn() call below before
        # overridematch can ever be invoked
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)
74 74
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    oldmatch = scmutil.match
    # stash the previous function on the wrapper so restorematchfn can
    # unwind one level of nesting
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch
82 82
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Pass the current function as the getattr default so this really is
    # a no-op (instead of raising AttributeError) when scmutil.match was
    # never wrapped -- matching restorematchandpatsfn's behavior.
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
90 90
def installmatchandpatsfn(f):
    '''Monkey patch scmutil.matchandpats with ``f``; see installmatchfn.
    Returns the previous function so it can be restored.'''
    oldmatchandpats = scmutil.matchandpats
    # stash the previous function on the wrapper for unwinding
    setattr(f, 'oldmatchandpats', oldmatchandpats)
    scmutil.matchandpats = f
    return oldmatchandpats
96 96
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    # the default makes this safe to call when nothing was installed
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
                                   scmutil.matchandpats)
106 106
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Add files matched by ``matcher`` as largefiles when they qualify
    (--large, size threshold, or configured patterns).

    Returns ``(added, bad)`` lists of largefile names.
    '''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = matchmod.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # the real hash is filled in later by the commit machinery
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    return added, bad
178 178
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Remove largefiles matched by ``matcher`` (and their standins),
    mirroring core remove semantics (--after, forced removal warnings).

    Returns nonzero when any file could not be removed.
    '''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only files whose standins are tracked
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if opts.get('dry_run'):
            return result

        # from here on `remove` holds standin names, not largefile names
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()

    return result
246 246
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    '''Map a standin path back to the largefile name it stands for;
    return the path unchanged when it is not a standin.'''
    return lfutil.splitstandin(path) or path
251 251
252 252 # -- Wrappers: modify existing commands --------------------------------
253 253
def overrideadd(orig, ui, repo, *pats, **opts):
    '''Reject the mutually exclusive --normal/--large combination, then
    defer to the wrapped add command.'''
    conflicting = opts.get('normal') and opts.get('large')
    if conflicting:
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
258 258
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    '''Add largefiles matched by ``matcher``, then run the wrapped add for
    the remaining normal files.

    Returns the combined list of files that could not be added.
    '''
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    # extend directly: wrapping lbad in `(f for f in lbad)` added nothing
    bad.extend(lbad)
    return bad
271 271
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    '''Remove normal files via the wrapped remove, then remove matching
    largefiles; nonzero result from either is reported.'''
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    return removelargefiles(ui, repo, False, matcher, after=after,
                            force=force) or result
277 277
def overridestatusfn(orig, repo, rev2, **opts):
    '''Run subrepo status with largefile decoration enabled.'''
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        # always restore the flag, even when orig raises
        repo._repo.lfstatus = False
284 284
def overridestatus(orig, ui, repo, *pats, **opts):
    '''Run status with largefile decoration enabled.'''
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        # always restore the flag, even when orig raises
        repo.lfstatus = False
291 291
def overridedirty(orig, repo, ignoreupdate=False):
    '''Run subrepo dirty-check with largefile decoration enabled.'''
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        # always restore the flag, even when orig raises
        repo._repo.lfstatus = False
298 298
def overridelog(orig, ui, repo, *pats, **opts):
    '''Run log so that largefile names match both their own paths and
    their standins, by temporarily monkey-patching scmutil.matchandpats
    and cmdutil._makenofollowlogfilematcher.'''
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
                             default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # 'set:' filesets are left alone; 'kind:pat' patterns keep
            # their kind prefix
            if pat.startswith('set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # accept a standin whenever its largefile would have matched
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # unwind both monkey-patches even when log fails
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
407 407
def overrideverify(orig, ui, repo, *pats, **opts):
    '''Run core verify; additionally verify largefiles when --large,
    --lfa (all revisions) or --lfc (contents) is given.'''
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result
417 417
def overridedebugstate(orig, ui, repo, *pats, **opts):
    '''With --large, dump the largefiles dirstate instead of the normal
    one by handing orig a minimal stand-in repo object.'''
    large = opts.pop('large', False)
    if large:
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
426 426
# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    '''Report no collision when the "unknown" file is actually a tracked
    largefile (its standin is in the working context).'''
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
441 441
442 442 # The manifest merge handles conflicts on the manifest level. We want
443 443 # to handle changes in largefile-ness of files at this level too.
444 444 #
445 445 # The strategy is to run the original calculateupdates and then process
446 446 # the action list it outputs. There are two cases we need to deal with:
447 447 #
448 448 # 1. Normal file in p1, largefile in p2. Here the largefile is
449 449 # detected via its standin file, which will enter the working copy
450 450 # with a "get" action. It is not "merge" since the standin is all
451 451 # Mercurial is concerned with at this level -- the link to the
452 452 # existing normal file is not relevant here.
453 453 #
454 454 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
455 455 # since the largefile will be present in the working copy and
456 456 # different from the normal file in p2. Mercurial therefore
457 457 # triggers a merge action.
458 458 #
459 459 # In both cases, we prompt the user and emit new actions to either
460 460 # remove the standin (if the normal file was kept) or to remove the
461 461 # normal file and get the standin (if the largefile was kept). The
462 462 # default prompt answer is to use the largefile version since it was
463 463 # presumably changed on purpose.
464 464 #
465 465 # Finally, the merge.applyupdates function will then take care of
466 466 # writing the files into the working copy and lfcommands.updatelfiles
467 467 # will update the largefiles.
468 468 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
469 469 acceptremote, *args, **kwargs):
470 470 overwrite = force and not branchmerge
471 471 actions, diverge, renamedelete = origfn(
472 472 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
473 473
474 474 if overwrite:
475 475 return actions, diverge, renamedelete
476 476
477 477 # Convert to dictionary with filename as key and action as value.
478 478 lfiles = set()
479 479 for f in actions:
480 480 splitstandin = lfutil.splitstandin(f)
481 481 if splitstandin in p1:
482 482 lfiles.add(splitstandin)
483 483 elif lfutil.standin(f) in p1:
484 484 lfiles.add(f)
485 485
486 486 for lfile in sorted(lfiles):
487 487 standin = lfutil.standin(lfile)
488 488 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
489 489 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
490 490 if sm in ('g', 'dc') and lm != 'r':
491 491 if sm == 'dc':
492 492 f1, f2, fa, move, anc = sargs
493 493 sargs = (p2[f2].flags(), False)
494 494 # Case 1: normal file in the working copy, largefile in
495 495 # the second parent
496 496 usermsg = _('remote turned local normal file %s into a largefile\n'
497 497 'use (l)argefile or keep (n)ormal file?'
498 498 '$$ &Largefile $$ &Normal file') % lfile
499 499 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
500 500 actions[lfile] = ('r', None, 'replaced by standin')
501 501 actions[standin] = ('g', sargs, 'replaces standin')
502 502 else: # keep local normal file
503 503 actions[lfile] = ('k', None, 'replaces standin')
504 504 if branchmerge:
505 505 actions[standin] = ('k', None, 'replaced by non-standin')
506 506 else:
507 507 actions[standin] = ('r', None, 'replaced by non-standin')
508 508 elif lm in ('g', 'dc') and sm != 'r':
509 509 if lm == 'dc':
510 510 f1, f2, fa, move, anc = largs
511 511 largs = (p2[f2].flags(), False)
512 512 # Case 2: largefile in the working copy, normal file in
513 513 # the second parent
514 514 usermsg = _('remote turned local largefile %s into a normal file\n'
515 515 'keep (l)argefile or use (n)ormal file?'
516 516 '$$ &Largefile $$ &Normal file') % lfile
517 517 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
518 518 if branchmerge:
519 519 # largefile can be restored from standin safely
520 520 actions[lfile] = ('k', None, 'replaced by standin')
521 521 actions[standin] = ('k', None, 'replaces standin')
522 522 else:
523 523 # "lfile" should be marked as "removed" without
524 524 # removal of itself
525 525 actions[lfile] = ('lfmr', None,
526 526 'forget non-standin largefile')
527 527
528 528 # linear-merge should treat this largefile as 're-added'
529 529 actions[standin] = ('a', None, 'keep standin')
530 530 else: # pick remote normal file
531 531 actions[lfile] = ('g', largs, 'replaces standin')
532 532 actions[standin] = ('r', None, 'replaced by non-standin')
533 533
534 534 return actions, diverge, renamedelete
535 535
536 536 def mergerecordupdates(orig, repo, actions, branchmerge):
537 537 if 'lfmr' in actions:
538 538 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
539 539 for lfile, args, msg in actions['lfmr']:
540 540 # this should be executed before 'orig', to execute 'remove'
541 541 # before all other actions
542 542 repo.dirstate.remove(lfile)
543 543 # make sure lfile doesn't get synclfdirstate'd as normal
544 544 lfdirstate.add(lfile)
545 545 lfdirstate.write()
546 546
547 547 return orig(repo, actions, branchmerge)
548 548
549 549 # Override filemerge to prompt the user about how they wish to merge
550 550 # largefiles. This will handle identical edits without prompting the user.
551 551 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
552 552 labels=None):
553 553 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
554 554 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
555 555 labels=labels)
556 556
557 557 ahash = fca.data().strip().lower()
558 558 dhash = fcd.data().strip().lower()
559 559 ohash = fco.data().strip().lower()
560 560 if (ohash != ahash and
561 561 ohash != dhash and
562 562 (dhash == ahash or
563 563 repo.ui.promptchoice(
564 564 _('largefile %s has a merge conflict\nancestor was %s\n'
565 565 'keep (l)ocal %s or\ntake (o)ther %s?'
566 566 '$$ &Local $$ &Other') %
567 567 (lfutil.splitstandin(orig), ahash, dhash, ohash),
568 568 0) == 1)):
569 569 repo.wwrite(fcd.path(), fco.data(), fco.flags())
570 570 return True, 0, False
571 571
572 572 def copiespathcopies(orig, ctx1, ctx2, match=None):
573 573 copies = orig(ctx1, ctx2, match=match)
574 574 updated = {}
575 575
576 576 for k, v in copies.iteritems():
577 577 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
578 578
579 579 return updated
580 580
581 581 # Copy first changes the matchers to match standins instead of
582 582 # largefiles. Then it overrides util.copyfile in that function it
583 583 # checks if the destination largefile already exists. It also keeps a
584 584 # list of copied files so that the largefiles can be copied and the
585 585 # dirstate updated.
586 586 def overridecopy(orig, ui, repo, pats, opts, rename=False):
587 587 # doesn't remove largefile on rename
588 588 if len(pats) < 2:
589 589 # this isn't legal, let the original function deal with it
590 590 return orig(ui, repo, pats, opts, rename)
591 591
592 592 # This could copy both lfiles and normal files in one command,
593 593 # but we don't want to do that. First replace their matcher to
594 594 # only match normal files and run it, then replace it to just
595 595 # match largefiles and run it again.
596 596 nonormalfiles = False
597 597 nolfiles = False
598 598 installnormalfilesmatchfn(repo[None].manifest())
599 599 try:
600 600 result = orig(ui, repo, pats, opts, rename)
601 601 except error.Abort as e:
602 602 if str(e) != _('no files to copy'):
603 603 raise e
604 604 else:
605 605 nonormalfiles = True
606 606 result = 0
607 607 finally:
608 608 restorematchfn()
609 609
610 610 # The first rename can cause our current working directory to be removed.
611 611 # In that case there is nothing left to copy/rename so just quit.
612 612 try:
613 613 repo.getcwd()
614 614 except OSError:
615 615 return result
616 616
617 617 def makestandin(relpath):
618 618 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
619 619 return repo.wvfs.join(lfutil.standin(path))
620 620
621 621 fullpats = scmutil.expandpats(pats)
622 622 dest = fullpats[-1]
623 623
624 624 if os.path.isdir(dest):
625 625 if not os.path.isdir(makestandin(dest)):
626 626 os.makedirs(makestandin(dest))
627 627
628 628 try:
629 629 # When we call orig below it creates the standins but we don't add
630 630 # them to the dir state until later so lock during that time.
631 631 wlock = repo.wlock()
632 632
633 633 manifest = repo[None].manifest()
634 634 def overridematch(ctx, pats=(), opts=None, globbed=False,
635 635 default='relpath', badfn=None):
636 636 if opts is None:
637 637 opts = {}
638 638 newpats = []
639 639 # The patterns were previously mangled to add the standin
640 640 # directory; we need to remove that now
641 641 for pat in pats:
642 642 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
643 643 newpats.append(pat.replace(lfutil.shortname, ''))
644 644 else:
645 645 newpats.append(pat)
646 646 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
647 647 m = copy.copy(match)
648 648 lfile = lambda f: lfutil.standin(f) in manifest
649 649 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
650 650 m._fileroots = set(m._files)
651 651 origmatchfn = m.matchfn
652 m.matchfn = lambda f: (lfutil.isstandin(f) and
653 (f in manifest) and
654 origmatchfn(lfutil.splitstandin(f)) or
655 None)
652 def matchfn(f):
653 lfile = lfutil.splitstandin(f)
654 return (lfile is not None and
655 (f in manifest) and
656 origmatchfn(lfile) or
657 None)
658 m.matchfn = matchfn
656 659 return m
657 660 oldmatch = installmatchfn(overridematch)
658 661 listpats = []
659 662 for pat in pats:
660 663 if matchmod.patkind(pat) is not None:
661 664 listpats.append(pat)
662 665 else:
663 666 listpats.append(makestandin(pat))
664 667
665 668 try:
666 669 origcopyfile = util.copyfile
667 670 copiedfiles = []
668 671 def overridecopyfile(src, dest):
669 672 if (lfutil.shortname in src and
670 673 dest.startswith(repo.wjoin(lfutil.shortname))):
671 674 destlfile = dest.replace(lfutil.shortname, '')
672 675 if not opts['force'] and os.path.exists(destlfile):
673 676 raise IOError('',
674 677 _('destination largefile already exists'))
675 678 copiedfiles.append((src, dest))
676 679 origcopyfile(src, dest)
677 680
678 681 util.copyfile = overridecopyfile
679 682 result += orig(ui, repo, listpats, opts, rename)
680 683 finally:
681 684 util.copyfile = origcopyfile
682 685
683 686 lfdirstate = lfutil.openlfdirstate(ui, repo)
684 687 for (src, dest) in copiedfiles:
685 688 if (lfutil.shortname in src and
686 689 dest.startswith(repo.wjoin(lfutil.shortname))):
687 690 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
688 691 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
689 692 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
690 693 if not os.path.isdir(destlfiledir):
691 694 os.makedirs(destlfiledir)
692 695 if rename:
693 696 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
694 697
695 698 # The file is gone, but this deletes any empty parent
696 699 # directories as a side-effect.
697 700 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
698 701 lfdirstate.remove(srclfile)
699 702 else:
700 703 util.copyfile(repo.wjoin(srclfile),
701 704 repo.wjoin(destlfile))
702 705
703 706 lfdirstate.add(destlfile)
704 707 lfdirstate.write()
705 708 except error.Abort as e:
706 709 if str(e) != _('no files to copy'):
707 710 raise e
708 711 else:
709 712 nolfiles = True
710 713 finally:
711 714 restorematchfn()
712 715 wlock.release()
713 716
714 717 if nolfiles and nonormalfiles:
715 718 raise error.Abort(_('no files to copy'))
716 719
717 720 return result
718 721
719 722 # When the user calls revert, we have to be careful to not revert any
720 723 # changes to other largefiles accidentally. This means we have to keep
721 724 # track of the largefiles that are being reverted so we only pull down
722 725 # the necessary largefiles.
723 726 #
724 727 # Standins are only updated (to match the hash of largefiles) before
725 728 # commits. Update the standins then run the original revert, changing
726 729 # the matcher to hit standins instead of largefiles. Based on the
727 730 # resulting standins update the largefiles.
728 731 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
729 732 # Because we put the standins in a bad state (by updating them)
730 733 # and then return them to a correct state we need to lock to
731 734 # prevent others from changing them in their incorrect state.
732 735 with repo.wlock():
733 736 lfdirstate = lfutil.openlfdirstate(ui, repo)
734 737 s = lfutil.lfdirstatestatus(lfdirstate, repo)
735 738 lfdirstate.write()
736 739 for lfile in s.modified:
737 740 lfutil.updatestandin(repo, lfutil.standin(lfile))
738 741 for lfile in s.deleted:
739 742 if (repo.wvfs.exists(lfutil.standin(lfile))):
740 743 repo.wvfs.unlink(lfutil.standin(lfile))
741 744
742 745 oldstandins = lfutil.getstandinsstate(repo)
743 746
744 747 def overridematch(mctx, pats=(), opts=None, globbed=False,
745 748 default='relpath', badfn=None):
746 749 if opts is None:
747 750 opts = {}
748 751 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
749 752 m = copy.copy(match)
750 753
751 754 # revert supports recursing into subrepos, and though largefiles
752 755 # currently doesn't work correctly in that case, this match is
753 756 # called, so the lfdirstate above may not be the correct one for
754 757 # this invocation of match.
755 758 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
756 759 False)
757 760
758 761 def tostandin(f):
759 762 standin = lfutil.standin(f)
760 763 if standin in ctx or standin in mctx:
761 764 return standin
762 765 elif standin in repo[None] or lfdirstate[f] == 'r':
763 766 return None
764 767 return f
765 768 m._files = [tostandin(f) for f in m._files]
766 769 m._files = [f for f in m._files if f is not None]
767 770 m._fileroots = set(m._files)
768 771 origmatchfn = m.matchfn
769 772 def matchfn(f):
770 if lfutil.isstandin(f):
771 return (origmatchfn(lfutil.splitstandin(f)) and
773 lfile = lfutil.splitstandin(f)
774 if lfile is not None:
775 return (origmatchfn(lfile) and
772 776 (f in ctx or f in mctx))
773 777 return origmatchfn(f)
774 778 m.matchfn = matchfn
775 779 return m
776 780 oldmatch = installmatchfn(overridematch)
777 781 try:
778 782 orig(ui, repo, ctx, parents, *pats, **opts)
779 783 finally:
780 784 restorematchfn()
781 785
782 786 newstandins = lfutil.getstandinsstate(repo)
783 787 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
784 788 # lfdirstate should be 'normallookup'-ed for updated files,
785 789 # because reverting doesn't touch dirstate for 'normal' files
786 790 # when target revision is explicitly specified: in such case,
787 791 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
788 792 # of target (standin) file.
789 793 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
790 794 normallookup=True)
791 795
792 796 # after pulling changesets, we need to take some extra care to get
793 797 # largefiles updated remotely
794 798 def overridepull(orig, ui, repo, source=None, **opts):
795 799 revsprepull = len(repo)
796 800 if not source:
797 801 source = 'default'
798 802 repo.lfpullsource = source
799 803 result = orig(ui, repo, source, **opts)
800 804 revspostpull = len(repo)
801 805 lfrevs = opts.get('lfrev', [])
802 806 if opts.get('all_largefiles'):
803 807 lfrevs.append('pulled()')
804 808 if lfrevs and revspostpull > revsprepull:
805 809 numcached = 0
806 810 repo.firstpulled = revsprepull # for pulled() revset expression
807 811 try:
808 812 for rev in scmutil.revrange(repo, lfrevs):
809 813 ui.note(_('pulling largefiles for revision %s\n') % rev)
810 814 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
811 815 numcached += len(cached)
812 816 finally:
813 817 del repo.firstpulled
814 818 ui.status(_("%d largefiles cached\n") % numcached)
815 819 return result
816 820
817 821 def overridepush(orig, ui, repo, *args, **kwargs):
818 822 """Override push command and store --lfrev parameters in opargs"""
819 823 lfrevs = kwargs.pop('lfrev', None)
820 824 if lfrevs:
821 825 opargs = kwargs.setdefault('opargs', {})
822 826 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
823 827 return orig(ui, repo, *args, **kwargs)
824 828
825 829 def exchangepushoperation(orig, *args, **kwargs):
826 830 """Override pushoperation constructor and store lfrevs parameter"""
827 831 lfrevs = kwargs.pop('lfrevs', None)
828 832 pushop = orig(*args, **kwargs)
829 833 pushop.lfrevs = lfrevs
830 834 return pushop
831 835
832 836 revsetpredicate = registrar.revsetpredicate()
833 837
834 838 @revsetpredicate('pulled()')
835 839 def pulledrevsetsymbol(repo, subset, x):
836 840 """Changesets that just has been pulled.
837 841
838 842 Only available with largefiles from pull --lfrev expressions.
839 843
840 844 .. container:: verbose
841 845
842 846 Some examples:
843 847
844 848 - pull largefiles for all new changesets::
845 849
846 850 hg pull -lfrev "pulled()"
847 851
848 852 - pull largefiles for all new branch heads::
849 853
850 854 hg pull -lfrev "head(pulled()) and not closed()"
851 855
852 856 """
853 857
854 858 try:
855 859 firstpulled = repo.firstpulled
856 860 except AttributeError:
857 861 raise error.Abort(_("pulled() only available in --lfrev"))
858 862 return smartset.baseset([r for r in subset if r >= firstpulled])
859 863
860 864 def overrideclone(orig, ui, source, dest=None, **opts):
861 865 d = dest
862 866 if d is None:
863 867 d = hg.defaultdest(source)
864 868 if opts.get('all_largefiles') and not hg.islocal(d):
865 869 raise error.Abort(_(
866 870 '--all-largefiles is incompatible with non-local destination %s') %
867 871 d)
868 872
869 873 return orig(ui, source, dest, **opts)
870 874
871 875 def hgclone(orig, ui, opts, *args, **kwargs):
872 876 result = orig(ui, opts, *args, **kwargs)
873 877
874 878 if result is not None:
875 879 sourcerepo, destrepo = result
876 880 repo = destrepo.local()
877 881
878 882 # When cloning to a remote repo (like through SSH), no repo is available
879 883 # from the peer. Therefore the largefiles can't be downloaded and the
880 884 # hgrc can't be updated.
881 885 if not repo:
882 886 return result
883 887
884 888 # If largefiles is required for this repo, permanently enable it locally
885 889 if 'largefiles' in repo.requirements:
886 890 with repo.vfs('hgrc', 'a', text=True) as fp:
887 891 fp.write('\n[extensions]\nlargefiles=\n')
888 892
889 893 # Caching is implicitly limited to 'rev' option, since the dest repo was
890 894 # truncated at that point. The user may expect a download count with
891 895 # this option, so attempt whether or not this is a largefile repo.
892 896 if opts.get('all_largefiles'):
893 897 success, missing = lfcommands.downloadlfiles(ui, repo, None)
894 898
895 899 if missing != 0:
896 900 return None
897 901
898 902 return result
899 903
900 904 def overriderebase(orig, ui, repo, **opts):
901 905 if not util.safehasattr(repo, '_largefilesenabled'):
902 906 return orig(ui, repo, **opts)
903 907
904 908 resuming = opts.get('continue')
905 909 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
906 910 repo._lfstatuswriters.append(lambda *msg, **opts: None)
907 911 try:
908 912 return orig(ui, repo, **opts)
909 913 finally:
910 914 repo._lfstatuswriters.pop()
911 915 repo._lfcommithooks.pop()
912 916
913 917 def overridearchivecmd(orig, ui, repo, dest, **opts):
914 918 repo.unfiltered().lfstatus = True
915 919
916 920 try:
917 921 return orig(ui, repo.unfiltered(), dest, **opts)
918 922 finally:
919 923 repo.unfiltered().lfstatus = False
920 924
921 925 def hgwebarchive(orig, web, req, tmpl):
922 926 web.repo.lfstatus = True
923 927
924 928 try:
925 929 return orig(web, req, tmpl)
926 930 finally:
927 931 web.repo.lfstatus = False
928 932
929 933 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
930 934 prefix='', mtime=None, subrepos=None):
931 935 # For some reason setting repo.lfstatus in hgwebarchive only changes the
932 936 # unfiltered repo's attr, so check that as well.
933 937 if not repo.lfstatus and not repo.unfiltered().lfstatus:
934 938 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
935 939 subrepos)
936 940
937 941 # No need to lock because we are only reading history and
938 942 # largefile caches, neither of which are modified.
939 943 if node is not None:
940 944 lfcommands.cachelfiles(repo.ui, repo, node)
941 945
942 946 if kind not in archival.archivers:
943 947 raise error.Abort(_("unknown archive type '%s'") % kind)
944 948
945 949 ctx = repo[node]
946 950
947 951 if kind == 'files':
948 952 if prefix:
949 953 raise error.Abort(
950 954 _('cannot give prefix when archiving to files'))
951 955 else:
952 956 prefix = archival.tidyprefix(dest, kind, prefix)
953 957
954 958 def write(name, mode, islink, getdata):
955 959 if matchfn and not matchfn(name):
956 960 return
957 961 data = getdata()
958 962 if decode:
959 963 data = repo.wwritedata(name, data)
960 964 archiver.addfile(prefix + name, mode, islink, data)
961 965
962 966 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
963 967
964 968 if repo.ui.configbool("ui", "archivemeta", True):
965 969 write('.hg_archival.txt', 0o644, False,
966 970 lambda: archival.buildmetadata(ctx))
967 971
968 972 for f in ctx:
969 973 ff = ctx.flags(f)
970 974 getdata = ctx[f].data
971 if lfutil.isstandin(f):
975 lfile = lfutil.splitstandin(f)
976 if lfile is not None:
972 977 if node is not None:
973 978 path = lfutil.findfile(repo, getdata().strip())
974 979
975 980 if path is None:
976 981 raise error.Abort(
977 982 _('largefile %s not found in repo store or system cache')
978 % lfutil.splitstandin(f))
983 % lfile)
979 984 else:
980 path = lfutil.splitstandin(f)
985 path = lfile
981 986
982 f = lfutil.splitstandin(f)
987 f = lfile
983 988
984 989 getdata = lambda: util.readfile(path)
985 990 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
986 991
987 992 if subrepos:
988 993 for subpath in sorted(ctx.substate):
989 994 sub = ctx.workingsub(subpath)
990 995 submatch = matchmod.subdirmatcher(subpath, matchfn)
991 996 sub._repo.lfstatus = True
992 997 sub.archive(archiver, prefix, submatch)
993 998
994 999 archiver.done()
995 1000
996 1001 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
997 1002 if not repo._repo.lfstatus:
998 1003 return orig(repo, archiver, prefix, match, decode)
999 1004
1000 1005 repo._get(repo._state + ('hg',))
1001 1006 rev = repo._state[1]
1002 1007 ctx = repo._repo[rev]
1003 1008
1004 1009 if ctx.node() is not None:
1005 1010 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1006 1011
1007 1012 def write(name, mode, islink, getdata):
1008 1013 # At this point, the standin has been replaced with the largefile name,
1009 1014 # so the normal matcher works here without the lfutil variants.
1010 1015 if match and not match(f):
1011 1016 return
1012 1017 data = getdata()
1013 1018 if decode:
1014 1019 data = repo._repo.wwritedata(name, data)
1015 1020
1016 1021 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1017 1022
1018 1023 for f in ctx:
1019 1024 ff = ctx.flags(f)
1020 1025 getdata = ctx[f].data
1021 if lfutil.isstandin(f):
1026 lfile = lfutil.splitstandin(f)
1027 if lfile is not None:
1022 1028 if ctx.node() is not None:
1023 1029 path = lfutil.findfile(repo._repo, getdata().strip())
1024 1030
1025 1031 if path is None:
1026 1032 raise error.Abort(
1027 1033 _('largefile %s not found in repo store or system cache')
1028 % lfutil.splitstandin(f))
1034 % lfile)
1029 1035 else:
1030 path = lfutil.splitstandin(f)
1036 path = lfile
1031 1037
1032 f = lfutil.splitstandin(f)
1038 f = lfile
1033 1039
1034 1040 getdata = lambda: util.readfile(os.path.join(prefix, path))
1035 1041
1036 1042 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1037 1043
1038 1044 for subpath in sorted(ctx.substate):
1039 1045 sub = ctx.workingsub(subpath)
1040 1046 submatch = matchmod.subdirmatcher(subpath, match)
1041 1047 sub._repo.lfstatus = True
1042 1048 sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
1043 1049
1044 1050 # If a largefile is modified, the change is not reflected in its
1045 1051 # standin until a commit. cmdutil.bailifchanged() raises an exception
1046 1052 # if the repo has uncommitted changes. Wrap it to also check if
1047 1053 # largefiles were changed. This is used by bisect, backout and fetch.
1048 1054 def overridebailifchanged(orig, repo, *args, **kwargs):
1049 1055 orig(repo, *args, **kwargs)
1050 1056 repo.lfstatus = True
1051 1057 s = repo.status()
1052 1058 repo.lfstatus = False
1053 1059 if s.modified or s.added or s.removed or s.deleted:
1054 1060 raise error.Abort(_('uncommitted changes'))
1055 1061
1056 1062 def postcommitstatus(orig, repo, *args, **kwargs):
1057 1063 repo.lfstatus = True
1058 1064 try:
1059 1065 return orig(repo, *args, **kwargs)
1060 1066 finally:
1061 1067 repo.lfstatus = False
1062 1068
1063 1069 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1064 1070 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1065 1071 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1066 1072 m = composelargefilematcher(match, repo[None].manifest())
1067 1073
1068 1074 try:
1069 1075 repo.lfstatus = True
1070 1076 s = repo.status(match=m, clean=True)
1071 1077 finally:
1072 1078 repo.lfstatus = False
1073 1079 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1074 1080 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1075 1081
1076 1082 for f in forget:
1077 1083 if lfutil.standin(f) not in repo.dirstate and not \
1078 1084 repo.wvfs.isdir(lfutil.standin(f)):
1079 1085 ui.warn(_('not removing %s: file is already untracked\n')
1080 1086 % m.rel(f))
1081 1087 bad.append(f)
1082 1088
1083 1089 for f in forget:
1084 1090 if ui.verbose or not m.exact(f):
1085 1091 ui.status(_('removing %s\n') % m.rel(f))
1086 1092
1087 1093 # Need to lock because standin files are deleted then removed from the
1088 1094 # repository and we could race in-between.
1089 1095 with repo.wlock():
1090 1096 lfdirstate = lfutil.openlfdirstate(ui, repo)
1091 1097 for f in forget:
1092 1098 if lfdirstate[f] == 'a':
1093 1099 lfdirstate.drop(f)
1094 1100 else:
1095 1101 lfdirstate.remove(f)
1096 1102 lfdirstate.write()
1097 1103 standins = [lfutil.standin(f) for f in forget]
1098 1104 for f in standins:
1099 1105 repo.wvfs.unlinkpath(f, ignoremissing=True)
1100 1106 rejected = repo[None].forget(standins)
1101 1107
1102 1108 bad.extend(f for f in rejected if f in m.files())
1103 1109 forgot.extend(f for f in forget if f not in rejected)
1104 1110 return bad, forgot
1105 1111
1106 1112 def _getoutgoings(repo, other, missing, addfunc):
1107 1113 """get pairs of filename and largefile hash in outgoing revisions
1108 1114 in 'missing'.
1109 1115
1110 1116 largefiles already existing on 'other' repository are ignored.
1111 1117
1112 1118 'addfunc' is invoked with each unique pairs of filename and
1113 1119 largefile hash value.
1114 1120 """
1115 1121 knowns = set()
1116 1122 lfhashes = set()
1117 1123 def dedup(fn, lfhash):
1118 1124 k = (fn, lfhash)
1119 1125 if k not in knowns:
1120 1126 knowns.add(k)
1121 1127 lfhashes.add(lfhash)
1122 1128 lfutil.getlfilestoupload(repo, missing, dedup)
1123 1129 if lfhashes:
1124 1130 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1125 1131 for fn, lfhash in knowns:
1126 1132 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1127 1133 addfunc(fn, lfhash)
1128 1134
1129 1135 def outgoinghook(ui, repo, other, opts, missing):
1130 1136 if opts.pop('large', None):
1131 1137 lfhashes = set()
1132 1138 if ui.debugflag:
1133 1139 toupload = {}
1134 1140 def addfunc(fn, lfhash):
1135 1141 if fn not in toupload:
1136 1142 toupload[fn] = []
1137 1143 toupload[fn].append(lfhash)
1138 1144 lfhashes.add(lfhash)
1139 1145 def showhashes(fn):
1140 1146 for lfhash in sorted(toupload[fn]):
1141 1147 ui.debug(' %s\n' % (lfhash))
1142 1148 else:
1143 1149 toupload = set()
1144 1150 def addfunc(fn, lfhash):
1145 1151 toupload.add(fn)
1146 1152 lfhashes.add(lfhash)
1147 1153 def showhashes(fn):
1148 1154 pass
1149 1155 _getoutgoings(repo, other, missing, addfunc)
1150 1156
1151 1157 if not toupload:
1152 1158 ui.status(_('largefiles: no files to upload\n'))
1153 1159 else:
1154 1160 ui.status(_('largefiles to upload (%d entities):\n')
1155 1161 % (len(lfhashes)))
1156 1162 for file in sorted(toupload):
1157 1163 ui.status(lfutil.splitstandin(file) + '\n')
1158 1164 showhashes(file)
1159 1165 ui.status('\n')
1160 1166
1161 1167 def summaryremotehook(ui, repo, opts, changes):
1162 1168 largeopt = opts.get('large', False)
1163 1169 if changes is None:
1164 1170 if largeopt:
1165 1171 return (False, True) # only outgoing check is needed
1166 1172 else:
1167 1173 return (False, False)
1168 1174 elif largeopt:
1169 1175 url, branch, peer, outgoing = changes[1]
1170 1176 if peer is None:
1171 1177 # i18n: column positioning for "hg summary"
1172 1178 ui.status(_('largefiles: (no remote repo)\n'))
1173 1179 return
1174 1180
1175 1181 toupload = set()
1176 1182 lfhashes = set()
1177 1183 def addfunc(fn, lfhash):
1178 1184 toupload.add(fn)
1179 1185 lfhashes.add(lfhash)
1180 1186 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1181 1187
1182 1188 if not toupload:
1183 1189 # i18n: column positioning for "hg summary"
1184 1190 ui.status(_('largefiles: (no files to upload)\n'))
1185 1191 else:
1186 1192 # i18n: column positioning for "hg summary"
1187 1193 ui.status(_('largefiles: %d entities for %d files to upload\n')
1188 1194 % (len(lfhashes), len(toupload)))
1189 1195
1190 1196 def overridesummary(orig, ui, repo, *pats, **opts):
1191 1197 try:
1192 1198 repo.lfstatus = True
1193 1199 orig(ui, repo, *pats, **opts)
1194 1200 finally:
1195 1201 repo.lfstatus = False
1196 1202
1197 1203 def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
1198 1204 similarity=None):
1199 1205 if opts is None:
1200 1206 opts = {}
1201 1207 if not lfutil.islfilesrepo(repo):
1202 1208 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1203 1209 # Get the list of missing largefiles so we can remove them
1204 1210 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1205 1211 unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()), [],
1206 1212 False, False, False)
1207 1213
1208 1214 # Call into the normal remove code, but the removing of the standin, we want
1209 1215 # to have handled by original addremove. Monkey patching here makes sure
1210 1216 # we don't remove the standin in the largefiles code, preventing a very
1211 1217 # confused state later.
1212 1218 if s.deleted:
1213 1219 m = copy.copy(matcher)
1214 1220
1215 1221 # The m._files and m._map attributes are not changed to the deleted list
1216 1222 # because that affects the m.exact() test, which in turn governs whether
1217 1223 # or not the file name is printed, and how. Simply limit the original
1218 1224 # matches to those in the deleted status list.
1219 1225 matchfn = m.matchfn
1220 1226 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1221 1227
1222 1228 removelargefiles(repo.ui, repo, True, m, **opts)
1223 1229 # Call into the normal add code, and any files that *should* be added as
1224 1230 # largefiles will be
1225 1231 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1226 1232 # Now that we've handled largefiles, hand off to the original addremove
1227 1233 # function to take care of the rest. Make sure it doesn't do anything with
1228 1234 # largefiles by passing a matcher that will ignore them.
1229 1235 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1230 1236 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1231 1237
1232 1238 # Calling purge with --all will cause the largefiles to be deleted.
1233 1239 # Override repo.status to prevent this from happening.
1234 1240 def overridepurge(orig, ui, repo, *dirs, **opts):
1235 1241 # XXX Monkey patching a repoview will not work. The assigned attribute will
1236 1242 # be set on the unfiltered repo, but we will only lookup attributes in the
1237 1243 # unfiltered repo if the lookup in the repoview object itself fails. As the
1238 1244 # monkey patched method exists on the repoview class the lookup will not
1239 1245 # fail. As a result, the original version will shadow the monkey patched
1240 1246 # one, defeating the monkey patch.
1241 1247 #
1242 1248 # As a work around we use an unfiltered repo here. We should do something
1243 1249 # cleaner instead.
1244 1250 repo = repo.unfiltered()
1245 1251 oldstatus = repo.status
1246 1252 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1247 1253 clean=False, unknown=False, listsubrepos=False):
1248 1254 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1249 1255 listsubrepos)
1250 1256 lfdirstate = lfutil.openlfdirstate(ui, repo)
1251 1257 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1252 1258 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1253 1259 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1254 1260 unknown, ignored, r.clean)
1255 1261 repo.status = overridestatus
1256 1262 orig(ui, repo, *dirs, **opts)
1257 1263 repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    '''Roll back, then bring standins and the largefiles dirstate back
    into sync with the restored working directory parents.'''
    with repo.wlock():
        oldparents = repo.dirstate.parents()
        # standins tracked before the rollback and not marked removed
        orphanstandins = {f for f in repo.dirstate
                          if lfutil.isstandin(f) and repo.dirstate[f] != 'r'}
        result = orig(ui, repo, **opts)
        if oldparents == repo.dirstate.parents():
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if not lfutil.isstandin(f):
                continue
            orphanstandins.discard(f)
            if repo.dirstate[f] == 'r':
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            elif f in pctx:
                fctx = pctx[f]
                repo.wwrite(f, fctx.data(), fctx.flags())
            else:
                # content of standin is not so important in 'a',
                # 'm' or 'n' (coming from the 2nd parent) cases
                lfutil.writestandin(repo, f, '', False)
        for standin in orphanstandins:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # drop lfdirstate entries with no corresponding largefile left
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        stale = set(lfdirstate)
        for lfile in lfutil.listlfiles(repo):
            lfutil.synclfdirstate(repo, lfdirstate, lfile, True)
            stale.discard(lfile)
        for lfile in stale:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result
1294 1300
def overridetransplant(orig, ui, repo, *revs, **opts):
    '''Run transplant with largefiles integration.

    Installs an automated commit hook (aware of --continue) and a no-op
    status writer for the duration of the transplant, and always removes
    them again afterwards.
    '''
    repo._lfcommithooks.append(
        lfutil.automatedcommithook(opts.get('continue')))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1305 1311
def overridecat(orig, ui, repo, file1, *pats, **opts):
    '''cat that transparently resolves largefile standins.

    Patterns naming a largefile are matched via their standin, and the
    largefile content is read from the user cache (fetching it from the
    store on demand) instead of emitting the standin's hash.
    '''
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        # also accept the standin of any largefile the pattern names
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # suppress "no such file" for largefile names remapped above
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        # always descend into the standin directory
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get('decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # 'lfhash' rather than 'hash': don't shadow the builtin
                lfhash = lfutil.readstandin(repo, lf, ctx.rev())
                if not lfutil.inusercache(repo.ui, lfhash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, lfhash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _('largefile %s is not in cache and could not be '
                              'downloaded') % lf)
                path = lfutil.usercachepath(repo.ui, lfhash)
                with open(path, "rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1365 1371
def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    """Wrap merge.update() to keep largefile standins and the largefiles
    dirstate consistent across an update/merge.

    Before delegating to the original update, dirty largefiles are
    re-hashed into their standins; afterwards only the largefiles whose
    standins actually changed are refreshed in the working directory.
    """
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(matchmod.always(repo.root,
                                                      repo.getcwd()),
                                      [], False, True, False)
        oldclean = set(s.clean)
        pctx = repo['.']
        # rehash possibly-modified largefiles into their standins so the
        # update below sees current content; files still matching the
        # parent's standin hash are remembered as clean
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashrepofile(repo, lfile)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, '.')):
                oldclean.add(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        # snapshot standins before/after the real update to compute which
        # largefiles need refreshing in the working directory
        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

    return result
1432 1438
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    '''Wrap scmutil.marktouched() so that touching a standin also
    refreshes the corresponding largefile in the working directory.'''
    result = orig(repo, files, *args, **kwargs)

    touchedlfiles = [lfutil.splitstandin(f) for f in files]
    touchedlfiles = [lf for lf in touchedlfiles if lf is not None]
    if touchedlfiles:
        lfcommands.updatelfiles(repo.ui, repo, filelist=touchedlfiles,
                                printmessage=False, normallookup=True)

    return result
General Comments 0
You need to be logged in to leave comments. Login now