largefiles: remove unused 'rev' parameter from downloadlfiles()...
Yuya Nishihara
r46025:39ddb112 default
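In brief: the first hunk drops the unused 'rev' parameter from downloadlfiles() itself. Its callers either omitted the argument or passed None explicitly, so the guard around walkchangerevs() can go as well. A condensed before/after, excerpted from the hunk below (illustrative, not a standalone script):

    # before: 'rev' was accepted but effectively always None
    def downloadlfiles(ui, repo, rev=None):
        ...
        if rev != []:  # walkchangerevs on empty list would return all revs
            for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': rev}, prepare):
                success, missing = cachelfiles(ui, repo, ctx.node())

    # after: no 'rev'; walk all revisions unconditionally
    def downloadlfiles(ui, repo):
        ...
        for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': None}, prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())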
@@ -1,669 +1,668 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10 from __future__ import absolute_import
11 11
12 12 import errno
13 13 import os
14 14 import shutil
15 15
16 16 from mercurial.i18n import _
17 17
18 18 from mercurial import (
19 19 cmdutil,
20 20 context,
21 21 error,
22 22 exthelper,
23 23 hg,
24 24 lock,
25 25 match as matchmod,
26 26 node,
27 27 pycompat,
28 28 scmutil,
29 29 util,
30 30 )
31 31 from mercurial.utils import hashutil
32 32
33 33 from ..convert import (
34 34 convcmd,
35 35 filemap,
36 36 )
37 37
38 38 from . import lfutil, storefactory
39 39
40 40 release = lock.release
41 41
42 42 # -- Commands ----------------------------------------------------------
43 43
44 44 eh = exthelper.exthelper()
45 45
46 46
47 47 @eh.command(
48 48 b'lfconvert',
49 49 [
50 50 (
51 51 b's',
52 52 b'size',
53 53 b'',
54 54 _(b'minimum size (MB) for files to be converted as largefiles'),
55 55 b'SIZE',
56 56 ),
57 57 (
58 58 b'',
59 59 b'to-normal',
60 60 False,
61 61 _(b'convert from a largefiles repo to a normal repo'),
62 62 ),
63 63 ],
64 64 _(b'hg lfconvert SOURCE DEST [FILE ...]'),
65 65 norepo=True,
66 66 inferrepo=True,
67 67 )
68 68 def lfconvert(ui, src, dest, *pats, **opts):
69 69 '''convert a normal repository to a largefiles repository
70 70
71 71 Convert repository SOURCE to a new repository DEST, identical to
72 72 SOURCE except that certain files will be converted as largefiles:
73 73 specifically, any file that matches any PATTERN *or* whose size is
74 74 above the minimum size threshold is converted as a largefile. The
75 75 size used to determine whether or not to track a file as a
76 76 largefile is the size of the first version of the file. The
77 77 minimum size can be specified either with --size or in
78 78 configuration as ``largefiles.size``.
79 79
80 80 After running this command you will need to make sure that
81 81 largefiles is enabled anywhere you intend to push the new
82 82 repository.
83 83
84 84 Use --to-normal to convert largefiles back to normal files; after
85 85 this, the DEST repository can be used without largefiles at all.'''
86 86
87 87 opts = pycompat.byteskwargs(opts)
88 88 if opts[b'to_normal']:
89 89 tolfile = False
90 90 else:
91 91 tolfile = True
92 92 size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
93 93
94 94 if not hg.islocal(src):
95 95 raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
96 96 if not hg.islocal(dest):
97 97 raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)
98 98
99 99 rsrc = hg.repository(ui, src)
100 100 ui.status(_(b'initializing destination %s\n') % dest)
101 101 rdst = hg.repository(ui, dest, create=True)
102 102
103 103 success = False
104 104 dstwlock = dstlock = None
105 105 try:
106 106 # Get a list of all changesets in the source. The easy way to do this
107 107 # is to simply walk the changelog, using changelog.nodesbetween().
108 108 # Take a look at mercurial/revlog.py:639 for more details.
109 109 # Use a generator instead of a list to decrease memory usage
110 110 ctxs = (
111 111 rsrc[ctx]
112 112 for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
113 113 )
114 114 revmap = {node.nullid: node.nullid}
115 115 if tolfile:
116 116 # Lock destination to prevent modification while it is converted to.
117 117 # Don't need to lock src because we are just reading from its
118 118 # history which can't change.
119 119 dstwlock = rdst.wlock()
120 120 dstlock = rdst.lock()
121 121
122 122 lfiles = set()
123 123 normalfiles = set()
124 124 if not pats:
125 125 pats = ui.configlist(lfutil.longname, b'patterns')
126 126 if pats:
127 127 matcher = matchmod.match(rsrc.root, b'', list(pats))
128 128 else:
129 129 matcher = None
130 130
131 131 lfiletohash = {}
132 132 with ui.makeprogress(
133 133 _(b'converting revisions'),
134 134 unit=_(b'revisions'),
135 135 total=rsrc[b'tip'].rev(),
136 136 ) as progress:
137 137 for ctx in ctxs:
138 138 progress.update(ctx.rev())
139 139 _lfconvert_addchangeset(
140 140 rsrc,
141 141 rdst,
142 142 ctx,
143 143 revmap,
144 144 lfiles,
145 145 normalfiles,
146 146 matcher,
147 147 size,
148 148 lfiletohash,
149 149 )
150 150
151 151 if rdst.wvfs.exists(lfutil.shortname):
152 152 rdst.wvfs.rmtree(lfutil.shortname)
153 153
154 154 for f in lfiletohash.keys():
155 155 if rdst.wvfs.isfile(f):
156 156 rdst.wvfs.unlink(f)
157 157 try:
158 158 rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
159 159 except OSError:
160 160 pass
161 161
162 162 # If there were any files converted to largefiles, add largefiles
163 163 # to the destination repository's requirements.
164 164 if lfiles:
165 165 rdst.requirements.add(b'largefiles')
166 166 scmutil.writereporequirements(rdst)
167 167 else:
168 168
169 169 class lfsource(filemap.filemap_source):
170 170 def __init__(self, ui, source):
171 171 super(lfsource, self).__init__(ui, source, None)
172 172 self.filemapper.rename[lfutil.shortname] = b'.'
173 173
174 174 def getfile(self, name, rev):
175 175 realname, realrev = rev
176 176 f = super(lfsource, self).getfile(name, rev)
177 177
178 178 if (
179 179 not realname.startswith(lfutil.shortnameslash)
180 180 or f[0] is None
181 181 ):
182 182 return f
183 183
184 184 # Substitute in the largefile data for the hash
185 185 hash = f[0].strip()
186 186 path = lfutil.findfile(rsrc, hash)
187 187
188 188 if path is None:
189 189 raise error.Abort(
190 190 _(b"missing largefile for '%s' in %s")
191 191 % (realname, realrev)
192 192 )
193 193 return util.readfile(path), f[1]
194 194
195 195 class converter(convcmd.converter):
196 196 def __init__(self, ui, source, dest, revmapfile, opts):
197 197 src = lfsource(ui, source)
198 198
199 199 super(converter, self).__init__(
200 200 ui, src, dest, revmapfile, opts
201 201 )
202 202
203 203 found, missing = downloadlfiles(ui, rsrc)
204 204 if missing != 0:
205 205 raise error.Abort(_(b"all largefiles must be present locally"))
206 206
207 207 orig = convcmd.converter
208 208 convcmd.converter = converter
209 209
210 210 try:
211 211 convcmd.convert(
212 212 ui, src, dest, source_type=b'hg', dest_type=b'hg'
213 213 )
214 214 finally:
215 215 convcmd.converter = orig
216 216 success = True
217 217 finally:
218 218 if tolfile:
219 219 rdst.dirstate.clear()
220 220 release(dstlock, dstwlock)
221 221 if not success:
222 222 # we failed, remove the new directory
223 223 shutil.rmtree(rdst.root)
224 224
225 225
226 226 def _lfconvert_addchangeset(
227 227 rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
228 228 ):
229 229 # Convert src parents to dst parents
230 230 parents = _convertparents(ctx, revmap)
231 231
232 232 # Generate list of changed files
233 233 files = _getchangedfiles(ctx, parents)
234 234
235 235 dstfiles = []
236 236 for f in files:
237 237 if f not in lfiles and f not in normalfiles:
238 238 islfile = _islfile(f, ctx, matcher, size)
239 239 # If this file was renamed or copied then copy
240 240 # the largefile-ness of its predecessor
241 241 if f in ctx.manifest():
242 242 fctx = ctx.filectx(f)
243 243 renamed = fctx.copysource()
244 244 if renamed is None:
245 245 # the code below assumes renamed to be a boolean or a list
246 246 # and won't quite work with the value None
247 247 renamed = False
248 248 renamedlfile = renamed and renamed in lfiles
249 249 islfile |= renamedlfile
250 250 if b'l' in fctx.flags():
251 251 if renamedlfile:
252 252 raise error.Abort(
253 253 _(b'renamed/copied largefile %s becomes symlink')
254 254 % f
255 255 )
256 256 islfile = False
257 257 if islfile:
258 258 lfiles.add(f)
259 259 else:
260 260 normalfiles.add(f)
261 261
262 262 if f in lfiles:
263 263 fstandin = lfutil.standin(f)
264 264 dstfiles.append(fstandin)
265 265 # largefile in manifest if it has not been removed/renamed
266 266 if f in ctx.manifest():
267 267 fctx = ctx.filectx(f)
268 268 if b'l' in fctx.flags():
269 269 renamed = fctx.copysource()
270 270 if renamed and renamed in lfiles:
271 271 raise error.Abort(
272 272 _(b'largefile %s becomes symlink') % f
273 273 )
274 274
275 275 # largefile was modified, update standins
276 276 m = hashutil.sha1(b'')
277 277 m.update(ctx[f].data())
278 278 hash = node.hex(m.digest())
279 279 if f not in lfiletohash or lfiletohash[f] != hash:
280 280 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
281 281 executable = b'x' in ctx[f].flags()
282 282 lfutil.writestandin(rdst, fstandin, hash, executable)
283 283 lfiletohash[f] = hash
284 284 else:
285 285 # normal file
286 286 dstfiles.append(f)
287 287
288 288 def getfilectx(repo, memctx, f):
289 289 srcfname = lfutil.splitstandin(f)
290 290 if srcfname is not None:
291 291 # if the file isn't in the manifest then it was removed
292 292 # or renamed, return None to indicate this
293 293 try:
294 294 fctx = ctx.filectx(srcfname)
295 295 except error.LookupError:
296 296 return None
297 297 renamed = fctx.copysource()
298 298 if renamed:
299 299 # standin is always a largefile because largefile-ness
300 300 # doesn't change after rename or copy
301 301 renamed = lfutil.standin(renamed)
302 302
303 303 return context.memfilectx(
304 304 repo,
305 305 memctx,
306 306 f,
307 307 lfiletohash[srcfname] + b'\n',
308 308 b'l' in fctx.flags(),
309 309 b'x' in fctx.flags(),
310 310 renamed,
311 311 )
312 312 else:
313 313 return _getnormalcontext(repo, ctx, f, revmap)
314 314
315 315 # Commit
316 316 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
317 317
318 318
319 319 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
320 320 mctx = context.memctx(
321 321 rdst,
322 322 parents,
323 323 ctx.description(),
324 324 dstfiles,
325 325 getfilectx,
326 326 ctx.user(),
327 327 ctx.date(),
328 328 ctx.extra(),
329 329 )
330 330 ret = rdst.commitctx(mctx)
331 331 lfutil.copyalltostore(rdst, ret)
332 332 rdst.setparents(ret)
333 333 revmap[ctx.node()] = rdst.changelog.tip()
334 334
335 335
336 336 # Generate list of changed files
337 337 def _getchangedfiles(ctx, parents):
338 338 files = set(ctx.files())
339 339 if node.nullid not in parents:
340 340 mc = ctx.manifest()
341 341 for pctx in ctx.parents():
342 342 for fn in pctx.manifest().diff(mc):
343 343 files.add(fn)
344 344 return files
345 345
346 346
347 347 # Convert src parents to dst parents
348 348 def _convertparents(ctx, revmap):
349 349 parents = []
350 350 for p in ctx.parents():
351 351 parents.append(revmap[p.node()])
352 352 while len(parents) < 2:
353 353 parents.append(node.nullid)
354 354 return parents
355 355
356 356
357 357 # Get memfilectx for a normal file
358 358 def _getnormalcontext(repo, ctx, f, revmap):
359 359 try:
360 360 fctx = ctx.filectx(f)
361 361 except error.LookupError:
362 362 return None
363 363 renamed = fctx.copysource()
364 364
365 365 data = fctx.data()
366 366 if f == b'.hgtags':
367 367 data = _converttags(repo.ui, revmap, data)
368 368 return context.memfilectx(
369 369 repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
370 370 )
371 371
372 372
373 373 # Remap tag data using a revision map
374 374 def _converttags(ui, revmap, data):
375 375 newdata = []
376 376 for line in data.splitlines():
377 377 try:
378 378 id, name = line.split(b' ', 1)
379 379 except ValueError:
380 380 ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
381 381 continue
382 382 try:
383 383 newid = node.bin(id)
384 384 except TypeError:
385 385 ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
386 386 continue
387 387 try:
388 388 newdata.append(b'%s %s\n' % (node.hex(revmap[newid]), name))
389 389 except KeyError:
390 390 ui.warn(_(b'no mapping for id %s\n') % id)
391 391 continue
392 392 return b''.join(newdata)
393 393
394 394
395 395 def _islfile(file, ctx, matcher, size):
396 396 '''Return true if file should be considered a largefile, i.e.
397 397 matcher matches it or it is larger than size.'''
398 398 # never store special .hg* files as largefiles
399 399 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
400 400 return False
401 401 if matcher and matcher(file):
402 402 return True
403 403 try:
404 404 return ctx.filectx(file).size() >= size * 1024 * 1024
405 405 except error.LookupError:
406 406 return False
407 407
408 408
409 409 def uploadlfiles(ui, rsrc, rdst, files):
410 410 '''upload largefiles to the central store'''
411 411
412 412 if not files:
413 413 return
414 414
415 415 store = storefactory.openstore(rsrc, rdst, put=True)
416 416
417 417 at = 0
418 418 ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
419 419 retval = store.exists(files)
420 420 files = [h for h in files if not retval[h]]
421 421 ui.debug(b"%d largefiles need to be uploaded\n" % len(files))
422 422
423 423 with ui.makeprogress(
424 424 _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
425 425 ) as progress:
426 426 for hash in files:
427 427 progress.update(at)
428 428 source = lfutil.findfile(rsrc, hash)
429 429 if not source:
430 430 raise error.Abort(
431 431 _(
432 432 b'largefile %s missing from store'
433 433 b' (needs to be uploaded)'
434 434 )
435 435 % hash
436 436 )
437 437 # XXX check for errors here
438 438 store.put(source, hash)
439 439 at += 1
440 440
441 441
442 442 def verifylfiles(ui, repo, all=False, contents=False):
443 443 '''Verify that every largefile revision in the current changeset
444 444 exists in the central store. With --contents, also verify that
445 445 the contents of each local largefile file revision are correct (SHA-1 hash
446 446 matches the revision ID). With --all, check every changeset in
447 447 this repository.'''
448 448 if all:
449 449 revs = repo.revs(b'all()')
450 450 else:
451 451 revs = [b'.']
452 452
453 453 store = storefactory.openstore(repo)
454 454 return store.verify(revs, contents=contents)
455 455
456 456
457 457 def cachelfiles(ui, repo, node, filelist=None):
458 458 '''cachelfiles ensures that all largefiles needed by the specified revision
459 459 are present in the repository's largefile cache.
460 460
461 461 returns a tuple (cached, missing). cached is the list of files downloaded
462 462 by this operation; missing is the list of files that were needed but could
463 463 not be found.'''
464 464 lfiles = lfutil.listlfiles(repo, node)
465 465 if filelist:
466 466 lfiles = set(lfiles) & set(filelist)
467 467 toget = []
468 468
469 469 ctx = repo[node]
470 470 for lfile in lfiles:
471 471 try:
472 472 expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
473 473 except IOError as err:
474 474 if err.errno == errno.ENOENT:
475 475 continue # node must be None and standin wasn't found in wctx
476 476 raise
477 477 if not lfutil.findfile(repo, expectedhash):
478 478 toget.append((lfile, expectedhash))
479 479
480 480 if toget:
481 481 store = storefactory.openstore(repo)
482 482 ret = store.get(toget)
483 483 return ret
484 484
485 485 return ([], [])
486 486
487 487
488 def downloadlfiles(ui, repo, rev=None):
488 def downloadlfiles(ui, repo):
489 489 match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})
490 490
491 491 def prepare(ctx, fns):
492 492 pass
493 493
494 494 totalsuccess = 0
495 495 totalmissing = 0
496 if rev != []: # walkchangerevs on empty list would return all revs
497 for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': rev}, prepare):
498 success, missing = cachelfiles(ui, repo, ctx.node())
499 totalsuccess += len(success)
500 totalmissing += len(missing)
496 for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': None}, prepare):
497 success, missing = cachelfiles(ui, repo, ctx.node())
498 totalsuccess += len(success)
499 totalmissing += len(missing)
501 500 ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
502 501 if totalmissing > 0:
503 502 ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
504 503 return totalsuccess, totalmissing
505 504
506 505
507 506 def updatelfiles(
508 507 ui, repo, filelist=None, printmessage=None, normallookup=False
509 508 ):
510 509 '''Update largefiles according to standins in the working directory
511 510
512 511 If ``printmessage`` is other than ``None``, it means "print (or
513 512 ignore, for false) message forcibly".
514 513 '''
515 514 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
516 515 with repo.wlock():
517 516 lfdirstate = lfutil.openlfdirstate(ui, repo)
518 517 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
519 518
520 519 if filelist is not None:
521 520 filelist = set(filelist)
522 521 lfiles = [f for f in lfiles if f in filelist]
523 522
524 523 update = {}
525 524 dropped = set()
526 525 updated, removed = 0, 0
527 526 wvfs = repo.wvfs
528 527 wctx = repo[None]
529 528 for lfile in lfiles:
530 529 lfileorig = os.path.relpath(
531 530 scmutil.backuppath(ui, repo, lfile), start=repo.root
532 531 )
533 532 standin = lfutil.standin(lfile)
534 533 standinorig = os.path.relpath(
535 534 scmutil.backuppath(ui, repo, standin), start=repo.root
536 535 )
537 536 if wvfs.exists(standin):
538 537 if wvfs.exists(standinorig) and wvfs.exists(lfile):
539 538 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
540 539 wvfs.unlinkpath(standinorig)
541 540 expecthash = lfutil.readasstandin(wctx[standin])
542 541 if expecthash != b'':
543 542 if lfile not in wctx: # not switched to normal file
544 543 if repo.dirstate[standin] != b'?':
545 544 wvfs.unlinkpath(lfile, ignoremissing=True)
546 545 else:
547 546 dropped.add(lfile)
548 547
549 548 # use normallookup() to allocate an entry in largefiles
550 549 # dirstate to prevent lfilesrepo.status() from reporting
551 550 # missing files as removed.
552 551 lfdirstate.normallookup(lfile)
553 552 update[lfile] = expecthash
554 553 else:
555 554 # Remove lfiles for which the standin is deleted, unless the
556 555 # lfile is added to the repository again. This happens when a
557 556 # largefile is converted back to a normal file: the standin
558 557 # disappears, but a new (normal) file appears as the lfile.
559 558 if (
560 559 wvfs.exists(lfile)
561 560 and repo.dirstate.normalize(lfile) not in wctx
562 561 ):
563 562 wvfs.unlinkpath(lfile)
564 563 removed += 1
565 564
566 565 # largefile processing might be slow and be interrupted - be prepared
567 566 lfdirstate.write()
568 567
569 568 if lfiles:
570 569 lfiles = [f for f in lfiles if f not in dropped]
571 570
572 571 for f in dropped:
573 572 repo.wvfs.unlinkpath(lfutil.standin(f))
574 573
575 574 # This needs to happen for dropped files, otherwise they stay in
576 575 # the M state.
577 576 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
578 577
579 578 statuswriter(_(b'getting changed largefiles\n'))
580 579 cachelfiles(ui, repo, None, lfiles)
581 580
582 581 for lfile in lfiles:
583 582 update1 = 0
584 583
585 584 expecthash = update.get(lfile)
586 585 if expecthash:
587 586 if not lfutil.copyfromcache(repo, expecthash, lfile):
588 587 # failed ... but already removed and set to normallookup
589 588 continue
590 589 # Synchronize largefile dirstate to the last modified
591 590 # time of the file
592 591 lfdirstate.normal(lfile)
593 592 update1 = 1
594 593
595 594 # copy the exec mode of largefile standin from the repository's
596 595 # dirstate to its state in the lfdirstate.
597 596 standin = lfutil.standin(lfile)
598 597 if wvfs.exists(standin):
599 598 # exec is decided by the users permissions using mask 0o100
600 599 standinexec = wvfs.stat(standin).st_mode & 0o100
601 600 st = wvfs.stat(lfile)
602 601 mode = st.st_mode
603 602 if standinexec != mode & 0o100:
604 603 # first remove all X bits, then shift all R bits to X
605 604 mode &= ~0o111
606 605 if standinexec:
607 606 mode |= (mode >> 2) & 0o111 & ~util.umask
608 607 wvfs.chmod(lfile, mode)
609 608 update1 = 1
610 609
611 610 updated += update1
612 611
613 612 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
614 613
615 614 lfdirstate.write()
616 615 if lfiles:
617 616 statuswriter(
618 617 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
619 618 )
620 619
621 620
622 621 @eh.command(
623 622 b'lfpull',
624 623 [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
625 624 + cmdutil.remoteopts,
626 625 _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
627 626 )
628 627 def lfpull(ui, repo, source=b"default", **opts):
629 628 """pull largefiles for the specified revisions from the specified source
630 629
631 630 Pull largefiles that are referenced from local changesets but missing
632 631 locally, pulling from a remote repository to the local cache.
633 632
634 633 If SOURCE is omitted, the 'default' path will be used.
635 634 See :hg:`help urls` for more information.
636 635
637 636 .. container:: verbose
638 637
639 638 Some examples:
640 639
641 640 - pull largefiles for all branch heads::
642 641
643 642 hg lfpull -r "head() and not closed()"
644 643
645 644 - pull largefiles on the default branch::
646 645
647 646 hg lfpull -r "branch(default)"
648 647 """
649 648 repo.lfpullsource = source
650 649
651 650 revs = opts.get('rev', [])
652 651 if not revs:
653 652 raise error.Abort(_(b'no revisions specified'))
654 653 revs = scmutil.revrange(repo, revs)
655 654
656 655 numcached = 0
657 656 for rev in revs:
658 657 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
659 658 (cached, missing) = cachelfiles(ui, repo, rev)
660 659 numcached += len(cached)
661 660 ui.status(_(b"%d largefiles cached\n") % numcached)
662 661
663 662
664 663 @eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
665 664 def debuglfput(ui, repo, filepath, **kwargs):
666 665 hash = lfutil.hashfile(filepath)
667 666 storefactory.openstore(repo).put(filepath, hash)
668 667 ui.write(b'%s\n' % hash)
669 668 return 0
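The second hunk updates the one caller that still passed an explicit None, in the wrapped hg.clone handler, to match the new signature:

    # before
    success, missing = lfcommands.downloadlfiles(ui, repo, None)
    # after
    success, missing = lfcommands.downloadlfiles(ui, repo)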
@@ -1,1828 +1,1828 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial.pycompat import open
18 18
19 19 from mercurial.hgweb import webcommands
20 20
21 21 from mercurial import (
22 22 archival,
23 23 cmdutil,
24 24 copies as copiesmod,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 exthelper,
29 29 filemerge,
30 30 hg,
31 31 logcmdutil,
32 32 match as matchmod,
33 33 merge,
34 34 mergestate as mergestatemod,
35 35 pathutil,
36 36 pycompat,
37 37 scmutil,
38 38 smartset,
39 39 subrepo,
40 40 upgrade,
41 41 url as urlmod,
42 42 util,
43 43 )
44 44
45 45 from . import (
46 46 lfcommands,
47 47 lfutil,
48 48 storefactory,
49 49 )
50 50
51 51 eh = exthelper.exthelper()
52 52
53 53 lfstatus = lfutil.lfstatus
54 54
55 55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
56 56
57 57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
58 58
59 59
60 60 def composelargefilematcher(match, manifest):
61 61 '''create a matcher that matches only the largefiles in the original
62 62 matcher'''
63 63 m = copy.copy(match)
64 64 lfile = lambda f: lfutil.standin(f) in manifest
65 65 m._files = [lf for lf in m._files if lfile(lf)]
66 66 m._fileset = set(m._files)
67 67 m.always = lambda: False
68 68 origmatchfn = m.matchfn
69 69 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
70 70 return m
71 71
72 72
73 73 def composenormalfilematcher(match, manifest, exclude=None):
74 74 excluded = set()
75 75 if exclude is not None:
76 76 excluded.update(exclude)
77 77
78 78 m = copy.copy(match)
79 79 notlfile = lambda f: not (
80 80 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
81 81 )
82 82 m._files = [lf for lf in m._files if notlfile(lf)]
83 83 m._fileset = set(m._files)
84 84 m.always = lambda: False
85 85 origmatchfn = m.matchfn
86 86 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
87 87 return m
88 88
89 89
90 90 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
91 91 large = opts.get('large')
92 92 lfsize = lfutil.getminsize(
93 93 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
94 94 )
95 95
96 96 lfmatcher = None
97 97 if lfutil.islfilesrepo(repo):
98 98 lfpats = ui.configlist(lfutil.longname, b'patterns')
99 99 if lfpats:
100 100 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
101 101
102 102 lfnames = []
103 103 m = matcher
104 104
105 105 wctx = repo[None]
106 106 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
107 107 exact = m.exact(f)
108 108 lfile = lfutil.standin(f) in wctx
109 109 nfile = f in wctx
110 110 exists = lfile or nfile
111 111
112 112 # Don't warn the user when they attempt to add a normal tracked file.
113 113 # The normal add code will do that for us.
114 114 if exact and exists:
115 115 if lfile:
116 116 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
117 117 continue
118 118
119 119 if (exact or not exists) and not lfutil.isstandin(f):
120 120 # In case the file was removed previously, but not committed
121 121 # (issue3507)
122 122 if not repo.wvfs.exists(f):
123 123 continue
124 124
125 125 abovemin = (
126 126 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
127 127 )
128 128 if large or abovemin or (lfmatcher and lfmatcher(f)):
129 129 lfnames.append(f)
130 130 if ui.verbose or not exact:
131 131 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
132 132
133 133 bad = []
134 134
135 135 # Need to lock, otherwise there could be a race condition between
136 136 # when standins are created and added to the repo.
137 137 with repo.wlock():
138 138 if not opts.get('dry_run'):
139 139 standins = []
140 140 lfdirstate = lfutil.openlfdirstate(ui, repo)
141 141 for f in lfnames:
142 142 standinname = lfutil.standin(f)
143 143 lfutil.writestandin(
144 144 repo,
145 145 standinname,
146 146 hash=b'',
147 147 executable=lfutil.getexecutable(repo.wjoin(f)),
148 148 )
149 149 standins.append(standinname)
150 150 if lfdirstate[f] == b'r':
151 151 lfdirstate.normallookup(f)
152 152 else:
153 153 lfdirstate.add(f)
154 154 lfdirstate.write()
155 155 bad += [
156 156 lfutil.splitstandin(f)
157 157 for f in repo[None].add(standins)
158 158 if f in m.files()
159 159 ]
160 160
161 161 added = [f for f in lfnames if f not in bad]
162 162 return added, bad
163 163
164 164
165 165 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
166 166 after = opts.get('after')
167 167 m = composelargefilematcher(matcher, repo[None].manifest())
168 168 with lfstatus(repo):
169 169 s = repo.status(match=m, clean=not isaddremove)
170 170 manifest = repo[None].manifest()
171 171 modified, added, deleted, clean = [
172 172 [f for f in list if lfutil.standin(f) in manifest]
173 173 for list in (s.modified, s.added, s.deleted, s.clean)
174 174 ]
175 175
176 176 def warn(files, msg):
177 177 for f in files:
178 178 ui.warn(msg % uipathfn(f))
179 179 return int(len(files) > 0)
180 180
181 181 if after:
182 182 remove = deleted
183 183 result = warn(
184 184 modified + added + clean, _(b'not removing %s: file still exists\n')
185 185 )
186 186 else:
187 187 remove = deleted + clean
188 188 result = warn(
189 189 modified,
190 190 _(
191 191 b'not removing %s: file is modified (use -f'
192 192 b' to force removal)\n'
193 193 ),
194 194 )
195 195 result = (
196 196 warn(
197 197 added,
198 198 _(
199 199 b'not removing %s: file has been marked for add'
200 200 b' (use forget to undo)\n'
201 201 ),
202 202 )
203 203 or result
204 204 )
205 205
206 206 # Need to lock because standin files are deleted then removed from the
207 207 # repository and we could race in-between.
208 208 with repo.wlock():
209 209 lfdirstate = lfutil.openlfdirstate(ui, repo)
210 210 for f in sorted(remove):
211 211 if ui.verbose or not m.exact(f):
212 212 ui.status(_(b'removing %s\n') % uipathfn(f))
213 213
214 214 if not dryrun:
215 215 if not after:
216 216 repo.wvfs.unlinkpath(f, ignoremissing=True)
217 217
218 218 if dryrun:
219 219 return result
220 220
221 221 remove = [lfutil.standin(f) for f in remove]
222 222 # If this is being called by addremove, let the original addremove
223 223 # function handle this.
224 224 if not isaddremove:
225 225 for f in remove:
226 226 repo.wvfs.unlinkpath(f, ignoremissing=True)
227 227 repo[None].forget(remove)
228 228
229 229 for f in remove:
230 230 lfutil.synclfdirstate(
231 231 repo, lfdirstate, lfutil.splitstandin(f), False
232 232 )
233 233
234 234 lfdirstate.write()
235 235
236 236 return result
237 237
238 238
239 239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 240 # appear at their right place in the manifests.
241 241 @eh.wrapfunction(webcommands, b'decodepath')
242 242 def decodepath(orig, path):
243 243 return lfutil.splitstandin(path) or path
244 244
245 245
246 246 # -- Wrappers: modify existing commands --------------------------------
247 247
248 248
249 249 @eh.wrapcommand(
250 250 b'add',
251 251 opts=[
252 252 (b'', b'large', None, _(b'add as largefile')),
253 253 (b'', b'normal', None, _(b'add as normal file')),
254 254 (
255 255 b'',
256 256 b'lfsize',
257 257 b'',
258 258 _(
259 259 b'add all files above this size (in megabytes) '
260 260 b'as largefiles (default: 10)'
261 261 ),
262 262 ),
263 263 ],
264 264 )
265 265 def overrideadd(orig, ui, repo, *pats, **opts):
266 266 if opts.get('normal') and opts.get('large'):
267 267 raise error.Abort(_(b'--normal cannot be used with --large'))
268 268 return orig(ui, repo, *pats, **opts)
269 269
270 270
271 271 @eh.wrapfunction(cmdutil, b'add')
272 272 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
273 273 # The --normal flag short circuits this override
274 274 if opts.get('normal'):
275 275 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
276 276
277 277 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
278 278 normalmatcher = composenormalfilematcher(
279 279 matcher, repo[None].manifest(), ladded
280 280 )
281 281 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
282 282
283 283 bad.extend(f for f in lbad)
284 284 return bad
285 285
286 286
287 287 @eh.wrapfunction(cmdutil, b'remove')
288 288 def cmdutilremove(
289 289 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
290 290 ):
291 291 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
292 292 result = orig(
293 293 ui,
294 294 repo,
295 295 normalmatcher,
296 296 prefix,
297 297 uipathfn,
298 298 after,
299 299 force,
300 300 subrepos,
301 301 dryrun,
302 302 )
303 303 return (
304 304 removelargefiles(
305 305 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
306 306 )
307 307 or result
308 308 )
309 309
310 310
311 311 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
312 312 def overridestatusfn(orig, repo, rev2, **opts):
313 313 with lfstatus(repo._repo):
314 314 return orig(repo, rev2, **opts)
315 315
316 316
317 317 @eh.wrapcommand(b'status')
318 318 def overridestatus(orig, ui, repo, *pats, **opts):
319 319 with lfstatus(repo):
320 320 return orig(ui, repo, *pats, **opts)
321 321
322 322
323 323 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
324 324 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
325 325 with lfstatus(repo._repo):
326 326 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
327 327
328 328
329 329 @eh.wrapcommand(b'log')
330 330 def overridelog(orig, ui, repo, *pats, **opts):
331 331 def overridematchandpats(
332 332 orig,
333 333 ctx,
334 334 pats=(),
335 335 opts=None,
336 336 globbed=False,
337 337 default=b'relpath',
338 338 badfn=None,
339 339 ):
340 340 """Matcher that merges root directory with .hglf, suitable for log.
341 341 It is still possible to match .hglf directly.
342 342 For any listed files run log on the standin too.
343 343 matchfn tries both the given filename and with .hglf stripped.
344 344 """
345 345 if opts is None:
346 346 opts = {}
347 347 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
348 348 m, p = copy.copy(matchandpats)
349 349
350 350 if m.always():
351 351 # We want to match everything anyway, so there's no benefit trying
352 352 # to add standins.
353 353 return matchandpats
354 354
355 355 pats = set(p)
356 356
357 357 def fixpats(pat, tostandin=lfutil.standin):
358 358 if pat.startswith(b'set:'):
359 359 return pat
360 360
361 361 kindpat = matchmod._patsplit(pat, None)
362 362
363 363 if kindpat[0] is not None:
364 364 return kindpat[0] + b':' + tostandin(kindpat[1])
365 365 return tostandin(kindpat[1])
366 366
367 367 cwd = repo.getcwd()
368 368 if cwd:
369 369 hglf = lfutil.shortname
370 370 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
371 371
372 372 def tostandin(f):
373 373 # The file may already be a standin, so truncate the back
374 374 # prefix and test before mangling it. This avoids turning
375 375 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
376 376 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
377 377 return f
378 378
379 379 # An absolute path is from outside the repo, so truncate the
380 380 # path to the root before building the standin. Otherwise cwd
381 381 # is somewhere in the repo, relative to root, and needs to be
382 382 # prepended before building the standin.
383 383 if os.path.isabs(cwd):
384 384 f = f[len(back) :]
385 385 else:
386 386 f = cwd + b'/' + f
387 387 return back + lfutil.standin(f)
388 388
389 389 else:
390 390
391 391 def tostandin(f):
392 392 if lfutil.isstandin(f):
393 393 return f
394 394 return lfutil.standin(f)
395 395
396 396 pats.update(fixpats(f, tostandin) for f in p)
397 397
398 398 for i in range(0, len(m._files)):
399 399 # Don't add '.hglf' to m.files, since that is already covered by '.'
400 400 if m._files[i] == b'.':
401 401 continue
402 402 standin = lfutil.standin(m._files[i])
403 403 # If the "standin" is a directory, append instead of replace to
404 404 # support naming a directory on the command line with only
405 405 # largefiles. The original directory is kept to support normal
406 406 # files.
407 407 if standin in ctx:
408 408 m._files[i] = standin
409 409 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
410 410 m._files.append(standin)
411 411
412 412 m._fileset = set(m._files)
413 413 m.always = lambda: False
414 414 origmatchfn = m.matchfn
415 415
416 416 def lfmatchfn(f):
417 417 lf = lfutil.splitstandin(f)
418 418 if lf is not None and origmatchfn(lf):
419 419 return True
420 420 r = origmatchfn(f)
421 421 return r
422 422
423 423 m.matchfn = lfmatchfn
424 424
425 425 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
426 426 return m, pats
427 427
428 428 # For hg log --patch, the match object is used in two different senses:
429 429 # (1) to determine what revisions should be printed out, and
430 430 # (2) to determine what files to print out diffs for.
431 431 # The magic matchandpats override should be used for case (1) but not for
432 432 # case (2).
433 433 oldmatchandpats = scmutil.matchandpats
434 434
435 435 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
436 436 wctx = repo[None]
437 437 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
438 438 return lambda ctx: match
439 439
440 440 wrappedmatchandpats = extensions.wrappedfunction(
441 441 scmutil, b'matchandpats', overridematchandpats
442 442 )
443 443 wrappedmakefilematcher = extensions.wrappedfunction(
444 444 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
445 445 )
446 446 with wrappedmatchandpats, wrappedmakefilematcher:
447 447 return orig(ui, repo, *pats, **opts)
448 448
449 449
450 450 @eh.wrapcommand(
451 451 b'verify',
452 452 opts=[
453 453 (
454 454 b'',
455 455 b'large',
456 456 None,
457 457 _(b'verify that all largefiles in current revision exists'),
458 458 ),
459 459 (
460 460 b'',
461 461 b'lfa',
462 462 None,
463 463 _(b'verify largefiles in all revisions, not just current'),
464 464 ),
465 465 (
466 466 b'',
467 467 b'lfc',
468 468 None,
469 469 _(b'verify local largefile contents, not just existence'),
470 470 ),
471 471 ],
472 472 )
473 473 def overrideverify(orig, ui, repo, *pats, **opts):
474 474 large = opts.pop('large', False)
475 475 all = opts.pop('lfa', False)
476 476 contents = opts.pop('lfc', False)
477 477
478 478 result = orig(ui, repo, *pats, **opts)
479 479 if large or all or contents:
480 480 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
481 481 return result
482 482
483 483
484 484 @eh.wrapcommand(
485 485 b'debugstate',
486 486 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
487 487 )
488 488 def overridedebugstate(orig, ui, repo, *pats, **opts):
489 489 large = opts.pop('large', False)
490 490 if large:
491 491
492 492 class fakerepo(object):
493 493 dirstate = lfutil.openlfdirstate(ui, repo)
494 494
495 495 orig(ui, fakerepo, *pats, **opts)
496 496 else:
497 497 orig(ui, repo, *pats, **opts)
498 498
499 499
500 500 # Before starting the manifest merge, merge.updates will call
501 501 # _checkunknownfile to check if there are any files in the merged-in
502 502 # changeset that collide with unknown files in the working copy.
503 503 #
504 504 # The largefiles are seen as unknown, so this prevents us from merging
505 505 # in a file 'foo' if we already have a largefile with the same name.
506 506 #
507 507 # The overridden function filters the unknown files by removing any
508 508 # largefiles. This makes the merge proceed and we can then handle this
509 509 # case further in the overridden calculateupdates function below.
510 510 @eh.wrapfunction(merge, b'_checkunknownfile')
511 511 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
512 512 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
513 513 return False
514 514 return origfn(repo, wctx, mctx, f, f2)
515 515
516 516
517 517 # The manifest merge handles conflicts on the manifest level. We want
518 518 # to handle changes in largefile-ness of files at this level too.
519 519 #
520 520 # The strategy is to run the original calculateupdates and then process
521 521 # the action list it outputs. There are two cases we need to deal with:
522 522 #
523 523 # 1. Normal file in p1, largefile in p2. Here the largefile is
524 524 # detected via its standin file, which will enter the working copy
525 525 # with a "get" action. It is not "merge" since the standin is all
526 526 # Mercurial is concerned with at this level -- the link to the
527 527 # existing normal file is not relevant here.
528 528 #
529 529 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
530 530 # since the largefile will be present in the working copy and
531 531 # different from the normal file in p2. Mercurial therefore
532 532 # triggers a merge action.
533 533 #
534 534 # In both cases, we prompt the user and emit new actions to either
535 535 # remove the standin (if the normal file was kept) or to remove the
536 536 # normal file and get the standin (if the largefile was kept). The
537 537 # default prompt answer is to use the largefile version since it was
538 538 # presumably changed on purpose.
539 539 #
540 540 # Finally, the merge.applyupdates function will then take care of
541 541 # writing the files into the working copy and lfcommands.updatelfiles
542 542 # will update the largefiles.
543 543 @eh.wrapfunction(merge, b'calculateupdates')
544 544 def overridecalculateupdates(
545 545 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
546 546 ):
547 547 overwrite = force and not branchmerge
548 548 mresult = origfn(
549 549 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
550 550 )
551 551
552 552 if overwrite:
553 553 return mresult
554 554
555 555 # Convert to dictionary with filename as key and action as value.
556 556 lfiles = set()
557 557 for f in mresult.files():
558 558 splitstandin = lfutil.splitstandin(f)
559 559 if splitstandin is not None and splitstandin in p1:
560 560 lfiles.add(splitstandin)
561 561 elif lfutil.standin(f) in p1:
562 562 lfiles.add(f)
563 563
564 564 for lfile in sorted(lfiles):
565 565 standin = lfutil.standin(lfile)
566 566 (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
567 567 (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
568 568 if sm in (b'g', b'dc') and lm != b'r':
569 569 if sm == b'dc':
570 570 f1, f2, fa, move, anc = sargs
571 571 sargs = (p2[f2].flags(), False)
572 572 # Case 1: normal file in the working copy, largefile in
573 573 # the second parent
574 574 usermsg = (
575 575 _(
576 576 b'remote turned local normal file %s into a largefile\n'
577 577 b'use (l)argefile or keep (n)ormal file?'
578 578 b'$$ &Largefile $$ &Normal file'
579 579 )
580 580 % lfile
581 581 )
582 582 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
583 583 mresult.addfile(lfile, b'r', None, b'replaced by standin')
584 584 mresult.addfile(standin, b'g', sargs, b'replaces standin')
585 585 else: # keep local normal file
586 586 mresult.addfile(lfile, b'k', None, b'replaces standin')
587 587 if branchmerge:
588 588 mresult.addfile(
589 589 standin, b'k', None, b'replaced by non-standin',
590 590 )
591 591 else:
592 592 mresult.addfile(
593 593 standin, b'r', None, b'replaced by non-standin',
594 594 )
595 595 elif lm in (b'g', b'dc') and sm != b'r':
596 596 if lm == b'dc':
597 597 f1, f2, fa, move, anc = largs
598 598 largs = (p2[f2].flags(), False)
599 599 # Case 2: largefile in the working copy, normal file in
600 600 # the second parent
601 601 usermsg = (
602 602 _(
603 603 b'remote turned local largefile %s into a normal file\n'
604 604 b'keep (l)argefile or use (n)ormal file?'
605 605 b'$$ &Largefile $$ &Normal file'
606 606 )
607 607 % lfile
608 608 )
609 609 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
610 610 if branchmerge:
611 611 # largefile can be restored from standin safely
612 612 mresult.addfile(
613 613 lfile, b'k', None, b'replaced by standin',
614 614 )
615 615 mresult.addfile(standin, b'k', None, b'replaces standin')
616 616 else:
617 617 # "lfile" should be marked as "removed" without
618 618 # removal of itself
619 619 mresult.addfile(
620 620 lfile,
621 621 MERGE_ACTION_LARGEFILE_MARK_REMOVED,
622 622 None,
623 623 b'forget non-standin largefile',
624 624 )
625 625
626 626 # linear-merge should treat this largefile as 're-added'
627 627 mresult.addfile(standin, b'a', None, b'keep standin')
628 628 else: # pick remote normal file
629 629 mresult.addfile(lfile, b'g', largs, b'replaces standin')
630 630 mresult.addfile(
631 631 standin, b'r', None, b'replaced by non-standin',
632 632 )
633 633
634 634 return mresult
635 635
636 636
637 637 @eh.wrapfunction(mergestatemod, b'recordupdates')
638 638 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
639 639 if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
640 640 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
641 641 for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
642 642 # this should be executed before 'orig', to execute 'remove'
643 643 # before all other actions
644 644 repo.dirstate.remove(lfile)
645 645 # make sure lfile doesn't get synclfdirstate'd as normal
646 646 lfdirstate.add(lfile)
647 647 lfdirstate.write()
648 648
649 649 return orig(repo, actions, branchmerge, getfiledata)
650 650
651 651
652 652 # Override filemerge to prompt the user about how they wish to merge
653 653 # largefiles. This will handle identical edits without prompting the user.
654 654 @eh.wrapfunction(filemerge, b'_filemerge')
655 655 def overridefilemerge(
656 656 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
657 657 ):
658 658 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
659 659 return origfn(
660 660 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
661 661 )
662 662
663 663 ahash = lfutil.readasstandin(fca).lower()
664 664 dhash = lfutil.readasstandin(fcd).lower()
665 665 ohash = lfutil.readasstandin(fco).lower()
666 666 if (
667 667 ohash != ahash
668 668 and ohash != dhash
669 669 and (
670 670 dhash == ahash
671 671 or repo.ui.promptchoice(
672 672 _(
673 673 b'largefile %s has a merge conflict\nancestor was %s\n'
674 674 b'you can keep (l)ocal %s or take (o)ther %s.\n'
675 675 b'what do you want to do?'
676 676 b'$$ &Local $$ &Other'
677 677 )
678 678 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
679 679 0,
680 680 )
681 681 == 1
682 682 )
683 683 ):
684 684 repo.wwrite(fcd.path(), fco.data(), fco.flags())
685 685 return True, 0, False
686 686
687 687
688 688 @eh.wrapfunction(copiesmod, b'pathcopies')
689 689 def copiespathcopies(orig, ctx1, ctx2, match=None):
690 690 copies = orig(ctx1, ctx2, match=match)
691 691 updated = {}
692 692
693 693 for k, v in pycompat.iteritems(copies):
694 694 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
695 695
696 696 return updated
697 697
698 698
699 699 # Copy first changes the matchers to match standins instead of
700 700 # largefiles. Then it overrides util.copyfile in that function it
701 701 # checks if the destination largefile already exists. It also keeps a
702 702 # list of copied files so that the largefiles can be copied and the
703 703 # dirstate updated.
704 704 @eh.wrapfunction(cmdutil, b'copy')
705 705 def overridecopy(orig, ui, repo, pats, opts, rename=False):
706 706 # doesn't remove largefile on rename
707 707 if len(pats) < 2:
708 708 # this isn't legal, let the original function deal with it
709 709 return orig(ui, repo, pats, opts, rename)
710 710
711 711 # This could copy both lfiles and normal files in one command,
712 712 # but we don't want to do that. First replace their matcher to
713 713 # only match normal files and run it, then replace it to just
714 714 # match largefiles and run it again.
715 715 nonormalfiles = False
716 716 nolfiles = False
717 717 manifest = repo[None].manifest()
718 718
719 719 def normalfilesmatchfn(
720 720 orig,
721 721 ctx,
722 722 pats=(),
723 723 opts=None,
724 724 globbed=False,
725 725 default=b'relpath',
726 726 badfn=None,
727 727 ):
728 728 if opts is None:
729 729 opts = {}
730 730 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
731 731 return composenormalfilematcher(match, manifest)
732 732
733 733 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
734 734 try:
735 735 result = orig(ui, repo, pats, opts, rename)
736 736 except error.Abort as e:
737 737 if pycompat.bytestr(e) != _(b'no files to copy'):
738 738 raise e
739 739 else:
740 740 nonormalfiles = True
741 741 result = 0
742 742
743 743 # The first rename can cause our current working directory to be removed.
744 744 # In that case there is nothing left to copy/rename so just quit.
745 745 try:
746 746 repo.getcwd()
747 747 except OSError:
748 748 return result
749 749
750 750 def makestandin(relpath):
751 751 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
752 752 return repo.wvfs.join(lfutil.standin(path))
753 753
754 754 fullpats = scmutil.expandpats(pats)
755 755 dest = fullpats[-1]
756 756
757 757 if os.path.isdir(dest):
758 758 if not os.path.isdir(makestandin(dest)):
759 759 os.makedirs(makestandin(dest))
760 760
761 761 try:
762 762 # When we call orig below it creates the standins but we don't add
763 763 # them to the dir state until later so lock during that time.
764 764 wlock = repo.wlock()
765 765
766 766 manifest = repo[None].manifest()
767 767
768 768 def overridematch(
769 769 orig,
770 770 ctx,
771 771 pats=(),
772 772 opts=None,
773 773 globbed=False,
774 774 default=b'relpath',
775 775 badfn=None,
776 776 ):
777 777 if opts is None:
778 778 opts = {}
779 779 newpats = []
780 780 # The patterns were previously mangled to add the standin
781 781 # directory; we need to remove that now
782 782 for pat in pats:
783 783 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
784 784 newpats.append(pat.replace(lfutil.shortname, b''))
785 785 else:
786 786 newpats.append(pat)
787 787 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
788 788 m = copy.copy(match)
789 789 lfile = lambda f: lfutil.standin(f) in manifest
790 790 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
791 791 m._fileset = set(m._files)
792 792 origmatchfn = m.matchfn
793 793
794 794 def matchfn(f):
795 795 lfile = lfutil.splitstandin(f)
796 796 return (
797 797 lfile is not None
798 798 and (f in manifest)
799 799 and origmatchfn(lfile)
800 800 or None
801 801 )
802 802
803 803 m.matchfn = matchfn
804 804 return m
805 805
806 806 listpats = []
807 807 for pat in pats:
808 808 if matchmod.patkind(pat) is not None:
809 809 listpats.append(pat)
810 810 else:
811 811 listpats.append(makestandin(pat))
812 812
813 813 copiedfiles = []
814 814
815 815 def overridecopyfile(orig, src, dest, *args, **kwargs):
816 816 if lfutil.shortname in src and dest.startswith(
817 817 repo.wjoin(lfutil.shortname)
818 818 ):
819 819 destlfile = dest.replace(lfutil.shortname, b'')
820 820 if not opts[b'force'] and os.path.exists(destlfile):
821 821 raise IOError(
822 822 b'', _(b'destination largefile already exists')
823 823 )
824 824 copiedfiles.append((src, dest))
825 825 orig(src, dest, *args, **kwargs)
826 826
827 827 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
828 828 with extensions.wrappedfunction(scmutil, b'match', overridematch):
829 829 result += orig(ui, repo, listpats, opts, rename)
830 830
831 831 lfdirstate = lfutil.openlfdirstate(ui, repo)
832 832 for (src, dest) in copiedfiles:
833 833 if lfutil.shortname in src and dest.startswith(
834 834 repo.wjoin(lfutil.shortname)
835 835 ):
836 836 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
837 837 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
838 838 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
839 839 if not os.path.isdir(destlfiledir):
840 840 os.makedirs(destlfiledir)
841 841 if rename:
842 842 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
843 843
844 844 # The file is gone, but this deletes any empty parent
845 845 # directories as a side-effect.
846 846 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
847 847 lfdirstate.remove(srclfile)
848 848 else:
849 849 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
850 850
851 851 lfdirstate.add(destlfile)
852 852 lfdirstate.write()
853 853 except error.Abort as e:
854 854 if pycompat.bytestr(e) != _(b'no files to copy'):
855 855 raise e
856 856 else:
857 857 nolfiles = True
858 858 finally:
859 859 wlock.release()
860 860
861 861 if nolfiles and nonormalfiles:
862 862 raise error.Abort(_(b'no files to copy'))
863 863
864 864 return result
865 865
866 866
867 867 # When the user calls revert, we have to be careful to not revert any
868 868 # changes to other largefiles accidentally. This means we have to keep
869 869 # track of the largefiles that are being reverted so we only pull down
870 870 # the necessary largefiles.
871 871 #
872 872 # Standins are only updated (to match the hash of largefiles) before
873 873 # commits. Update the standins then run the original revert, changing
874 874 # the matcher to hit standins instead of largefiles. Based on the
875 875 # resulting standins update the largefiles.
876 876 @eh.wrapfunction(cmdutil, b'revert')
877 877 def overriderevert(orig, ui, repo, ctx, *pats, **opts):
878 878 # Because we put the standins in a bad state (by updating them)
879 879 # and then return them to a correct state we need to lock to
880 880 # prevent others from changing them in their incorrect state.
881 881 with repo.wlock():
882 882 lfdirstate = lfutil.openlfdirstate(ui, repo)
883 883 s = lfutil.lfdirstatestatus(lfdirstate, repo)
884 884 lfdirstate.write()
885 885 for lfile in s.modified:
886 886 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
887 887 for lfile in s.deleted:
888 888 fstandin = lfutil.standin(lfile)
889 889 if repo.wvfs.exists(fstandin):
890 890 repo.wvfs.unlink(fstandin)
891 891
892 892 oldstandins = lfutil.getstandinsstate(repo)
893 893
894 894 def overridematch(
895 895 orig,
896 896 mctx,
897 897 pats=(),
898 898 opts=None,
899 899 globbed=False,
900 900 default=b'relpath',
901 901 badfn=None,
902 902 ):
903 903 if opts is None:
904 904 opts = {}
905 905 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
906 906 m = copy.copy(match)
907 907
908 908 # revert supports recursing into subrepos, and though largefiles
909 909 # currently doesn't work correctly in that case, this match is
910 910 # called, so the lfdirstate above may not be the correct one for
911 911 # this invocation of match.
912 912 lfdirstate = lfutil.openlfdirstate(
913 913 mctx.repo().ui, mctx.repo(), False
914 914 )
915 915
916 916 wctx = repo[None]
917 917 matchfiles = []
918 918 for f in m._files:
919 919 standin = lfutil.standin(f)
920 920 if standin in ctx or standin in mctx:
921 921 matchfiles.append(standin)
922 922 elif standin in wctx or lfdirstate[f] == b'r':
923 923 continue
924 924 else:
925 925 matchfiles.append(f)
926 926 m._files = matchfiles
927 927 m._fileset = set(m._files)
928 928 origmatchfn = m.matchfn
929 929
930 930 def matchfn(f):
931 931 lfile = lfutil.splitstandin(f)
932 932 if lfile is not None:
933 933 return origmatchfn(lfile) and (f in ctx or f in mctx)
934 934 return origmatchfn(f)
935 935
936 936 m.matchfn = matchfn
937 937 return m
938 938
939 939 with extensions.wrappedfunction(scmutil, b'match', overridematch):
940 940 orig(ui, repo, ctx, *pats, **opts)
941 941
942 942 newstandins = lfutil.getstandinsstate(repo)
943 943 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
944 944 # lfdirstate should be 'normallookup'-ed for updated files,
945 945 # because reverting doesn't touch dirstate for 'normal' files
946 946 # when target revision is explicitly specified: in such case,
947 947 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
948 948 # of target (standin) file.
949 949 lfcommands.updatelfiles(
950 950 ui, repo, filelist, printmessage=False, normallookup=True
951 951 )
952 952
953 953
954 954 # after pulling changesets, we need to take some extra care to get
955 955 # largefiles updated remotely
956 956 @eh.wrapcommand(
957 957 b'pull',
958 958 opts=[
959 959 (
960 960 b'',
961 961 b'all-largefiles',
962 962 None,
963 963 _(b'download all pulled versions of largefiles (DEPRECATED)'),
964 964 ),
965 965 (
966 966 b'',
967 967 b'lfrev',
968 968 [],
969 969 _(b'download largefiles for these revisions'),
970 970 _(b'REV'),
971 971 ),
972 972 ],
973 973 )
974 974 def overridepull(orig, ui, repo, source=None, **opts):
975 975 revsprepull = len(repo)
976 976 if not source:
977 977 source = b'default'
978 978 repo.lfpullsource = source
979 979 result = orig(ui, repo, source, **opts)
980 980 revspostpull = len(repo)
981 981 lfrevs = opts.get('lfrev', [])
982 982 if opts.get('all_largefiles'):
983 983 lfrevs.append(b'pulled()')
984 984 if lfrevs and revspostpull > revsprepull:
985 985 numcached = 0
986 986 repo.firstpulled = revsprepull # for pulled() revset expression
987 987 try:
988 988 for rev in scmutil.revrange(repo, lfrevs):
989 989 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
990 990 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
991 991 numcached += len(cached)
992 992 finally:
993 993 del repo.firstpulled
994 994 ui.status(_(b"%d largefiles cached\n") % numcached)
995 995 return result
996 996
997 997
998 998 @eh.wrapcommand(
999 999 b'push',
1000 1000 opts=[
1001 1001 (
1002 1002 b'',
1003 1003 b'lfrev',
1004 1004 [],
1005 1005 _(b'upload largefiles for these revisions'),
1006 1006 _(b'REV'),
1007 1007 )
1008 1008 ],
1009 1009 )
1010 1010 def overridepush(orig, ui, repo, *args, **kwargs):
1011 1011 """Override push command and store --lfrev parameters in opargs"""
1012 1012 lfrevs = kwargs.pop('lfrev', None)
1013 1013 if lfrevs:
1014 1014 opargs = kwargs.setdefault('opargs', {})
1015 1015 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1016 1016 return orig(ui, repo, *args, **kwargs)
1017 1017
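# Usage note (illustrative): any revset accepted by scmutil.revrange() can
# be given, e.g.
#   hg push --lfrev "tip"
# uploads the largefiles referenced by the 'tip' revision as well.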
1018 1018
1019 1019 @eh.wrapfunction(exchange, b'pushoperation')
1020 1020 def exchangepushoperation(orig, *args, **kwargs):
1021 1021 """Override pushoperation constructor and store lfrevs parameter"""
1022 1022 lfrevs = kwargs.pop('lfrevs', None)
1023 1023 pushop = orig(*args, **kwargs)
1024 1024 pushop.lfrevs = lfrevs
1025 1025 return pushop
1026 1026
1027 1027
1028 1028 @eh.revsetpredicate(b'pulled()')
1029 1029 def pulledrevsetsymbol(repo, subset, x):
1030 1030 """Changesets that have just been pulled.
1031 1031
1032 1032 Only available with largefiles from pull --lfrev expressions.
1033 1033
1034 1034 .. container:: verbose
1035 1035
1036 1036 Some examples:
1037 1037
1038 1038 - pull largefiles for all new changesets::
1039 1039
1040 1040 hg pull --lfrev "pulled()"
1041 1041
1042 1042 - pull largefiles for all new branch heads::
1043 1043
1044 1044 hg pull --lfrev "head(pulled()) and not closed()"
1045 1045
1046 1046 """
1047 1047
1048 1048 try:
1049 1049 firstpulled = repo.firstpulled
1050 1050 except AttributeError:
1051 1051 raise error.Abort(_(b"pulled() only available in --lfrev"))
1052 1052 return smartset.baseset([r for r in subset if r >= firstpulled])
1053 1053
1054 1054
1055 1055 @eh.wrapcommand(
1056 1056 b'clone',
1057 1057 opts=[
1058 1058 (
1059 1059 b'',
1060 1060 b'all-largefiles',
1061 1061 None,
1062 1062 _(b'download all versions of all largefiles'),
1063 1063 )
1064 1064 ],
1065 1065 )
1066 1066 def overrideclone(orig, ui, source, dest=None, **opts):
1067 1067 d = dest
1068 1068 if d is None:
1069 1069 d = hg.defaultdest(source)
1070 1070 if opts.get('all_largefiles') and not hg.islocal(d):
1071 1071 raise error.Abort(
1072 1072 _(b'--all-largefiles is incompatible with non-local destination %s')
1073 1073 % d
1074 1074 )
1075 1075
1076 1076 return orig(ui, source, dest, **opts)
1077 1077
1078 1078
1079 1079 @eh.wrapfunction(hg, b'clone')
1080 1080 def hgclone(orig, ui, opts, *args, **kwargs):
1081 1081 result = orig(ui, opts, *args, **kwargs)
1082 1082
1083 1083 if result is not None:
1084 1084 sourcerepo, destrepo = result
1085 1085 repo = destrepo.local()
1086 1086
1087 1087 # When cloning to a remote repo (like through SSH), no repo is available
1088 1088 # from the peer. Therefore the largefiles can't be downloaded and the
1089 1089 # hgrc can't be updated.
1090 1090 if not repo:
1091 1091 return result
1092 1092
1093 1093 # Caching is implicitly limited to the 'rev' option, since the dest repo was
1094 1094 # truncated at that point. The user may expect a download count with
1095 1095 # this option, so attempt the download whether or not this is a largefiles repo.
1096 1096 if opts.get(b'all_largefiles'):
1097 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1097 success, missing = lfcommands.downloadlfiles(ui, repo)
1098 1098
1099 1099 if missing != 0:
1100 1100 return None
1101 1101
1102 1102 return result
1103 1103
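# Usage note (illustrative):
#   hg clone --all-largefiles SRC DST
# additionally downloads every version of every largefile via
# downloadlfiles(); overrideclone() above rejects the flag when DST is not
# a local repository.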
1104 1104
1105 1105 @eh.wrapcommand(b'rebase', extension=b'rebase')
1106 1106 def overriderebase(orig, ui, repo, **opts):
1107 1107 if not util.safehasattr(repo, b'_largefilesenabled'):
1108 1108 return orig(ui, repo, **opts)
1109 1109
1110 1110 resuming = opts.get('continue')
1111 1111 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1112 1112 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1113 1113 try:
1114 1114 return orig(ui, repo, **opts)
1115 1115 finally:
1116 1116 repo._lfstatuswriters.pop()
1117 1117 repo._lfcommithooks.pop()
1118 1118
1119 1119
1120 1120 @eh.wrapcommand(b'archive')
1121 1121 def overridearchivecmd(orig, ui, repo, dest, **opts):
1122 1122 with lfstatus(repo.unfiltered()):
1123 1123 return orig(ui, repo.unfiltered(), dest, **opts)
1124 1124
1125 1125
1126 1126 @eh.wrapfunction(webcommands, b'archive')
1127 1127 def hgwebarchive(orig, web):
1128 1128 with lfstatus(web.repo):
1129 1129 return orig(web)
1130 1130
1131 1131
1132 1132 @eh.wrapfunction(archival, b'archive')
1133 1133 def overridearchive(
1134 1134 orig,
1135 1135 repo,
1136 1136 dest,
1137 1137 node,
1138 1138 kind,
1139 1139 decode=True,
1140 1140 match=None,
1141 1141 prefix=b'',
1142 1142 mtime=None,
1143 1143 subrepos=None,
1144 1144 ):
1145 1145 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1146 1146 # unfiltered repo's attr, so check that as well.
1147 1147 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1148 1148 return orig(
1149 1149 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1150 1150 )
1151 1151
1152 1152 # No need to lock because we are only reading history and
1153 1153 # largefile caches, neither of which are modified.
1154 1154 if node is not None:
1155 1155 lfcommands.cachelfiles(repo.ui, repo, node)
1156 1156
1157 1157 if kind not in archival.archivers:
1158 1158 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1159 1159
1160 1160 ctx = repo[node]
1161 1161
1162 1162 if kind == b'files':
1163 1163 if prefix:
1164 1164 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1165 1165 else:
1166 1166 prefix = archival.tidyprefix(dest, kind, prefix)
1167 1167
1168 1168 def write(name, mode, islink, getdata):
1169 1169 if match and not match(name):
1170 1170 return
1171 1171 data = getdata()
1172 1172 if decode:
1173 1173 data = repo.wwritedata(name, data)
1174 1174 archiver.addfile(prefix + name, mode, islink, data)
1175 1175
1176 1176 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1177 1177
1178 1178 if repo.ui.configbool(b"ui", b"archivemeta"):
1179 1179 write(
1180 1180 b'.hg_archival.txt',
1181 1181 0o644,
1182 1182 False,
1183 1183 lambda: archival.buildmetadata(ctx),
1184 1184 )
1185 1185
1186 1186 for f in ctx:
1187 1187 ff = ctx.flags(f)
1188 1188 getdata = ctx[f].data
1189 1189 lfile = lfutil.splitstandin(f)
1190 1190 if lfile is not None:
1191 1191 if node is not None:
1192 1192 path = lfutil.findfile(repo, getdata().strip())
1193 1193
1194 1194 if path is None:
1195 1195 raise error.Abort(
1196 1196 _(
1197 1197 b'largefile %s not found in repo store or system cache'
1198 1198 )
1199 1199 % lfile
1200 1200 )
1201 1201 else:
1202 1202 path = lfile
1203 1203
1204 1204 f = lfile
1205 1205
1206 1206 getdata = lambda: util.readfile(path)
1207 1207 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1208 1208
1209 1209 if subrepos:
1210 1210 for subpath in sorted(ctx.substate):
1211 1211 sub = ctx.workingsub(subpath)
1212 1212 submatch = matchmod.subdirmatcher(subpath, match)
1213 1213 subprefix = prefix + subpath + b'/'
1214 1214
1215 1215 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1216 1216 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1217 1217 # allow only hgsubrepos to set this, instead of the current scheme
1218 1218 # where the parent sets this for the child.
1219 1219 with (
1220 1220 util.safehasattr(sub, '_repo')
1221 1221 and lfstatus(sub._repo)
1222 1222 or util.nullcontextmanager()
1223 1223 ):
1224 1224 sub.archive(archiver, subprefix, submatch)
1225 1225
1226 1226 archiver.done()
1227 1227
1228 1228
1229 1229 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1230 1230 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1231 1231 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1232 1232 if not lfenabled or not repo._repo.lfstatus:
1233 1233 return orig(repo, archiver, prefix, match, decode)
1234 1234
1235 1235 repo._get(repo._state + (b'hg',))
1236 1236 rev = repo._state[1]
1237 1237 ctx = repo._repo[rev]
1238 1238
1239 1239 if ctx.node() is not None:
1240 1240 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1241 1241
1242 1242 def write(name, mode, islink, getdata):
1243 1243 # At this point, the standin has been replaced with the largefile name,
1244 1244 # so the normal matcher works here without the lfutil variants.
1245 1245 if match and not match(name):
1246 1246 return
1247 1247 data = getdata()
1248 1248 if decode:
1249 1249 data = repo._repo.wwritedata(name, data)
1250 1250
1251 1251 archiver.addfile(prefix + name, mode, islink, data)
1252 1252
1253 1253 for f in ctx:
1254 1254 ff = ctx.flags(f)
1255 1255 getdata = ctx[f].data
1256 1256 lfile = lfutil.splitstandin(f)
1257 1257 if lfile is not None:
1258 1258 if ctx.node() is not None:
1259 1259 path = lfutil.findfile(repo._repo, getdata().strip())
1260 1260
1261 1261 if path is None:
1262 1262 raise error.Abort(
1263 1263 _(
1264 1264 b'largefile %s not found in repo store or system cache'
1265 1265 )
1266 1266 % lfile
1267 1267 )
1268 1268 else:
1269 1269 path = lfile
1270 1270
1271 1271 f = lfile
1272 1272
1273 1273 getdata = lambda: util.readfile(os.path.join(prefix, path))
1274 1274
1275 1275 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1276 1276
1277 1277 for subpath in sorted(ctx.substate):
1278 1278 sub = ctx.workingsub(subpath)
1279 1279 submatch = matchmod.subdirmatcher(subpath, match)
1280 1280 subprefix = prefix + subpath + b'/'
1281 1281 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1282 1282 # infer and possibly set lfstatus at the top of this function. That
1283 1283 # would allow only hgsubrepos to set this, instead of the current scheme
1284 1284 # where the parent sets this for the child.
1285 1285 with (
1286 1286 util.safehasattr(sub, '_repo')
1287 1287 and lfstatus(sub._repo)
1288 1288 or util.nullcontextmanager()
1289 1289 ):
1290 1290 sub.archive(archiver, subprefix, submatch, decode)
1291 1291
1292 1292
1293 1293 # If a largefile is modified, the change is not reflected in its
1294 1294 # standin until a commit. cmdutil.bailifchanged() raises an exception
1295 1295 # if the repo has uncommitted changes. Wrap it to also check if
1296 1296 # largefiles were changed. This is used by bisect, backout and fetch.
1297 1297 @eh.wrapfunction(cmdutil, b'bailifchanged')
1298 1298 def overridebailifchanged(orig, repo, *args, **kwargs):
1299 1299 orig(repo, *args, **kwargs)
1300 1300 with lfstatus(repo):
1301 1301 s = repo.status()
1302 1302 if s.modified or s.added or s.removed or s.deleted:
1303 1303 raise error.Abort(_(b'uncommitted changes'))
1304 1304
1305 1305
1306 1306 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1307 1307 def postcommitstatus(orig, repo, *args, **kwargs):
1308 1308 with lfstatus(repo):
1309 1309 return orig(repo, *args, **kwargs)
1310 1310
1311 1311
1312 1312 @eh.wrapfunction(cmdutil, b'forget')
1313 1313 def cmdutilforget(
1314 1314 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1315 1315 ):
1316 1316 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1317 1317 bad, forgot = orig(
1318 1318 ui,
1319 1319 repo,
1320 1320 normalmatcher,
1321 1321 prefix,
1322 1322 uipathfn,
1323 1323 explicitonly,
1324 1324 dryrun,
1325 1325 interactive,
1326 1326 )
1327 1327 m = composelargefilematcher(match, repo[None].manifest())
1328 1328
1329 1329 with lfstatus(repo):
1330 1330 s = repo.status(match=m, clean=True)
1331 1331 manifest = repo[None].manifest()
1332 1332 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1333 1333 forget = [f for f in forget if lfutil.standin(f) in manifest]
1334 1334
1335 1335 for f in forget:
1336 1336 fstandin = lfutil.standin(f)
1337 1337 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1338 1338 ui.warn(
1339 1339 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1340 1340 )
1341 1341 bad.append(f)
1342 1342
1343 1343 for f in forget:
1344 1344 if ui.verbose or not m.exact(f):
1345 1345 ui.status(_(b'removing %s\n') % uipathfn(f))
1346 1346
1347 1347 # Need to lock because standin files are deleted then removed from the
1348 1348 # repository and we could race in-between.
1349 1349 with repo.wlock():
1350 1350 lfdirstate = lfutil.openlfdirstate(ui, repo)
1351 1351 for f in forget:
1352 1352 if lfdirstate[f] == b'a':
1353 1353 lfdirstate.drop(f)
1354 1354 else:
1355 1355 lfdirstate.remove(f)
1356 1356 lfdirstate.write()
1357 1357 standins = [lfutil.standin(f) for f in forget]
1358 1358 for f in standins:
1359 1359 repo.wvfs.unlinkpath(f, ignoremissing=True)
1360 1360 rejected = repo[None].forget(standins)
1361 1361
1362 1362 bad.extend(f for f in rejected if f in m.files())
1363 1363 forgot.extend(f for f in forget if f not in rejected)
1364 1364 return bad, forgot
1365 1365
1366 1366
1367 1367 def _getoutgoings(repo, other, missing, addfunc):
1368 1368 """get pairs of filename and largefile hash in outgoing revisions
1369 1369 in 'missing'.
1370 1370
1371 1371 largefiles already existing on 'other' repository are ignored.
1372 1372
1373 1373 'addfunc' is invoked with each unique pair of filename and
1374 1374 largefile hash value.
1375 1375 """
1376 1376 knowns = set()
1377 1377 lfhashes = set()
1378 1378
1379 1379 def dedup(fn, lfhash):
1380 1380 k = (fn, lfhash)
1381 1381 if k not in knowns:
1382 1382 knowns.add(k)
1383 1383 lfhashes.add(lfhash)
1384 1384
1385 1385 lfutil.getlfilestoupload(repo, missing, dedup)
1386 1386 if lfhashes:
1387 1387 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1388 1388 for fn, lfhash in knowns:
1389 1389 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1390 1390 addfunc(fn, lfhash)
1391 1391
1392 1392
1393 1393 def outgoinghook(ui, repo, other, opts, missing):
1394 1394 if opts.pop(b'large', None):
1395 1395 lfhashes = set()
1396 1396 if ui.debugflag:
1397 1397 toupload = {}
1398 1398
1399 1399 def addfunc(fn, lfhash):
1400 1400 if fn not in toupload:
1401 1401 toupload[fn] = []
1402 1402 toupload[fn].append(lfhash)
1403 1403 lfhashes.add(lfhash)
1404 1404
1405 1405 def showhashes(fn):
1406 1406 for lfhash in sorted(toupload[fn]):
1407 1407 ui.debug(b' %s\n' % lfhash)
1408 1408
1409 1409 else:
1410 1410 toupload = set()
1411 1411
1412 1412 def addfunc(fn, lfhash):
1413 1413 toupload.add(fn)
1414 1414 lfhashes.add(lfhash)
1415 1415
1416 1416 def showhashes(fn):
1417 1417 pass
1418 1418
1419 1419 _getoutgoings(repo, other, missing, addfunc)
1420 1420
1421 1421 if not toupload:
1422 1422 ui.status(_(b'largefiles: no files to upload\n'))
1423 1423 else:
1424 1424 ui.status(
1425 1425 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1426 1426 )
1427 1427 for file in sorted(toupload):
1428 1428 ui.status(lfutil.splitstandin(file) + b'\n')
1429 1429 showhashes(file)
1430 1430 ui.status(b'\n')
1431 1431
1432 1432
1433 1433 @eh.wrapcommand(
1434 1434 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1435 1435 )
1436 1436 def _outgoingcmd(orig, *args, **kwargs):
1437 1437 # Nothing to do here other than add the extra help option; the hook above
1438 1438 # processes it.
1439 1439 return orig(*args, **kwargs)
1440 1440
1441 1441
1442 1442 def summaryremotehook(ui, repo, opts, changes):
1443 1443 largeopt = opts.get(b'large', False)
1444 1444 if changes is None:
1445 1445 if largeopt:
1446 1446 return (False, True) # only outgoing check is needed
1447 1447 else:
1448 1448 return (False, False)
1449 1449 elif largeopt:
1450 1450 url, branch, peer, outgoing = changes[1]
1451 1451 if peer is None:
1452 1452 # i18n: column positioning for "hg summary"
1453 1453 ui.status(_(b'largefiles: (no remote repo)\n'))
1454 1454 return
1455 1455
1456 1456 toupload = set()
1457 1457 lfhashes = set()
1458 1458
1459 1459 def addfunc(fn, lfhash):
1460 1460 toupload.add(fn)
1461 1461 lfhashes.add(lfhash)
1462 1462
1463 1463 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1464 1464
1465 1465 if not toupload:
1466 1466 # i18n: column positioning for "hg summary"
1467 1467 ui.status(_(b'largefiles: (no files to upload)\n'))
1468 1468 else:
1469 1469 # i18n: column positioning for "hg summary"
1470 1470 ui.status(
1471 1471 _(b'largefiles: %d entities for %d files to upload\n')
1472 1472 % (len(lfhashes), len(toupload))
1473 1473 )
1474 1474
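# Usage note (illustrative): "hg summary --large" reports outgoing
# largefiles through the hook above, printing for example
#   largefiles: 3 entities for 2 files to upload
# or "largefiles: (no files to upload)" when nothing is missing remotely.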
1475 1475
1476 1476 @eh.wrapcommand(
1477 1477 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1478 1478 )
1479 1479 def overridesummary(orig, ui, repo, *pats, **opts):
1480 1480 with lfstatus(repo):
1481 1481 orig(ui, repo, *pats, **opts)
1482 1482
1483 1483
1484 1484 @eh.wrapfunction(scmutil, b'addremove')
1485 1485 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1486 1486 if opts is None:
1487 1487 opts = {}
1488 1488 if not lfutil.islfilesrepo(repo):
1489 1489 return orig(repo, matcher, prefix, uipathfn, opts)
1490 1490 # Get the list of missing largefiles so we can remove them
1491 1491 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1492 1492 unsure, s = lfdirstate.status(
1493 1493 matchmod.always(),
1494 1494 subrepos=[],
1495 1495 ignored=False,
1496 1496 clean=False,
1497 1497 unknown=False,
1498 1498 )
1499 1499
1500 1500 # Call into the normal remove code, but we want the removal of the standin
1501 1501 # to be handled by the original addremove. Monkey patching here makes sure
1502 1502 # we don't remove the standin in the largefiles code, preventing a very
1503 1503 # confused state later.
1504 1504 if s.deleted:
1505 1505 m = copy.copy(matcher)
1506 1506
1507 1507 # The m._files and m._map attributes are not changed to the deleted list
1508 1508 # because that affects the m.exact() test, which in turn governs whether
1509 1509 # or not the file name is printed, and how. Simply limit the original
1510 1510 # matches to those in the deleted status list.
1511 1511 matchfn = m.matchfn
1512 1512 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1513 1513
1514 1514 removelargefiles(
1515 1515 repo.ui,
1516 1516 repo,
1517 1517 True,
1518 1518 m,
1519 1519 uipathfn,
1520 1520 opts.get(b'dry_run'),
1521 1521 **pycompat.strkwargs(opts)
1522 1522 )
1523 1523 # Call into the normal add code, and any files that *should* be added as
1524 1524 # largefiles will be
1525 1525 added, bad = addlargefiles(
1526 1526 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1527 1527 )
1528 1528 # Now that we've handled largefiles, hand off to the original addremove
1529 1529 # function to take care of the rest. Make sure it doesn't do anything with
1530 1530 # largefiles by passing a matcher that will ignore them.
1531 1531 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1532 1532 return orig(repo, matcher, prefix, uipathfn, opts)
1533 1533
1534 1534
1535 1535 # Calling purge with --all will cause the largefiles to be deleted.
1536 1536 # Override repo.status to prevent this from happening.
1537 1537 @eh.wrapcommand(b'purge', extension=b'purge')
1538 1538 def overridepurge(orig, ui, repo, *dirs, **opts):
1539 1539 # XXX Monkey patching a repoview will not work. The assigned attribute will
1540 1540 # be set on the unfiltered repo, but we will only lookup attributes in the
1541 1541 # unfiltered repo if the lookup in the repoview object itself fails. As the
1542 1542 # monkey patched method exists on the repoview class the lookup will not
1543 1543 # fail. As a result, the original version will shadow the monkey patched
1544 1544 # one, defeating the monkey patch.
1545 1545 #
1546 1546 # As a workaround we use an unfiltered repo here. We should do something
1547 1547 # cleaner instead.
1548 1548 repo = repo.unfiltered()
1549 1549 oldstatus = repo.status
1550 1550
1551 1551 def overridestatus(
1552 1552 node1=b'.',
1553 1553 node2=None,
1554 1554 match=None,
1555 1555 ignored=False,
1556 1556 clean=False,
1557 1557 unknown=False,
1558 1558 listsubrepos=False,
1559 1559 ):
1560 1560 r = oldstatus(
1561 1561 node1, node2, match, ignored, clean, unknown, listsubrepos
1562 1562 )
1563 1563 lfdirstate = lfutil.openlfdirstate(ui, repo)
1564 1564 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1565 1565 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1566 1566 return scmutil.status(
1567 1567 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1568 1568 )
1569 1569
1570 1570 repo.status = overridestatus
1571 1571 orig(ui, repo, *dirs, **opts)
1572 1572 repo.status = oldstatus
1573 1573
1574 1574
1575 1575 @eh.wrapcommand(b'rollback')
1576 1576 def overriderollback(orig, ui, repo, **opts):
1577 1577 with repo.wlock():
1578 1578 before = repo.dirstate.parents()
1579 1579 orphans = {
1580 1580 f
1581 1581 for f in repo.dirstate
1582 1582 if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
1583 1583 }
1584 1584 result = orig(ui, repo, **opts)
1585 1585 after = repo.dirstate.parents()
1586 1586 if before == after:
1587 1587 return result # no need to restore standins
1588 1588
1589 1589 pctx = repo[b'.']
1590 1590 for f in repo.dirstate:
1591 1591 if lfutil.isstandin(f):
1592 1592 orphans.discard(f)
1593 1593 if repo.dirstate[f] == b'r':
1594 1594 repo.wvfs.unlinkpath(f, ignoremissing=True)
1595 1595 elif f in pctx:
1596 1596 fctx = pctx[f]
1597 1597 repo.wwrite(f, fctx.data(), fctx.flags())
1598 1598 else:
1599 1599 # content of standin is not so important in 'a',
1600 1600 # 'm' or 'n' (coming from the 2nd parent) cases
1601 1601 lfutil.writestandin(repo, f, b'', False)
1602 1602 for standin in orphans:
1603 1603 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1604 1604
1605 1605 lfdirstate = lfutil.openlfdirstate(ui, repo)
1606 1606 orphans = set(lfdirstate)
1607 1607 lfiles = lfutil.listlfiles(repo)
1608 1608 for file in lfiles:
1609 1609 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1610 1610 orphans.discard(file)
1611 1611 for lfile in orphans:
1612 1612 lfdirstate.drop(lfile)
1613 1613 lfdirstate.write()
1614 1614 return result
1615 1615
1616 1616
1617 1617 @eh.wrapcommand(b'transplant', extension=b'transplant')
1618 1618 def overridetransplant(orig, ui, repo, *revs, **opts):
1619 1619 resuming = opts.get('continue')
1620 1620 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1621 1621 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1622 1622 try:
1623 1623 result = orig(ui, repo, *revs, **opts)
1624 1624 finally:
1625 1625 repo._lfstatuswriters.pop()
1626 1626 repo._lfcommithooks.pop()
1627 1627 return result
1628 1628
1629 1629
1630 1630 @eh.wrapcommand(b'cat')
1631 1631 def overridecat(orig, ui, repo, file1, *pats, **opts):
1632 1632 opts = pycompat.byteskwargs(opts)
1633 1633 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
1634 1634 err = 1
1635 1635 notbad = set()
1636 1636 m = scmutil.match(ctx, (file1,) + pats, opts)
1637 1637 origmatchfn = m.matchfn
1638 1638
1639 1639 def lfmatchfn(f):
1640 1640 if origmatchfn(f):
1641 1641 return True
1642 1642 lf = lfutil.splitstandin(f)
1643 1643 if lf is None:
1644 1644 return False
1645 1645 notbad.add(lf)
1646 1646 return origmatchfn(lf)
1647 1647
1648 1648 m.matchfn = lfmatchfn
1649 1649 origbadfn = m.bad
1650 1650
1651 1651 def lfbadfn(f, msg):
1652 1652 if f not in notbad:
1653 1653 origbadfn(f, msg)
1654 1654
1655 1655 m.bad = lfbadfn
1656 1656
1657 1657 origvisitdirfn = m.visitdir
1658 1658
1659 1659 def lfvisitdirfn(dir):
1660 1660 if dir == lfutil.shortname:
1661 1661 return True
1662 1662 ret = origvisitdirfn(dir)
1663 1663 if ret:
1664 1664 return ret
1665 1665 lf = lfutil.splitstandin(dir)
1666 1666 if lf is None:
1667 1667 return False
1668 1668 return origvisitdirfn(lf)
1669 1669
1670 1670 m.visitdir = lfvisitdirfn
1671 1671
1672 1672 for f in ctx.walk(m):
1673 1673 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1674 1674 lf = lfutil.splitstandin(f)
1675 1675 if lf is None or origmatchfn(f):
1676 1676 # duplicating unreachable code from commands.cat
1677 1677 data = ctx[f].data()
1678 1678 if opts.get(b'decode'):
1679 1679 data = repo.wwritedata(f, data)
1680 1680 fp.write(data)
1681 1681 else:
1682 1682 hash = lfutil.readasstandin(ctx[f])
1683 1683 if not lfutil.inusercache(repo.ui, hash):
1684 1684 store = storefactory.openstore(repo)
1685 1685 success, missing = store.get([(lf, hash)])
1686 1686 if len(success) != 1:
1687 1687 raise error.Abort(
1688 1688 _(
1689 1689 b'largefile %s is not in cache and could not be '
1690 1690 b'downloaded'
1691 1691 )
1692 1692 % lf
1693 1693 )
1694 1694 path = lfutil.usercachepath(repo.ui, hash)
1695 1695 with open(path, b"rb") as fpin:
1696 1696 for chunk in util.filechunkiter(fpin):
1697 1697 fp.write(chunk)
1698 1698 err = 0
1699 1699 return err
1700 1700
1701 1701
1702 1702 @eh.wrapfunction(merge, b'update')
1703 1703 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1704 1704 matcher = kwargs.get('matcher', None)
1705 1705 # note if this is a partial update
1706 1706 partial = matcher and not matcher.always()
1707 1707 with repo.wlock():
1708 1708 # branch | | |
1709 1709 # merge | force | partial | action
1710 1710 # -------+-------+---------+--------------
1711 1711 # x | x | x | linear-merge
1712 1712 # o | x | x | branch-merge
1713 1713 # x | o | x | overwrite (as clean update)
1714 1714 # o | o | x | force-branch-merge (*1)
1715 1715 # x | x | o | (*)
1716 1716 # o | x | o | (*)
1717 1717 # x | o | o | overwrite (as revert)
1718 1718 # o | o | o | (*)
1719 1719 #
1720 1720 # (*) don't care
1721 1721 # (*1) deprecated, but used internally (e.g.: "rebase --collapse")
1722 1722
1723 1723 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1724 1724 unsure, s = lfdirstate.status(
1725 1725 matchmod.always(),
1726 1726 subrepos=[],
1727 1727 ignored=False,
1728 1728 clean=True,
1729 1729 unknown=False,
1730 1730 )
1731 1731 oldclean = set(s.clean)
1732 1732 pctx = repo[b'.']
1733 1733 dctx = repo[node]
1734 1734 for lfile in unsure + s.modified:
1735 1735 lfileabs = repo.wvfs.join(lfile)
1736 1736 if not repo.wvfs.exists(lfileabs):
1737 1737 continue
1738 1738 lfhash = lfutil.hashfile(lfileabs)
1739 1739 standin = lfutil.standin(lfile)
1740 1740 lfutil.writestandin(
1741 1741 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1742 1742 )
1743 1743 if standin in pctx and lfhash == lfutil.readasstandin(
1744 1744 pctx[standin]
1745 1745 ):
1746 1746 oldclean.add(lfile)
1747 1747 for lfile in s.added:
1748 1748 fstandin = lfutil.standin(lfile)
1749 1749 if fstandin not in dctx:
1750 1750 # in this case, content of standin file is meaningless
1751 1751 # (in dctx, lfile is unknown, or normal file)
1752 1752 continue
1753 1753 lfutil.updatestandin(repo, lfile, fstandin)
1754 1754 # mark all clean largefiles as dirty, just in case the update gets
1755 1755 # interrupted before largefiles and lfdirstate are synchronized
1756 1756 for lfile in oldclean:
1757 1757 lfdirstate.normallookup(lfile)
1758 1758 lfdirstate.write()
1759 1759
1760 1760 oldstandins = lfutil.getstandinsstate(repo)
1761 1761 # Make sure the merge runs on disk, not in-memory. largefiles is not a
1762 1762 # good candidate for in-memory merge (large files, custom dirstate,
1763 1763 # matcher usage).
1764 1764 kwargs['wc'] = repo[None]
1765 1765 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1766 1766
1767 1767 newstandins = lfutil.getstandinsstate(repo)
1768 1768 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1769 1769
1770 1770 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1771 1771 # all the ones that didn't change as clean
1772 1772 for lfile in oldclean.difference(filelist):
1773 1773 lfdirstate.normal(lfile)
1774 1774 lfdirstate.write()
1775 1775
1776 1776 if branchmerge or force or partial:
1777 1777 filelist.extend(s.deleted + s.removed)
1778 1778
1779 1779 lfcommands.updatelfiles(
1780 1780 repo.ui, repo, filelist=filelist, normallookup=partial
1781 1781 )
1782 1782
1783 1783 return result
1784 1784
1785 1785
1786 1786 @eh.wrapfunction(scmutil, b'marktouched')
1787 1787 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1788 1788 result = orig(repo, files, *args, **kwargs)
1789 1789
1790 1790 filelist = []
1791 1791 for f in files:
1792 1792 lf = lfutil.splitstandin(f)
1793 1793 if lf is not None:
1794 1794 filelist.append(lf)
1795 1795 if filelist:
1796 1796 lfcommands.updatelfiles(
1797 1797 repo.ui,
1798 1798 repo,
1799 1799 filelist=filelist,
1800 1800 printmessage=False,
1801 1801 normallookup=True,
1802 1802 )
1803 1803
1804 1804 return result
1805 1805
1806 1806
1807 1807 @eh.wrapfunction(upgrade, b'preservedrequirements')
1808 1808 @eh.wrapfunction(upgrade, b'supporteddestrequirements')
1809 1809 def upgraderequirements(orig, repo):
1810 1810 reqs = orig(repo)
1811 1811 if b'largefiles' in repo.requirements:
1812 1812 reqs.add(b'largefiles')
1813 1813 return reqs
1814 1814
1815 1815
1816 1816 _lfscheme = b'largefile://'
1817 1817
1818 1818
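# Usage note (illustrative): opening a URL such as b'largefile://<lfid>'
# bypasses the normal opener and fetches the largefile content via
# storefactory.getlfile(ui, lfid); passing 'data' with such a URL is a
# programming error, and any other URL falls through to orig().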
1819 1819 @eh.wrapfunction(urlmod, b'open')
1820 1820 def openlargefile(orig, ui, url_, data=None):
1821 1821 if url_.startswith(_lfscheme):
1822 1822 if data:
1823 1823 msg = b"cannot use data on a 'largefile://' url"
1824 1824 raise error.ProgrammingError(msg)
1825 1825 lfid = url_[len(_lfscheme) :]
1826 1826 return storefactory.getlfile(ui, lfid)
1827 1827 else:
1828 1828 return orig(ui, url_, data=data)