##// END OF EJS Templates
largefiles: migrate `opts` to native kwargs
Matt Harbison -
r51773:ee393dbf default
parent child Browse files
Show More
@@ -1,675 +1,673 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import binascii
12 12 import os
13 13 import shutil
14 14
15 15 from mercurial.i18n import _
16 16 from mercurial.node import (
17 17 bin,
18 18 hex,
19 19 )
20 20
21 21 from mercurial import (
22 22 cmdutil,
23 23 context,
24 24 error,
25 25 exthelper,
26 26 hg,
27 27 lock,
28 28 logcmdutil,
29 29 match as matchmod,
30 pycompat,
31 30 scmutil,
32 31 util,
33 32 )
34 33 from mercurial.utils import hashutil
35 34
36 35 from ..convert import (
37 36 convcmd,
38 37 filemap,
39 38 )
40 39
41 40 from . import lfutil, storefactory
42 41
# Convenience alias used by lfconvert to release the destination locks.
release = lock.release

# -- Commands ----------------------------------------------------------

# Extension helper used to register the commands defined in this module.
eh = exthelper.exthelper()
48 47
49 48
@eh.command(
    b'lfconvert',
    [
        (
            b's',
            b'size',
            b'',
            _(b'minimum size (MB) for files to be converted as largefiles'),
            b'SIZE',
        ),
        (
            b'',
            b'to-normal',
            False,
            _(b'convert from a largefiles repo to a normal repo'),
        ),
    ],
    _(b'hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True,
)
def lfconvert(ui, src, dest, *pats, **opts):
    """convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all."""

    # opts arrives as native-str kwargs (no pycompat.byteskwargs needed).
    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_(b'initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (
            rsrc[ctx]
            for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
        )
        revmap = {rsrc.nullid: rdst.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted to.
            # Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, b'patterns')
            if pats:
                matcher = matchmod.match(rsrc.root, b'', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            with ui.makeprogress(
                _(b'converting revisions'),
                unit=_(b'revisions'),
                total=rsrc[b'tip'].rev(),
            ) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(
                        rsrc,
                        rdst,
                        ctx,
                        revmap,
                        lfiles,
                        normalfiles,
                        matcher,
                        size,
                        lfiletohash,
                    )

            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add(b'largefiles')
                scmutil.writereporequirements(rdst)
        else:

            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = b'.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (
                        not realname.startswith(lfutil.shortnameslash)
                        or f[0] is None
                    ):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(
                            _(b"missing largefile for '%s' in %s")
                            % (realname, realrev)
                        )
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(
                        ui, src, dest, revmapfile, opts
                    )

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_(b"all largefiles must be present locally"))

            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(
                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
                )
            finally:
                convcmd.converter = orig
            success = True
    finally:
        if tolfile:
            # XXX is this the right context semantically ?
            with rdst.dirstate.changing_parents(rdst):
                rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
230 228
def _lfconvert_addchangeset(
    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
):
    """Convert one source changeset *ctx* into *rdst*, turning qualifying
    files into largefile standins.

    Mutates ``revmap`` (src node -> dst node), ``lfiles``/``normalfiles``
    (classification caches shared across changesets) and ``lfiletohash``
    (largefile path -> last written content hash)."""
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.copysource()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a list
                    # and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed in lfiles
                islfile |= renamedlfile
                if b'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _(b'renamed/copied largefile %s becomes symlink')
                            % f
                        )
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if b'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(
                            _(b'largefile %s becomes symlink') % f
                        )

                # largefile was modified, update standins
                m = hashutil.sha1(b'')
                m.update(ctx[f].data())
                hash = hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = b'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash, executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # Per-file callback for memctx: standins get the stored hash as
        # content; normal files are copied through _getnormalcontext().
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.copysource()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed)

            return context.memfilectx(
                repo,
                memctx,
                f,
                lfiletohash[srcfname] + b'\n',
                b'l' in fctx.flags(),
                b'x' in fctx.flags(),
                renamed,
            )
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
322 320
323 321
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit a memctx mirroring *ctx* into *rdst* and record the mapping
    from the source node to the new destination tip in *revmap*."""
    mctx = context.memctx(
        rdst,
        parents,
        ctx.description(),
        dstfiles,
        getfilectx,
        ctx.user(),
        ctx.date(),
        ctx.extra(),
    )
    ret = rdst.commitctx(mctx)
    # make sure the converted largefiles land in the destination store
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
339 337
340 338
341 339 # Generate list of changed files
342 340 def _getchangedfiles(ctx, parents):
343 341 files = set(ctx.files())
344 342 if ctx.repo().nullid not in parents:
345 343 mc = ctx.manifest()
346 344 for pctx in ctx.parents():
347 345 for fn in pctx.manifest().diff(mc):
348 346 files.add(fn)
349 347 return files
350 348
351 349
352 350 # Convert src parents to dst parents
353 351 def _convertparents(ctx, revmap):
354 352 parents = []
355 353 for p in ctx.parents():
356 354 parents.append(revmap[p.node()])
357 355 while len(parents) < 2:
358 356 parents.append(ctx.repo().nullid)
359 357 return parents
360 358
361 359
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Build a memfilectx for a non-largefile *f*, or None if *f* was
    removed/renamed.  .hgtags content is remapped through *revmap*."""
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.copysource()

    data = fctx.data()
    if f == b'.hgtags':
        # tag file references old nodes; rewrite them to the new ones
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(
        repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
    )
376 374
377 375
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    """Rewrite .hgtags *data*, replacing each node id via *revmap*.

    Malformed lines, undecodable ids and unmapped nodes are skipped with
    a warning rather than aborting the conversion."""
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(b' ', 1)
        except ValueError:
            ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = bin(id)
        except binascii.Error:
            ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newdata.append(b'%s %s\n' % (hex(revmap[newid]), name))
        except KeyError:
            ui.warn(_(b'no mapping for id %s\n') % id)
            continue
    return b''.join(newdata)
398 396
399 397
400 398 def _islfile(file, ctx, matcher, size):
401 399 """Return true if file should be considered a largefile, i.e.
402 400 matcher matches it or it is larger than size."""
403 401 # never store special .hg* files as largefiles
404 402 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
405 403 return False
406 404 if matcher and matcher(file):
407 405 return True
408 406 try:
409 407 return ctx.filectx(file).size() >= size * 1024 * 1024
410 408 except error.LookupError:
411 409 return False
412 410
413 411
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
    # ask the store which hashes it already has and only upload the rest
    retval = store.exists(files)
    files = [h for h in files if not retval[h]]
    ui.debug(b"%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(
        _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
    ) as progress:
        for hash in files:
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(
                    _(
                        b'largefile %s missing from store'
                        b' (needs to be uploaded)'
                    )
                    % hash
                )
            # XXX check for errors here
            store.put(source, hash)
            at += 1
445 443
446 444
def verifylfiles(ui, repo, all=False, contents=False):
    """Verify that every largefile revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository."""
    # verify either every revision or just the working parent
    revs = repo.revs(b'all()') if all else [b'.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
460 458
461 459
def cachelfiles(ui, repo, node, filelist=None):
    """cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found."""
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        # restrict to the caller-requested subset
        lfiles = set(lfiles) & set(filelist)
    toget = []

    ctx = repo[node]
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except FileNotFoundError:
            continue  # node must be None and standin wasn't found in wctx
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = storefactory.openstore(repo)
        ret = store.get(toget)
        return ret

    # nothing needed downloading
    return ([], [])
489 487
490 488
def downloadlfiles(ui, repo):
    """Cache the largefiles for every revision touching the largefiles
    store directory; return (totalsuccess, totalmissing) counts."""
    tonode = repo.changelog.node
    totalsuccess = 0
    totalmissing = 0
    for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
        success, missing = cachelfiles(ui, repo, tonode(rev))
        totalsuccess += len(success)
        totalmissing += len(missing)
    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
503 501
504 502
def updatelfiles(
    ui, repo, filelist=None, printmessage=None, normallookup=False
):
    """Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    """
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        dropped = set()
        updated, removed = 0, 0
        wvfs = repo.wvfs
        wctx = repo[None]
        for lfile in lfiles:
            lfileorig = os.path.relpath(
                scmutil.backuppath(ui, repo, lfile), start=repo.root
            )
            standin = lfutil.standin(lfile)
            standinorig = os.path.relpath(
                scmutil.backuppath(ui, repo, standin), start=repo.root
            )
            if wvfs.exists(standin):
                if wvfs.exists(standinorig) and wvfs.exists(lfile):
                    shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
                    wvfs.unlinkpath(standinorig)
                expecthash = lfutil.readasstandin(wctx[standin])
                if expecthash != b'':
                    if lfile not in wctx:  # not switched to normal file
                        if repo.dirstate.get_entry(standin).any_tracked:
                            wvfs.unlinkpath(lfile, ignoremissing=True)
                        else:
                            dropped.add(lfile)

                    # allocate an entry in largefiles dirstate to prevent
                    # lfilesrepo.status() from reporting missing files as
                    # removed.
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (
                    wvfs.exists(lfile)
                    and repo.dirstate.normalize(lfile) not in wctx
                ):
                    wvfs.unlinkpath(lfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write(repo.currenttransaction())

        if lfiles:
            lfiles = [f for f in lfiles if f not in dropped]

            for f in dropped:
                repo.wvfs.unlinkpath(lfutil.standin(f))
                # This needs to happen for dropped files, otherwise they stay in
                # the M state.
                lfdirstate._map.reset_state(f)

            statuswriter(_(b'getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.hacky_extension_update_file(
                    lfile,
                    p1_tracked=True,
                    wc_tracked=True,
                )
                update1 = 1

            # copy the exec mode of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            standin = lfutil.standin(lfile)
            if wvfs.exists(standin):
                # exec is decided by the users permissions using mask 0o100
                standinexec = wvfs.stat(standin).st_mode & 0o100
                st = wvfs.stat(lfile)
                mode = st.st_mode
                if standinexec != mode & 0o100:
                    # first remove all X bits, then shift all R bits to X
                    mode &= ~0o111
                    if standinexec:
                        mode |= (mode >> 2) & 0o111 & ~util.umask
                    wvfs.chmod(lfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write(repo.currenttransaction())
        if lfiles:
            statuswriter(
                _(b'%d largefiles updated, %d removed\n') % (updated, removed)
            )
626 624
627 625
@eh.command(
    b'lfpull',
    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
    + cmdutil.remoteopts,
    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
)
def lfpull(ui, repo, source=b"default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    repo.lfpullsource = source

    # native-str kwargs: the option key is 'rev', not b'rev'
    revs = opts.get('rev', [])
    if not revs:
        raise error.Abort(_(b'no revisions specified'))
    revs = logcmdutil.revrange(repo, revs)

    numcached = 0
    for rev in revs:
        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
        (cached, missing) = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_(b"%d largefiles cached\n") % numcached)
668 666
669 667
@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """Hash FILE and upload it to the largefiles store (debug helper)."""
    hash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, hash)
    ui.write(b'%s\n' % hash)
    return 0
@@ -1,1925 +1,1924 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import contextlib
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial.pycompat import open
18 18
19 19 from mercurial.hgweb import webcommands
20 20
21 21 from mercurial import (
22 22 archival,
23 23 cmdutil,
24 24 copies as copiesmod,
25 25 dirstate,
26 26 error,
27 27 exchange,
28 28 extensions,
29 29 exthelper,
30 30 filemerge,
31 31 hg,
32 32 logcmdutil,
33 33 match as matchmod,
34 34 merge,
35 35 mergestate as mergestatemod,
36 36 pathutil,
37 37 pycompat,
38 38 scmutil,
39 39 smartset,
40 40 subrepo,
41 41 url as urlmod,
42 42 util,
43 43 )
44 44
45 45 from mercurial.upgrade_utils import (
46 46 actions as upgrade_actions,
47 47 )
48 48
49 49 from . import (
50 50 lfcommands,
51 51 lfutil,
52 52 storefactory,
53 53 )
54 54
# Short local aliases for the merge actions this module manipulates.
ACTION_ADD = mergestatemod.ACTION_ADD
ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
ACTION_GET = mergestatemod.ACTION_GET
ACTION_KEEP = mergestatemod.ACTION_KEEP
ACTION_REMOVE = mergestatemod.ACTION_REMOVE

eh = exthelper.exthelper()

lfstatus = lfutil.lfstatus

# Merge action specific to largefiles: mark a largefile as removed.
MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
66 66
67 67 # -- Utility functions: commonly/repeatedly needed functionality ---------------
68 68
69 69
def composelargefilematcher(match, manifest):
    """create a matcher that matches only the largefiles in the original
    matcher"""
    m = copy.copy(match)
    # a file is a largefile iff its standin is tracked in the manifest
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = [lf for lf in m._files if lfile(lf)]
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m
81 81
82 82
def composenormalfilematcher(match, manifest, exclude=None):
    """create a matcher that matches only the non-largefiles in the
    original matcher, optionally excluding *exclude* as well"""
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    # reject standins, files with a tracked standin, and explicit excludes
    notlfile = lambda f: not (
        lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
    )
    m._files = [lf for lf in m._files if notlfile(lf)]
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m
98 98
99 99
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    """Add files matched by *matcher* as largefiles when --large is set,
    they exceed the configured minimum size, or they match the configured
    largefiles patterns.  Returns (added, bad) lists of file names."""
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                lfdirstate.set_tracked(f)
            lfdirstate.write(repo.currenttransaction())
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

    added = [f for f in lfnames if f not in bad]
    return added, bad
170 170
171 171
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    """Remove largefiles matched by *matcher*; with --after, only those
    already deleted from the working directory.  Returns a non-zero int
    when any file could not be removed (warning already printed)."""
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # warn about each file; return 1 if anything was warned about
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfdirstate.set_untracked(lfutil.splitstandin(f))

        lfdirstate.write(repo.currenttransaction())

    return result
242 242
243 243
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
@eh.wrapfunction(webcommands, 'decodepath')
def decodepath(orig, path):
    # map a standin path back to its largefile path; pass others through
    return lfutil.splitstandin(path) or path
249 249
250 250
251 251 # -- Wrappers: modify existing commands --------------------------------
252 252
253 253
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    """Wrap :hg:`add` to reject the contradictory --normal/--large combo."""
    if opts.get('normal') and opts.get('large'):
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
274 274
275 275
@eh.wrapfunction(cmdutil, 'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Wrap cmdutil.add: add largefiles first, then normal files with a
    matcher that excludes the files already added as largefiles."""
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)

    # combine failures from both passes
    bad.extend(f for f in lbad)
    return bad
290 290
291 291
@eh.wrapfunction(cmdutil, 'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    """Wrap cmdutil.remove: remove normal files via the original, then the
    matching largefiles; combine the two results."""
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    return (
        removelargefiles(
            ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
        )
        or result
    )
314 314
315 315
@eh.wrapfunction(dirstate.dirstate, '_changing')
@contextlib.contextmanager
def _changing(orig, self, repo, change_type):
    """Wrap dirstate._changing so the largefiles sub-dirstate enters the
    same changing context as the main dirstate."""
    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
    try:
        lfd = getattr(self, '_large_file_dirstate', False)
        if sub_dirstate is None and not lfd:
            # lazily open the largefiles dirstate (not for the lf dirstate
            # itself, which would recurse)
            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
            self._sub_dirstate = sub_dirstate
        if not lfd:
            assert self._sub_dirstate is not None
        with orig(self, repo, change_type):
            if sub_dirstate is None:
                yield
            else:
                with sub_dirstate._changing(repo, change_type):
                    yield
    finally:
        # restore whatever sub-dirstate was attached before we entered
        self._sub_dirstate = pre
335 335
336 336
@eh.wrapfunction(dirstate.dirstate, 'running_status')
@contextlib.contextmanager
def running_status(orig, self, repo):
    # While the main dirstate is computing status, open (or reuse) the
    # largefiles dirstate and put it into the same running_status context.
    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
    try:
        lfd = getattr(self, '_large_file_dirstate', False)
        # The largefiles dirstate itself must not open another sub-dirstate.
        if sub_dirstate is None and not lfd:
            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
            self._sub_dirstate = sub_dirstate
        if not lfd:
            assert self._sub_dirstate is not None
        with orig(self, repo):
            if sub_dirstate is None:
                yield
            else:
                with sub_dirstate.running_status(repo):
                    yield
    finally:
        # Restore the previous value (None if it was never set).
        self._sub_dirstate = pre
356 356
357 357
@eh.wrapfunction(subrepo.hgsubrepo, 'status')
def overridestatusfn(orig, repo, rev2, **opts):
    # Run the subrepo status with largefiles status reporting enabled.
    with lfstatus(repo._repo):
        return orig(repo, rev2, **opts)
362 362
363 363
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    # Run the status command with largefiles status reporting enabled.
    with lfstatus(repo):
        return orig(ui, repo, *pats, **opts)
368 368
369 369
@eh.wrapfunction(subrepo.hgsubrepo, 'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    # Check subrepo dirtiness with largefiles status reporting enabled.
    with lfstatus(repo._repo):
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
374 374
375 375
@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    # Wrap the log command so that patterns naming largefiles (or their
    # directories) also match the corresponding standins under .hglf/.
    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            # Match either the largefile name behind a standin, or the
            # file name as given.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        # Case (2): build the diff file matcher from the unwrapped
        # matchandpats so diffs are shown for the names the user gave.
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, 'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, '_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
495 495
496 496
497 497 @eh.wrapcommand(
498 498 b'verify',
499 499 opts=[
500 500 (
501 501 b'',
502 502 b'large',
503 503 None,
504 504 _(b'verify that all largefiles in current revision exists'),
505 505 ),
506 506 (
507 507 b'',
508 508 b'lfa',
509 509 None,
510 510 _(b'verify largefiles in all revisions, not just current'),
511 511 ),
512 512 (
513 513 b'',
514 514 b'lfc',
515 515 None,
516 516 _(b'verify local largefile contents, not just existence'),
517 517 ),
518 518 ],
519 519 )
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrap the verify command to optionally verify largefiles as well.

    The largefiles-specific flags (--large, --lfa, --lfc) are stripped
    from ``opts`` before delegating, so the wrapped command never sees
    them.
    """
    large = opts.pop('large', False)
    # named 'allrevs' rather than 'all' to avoid shadowing the builtin all()
    allrevs = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or allrevs or contents:
        result = result or lfcommands.verifylfiles(ui, repo, allrevs, contents)
    return result
529 529
530 530
531 531 @eh.wrapcommand(
532 532 b'debugstate',
533 533 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
534 534 )
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Dump the largefiles dirstate instead of the main one when --large
    is given."""
    if opts.pop('large', False):
        # Present the lfdirstate through a repo-shaped shim so the original
        # command can read it unchanged.
        class fakerepo:
            dirstate = lfutil.openlfdirstate(ui, repo)

        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
545 545
546 546
547 547 # Before starting the manifest merge, merge.updates will call
548 548 # _checkunknownfile to check if there are any files in the merged-in
549 549 # changeset that collide with unknown files in the working copy.
550 550 #
551 551 # The largefiles are seen as unknown, so this prevents us from merging
552 552 # in a file 'foo' if we already have a largefile with the same name.
553 553 #
554 554 # The overridden function filters the unknown files by removing any
555 555 # largefiles. This makes the merge proceed and we can then handle this
556 556 # case further in the overridden calculateupdates function below.
557 557 @eh.wrapfunction(merge, '_checkunknownfile')
def overridecheckunknownfile(
    origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
):
    """Treat tracked largefiles as known: if ``f`` has a standin in the
    working context it cannot be an unknown-file collision, so report no
    conflict; otherwise defer to the original check."""
    standin = lfutil.standin(dirstate.normalize(f))
    if standin in wctx:
        return False
    return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
564 564
565 565
566 566 # The manifest merge handles conflicts on the manifest level. We want
567 567 # to handle changes in largefile-ness of files at this level too.
568 568 #
569 569 # The strategy is to run the original calculateupdates and then process
570 570 # the action list it outputs. There are two cases we need to deal with:
571 571 #
572 572 # 1. Normal file in p1, largefile in p2. Here the largefile is
573 573 # detected via its standin file, which will enter the working copy
574 574 # with a "get" action. It is not "merge" since the standin is all
575 575 # Mercurial is concerned with at this level -- the link to the
576 576 # existing normal file is not relevant here.
577 577 #
578 578 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
579 579 # since the largefile will be present in the working copy and
580 580 # different from the normal file in p2. Mercurial therefore
581 581 # triggers a merge action.
582 582 #
583 583 # In both cases, we prompt the user and emit new actions to either
584 584 # remove the standin (if the normal file was kept) or to remove the
585 585 # normal file and get the standin (if the largefile was kept). The
586 586 # default prompt answer is to use the largefile version since it was
587 587 # presumably changed on purpose.
588 588 #
589 589 # Finally, the merge.applyupdates function will then take care of
590 590 # writing the files into the working copy and lfcommands.updatelfiles
591 591 # will update the largefiles.
@eh.wrapfunction(merge, 'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Run the original calculateupdates, then rewrite the resulting merge
    actions so files that change largefile-ness across the merge are
    resolved (interactively) to either the largefile or the normal file.
    See the comment block above for the two cases handled."""
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        return mresult

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))

        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
            if sm == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(
                    lfile, ACTION_REMOVE, None, b'replaced by standin'
                )
                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        ACTION_KEEP,
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        ACTION_REMOVE,
                        None,
                        b'replaced by non-standin',
                    )
        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
            if lm == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        ACTION_KEEP,
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(
                        standin, ACTION_KEEP, None, b'replaces standin'
                    )
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    ACTION_REMOVE,
                    None,
                    b'replaced by non-standin',
                )

    return mresult
701 701
702 702
703 703 @eh.wrapfunction(mergestatemod, 'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Record largefile 'mark removed' actions in both dirstates, then
    delegate to the original recordupdates."""
    key = MERGE_ACTION_LARGEFILE_MARK_REMOVED
    if key in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions[key]:
            # This must happen before 'orig' so that the 'remove' is
            # recorded ahead of all other actions.
            repo.dirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)

    return orig(repo, actions, branchmerge, getfiledata)
715 715
716 716
717 717 # Override filemerge to prompt the user about how they wish to merge
718 718 # largefiles. This will handle identical edits without prompting the user.
719 719 @eh.wrapfunction(filemerge, 'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Merge largefile standins, prompting only on a real conflict.

    If the other side did not change the hash (or matches the ancestor)
    the local version stands; if only the other side changed, it is taken
    silently; otherwise the user chooses local or other.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()

    def _take_other():
        # No prompt needed when the other side matches the ancestor or the
        # local version; only-other-changed is taken without asking.
        if ohash == ahash or ohash == dhash:
            return False
        if dhash == ahash:
            return True
        prompt = _(
            b'largefile %s has a merge conflict\nancestor was %s\n'
            b'you can keep (l)ocal %s or take (o)ther %s.\n'
            b'what do you want to do?'
            b'$$ &Local $$ &Other'
        ) % (lfutil.splitstandin(orig), ahash, dhash, ohash)
        return repo.ui.promptchoice(prompt, 0) == 1

    if _take_other():
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0, False
749 749
750 750
751 751 @eh.wrapfunction(copiesmod, 'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Map standin names in a pathcopies result back to largefile names."""

    def unstandin(name):
        # keep the name as-is when it is not a standin
        return lfutil.splitstandin(name) or name

    return {
        unstandin(src): unstandin(dst)
        for src, dst in orig(ctx1, ctx2, match=match).items()
    }
760 760
761 761
762 762 # Copy first changes the matchers to match standins instead of
763 763 # largefiles. Then it overrides util.copyfile in that function it
764 764 # checks if the destination largefile already exists. It also keeps a
765 765 # list of copied files so that the largefiles can be copied and the
766 766 # dirstate updated.
@eh.wrapfunction(cmdutil, 'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename with largefiles support (see the comment block above).

    First copies/renames the normal files matched by ``pats`` via the
    original implementation, then re-runs it against the standins and
    syncs the largefiles and the largefiles dirstate accordingly.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # restrict the match to non-largefiles for the first pass
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, 'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            if e.message != _(b'no files to copy'):
                raise e
            else:
                # no normal files matched; the largefile pass may still work
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # absolute working-directory path of the standin for relpath
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                # only match standins whose largefile the user selected
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            # refuse to clobber an existing largefile unless --force;
            # record every (src, dest) pair for the sync step below
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, 'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, 'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.set_untracked(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    lfdirstate.set_tracked(destlfile)
        lfdirstate.write(repo.currenttransaction())
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result
928 928
929 929
930 930 # When the user calls revert, we have to be careful to not revert any
931 931 # changes to other largefiles accidentally. This means we have to keep
932 932 # track of the largefiles that are being reverted so we only pull down
933 933 # the necessary largefiles.
934 934 #
935 935 # Standins are only updated (to match the hash of largefiles) before
936 936 # commits. Update the standins then run the original revert, changing
937 937 # the matcher to hit standins instead of largefiles. Based on the
938 938 # resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, 'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    """Revert with largefiles support (see the comment block above).

    Standins are refreshed to match current largefile contents, the
    original revert is run against the standins, and the largefiles are
    then updated from the resulting standins.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock(), repo.dirstate.running_status(repo):
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        # snapshot so we can tell afterwards which standins revert changed
        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, 'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
1015 1015
1016 1016
1017 1017 # after pulling changesets, we need to take some extra care to get
1018 1018 # largefiles updated remotely
1019 1019 @eh.wrapcommand(
1020 1020 b'pull',
1021 1021 opts=[
1022 1022 (
1023 1023 b'',
1024 1024 b'all-largefiles',
1025 1025 None,
1026 1026 _(b'download all pulled versions of largefiles (DEPRECATED)'),
1027 1027 ),
1028 1028 (
1029 1029 b'',
1030 1030 b'lfrev',
1031 1031 [],
1032 1032 _(b'download largefiles for these revisions'),
1033 1033 _(b'REV'),
1034 1034 ),
1035 1035 ],
1036 1036 )
def overridepull(orig, ui, repo, source=None, **opts):
    """Pull as usual, then cache largefiles for the revisions selected by
    --lfrev / --all-largefiles (only when the pull added changesets)."""
    revsprepull = len(repo)
    source = source or b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)

    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        # --all-largefiles is shorthand for --lfrev "pulled()"
        lfrevs.append(b'pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        # expose the pre-pull boundary for the pulled() revset expression
        repo.firstpulled = revsprepull
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
1059 1059
1060 1060
1061 1061 @eh.wrapcommand(
1062 1062 b'push',
1063 1063 opts=[
1064 1064 (
1065 1065 b'',
1066 1066 b'lfrev',
1067 1067 [],
1068 1068 _(b'upload largefiles for these revisions'),
1069 1069 _(b'REV'),
1070 1070 )
1071 1071 ],
1072 1072 )
def overridepush(orig, ui, repo, *args, **kwargs):
    """Strip --lfrev from the options and forward the resolved revisions
    to the push operation via ``opargs``."""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        kwargs.setdefault('opargs', {})[b'lfrevs'] = logcmdutil.revrange(
            repo, lfrevs
        )
    return orig(ui, repo, *args, **kwargs)
1080 1080
1081 1081
1082 1082 @eh.wrapfunction(exchange, 'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Build the push operation, attaching any ``lfrevs`` keyword as an
    attribute instead of passing it to the wrapped constructor."""
    lfrevs = kwargs.pop('lfrevs', None)
    op = orig(*args, **kwargs)
    op.lfrevs = lfrevs
    return op
1089 1089
1090 1090
1091 1091 @eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

        hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

        hg pull -lfrev "head(pulled()) and not closed()"

    """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    pulled = [r for r in subset if r >= firstpulled]
    return smartset.baseset(pulled)
1116 1116
1117 1117
1118 1118 @eh.wrapcommand(
1119 1119 b'clone',
1120 1120 opts=[
1121 1121 (
1122 1122 b'',
1123 1123 b'all-largefiles',
1124 1124 None,
1125 1125 _(b'download all versions of all largefiles'),
1126 1126 )
1127 1127 ],
1128 1128 )
def overrideclone(orig, ui, source, dest=None, **opts):
    """Refuse --all-largefiles when cloning to a non-local destination,
    since largefiles cannot be cached there."""
    target = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(target):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % target
        )

    return orig(ui, source, dest, **opts)
1140 1140
1141 1141
1142 1142 @eh.wrapfunction(hg, 'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone to optionally download every largefile revision.

    Returns None (clone failure) when --all-largefiles was requested but
    some largefiles could not be downloaded; otherwise passes the wrapped
    result through.
    """
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return None

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer. Therefore the largefiles can't be downloaded and the
    # hgrc can't be updated.
    if not repo:
        return result

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point. The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get(b'all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo)
        if missing != 0:
            return None

    return result
1166 1166
1167 1167
1168 1168 @eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Rebase with largefiles support: install the automated commit hook,
    silence largefiles status output, and force on-disk (not in-memory)
    rebasing."""
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # suppress per-file largefile status messages during the rebase
    repo._lfstatuswriters.append(lambda *msg, **kw: None)
    override = {(b'rebase', b'experimental.inmemory'): False}
    try:
        with ui.configoverride(override, b"largefiles"):
            return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1184 1184
1185 1185
1186 1186 @eh.extsetup
def overriderebase(ui):
    """At extension setup, wrap rebase's _dorebase to force in-memory
    rebasing off, since largefiles need a real working copy."""
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        # rebase extension not enabled; nothing to wrap
        return

    def _dorebase(orig, *args, **kwargs):
        kwargs['inmemory'] = False
        return orig(*args, **kwargs)

    extensions.wrapfunction(rebase, '_dorebase', _dorebase)
1199 1199
1200 1200
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    # Archive from the unfiltered repo with largefiles status enabled so
    # standins are resolved to largefile contents.
    with lfstatus(repo.unfiltered()):
        return orig(ui, repo.unfiltered(), dest, **opts)
1205 1205
1206 1206
@eh.wrapfunction(webcommands, 'archive')
def hgwebarchive(orig, web):
    # hgweb archive downloads with largefiles status enabled.
    with lfstatus(web.repo):
        return orig(web)
1211 1211
1212 1212
@eh.wrapfunction(archival, 'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Archive a revision, substituting largefile contents for standins.

    Falls through to the wrapped ``archival.archive`` unless largefile
    status tracking is active on the repo (or its unfiltered view).
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Emit one archive member, honoring the match filter and optional
        # keyword decoding.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # f is a standin: archive the real largefile content instead.
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                # archiving the working copy: the largefile is on disk
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive.  That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1308 1308
1309 1309
@eh.wrapfunction(subrepo.hgsubrepo, 'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive an hg subrepo, substituting largefile contents for standins.

    Falls through to the wrapped ``hgsubrepo.archive`` when largefiles is
    not enabled, or lfstatus is not set, on the subrepo.
    """
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile
        # name, so the normal matcher works here without the lfutil variants.
        # Filter on the `name` parameter rather than the enclosing loop
        # variable `f` the original code used: they are the same value at
        # every current call site, but relying on the closure over `f` was a
        # latent late-binding/shadowing bug.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # f is a standin: emit the underlying largefile instead.
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                # archiving the working copy: the largefile is on disk
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function.  That
        # would allow only hgsubrepos to set this, instead of the current
        # scheme where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1372 1372
1373 1373
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, 'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort if the repo has uncommitted changes, including largefiles."""
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        st = repo.status()
    dirty = st.modified or st.added or st.removed or st.deleted
    if dirty:
        raise error.Abort(_(b'uncommitted changes'))
1385 1385
1386 1386
@eh.wrapfunction(cmdutil, 'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    """Compute post-commit status with largefile tracking enabled."""
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1391 1391
1392 1392
@eh.wrapfunction(cmdutil, 'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Forget files, handling largefiles and their standins separately.

    Normal files are delegated to the wrapped ``cmdutil.forget``; matched
    largefiles are untracked in the largefiles dirstate and their standins
    removed.  Returns the combined ``(bad, forgot)`` lists.
    """
    # Delegate everything except largefiles to the original implementation.
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # Only forget files whose standin is actually tracked.
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1443 1443
1444 1444
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    seen = set()
    hashes = set()

    def dedup(fn, lfhash):
        pair = (fn, lfhash)
        if pair not in seen:
            seen.add(pair)
            hashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if not hashes:
        return
    # Batch-query the remote store once for all candidate hashes.
    remote_has = storefactory.openstore(repo, other).exists(hashes)
    for fn, lfhash in seen:
        if not remote_has[lfhash]:  # lfhash doesn't exist on "other"
            addfunc(fn, lfhash)
1469 1469
1470 1470
def outgoinghook(ui, repo, other, opts, missing):
    """Report largefiles that ``hg outgoing --large`` would upload."""
    if not opts.pop(b'large', None):
        return

    lfhashes = set()
    if ui.debugflag:
        # Debug mode: remember every hash per file so it can be printed.
        toupload = {}

        def addfunc(fn, lfhash):
            toupload.setdefault(fn, []).append(lfhash)
            lfhashes.add(lfhash)

        def showhashes(fn):
            for lfhash in sorted(toupload[fn]):
                ui.debug(b' %s\n' % lfhash)

    else:
        toupload = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        def showhashes(fn):
            pass

    _getoutgoings(repo, other, missing, addfunc)

    if not toupload:
        ui.status(_(b'largefiles: no files to upload\n'))
        return
    ui.status(
        _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
    )
    for file in sorted(toupload):
        ui.status(lfutil.splitstandin(file) + b'\n')
        showhashes(file)
    ui.status(b'\n')
1509 1509
1510 1510
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    """Add the --large flag to ``hg outgoing``.

    The flag itself is consumed by outgoinghook above; this wrapper only
    registers the option and delegates.
    """
    return orig(*args, **kwargs)
1518 1518
1519 1519
def summaryremotehook(ui, repo, opts, changes):
    """Summary remote hook: report outgoing largefiles for --large."""
    largeopt = opts.get(b'large', False)
    if changes is None:
        # First call: report (incoming-needed, outgoing-needed).
        return (False, bool(largeopt))
    if not largeopt:
        return

    url, branch, peer, outgoing = changes[1]
    if peer is None:
        # i18n: column positioning for "hg summary"
        ui.status(_(b'largefiles: (no remote repo)\n'))
        return

    toupload = set()
    lfhashes = set()

    def addfunc(fn, lfhash):
        toupload.add(fn)
        lfhashes.add(lfhash)

    _getoutgoings(repo, peer, outgoing.missing, addfunc)

    if not toupload:
        # i18n: column positioning for "hg summary"
        ui.status(_(b'largefiles: (no files to upload)\n'))
    else:
        # i18n: column positioning for "hg summary"
        ui.status(
            _(b'largefiles: %d entities for %d files to upload\n')
            % (len(lfhashes), len(toupload))
        )
1552 1552
1553 1553
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run ``hg summary`` with largefile status tracking enabled."""
    # summaryremotehook handles the --large option itself.
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1560 1560
1561 1561
@eh.wrapfunction(scmutil, 'addremove')
def scmutiladdremove(
    orig,
    repo,
    matcher,
    prefix,
    uipathfn,
    opts=None,
    open_tr=None,
):
    """addremove that routes large files through the largefiles machinery.

    Missing largefiles are removed and new large files are added as
    largefiles; everything else is handed to the wrapped
    ``scmutil.addremove`` with a matcher that excludes the files handled
    here.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)

    # open the transaction and changing_files context
    if open_tr is not None:
        open_tr()

    # Get the list of missing largefiles so we can remove them
    with repo.dirstate.running_status(repo):
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s, mtime_boundary = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=False,
            unknown=False,
        )

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove. Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted
        # list because that affects the m.exact() test, which in turn governs
        # whether or not the file name is printed, and how. Simply limit the
        # original matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)

    return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1626 1626
1627 1627
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    """``hg purge`` wrapper that hides tracked largefiles from purge."""
    # XXX Monkey patching a repoview will not work. The assigned attribute
    # will be set on the unfiltered repo, but we will only lookup attributes
    # in the unfiltered repo if the lookup in the repoview object itself
    # fails. As the monkey patched method exists on the repoview class the
    # lookup will not fail. As a result, the original version will shadow the
    # monkey patched one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        # Same as repo.status, but drop largefiles tracked in the lfdirstate
        # from the unknown/ignored lists so purge won't delete them.
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [
            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
        ]
        ignored = [
            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
        ]
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    # Temporarily install the filtering status, run purge, then restore.
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
1670 1670
1671 1671
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """``hg rollback`` wrapper that restores standins after the rollback."""
    with repo.wlock():
        before = repo.dirstate.parents()
        # Standins tracked before the rollback; any still present afterwards
        # but no longer known to the dirstate are orphans and must be removed.
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate.get_entry(f).removed:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore the standin content from the new parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

    return result
1703 1703
1704 1704
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Wrap ``hg transplant`` with largefiles commit hooks/status writers."""
    repo._lfcommithooks.append(
        lfutil.automatedcommithook(opts.get('continue'))
    )
    # Silence per-file largefiles status output during the transplant.
    repo._lfstatuswriters.append(lambda *msg, **kwargs: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1716 1716
1717 1717
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """``hg cat`` wrapper that can output largefile contents.

    Standins are resolved to their largefile, fetching from the store into
    the user cache when necessary.  Returns 0 if any file was written,
    1 otherwise.
    """
    # opts are native (str-keyed) kwargs here; scmutil.match still expects
    # byte-keyed opts, hence the byteskwargs conversion below.
    ctx = logcmdutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, pycompat.byteskwargs(opts))
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # Also match a standin when the plain largefile name was given.
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        # Suppress "no such file" for names matched via their standin.
        if not f in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(dir):
        # Always descend into the standin directory so standins are found.
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get('decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # f is a standin: stream the largefile from the user cache,
                # downloading it from the store first if necessary.
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1788 1787
1789 1788
@eh.wrapfunction(merge, '_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    """Wrap merge._update to keep largefiles in sync with their standins."""
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock(), repo.dirstate.changing_parents(repo):
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")
        with repo.dirstate.running_status(repo):
            lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
            unsure, s, mtime_boundary = lfdirstate.status(
                matchmod.always(),
                subrepos=[],
                ignored=False,
                clean=True,
                unknown=False,
            )
            oldclean = set(s.clean)
            pctx = repo[b'.']
            dctx = repo[node]
            # Refresh standins for largefiles whose content may have changed,
            # so the merge sees up-to-date hashes.
            for lfile in unsure + s.modified:
                lfileabs = repo.wvfs.join(lfile)
                if not repo.wvfs.exists(lfileabs):
                    continue
                lfhash = lfutil.hashfile(lfileabs)
                standin = lfutil.standin(lfile)
                lfutil.writestandin(
                    repo, standin, lfhash, lfutil.getexecutable(lfileabs)
                )
                if standin in pctx and lfhash == lfutil.readasstandin(
                    pctx[standin]
                ):
                    oldclean.add(lfile)
            for lfile in s.added:
                fstandin = lfutil.standin(lfile)
                if fstandin not in dctx:
                    # in this case, content of standin file is meaningless
                    # (in dctx, lfile is unknown, or normal file)
                    continue
                lfutil.updatestandin(repo, lfile, fstandin)
            # mark all clean largefiles as dirty, just in case the update gets
            # interrupted before largefiles and lfdirstate are synchronized
            for lfile in oldclean:
                entry = lfdirstate.get_entry(lfile)
                lfdirstate.hacky_extension_update_file(
                    lfile,
                    wc_tracked=entry.tracked,
                    p1_tracked=entry.p1_tracked,
                    p2_info=entry.p2_info,
                    possibly_dirty=True,
                )
            lfdirstate.write(repo.currenttransaction())

        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

    return result
1881 1880
1882 1881
@eh.wrapfunction(scmutil, 'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After marking files touched, refresh largefiles for touched standins."""
    result = orig(repo, files, *args, **kwargs)

    # Collect the largefile names behind any touched standins.
    lfiles = [
        lf for lf in map(lfutil.splitstandin, files) if lf is not None
    ]
    if lfiles:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=lfiles,
            printmessage=False,
            normallookup=True,
        )

    return result
1902 1901
1903 1902
@eh.wrapfunction(upgrade_actions, 'preservedrequirements')
@eh.wrapfunction(upgrade_actions, 'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Carry the b'largefiles' requirement across repository upgrades."""
    requirements = orig(repo)
    if b'largefiles' in repo.requirements:
        requirements.add(b'largefiles')
    return requirements
1911 1910
1912 1911
_lfscheme = b'largefile://'


@eh.wrapfunction(urlmod, 'open')
def openlargefile(orig, ui, url_, data=None, **kwargs):
    """Resolve 'largefile://<id>' URLs from the largefile store.

    Non-largefile URLs are delegated to the wrapped ``url.open``.
    """
    if not url_.startswith(_lfscheme):
        return orig(ui, url_, data=data, **kwargs)
    if data:
        # A largefile URL is read-only; payload data makes no sense here.
        raise error.ProgrammingError(
            b"cannot use data on a 'largefile://' url"
        )
    lfid = url_[len(_lfscheme) :]
    return storefactory.getlfile(ui, lfid)
General Comments 0
You need to be logged in to leave comments. Login now