##// END OF EJS Templates
largefiles: remove the first `changing_parents` in `updatelfiles`...
marmoute -
r50913:ef1540c5 default
parent child Browse files
Show More
@@ -1,675 +1,674 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import binascii
12 12 import os
13 13 import shutil
14 14
15 15 from mercurial.i18n import _
16 16 from mercurial.node import (
17 17 bin,
18 18 hex,
19 19 )
20 20
21 21 from mercurial import (
22 22 cmdutil,
23 23 context,
24 24 error,
25 25 exthelper,
26 26 hg,
27 27 lock,
28 28 logcmdutil,
29 29 match as matchmod,
30 30 pycompat,
31 31 scmutil,
32 32 util,
33 33 )
34 34 from mercurial.utils import hashutil
35 35
36 36 from ..convert import (
37 37 convcmd,
38 38 filemap,
39 39 )
40 40
41 41 from . import lfutil, storefactory
42 42
# Shorthand used in ``finally`` blocks to release several locks at once.
release = lock.release

# -- Commands ----------------------------------------------------------

# Extension helper; the @eh.command decorators below register the
# largefiles commands with Mercurial's command table.
eh = exthelper.exthelper()
48 48
49 49
@eh.command(
    b'lfconvert',
    [
        (
            b's',
            b'size',
            b'',
            _(b'minimum size (MB) for files to be converted as largefiles'),
            b'SIZE',
        ),
        (
            b'',
            b'to-normal',
            False,
            _(b'convert from a largefiles repo to a normal repo'),
        ),
    ],
    _(b'hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True,
)
def lfconvert(ui, src, dest, *pats, **opts):
    """convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all."""

    opts = pycompat.byteskwargs(opts)
    if opts[b'to_normal']:
        tolfile = False
    else:
        tolfile = True
        # the size threshold is only needed when converting *to* largefiles
        size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)

    # both ends must be local: we walk the source history directly and
    # create the destination repository on disk
    if not hg.islocal(src):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_(b'initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (
            rsrc[ctx]
            for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
        )
        # maps source node -> destination node; seeded with the null revision
        revmap = {rsrc.nullid: rdst.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted to.
            # Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, b'patterns')
            if pats:
                matcher = matchmod.match(rsrc.root, b'', list(pats))
            else:
                matcher = None

            # maps largefile name -> hash of its latest converted content
            lfiletohash = {}
            with ui.makeprogress(
                _(b'converting revisions'),
                unit=_(b'revisions'),
                total=rsrc[b'tip'].rev(),
            ) as progress:
                for ctx in ctxs:
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(
                        rsrc,
                        rdst,
                        ctx,
                        revmap,
                        lfiles,
                        normalfiles,
                        matcher,
                        size,
                        lfiletohash,
                    )

            # clean up working-directory leftovers created during conversion
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add(b'largefiles')
                scmutil.writereporequirements(rdst)
        else:
            # Converting back to normal files: reuse the convert extension,
            # substituting a source class that replaces standin contents
            # with the actual largefile data.

            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    # map the standin directory back to the repo root
                    self.filemapper.rename[lfutil.shortname] = b'.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (
                        not realname.startswith(lfutil.shortnameslash)
                        or f[0] is None
                    ):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(
                            _(b"missing largefile for '%s' in %s")
                            % (realname, realrev)
                        )
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(
                        ui, src, dest, revmapfile, opts
                    )

            # every largefile must be cached locally before conversion
            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_(b"all largefiles must be present locally"))

            # temporarily swap in our converter class for convcmd.convert
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(
                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
                )
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            # discard dirstate changes made as a side effect of conversion
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
227 227
228 228
def _lfconvert_addchangeset(
    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
):
    """Convert one source changeset ``ctx`` into ``rdst``.

    ``lfiles``/``normalfiles`` accumulate the classification of files seen
    so far across the whole conversion; ``lfiletohash`` caches the latest
    content hash written for each largefile; ``revmap`` is updated with
    the node mapping by the final commit.
    """
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        # classify each file the first time we encounter it
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.copysource()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a list
                    # and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed in lfiles
                islfile |= renamedlfile
                if b'l' in fctx.flags():
                    # symlinks cannot be largefiles
                    if renamedlfile:
                        raise error.Abort(
                            _(b'renamed/copied largefile %s becomes symlink')
                            % f
                        )
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            # the destination commits the standin, not the file itself
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if b'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(
                            _(b'largefile %s becomes symlink') % f
                        )

                # largefile was modified, update standins
                m = hashutil.sha1(b'')
                m.update(ctx[f].data())
                hash = hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = b'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash, executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # callback used by memctx to produce file contents on demand
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.copysource()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed)

            return context.memfilectx(
                repo,
                memctx,
                f,
                lfiletohash[srcfname] + b'\n',
                b'l' in fctx.flags(),
                b'x' in fctx.flags(),
                renamed,
            )
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
320 320
321 321
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit one converted changeset into ``rdst``.

    Builds a memctx mirroring ``ctx``'s metadata, commits it, copies the
    largefiles into the store, moves the dirstate parent forward, and
    records the source-node -> destination-tip mapping in ``revmap``.
    """
    memctx = context.memctx(
        rdst,
        parents,
        ctx.description(),
        dstfiles,
        getfilectx,
        ctx.user(),
        ctx.date(),
        ctx.extra(),
    )
    newnode = rdst.commitctx(memctx)
    lfutil.copyalltostore(rdst, newnode)
    rdst.setparents(newnode)
    revmap[ctx.node()] = rdst.changelog.tip()
337 337
338 338
def _getchangedfiles(ctx, parents):
    """Return the set of files touched by ``ctx`` relative to ``parents``.

    Starts from ``ctx.files()`` and, unless a parent is the null revision,
    also folds in every file whose manifest entry differs from a parent.
    """
    changed = set(ctx.files())
    if ctx.repo().nullid not in parents:
        manifest = ctx.manifest()
        for pctx in ctx.parents():
            changed.update(pctx.manifest().diff(manifest))
    return changed
348 348
349 349
def _convertparents(ctx, revmap):
    """Map ``ctx``'s parent nodes through ``revmap``.

    The result is padded with the null id so it always has exactly two
    entries, as required by memctx.
    """
    mapped = [revmap[p.node()] for p in ctx.parents()]
    nullid = ctx.repo().nullid
    mapped.extend([nullid] * (2 - len(mapped)))
    return mapped
358 358
359 359
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Build a memfilectx for normal (non-largefile) file ``f`` in ``ctx``.

    Returns None when the file is absent from ``ctx`` (removed/renamed).
    ``.hgtags`` content is rewritten through ``revmap`` so tag ids point
    at the converted nodes.
    """
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        # file not present in this revision: signal removal to memctx
        return None
    renamed = fctx.copysource()

    data = fctx.data()
    if f == b'.hgtags':
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(
        repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
    )
374 374
375 375
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    """Rewrite .hgtags content so each node id maps through ``revmap``.

    Lines that are malformed, contain an invalid id, or reference an
    unmapped node are skipped with a warning.
    """
    newdata = []
    for line in data.splitlines():
        fields = line.split(b' ', 1)
        if len(fields) != 2:
            ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
            continue
        id, name = fields
        try:
            newid = bin(id)
        except binascii.Error:
            ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
            continue
        if newid not in revmap:
            ui.warn(_(b'no mapping for id %s\n') % id)
            continue
        newdata.append(b'%s %s\n' % (hex(revmap[newid]), name))
    return b''.join(newdata)
396 396
397 397
def _islfile(file, ctx, matcher, size):
    """Return True when ``file`` should be tracked as a largefile.

    A file qualifies if ``matcher`` matches it, or if its size in ``ctx``
    is at least ``size`` megabytes. The special .hg* bookkeeping files
    never qualify; a file missing from ``ctx`` never qualifies.
    """
    # never store special .hg* files as largefiles
    if file in (b'.hgtags', b'.hgignore', b'.hgsigs'):
        return False
    if matcher and matcher(file):
        return True
    try:
        filesize = ctx.filectx(file).size()
    except error.LookupError:
        return False
    return filesize >= size * 1024 * 1024
410 410
411 411
def uploadlfiles(ui, rsrc, rdst, files):
    """upload largefiles to the central store"""

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    # ask the store which hashes it already has, and upload only the rest
    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
    present = store.exists(files)
    tosend = [h for h in files if not present[h]]
    ui.debug(b"%d largefiles need to be uploaded\n" % len(tosend))

    with ui.makeprogress(
        _(b'uploading largefiles'), unit=_(b'files'), total=len(tosend)
    ) as progress:
        for at, hash in enumerate(tosend):
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(
                    _(
                        b'largefile %s missing from store'
                        b' (needs to be uploaded)'
                    )
                    % hash
                )
            # XXX check for errors here
            store.put(source, hash)
443 443
444 444
def verifylfiles(ui, repo, all=False, contents=False):
    """Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository."""
    # either every revision or just the working directory parent
    revs = repo.revs(b'all()') if all else [b'.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)
458 458
459 459
def cachelfiles(ui, repo, node, filelist=None):
    """cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found."""
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    ctx = repo[node]
    # collect (lfile, hash) pairs whose content is absent from the cache
    toget = []
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except FileNotFoundError:
            continue  # node must be None and standin wasn't found in wctx
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        return ([], [])

    store = storefactory.openstore(repo)
    return store.get(toget)
487 487
488 488
def downloadlfiles(ui, repo):
    """Cache the largefiles referenced by every revision that touches one.

    Returns a (totalsuccess, totalmissing) pair of counts.
    """
    tonode = repo.changelog.node
    totalsuccess = totalmissing = 0
    # every revision touching a file under the standin directory
    for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
        cached, missing = cachelfiles(ui, repo, tonode(rev))
        totalsuccess += len(cached)
        totalmissing += len(missing)
    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
501 501
502 502
def updatelfiles(
    ui, repo, filelist=None, printmessage=None, normallookup=False
):
    """Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".

    Walks the standins, fetches any largefile contents that differ from
    the working copy (downloading into the cache as needed), propagates
    the standins' exec bit, and keeps the largefiles dirstate in sync.
    """
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        # lfile -> expected hash for files whose content must be refreshed
        update = {}
        # lfiles whose standin is tracked nowhere; cleaned up below
        dropped = set()
        updated, removed = 0, 0
        wvfs = repo.wvfs
        wctx = repo[None]
        for lfile in lfiles:
            lfileorig = os.path.relpath(
                scmutil.backuppath(ui, repo, lfile), start=repo.root
            )
            standin = lfutil.standin(lfile)
            standinorig = os.path.relpath(
                scmutil.backuppath(ui, repo, standin), start=repo.root
            )
            if wvfs.exists(standin):
                # a backup standin exists (e.g. from a merge); preserve the
                # current largefile alongside it and drop the backup standin
                if wvfs.exists(standinorig) and wvfs.exists(lfile):
                    shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
                    wvfs.unlinkpath(standinorig)
                expecthash = lfutil.readasstandin(wctx[standin])
                if expecthash != b'':
                    if lfile not in wctx:  # not switched to normal file
                        if repo.dirstate.get_entry(standin).any_tracked:
                            wvfs.unlinkpath(lfile, ignoremissing=True)
                        else:
                            dropped.add(lfile)

                    # allocate an entry in largefiles dirstate to prevent
                    # lfilesrepo.status() from reporting missing files as
                    # removed.
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (
                    wvfs.exists(lfile)
                    and repo.dirstate.normalize(lfile) not in wctx
                ):
                    wvfs.unlinkpath(lfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write(repo.currenttransaction())

        if lfiles:
            lfiles = [f for f in lfiles if f not in dropped]

            for f in dropped:
                repo.wvfs.unlinkpath(lfutil.standin(f))
                # This needs to happen for dropped files, otherwise they stay in
                # the M state.
                lfdirstate._map.reset_state(f)

            statuswriter(_(b'getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        with lfdirstate.changing_parents(repo):
            for lfile in lfiles:
                update1 = 0

                expecthash = update.get(lfile)
                if expecthash:
                    if not lfutil.copyfromcache(repo, expecthash, lfile):
                        # failed ... but already removed and set to normallookup
                        continue
                    # Synchronize largefile dirstate to the last modified
                    # time of the file
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                    )
                    update1 = 1

                # copy the exec mode of largefile standin from the repository's
                # dirstate to its state in the lfdirstate.
                standin = lfutil.standin(lfile)
                if wvfs.exists(standin):
                    # exec is decided by the users permissions using mask 0o100
                    standinexec = wvfs.stat(standin).st_mode & 0o100
                    st = wvfs.stat(lfile)
                    mode = st.st_mode
                    if standinexec != mode & 0o100:
                        # first remove all X bits, then shift all R bits to X
                        mode &= ~0o111
                        if standinexec:
                            mode |= (mode >> 2) & 0o111 & ~util.umask
                        wvfs.chmod(lfile, mode)
                        update1 = 1

                updated += update1

                lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write(repo.currenttransaction())
        if lfiles:
            statuswriter(
                _(b'%d largefiles updated, %d removed\n') % (updated, removed)
            )
626 625
627 626
@eh.command(
    b'lfpull',
    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
    + cmdutil.remoteopts,
    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
)
def lfpull(ui, repo, source=b"default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # remember where cachelfiles() should pull from
    repo.lfpullsource = source

    revspec = opts.get('rev', [])
    if not revspec:
        raise error.Abort(_(b'no revisions specified'))

    numcached = 0
    for rev in logcmdutil.revrange(repo, revspec):
        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
        cached, _missing = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_(b"%d largefiles cached\n") % numcached)
668 667
669 668
@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    """Hash FILE, upload it into the largefile store, and print the hash."""
    filehash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, filehash)
    ui.write(b'%s\n' % filehash)
    return 0
General Comments 0
You need to be logged in to leave comments. Login now