largefiles: replace use of walkchangerevs() with simple revset query...
Yuya Nishihara
r46026:ac7b9ed0 default
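The change is confined to downloadlfiles(): instead of building a matcher over the largefiles standin directory and walking matching changesets with cmdutil.walkchangerevs() (which also requires a do-nothing prepare() callback), the new code asks the changelog for the revisions touching that directory with a file() revset and maps each revision number back to a node. The before/after lines below are lifted from the hunk near the end of the diff and trimmed to just that loop, reusing the names that appear there (repo, ui, cachelfiles, lfutil.shortname); this is an illustrative sketch, not a standalone program.

    # Before: matcher + walkchangerevs() over the .hglf standin directory.
    match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})

    def prepare(ctx, fns):
        pass  # walkchangerevs() requires a prepare callback, unused here

    for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': None}, prepare):
        success, missing = cachelfiles(ui, repo, ctx.node())

    # After: a revset does the file matching.  file() takes a pattern, so the
    # standin directory is passed as a 'path:' pattern, and reverse() keeps
    # the newest-first visiting order of the old walkchangerevs()-based loop.
    tonode = repo.changelog.node
    for rev in repo.revs(b'reverse(file(%s))', b'path:' + lfutil.shortname):
        success, missing = cachelfiles(ui, repo, tonode(rev))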
@@ -1,668 +1,664 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10 from __future__ import absolute_import
11 11
12 12 import errno
13 13 import os
14 14 import shutil
15 15
16 16 from mercurial.i18n import _
17 17
18 18 from mercurial import (
19 19 cmdutil,
20 20 context,
21 21 error,
22 22 exthelper,
23 23 hg,
24 24 lock,
25 25 match as matchmod,
26 26 node,
27 27 pycompat,
28 28 scmutil,
29 29 util,
30 30 )
31 31 from mercurial.utils import hashutil
32 32
33 33 from ..convert import (
34 34 convcmd,
35 35 filemap,
36 36 )
37 37
38 38 from . import lfutil, storefactory
39 39
40 40 release = lock.release
41 41
42 42 # -- Commands ----------------------------------------------------------
43 43
44 44 eh = exthelper.exthelper()
45 45
46 46
47 47 @eh.command(
48 48 b'lfconvert',
49 49 [
50 50 (
51 51 b's',
52 52 b'size',
53 53 b'',
54 54 _(b'minimum size (MB) for files to be converted as largefiles'),
55 55 b'SIZE',
56 56 ),
57 57 (
58 58 b'',
59 59 b'to-normal',
60 60 False,
61 61 _(b'convert from a largefiles repo to a normal repo'),
62 62 ),
63 63 ],
64 64 _(b'hg lfconvert SOURCE DEST [FILE ...]'),
65 65 norepo=True,
66 66 inferrepo=True,
67 67 )
68 68 def lfconvert(ui, src, dest, *pats, **opts):
69 69 '''convert a normal repository to a largefiles repository
70 70
71 71 Convert repository SOURCE to a new repository DEST, identical to
72 72 SOURCE except that certain files will be converted as largefiles:
73 73 specifically, any file that matches any PATTERN *or* whose size is
74 74 above the minimum size threshold is converted as a largefile. The
75 75 size used to determine whether or not to track a file as a
76 76 largefile is the size of the first version of the file. The
77 77 minimum size can be specified either with --size or in
78 78 configuration as ``largefiles.size``.
79 79
80 80 After running this command you will need to make sure that
81 81 largefiles is enabled anywhere you intend to push the new
82 82 repository.
83 83
84 84 Use --to-normal to convert largefiles back to normal files; after
85 85 this, the DEST repository can be used without largefiles at all.'''
86 86
87 87 opts = pycompat.byteskwargs(opts)
88 88 if opts[b'to_normal']:
89 89 tolfile = False
90 90 else:
91 91 tolfile = True
92 92 size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
93 93
94 94 if not hg.islocal(src):
95 95 raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
96 96 if not hg.islocal(dest):
97 97 raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)
98 98
99 99 rsrc = hg.repository(ui, src)
100 100 ui.status(_(b'initializing destination %s\n') % dest)
101 101 rdst = hg.repository(ui, dest, create=True)
102 102
103 103 success = False
104 104 dstwlock = dstlock = None
105 105 try:
106 106 # Get a list of all changesets in the source. The easy way to do this
107 107 # is to simply walk the changelog, using changelog.nodesbetween().
108 108 # Take a look at mercurial/revlog.py:639 for more details.
109 109 # Use a generator instead of a list to decrease memory usage
110 110 ctxs = (
111 111 rsrc[ctx]
112 112 for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
113 113 )
114 114 revmap = {node.nullid: node.nullid}
115 115 if tolfile:
116 116 # Lock destination to prevent modification while it is converted to.
117 117 # Don't need to lock src because we are just reading from its
118 118 # history which can't change.
119 119 dstwlock = rdst.wlock()
120 120 dstlock = rdst.lock()
121 121
122 122 lfiles = set()
123 123 normalfiles = set()
124 124 if not pats:
125 125 pats = ui.configlist(lfutil.longname, b'patterns')
126 126 if pats:
127 127 matcher = matchmod.match(rsrc.root, b'', list(pats))
128 128 else:
129 129 matcher = None
130 130
131 131 lfiletohash = {}
132 132 with ui.makeprogress(
133 133 _(b'converting revisions'),
134 134 unit=_(b'revisions'),
135 135 total=rsrc[b'tip'].rev(),
136 136 ) as progress:
137 137 for ctx in ctxs:
138 138 progress.update(ctx.rev())
139 139 _lfconvert_addchangeset(
140 140 rsrc,
141 141 rdst,
142 142 ctx,
143 143 revmap,
144 144 lfiles,
145 145 normalfiles,
146 146 matcher,
147 147 size,
148 148 lfiletohash,
149 149 )
150 150
151 151 if rdst.wvfs.exists(lfutil.shortname):
152 152 rdst.wvfs.rmtree(lfutil.shortname)
153 153
154 154 for f in lfiletohash.keys():
155 155 if rdst.wvfs.isfile(f):
156 156 rdst.wvfs.unlink(f)
157 157 try:
158 158 rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
159 159 except OSError:
160 160 pass
161 161
162 162 # If there were any files converted to largefiles, add largefiles
163 163 # to the destination repository's requirements.
164 164 if lfiles:
165 165 rdst.requirements.add(b'largefiles')
166 166 scmutil.writereporequirements(rdst)
167 167 else:
168 168
169 169 class lfsource(filemap.filemap_source):
170 170 def __init__(self, ui, source):
171 171 super(lfsource, self).__init__(ui, source, None)
172 172 self.filemapper.rename[lfutil.shortname] = b'.'
173 173
174 174 def getfile(self, name, rev):
175 175 realname, realrev = rev
176 176 f = super(lfsource, self).getfile(name, rev)
177 177
178 178 if (
179 179 not realname.startswith(lfutil.shortnameslash)
180 180 or f[0] is None
181 181 ):
182 182 return f
183 183
184 184 # Substitute in the largefile data for the hash
185 185 hash = f[0].strip()
186 186 path = lfutil.findfile(rsrc, hash)
187 187
188 188 if path is None:
189 189 raise error.Abort(
190 190 _(b"missing largefile for '%s' in %s")
191 191 % (realname, realrev)
192 192 )
193 193 return util.readfile(path), f[1]
194 194
195 195 class converter(convcmd.converter):
196 196 def __init__(self, ui, source, dest, revmapfile, opts):
197 197 src = lfsource(ui, source)
198 198
199 199 super(converter, self).__init__(
200 200 ui, src, dest, revmapfile, opts
201 201 )
202 202
203 203 found, missing = downloadlfiles(ui, rsrc)
204 204 if missing != 0:
205 205 raise error.Abort(_(b"all largefiles must be present locally"))
206 206
207 207 orig = convcmd.converter
208 208 convcmd.converter = converter
209 209
210 210 try:
211 211 convcmd.convert(
212 212 ui, src, dest, source_type=b'hg', dest_type=b'hg'
213 213 )
214 214 finally:
215 215 convcmd.converter = orig
216 216 success = True
217 217 finally:
218 218 if tolfile:
219 219 rdst.dirstate.clear()
220 220 release(dstlock, dstwlock)
221 221 if not success:
222 222 # we failed, remove the new directory
223 223 shutil.rmtree(rdst.root)
224 224
225 225
226 226 def _lfconvert_addchangeset(
227 227 rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
228 228 ):
229 229 # Convert src parents to dst parents
230 230 parents = _convertparents(ctx, revmap)
231 231
232 232 # Generate list of changed files
233 233 files = _getchangedfiles(ctx, parents)
234 234
235 235 dstfiles = []
236 236 for f in files:
237 237 if f not in lfiles and f not in normalfiles:
238 238 islfile = _islfile(f, ctx, matcher, size)
239 239 # If this file was renamed or copied then copy
240 240 # the largefile-ness of its predecessor
241 241 if f in ctx.manifest():
242 242 fctx = ctx.filectx(f)
243 243 renamed = fctx.copysource()
244 244 if renamed is None:
245 245 # the code below assumes renamed to be a boolean or a list
246 246 # and won't quite work with the value None
247 247 renamed = False
248 248 renamedlfile = renamed and renamed in lfiles
249 249 islfile |= renamedlfile
250 250 if b'l' in fctx.flags():
251 251 if renamedlfile:
252 252 raise error.Abort(
253 253 _(b'renamed/copied largefile %s becomes symlink')
254 254 % f
255 255 )
256 256 islfile = False
257 257 if islfile:
258 258 lfiles.add(f)
259 259 else:
260 260 normalfiles.add(f)
261 261
262 262 if f in lfiles:
263 263 fstandin = lfutil.standin(f)
264 264 dstfiles.append(fstandin)
265 265 # largefile in manifest if it has not been removed/renamed
266 266 if f in ctx.manifest():
267 267 fctx = ctx.filectx(f)
268 268 if b'l' in fctx.flags():
269 269 renamed = fctx.copysource()
270 270 if renamed and renamed in lfiles:
271 271 raise error.Abort(
272 272 _(b'largefile %s becomes symlink') % f
273 273 )
274 274
275 275 # largefile was modified, update standins
276 276 m = hashutil.sha1(b'')
277 277 m.update(ctx[f].data())
278 278 hash = node.hex(m.digest())
279 279 if f not in lfiletohash or lfiletohash[f] != hash:
280 280 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
281 281 executable = b'x' in ctx[f].flags()
282 282 lfutil.writestandin(rdst, fstandin, hash, executable)
283 283 lfiletohash[f] = hash
284 284 else:
285 285 # normal file
286 286 dstfiles.append(f)
287 287
288 288 def getfilectx(repo, memctx, f):
289 289 srcfname = lfutil.splitstandin(f)
290 290 if srcfname is not None:
291 291 # if the file isn't in the manifest then it was removed
292 292 # or renamed, return None to indicate this
293 293 try:
294 294 fctx = ctx.filectx(srcfname)
295 295 except error.LookupError:
296 296 return None
297 297 renamed = fctx.copysource()
298 298 if renamed:
299 299 # standin is always a largefile because largefile-ness
300 300 # doesn't change after rename or copy
301 301 renamed = lfutil.standin(renamed)
302 302
303 303 return context.memfilectx(
304 304 repo,
305 305 memctx,
306 306 f,
307 307 lfiletohash[srcfname] + b'\n',
308 308 b'l' in fctx.flags(),
309 309 b'x' in fctx.flags(),
310 310 renamed,
311 311 )
312 312 else:
313 313 return _getnormalcontext(repo, ctx, f, revmap)
314 314
315 315 # Commit
316 316 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
317 317
318 318
319 319 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
320 320 mctx = context.memctx(
321 321 rdst,
322 322 parents,
323 323 ctx.description(),
324 324 dstfiles,
325 325 getfilectx,
326 326 ctx.user(),
327 327 ctx.date(),
328 328 ctx.extra(),
329 329 )
330 330 ret = rdst.commitctx(mctx)
331 331 lfutil.copyalltostore(rdst, ret)
332 332 rdst.setparents(ret)
333 333 revmap[ctx.node()] = rdst.changelog.tip()
334 334
335 335
336 336 # Generate list of changed files
337 337 def _getchangedfiles(ctx, parents):
338 338 files = set(ctx.files())
339 339 if node.nullid not in parents:
340 340 mc = ctx.manifest()
341 341 for pctx in ctx.parents():
342 342 for fn in pctx.manifest().diff(mc):
343 343 files.add(fn)
344 344 return files
345 345
346 346
347 347 # Convert src parents to dst parents
348 348 def _convertparents(ctx, revmap):
349 349 parents = []
350 350 for p in ctx.parents():
351 351 parents.append(revmap[p.node()])
352 352 while len(parents) < 2:
353 353 parents.append(node.nullid)
354 354 return parents
355 355
356 356
357 357 # Get memfilectx for a normal file
358 358 def _getnormalcontext(repo, ctx, f, revmap):
359 359 try:
360 360 fctx = ctx.filectx(f)
361 361 except error.LookupError:
362 362 return None
363 363 renamed = fctx.copysource()
364 364
365 365 data = fctx.data()
366 366 if f == b'.hgtags':
367 367 data = _converttags(repo.ui, revmap, data)
368 368 return context.memfilectx(
369 369 repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
370 370 )
371 371
372 372
373 373 # Remap tag data using a revision map
374 374 def _converttags(ui, revmap, data):
375 375 newdata = []
376 376 for line in data.splitlines():
377 377 try:
378 378 id, name = line.split(b' ', 1)
379 379 except ValueError:
380 380 ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
381 381 continue
382 382 try:
383 383 newid = node.bin(id)
384 384 except TypeError:
385 385 ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
386 386 continue
387 387 try:
388 388 newdata.append(b'%s %s\n' % (node.hex(revmap[newid]), name))
389 389 except KeyError:
390 390 ui.warn(_(b'no mapping for id %s\n') % id)
391 391 continue
392 392 return b''.join(newdata)
393 393
394 394
395 395 def _islfile(file, ctx, matcher, size):
396 396 '''Return true if file should be considered a largefile, i.e.
397 397 matcher matches it or it is larger than size.'''
398 398 # never store special .hg* files as largefiles
399 399 if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
400 400 return False
401 401 if matcher and matcher(file):
402 402 return True
403 403 try:
404 404 return ctx.filectx(file).size() >= size * 1024 * 1024
405 405 except error.LookupError:
406 406 return False
407 407
408 408
409 409 def uploadlfiles(ui, rsrc, rdst, files):
410 410 '''upload largefiles to the central store'''
411 411
412 412 if not files:
413 413 return
414 414
415 415 store = storefactory.openstore(rsrc, rdst, put=True)
416 416
417 417 at = 0
418 418 ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
419 419 retval = store.exists(files)
420 420 files = [h for h in files if not retval[h]]
421 421 ui.debug(b"%d largefiles need to be uploaded\n" % len(files))
422 422
423 423 with ui.makeprogress(
424 424 _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
425 425 ) as progress:
426 426 for hash in files:
427 427 progress.update(at)
428 428 source = lfutil.findfile(rsrc, hash)
429 429 if not source:
430 430 raise error.Abort(
431 431 _(
432 432 b'largefile %s missing from store'
433 433 b' (needs to be uploaded)'
434 434 )
435 435 % hash
436 436 )
437 437 # XXX check for errors here
438 438 store.put(source, hash)
439 439 at += 1
440 440
441 441
442 442 def verifylfiles(ui, repo, all=False, contents=False):
443 443 '''Verify that every largefile revision in the current changeset
444 444 exists in the central store. With --contents, also verify that
445 445 the contents of each local largefile file revision are correct (SHA-1 hash
446 446 matches the revision ID). With --all, check every changeset in
447 447 this repository.'''
448 448 if all:
449 449 revs = repo.revs(b'all()')
450 450 else:
451 451 revs = [b'.']
452 452
453 453 store = storefactory.openstore(repo)
454 454 return store.verify(revs, contents=contents)
455 455
456 456
457 457 def cachelfiles(ui, repo, node, filelist=None):
458 458 '''cachelfiles ensures that all largefiles needed by the specified revision
459 459 are present in the repository's largefile cache.
460 460
461 461 returns a tuple (cached, missing). cached is the list of files downloaded
462 462 by this operation; missing is the list of files that were needed but could
463 463 not be found.'''
464 464 lfiles = lfutil.listlfiles(repo, node)
465 465 if filelist:
466 466 lfiles = set(lfiles) & set(filelist)
467 467 toget = []
468 468
469 469 ctx = repo[node]
470 470 for lfile in lfiles:
471 471 try:
472 472 expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
473 473 except IOError as err:
474 474 if err.errno == errno.ENOENT:
475 475 continue # node must be None and standin wasn't found in wctx
476 476 raise
477 477 if not lfutil.findfile(repo, expectedhash):
478 478 toget.append((lfile, expectedhash))
479 479
480 480 if toget:
481 481 store = storefactory.openstore(repo)
482 482 ret = store.get(toget)
483 483 return ret
484 484
485 485 return ([], [])
486 486
487 487
488 488 def downloadlfiles(ui, repo):
- 489 match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})
- 490
- 491 def prepare(ctx, fns):
- 492 pass
- 493
+ 489 tonode = repo.changelog.node
494 490 totalsuccess = 0
495 491 totalmissing = 0
- 496 for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': None}, prepare):
- 497 success, missing = cachelfiles(ui, repo, ctx.node())
+ 492 for rev in repo.revs(b'reverse(file(%s))', b'path:' + lfutil.shortname):
+ 493 success, missing = cachelfiles(ui, repo, tonode(rev))
498 494 totalsuccess += len(success)
499 495 totalmissing += len(missing)
500 496 ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
501 497 if totalmissing > 0:
502 498 ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
503 499 return totalsuccess, totalmissing
504 500
505 501
506 502 def updatelfiles(
507 503 ui, repo, filelist=None, printmessage=None, normallookup=False
508 504 ):
509 505 '''Update largefiles according to standins in the working directory
510 506
511 507 If ``printmessage`` is other than ``None``, it means "print (or
512 508 ignore, for false) message forcibly".
513 509 '''
514 510 statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
515 511 with repo.wlock():
516 512 lfdirstate = lfutil.openlfdirstate(ui, repo)
517 513 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
518 514
519 515 if filelist is not None:
520 516 filelist = set(filelist)
521 517 lfiles = [f for f in lfiles if f in filelist]
522 518
523 519 update = {}
524 520 dropped = set()
525 521 updated, removed = 0, 0
526 522 wvfs = repo.wvfs
527 523 wctx = repo[None]
528 524 for lfile in lfiles:
529 525 lfileorig = os.path.relpath(
530 526 scmutil.backuppath(ui, repo, lfile), start=repo.root
531 527 )
532 528 standin = lfutil.standin(lfile)
533 529 standinorig = os.path.relpath(
534 530 scmutil.backuppath(ui, repo, standin), start=repo.root
535 531 )
536 532 if wvfs.exists(standin):
537 533 if wvfs.exists(standinorig) and wvfs.exists(lfile):
538 534 shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
539 535 wvfs.unlinkpath(standinorig)
540 536 expecthash = lfutil.readasstandin(wctx[standin])
541 537 if expecthash != b'':
542 538 if lfile not in wctx: # not switched to normal file
543 539 if repo.dirstate[standin] != b'?':
544 540 wvfs.unlinkpath(lfile, ignoremissing=True)
545 541 else:
546 542 dropped.add(lfile)
547 543
548 544 # use normallookup() to allocate an entry in largefiles
549 545 # dirstate to prevent lfilesrepo.status() from reporting
550 546 # missing files as removed.
551 547 lfdirstate.normallookup(lfile)
552 548 update[lfile] = expecthash
553 549 else:
554 550 # Remove lfiles for which the standin is deleted, unless the
555 551 # lfile is added to the repository again. This happens when a
556 552 # largefile is converted back to a normal file: the standin
557 553 # disappears, but a new (normal) file appears as the lfile.
558 554 if (
559 555 wvfs.exists(lfile)
560 556 and repo.dirstate.normalize(lfile) not in wctx
561 557 ):
562 558 wvfs.unlinkpath(lfile)
563 559 removed += 1
564 560
565 561 # largefile processing might be slow and be interrupted - be prepared
566 562 lfdirstate.write()
567 563
568 564 if lfiles:
569 565 lfiles = [f for f in lfiles if f not in dropped]
570 566
571 567 for f in dropped:
572 568 repo.wvfs.unlinkpath(lfutil.standin(f))
573 569
574 570 # This needs to happen for dropped files, otherwise they stay in
575 571 # the M state.
576 572 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
577 573
578 574 statuswriter(_(b'getting changed largefiles\n'))
579 575 cachelfiles(ui, repo, None, lfiles)
580 576
581 577 for lfile in lfiles:
582 578 update1 = 0
583 579
584 580 expecthash = update.get(lfile)
585 581 if expecthash:
586 582 if not lfutil.copyfromcache(repo, expecthash, lfile):
587 583 # failed ... but already removed and set to normallookup
588 584 continue
589 585 # Synchronize largefile dirstate to the last modified
590 586 # time of the file
591 587 lfdirstate.normal(lfile)
592 588 update1 = 1
593 589
594 590 # copy the exec mode of largefile standin from the repository's
595 591 # dirstate to its state in the lfdirstate.
596 592 standin = lfutil.standin(lfile)
597 593 if wvfs.exists(standin):
598 594 # exec is decided by the user's permissions using mask 0o100
599 595 standinexec = wvfs.stat(standin).st_mode & 0o100
600 596 st = wvfs.stat(lfile)
601 597 mode = st.st_mode
602 598 if standinexec != mode & 0o100:
603 599 # first remove all X bits, then shift all R bits to X
604 600 mode &= ~0o111
605 601 if standinexec:
606 602 mode |= (mode >> 2) & 0o111 & ~util.umask
607 603 wvfs.chmod(lfile, mode)
608 604 update1 = 1
609 605
610 606 updated += update1
611 607
612 608 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
613 609
614 610 lfdirstate.write()
615 611 if lfiles:
616 612 statuswriter(
617 613 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
618 614 )
619 615
620 616
621 617 @eh.command(
622 618 b'lfpull',
623 619 [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
624 620 + cmdutil.remoteopts,
625 621 _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
626 622 )
627 623 def lfpull(ui, repo, source=b"default", **opts):
628 624 """pull largefiles for the specified revisions from the specified source
629 625
630 626 Pull largefiles that are referenced from local changesets but missing
631 627 locally, pulling from a remote repository to the local cache.
632 628
633 629 If SOURCE is omitted, the 'default' path will be used.
634 630 See :hg:`help urls` for more information.
635 631
636 632 .. container:: verbose
637 633
638 634 Some examples:
639 635
640 636 - pull largefiles for all branch heads::
641 637
642 638 hg lfpull -r "head() and not closed()"
643 639
644 640 - pull largefiles on the default branch::
645 641
646 642 hg lfpull -r "branch(default)"
647 643 """
648 644 repo.lfpullsource = source
649 645
650 646 revs = opts.get('rev', [])
651 647 if not revs:
652 648 raise error.Abort(_(b'no revisions specified'))
653 649 revs = scmutil.revrange(repo, revs)
654 650
655 651 numcached = 0
656 652 for rev in revs:
657 653 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
658 654 (cached, missing) = cachelfiles(ui, repo, rev)
659 655 numcached += len(cached)
660 656 ui.status(_(b"%d largefiles cached\n") % numcached)
661 657
662 658
663 659 @eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
664 660 def debuglfput(ui, repo, filepath, **kwargs):
665 661 hash = lfutil.hashfile(filepath)
666 662 storefactory.openstore(repo).put(filepath, hash)
667 663 ui.write(b'%s\n' % hash)
668 664 return 0
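As a usage note, the revset introduced in downloadlfiles() can also be exercised on its own. The following is a minimal sketch, assuming a local repository opened from the current directory whose configuration enables the largefiles extension (otherwise the repository's 'largefiles' requirement cannot be satisfied when it is opened); the path b'.' is an assumption for illustration.

    from mercurial import extensions, hg, node, ui as uimod
    from hgext.largefiles import lfutil

    ui = uimod.ui.load()            # read the user's configuration
    extensions.loadall(ui)          # load extensions enabled in that config
    repo = hg.repository(ui, b'.')  # assumed path: the current directory
    # Same query as downloadlfiles(): revisions touching the .hglf standins,
    # newest first.
    for rev in repo.revs(b'reverse(file(%s))', b'path:' + lfutil.shortname):
        ui.write(b'%d:%s\n' % (rev, node.short(repo.changelog.node(rev))))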