##// END OF EJS Templates
largefiles: use "normallookup", if "mtime" of standin is unset...
FUJIWARA Katsunori -
r21932:21a2f31f stable
parent child Browse files
Show More
@@ -0,0 +1,53 b''
1 This file focuses mainly on updating largefiles in the working
2 directory (and ".hg/largefiles/dirstate")
3
4 $ cat >> $HGRCPATH <<EOF
5 > [ui]
6 > merge = internal:fail
7 > [extensions]
8 > largefiles =
9 > EOF
10
11 $ hg init repo
12 $ cd repo
13
14 $ echo large1 > large1
15 $ echo large2 > large2
16 $ hg add --large large1 large2
17 $ echo normal1 > normal1
18 $ hg add normal1
19 $ hg commit -m '#0'
20 $ echo 'large1 in #1' > large1
21 $ echo 'normal1 in #1' > normal1
22 $ hg commit -m '#1'
23 $ hg update -q -C 0
24 $ echo 'large2 in #2' > large2
25 $ hg commit -m '#2'
26 created new head
27
28 Test that "hg merge" updates largefiles from "other" correctly
29
30 (getting largefiles from "other" normally)
31
32 $ hg status -A large1
33 C large1
34 $ cat large1
35 large1
36 $ cat .hglf/large1
37 4669e532d5b2c093a78eca010077e708a071bb64
38 $ hg merge --config debug.dirstate.delaywrite=2
39 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 (branch merge, don't forget to commit)
41 getting changed largefiles
42 1 largefiles updated, 0 removed
43 $ hg status -A large1
44 M large1
45 $ cat large1
46 large1 in #1
47 $ cat .hglf/large1
48 58e24f733a964da346e2407a2bee99d9001184f5
49 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
50 -4669e532d5b2c093a78eca010077e708a071bb64
51 +58e24f733a964da346e2407a2bee99d9001184f5
52
53 $ cd ..
@@ -1,572 +1,575 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import os, errno
12 12 import shutil
13 13
14 14 from mercurial import util, match as match_, hg, node, context, error, \
15 15 cmdutil, scmutil, commands
16 16 from mercurial.i18n import _
17 17 from mercurial.lock import release
18 18
19 19 import lfutil
20 20 import basestore
21 21
22 22 # -- Commands ----------------------------------------------------------
23 23
# command table for this extension, populated by the @command decorator
cmdtable = {}
command = cmdutil.command(cmdtable)

@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
     ('', 'to-normal', False,
      _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # the size threshold only matters when converting *to* largefiles
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise util.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise util.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Lock destination to prevent modification while it is converted to.
        # Don't need to lock src because we are just reading from its history
        # which can't change.
        dstwlock = rdst.wlock()
        dstlock = rdst.lock()

        # Get a list of all changesets in the source. The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        # map source node -> destination node, seeded with the null revision
        revmap = {node.nullid: node.nullid}
        if tolfile:
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # clean up working-directory leftovers of the conversion:
            # the standin directory and the largefiles written via wwrite
            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    # prune now-empty parent directories, ignore non-empty
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            # --to-normal: expand standins back into real file contents
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _addchangeset(ui, rsrc, rdst, ctx, revmap)

            ui.progress(_('converting revisions'), None)
        success = True
    finally:
        rdst.dirstate.clear()
        release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
134 134
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    '''Convert one changeset of a largefiles repo rsrc into a normal
    changeset in rdst, replacing standins with the real largefile
    contents (the --to-normal direction of lfconvert).'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    def getfilectx(repo, memctx, f):
        # memctx callback: dereference largefile standins to real content
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            # the standin's content is the SHA-1 hash of the largefile
            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)

            # If one file is missing, likely all files from this rev are
            if path is None:
                cachelfiles(ui, rsrc, ctx.node())
                path = lfutil.findfile(rsrc, hash)

            if path is None:
                raise util.Abort(
                    _("missing largefile \'%s\' from revision %s")
                    % (f, node.hex(ctx.node())))

            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # record destination filenames with the standin prefix stripped
    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
188 188
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one changeset of the normal repo rsrc into rdst, turning
    qualifying files (per matcher/size) into largefile standins.

    lfiles, normalfiles and lfiletohash are shared across calls so that
    a file's largefile-ness and its last written hash stay consistent
    over the whole conversion.'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        # classify each file the first time we see it
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # symlinks cannot be stored as largefiles
                    if renamedlfile:
                        raise util.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # memctx callback: standins get the largefile's hash as content,
        # normal files are passed through (with .hgtags remapped)
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                'l' in fctx.flags(), 'x' in fctx.flags(),
                renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
266 266
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit a converted changeset into rdst and record the resulting
    node in revmap (source node -> destination node)."""
    memctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                            getfilectx, ctx.user(), ctx.date(), ctx.extra())
    newnode = rdst.commitctx(memctx)
    # make the new commit the dirstate parent so children stack on top of it
    rdst.setparents(newnode)
    revmap[ctx.node()] = rdst.changelog.tip()
273 273
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    """Return the set of files that need converting for ctx.

    For merges (two real parents) ctx.files() is not enough: also pick
    up files present only in a parent manifest, and files whose content
    differs from at least one parent.
    """
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1, mp2 = [p.manifest() for p in ctx.parents()[:2]]
        # files present in a parent but not in the merge result
        changed |= (set(mp1) | set(mp2)) - set(mc)
        # files whose content differs from either parent
        changed.update(f for f in mc
                       if mc[f] != mp1.get(f, None) or
                       mc[f] != mp2.get(f, None))
    return changed
286 286
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    """Map ctx's source parents through revmap, padding with nullid so the
    result always has exactly two entries (as memctx expects)."""
    converted = [revmap[p.node()] for p in ctx.parents()]
    converted += [node.nullid] * (2 - len(converted))
    return converted
295 295
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    """Build a memfilectx for a non-largefile; node ids inside .hgtags are
    remapped so tags point at the converted changesets."""
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        # absent from this revision: removed or renamed away
        raise IOError
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        data = _converttags(repo.ui, revmap, data)
    islink = 'l' in fctx.flags()
    isexec = 'x' in fctx.flags()
    return context.memfilectx(repo, f, data, islink, isexec, renamed)
311 311
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    """Rewrite the contents of a .hgtags blob, mapping each tagged node
    through revmap; malformed or unmapped lines are skipped with a
    warning rather than aborting the conversion."""
    newlines = []
    for line in data.splitlines():
        fields = line.split(' ', 1)
        if len(fields) != 2:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                    % line)
            continue
        hexid, tagname = fields
        try:
            oldnode = node.bin(hexid)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                    % hexid)
            continue
        if oldnode not in revmap:
            ui.warn(_('no mapping for id %s\n') % hexid)
            continue
        newlines.append('%s %s\n' % (node.hex(revmap[oldnode]), tagname))
    return ''.join(newlines)
335 335
336 336 def _islfile(file, ctx, matcher, size):
337 337 '''Return true if file should be considered a largefile, i.e.
338 338 matcher matches it or it is larger than size.'''
339 339 # never store special .hg* files as largefiles
340 340 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
341 341 return False
342 342 if matcher and matcher(file):
343 343 return True
344 344 try:
345 345 return ctx.filectx(file).size() >= size * 1024 * 1024
346 346 except error.LookupError:
347 347 return False
348 348
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    # only upload the hashes the remote store does not already have
    files = [h for h in files if not retval[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for at, hash in enumerate(files):
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
    ui.progress(_('uploading largefiles'), None)
374 374
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    # Pass a list to the function rather than an iterator because we know a
    # list will work.
    revs = range(len(repo)) if all else ['.']
    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
390 390
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    # NOTE(review): the 'node' parameter shadows the imported 'node' module;
    # the module is not used inside this function, so this is harmless here.
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)
    toget = []

    for lfile in lfiles:
        try:
            # the standin's content is the expected SHA-1 of the largefile
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        except IOError, err:
            if err.errno == errno.ENOENT:
                continue # node must be None and standin wasn't found in wctx
            raise
        # only fetch largefiles not already present in the local cache
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])
419 419
def downloadlfiles(ui, repo, rev=None):
    '''Fetch into the local cache the largefiles referenced by the
    changesets selected by 'rev', returning the pair
    (number cached, number that failed to download).'''
    # restrict the walk to the standin directory so only changesets that
    # touch largefiles are visited (presumably — confirm walkchangerevs
    # semantics with a directory pattern)
    matchfn = scmutil.match(repo[None],
        [repo.wjoin(lfutil.shortname)], {})
    # walkchangerevs requires a 'prepare' callback; nothing to do here
    def prepare(ctx, fns):
        pass
    totalsuccess = 0
    totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
            prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
437 437
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Update largefiles in the working directory to match their standins,
    and synchronize the largefiles dirstate (".hg/largefiles/dirstate")
    with the repository dirstate.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    # preserve the pre-update largefile content as .orig
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are REMOVED.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            if printmessage:
                ui.status(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    # propagate the standin's exec bit to the largefile
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            # read state and mtime straight from the dirstate entry; a
            # negative mtime means the entry was stored with its timestamp
            # deliberately unset (e.g. debug.dirstate.delaywrite)
            standin = lfutil.standin(lfile)
            if standin in repo.dirstate:
                stat = repo.dirstate._map[standin]
                state, mtime = stat[0], stat[3]
            else:
                state, mtime = '?', -1
            if state == 'n':
                if mtime < 0:
                    # state 'n' doesn't ensure 'clean' in this case
                    lfdirstate.normallookup(lfile)
                else:
                    lfdirstate.normal(lfile)
            elif state == 'r':
                lfdirstate.remove(lfile)
            elif state == 'a':
                lfdirstate.add(lfile)
            elif state == '?':
                lfdirstate.drop(lfile)

        lfdirstate.write()
        if printmessage and lfiles:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
534 537
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # record where to pull from so the store machinery can locate it
    repo.lfpullsource = source

    revs = opts.get('rev', [])
    if not revs:
        raise util.Abort(_('no revisions specified'))
    revs = scmutil.revrange(repo, revs)

    numcached = 0
    for rev in revs:
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        # only the count of newly cached files is reported; misses are
        # silently ignored here
        (cached, missing) = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_("%d largefiles cached\n") % numcached)
General Comments 0
You need to be logged in to leave comments. Login now