##// END OF EJS Templates
largefiles: define norepo in command decorator
Gregory Szorc -
r21770:15d434be default
parent child Browse files
Show More
@@ -1,130 +1,128
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''track large binary files
10 10
11 11 Large binary files tend to be not very compressible, not very
12 12 diffable, and not at all mergeable. Such files are not handled
13 13 efficiently by Mercurial's storage format (revlog), which is based on
14 14 compressed binary deltas; storing large binary files as regular
15 15 Mercurial files wastes bandwidth and disk space and increases
16 16 Mercurial's memory usage. The largefiles extension addresses these
17 17 problems by adding a centralized client-server layer on top of
18 18 Mercurial: largefiles live in a *central store* out on the network
19 19 somewhere, and you only fetch the revisions that you need when you
20 20 need them.
21 21
22 22 largefiles works by maintaining a "standin file" in .hglf/ for each
23 23 largefile. The standins are small (41 bytes: an SHA-1 hash plus
24 24 newline) and are tracked by Mercurial. Largefile revisions are
25 25 identified by the SHA-1 hash of their contents, which is written to
26 26 the standin. largefiles uses that revision ID to get/put largefile
27 27 revisions from/to the central store. This saves both disk space and
28 28 bandwidth, since you don't need to retrieve all historical revisions
29 29 of large files when you clone or pull.
30 30
31 31 To start a new repository or add new large binary files, just add
32 32 --large to your :hg:`add` command. For example::
33 33
34 34 $ dd if=/dev/urandom of=randomdata count=2000
35 35 $ hg add --large randomdata
36 36 $ hg commit -m 'add randomdata as a largefile'
37 37
38 38 When you push a changeset that adds/modifies largefiles to a remote
39 39 repository, its largefile revisions will be uploaded along with it.
40 40 Note that the remote Mercurial must also have the largefiles extension
41 41 enabled for this to work.
42 42
43 43 When you pull a changeset that affects largefiles from a remote
44 44 repository, the largefiles for the changeset will by default not be
45 45 pulled down. However, when you update to such a revision, any
46 46 largefiles needed by that revision are downloaded and cached (if
47 47 they have never been downloaded before). One way to pull largefiles
48 48 when pulling is thus to use --update, which will update your working
49 49 copy to the latest pulled revision (and thereby downloading any new
50 50 largefiles).
51 51
52 52 If you want to pull largefiles you don't need for update yet, then
53 53 you can use pull with the `--lfrev` option or the :hg:`lfpull` command.
54 54
55 55 If you know you are pulling from a non-default location and want to
56 56 download all the largefiles that correspond to the new changesets at
57 57 the same time, then you can pull with `--lfrev "pulled()"`.
58 58
59 59 If you just want to ensure that you will have the largefiles needed to
60 60 merge or rebase with new heads that you are pulling, then you can pull
61 61 with `--lfrev "head(pulled())"` flag to pre-emptively download any largefiles
62 62 that are new in the heads you are pulling.
63 63
64 64 Keep in mind that network access may now be required to update to
65 65 changesets that you have not previously updated to. The nature of the
66 66 largefiles extension means that updating is no longer guaranteed to
67 67 be a local-only operation.
68 68
69 69 If you already have large files tracked by Mercurial without the
70 70 largefiles extension, you will need to convert your repository in
71 71 order to benefit from largefiles. This is done with the
72 72 :hg:`lfconvert` command::
73 73
74 74 $ hg lfconvert --size 10 oldrepo newrepo
75 75
76 76 In repositories that already have largefiles in them, any new file
77 77 over 10MB will automatically be added as a largefile. To change this
78 78 threshold, set ``largefiles.minsize`` in your Mercurial config file
79 79 to the minimum size in megabytes to track as a largefile, or use the
80 80 --lfsize option to the add command (also in megabytes)::
81 81
82 82 [largefiles]
83 83 minsize = 2
84 84
85 85 $ hg add --lfsize 2
86 86
87 87 The ``largefiles.patterns`` config option allows you to specify a list
88 88 of filename patterns (see :hg:`help patterns`) that should always be
89 89 tracked as largefiles::
90 90
91 91 [largefiles]
92 92 patterns =
93 93 *.jpg
94 94 re:.*\.(png|bmp)$
95 95 library.zip
96 96 content/audio/*
97 97
98 98 Files that match one of these patterns will be added as largefiles
99 99 regardless of their size.
100 100
101 101 The ``largefiles.minsize`` and ``largefiles.patterns`` config options
102 102 will be ignored for any repositories not already containing a
103 103 largefile. To add the first largefile to a repository, you must
104 104 explicitly do so with the --large flag passed to the :hg:`add`
105 105 command.
106 106 '''
107 107
108 from mercurial import commands, hg, localrepo
108 from mercurial import hg, localrepo
109 109
110 110 import lfcommands
111 111 import proto
112 112 import reposetup
113 113 import uisetup as uisetupmod
114 114
# Marks this extension as bundled with (and tested against) Mercurial itself.
testedwith = 'internal'

# Re-export the reposetup callback so Mercurial's extension loader finds it
# at module level (it replaces the imported module reference of the same name).
reposetup = reposetup.reposetup
118 118
def featuresetup(ui, supported):
    """Declare support for the 'largefiles' repository requirement.

    Registered through localrepository.featuresetupfuncs so that
    Mercurial does not abort when it opens a repo whose requires file
    lists 'largefiles'.  Mutates *supported* in place.
    """
    supported.add('largefiles')
122 122
def uisetup(ui):
    # One-time dispatch hook wiring largefiles into Mercurial:
    # - register featuresetup so repos requiring 'largefiles' can be opened
    # - register the wire-protocol peer setup so remote stores are reachable
    # - delegate the remaining command/function wrapping to the uisetup module
    localrepo.localrepository.featuresetupfuncs.add(featuresetup)
    hg.wirepeersetupfuncs.append(proto.wirereposetup)
    uisetupmod.uisetup(ui)
127 127
# Commands contributed by this extension (lfconvert, lfpull, ...); the
# norepo/inferrepo behavior is declared on the @command decorators in
# lfcommands itself rather than patched onto mercurial.commands here.
cmdtable = lfcommands.cmdtable
@@ -1,572 +1,573
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import os, errno
12 12 import shutil
13 13
14 14 from mercurial import util, match as match_, hg, node, context, error, \
15 15 cmdutil, scmutil, commands
16 16 from mercurial.i18n import _
17 17 from mercurial.lock import release
18 18
19 19 import lfutil
20 20 import basestore
21 21
# -- Commands ----------------------------------------------------------

# Commands below register themselves into this table via the @command
# decorator; the extension's __init__ re-exports it as its cmdtable.
cmdtable = {}
command = cmdutil.command(cmdtable)

# Let Mercurial infer the repository for lfconvert from its path arguments
# instead of requiring the current directory to be inside a repo.
commands.inferrepo += " lfconvert"
@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
      _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    # lfconvert takes explicit repo paths, so no current repo is required
    norepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # NOTE: 'size' is only bound on this branch; it is only consumed by
        # _lfconvert_addchangeset below, which is only reached when tolfile.
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise util.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise util.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Lock destination to prevent modification while it is converted to.
        # Don't need to lock src because we are just reading from its history
        # which can't change.
        dstwlock = rdst.wlock()
        dstlock = rdst.lock()

        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        # Maps source nodes to destination nodes as changesets are converted.
        revmap = {node.nullid: node.nullid}
        if tolfile:
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # Remove the working-copy largefile material left behind by the
            # conversion: the standin directory and the checked-out largefiles.
            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    # prune now-empty parent directories (best effort)
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _addchangeset(ui, rsrc, rdst, ctx, revmap)

            ui.progress(_('converting revisions'), None)
        success = True
    finally:
        rdst.dirstate.clear()
        release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    '''Convert one changeset from a largefiles repo back to a normal repo:
    commit *ctx* into *rdst* with every standin replaced by the real
    largefile content, recording the new node in *revmap*.'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    def getfilectx(repo, memctx, f):
        # Called back by memctx for each file; f is a destination-side name.
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            # the standin's content is the SHA-1 of the largefile revision
            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)

            # If one file is missing, likely all files from this rev are
            if path is None:
                cachelfiles(ui, rsrc, ctx.node())
                path = lfutil.findfile(rsrc, hash)

                if path is None:
                    raise util.Abort(
                        _("missing largefile \'%s\' from revision %s")
                        % (f, node.hex(ctx.node())))

            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # destination names: strip the .hglf/ prefix from standins
    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
188 189
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one changeset from a normal repo into a largefiles repo:
    classify each changed file as largefile or normal (caching the
    decision in *lfiles*/*normalfiles*), write standins for largefiles,
    and commit into *rdst*.  *lfiletohash* caches the last written
    standin hash per largefile to avoid rewriting unchanged ones.'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # symlinks cannot be largefiles
                    if renamedlfile:
                        raise util.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            # largefiles are committed through their standin
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # Called back by memctx; standins get their hash as file content.
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                'l' in fctx.flags(), 'x' in fctx.flags(),
                renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
266 267
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit *dstfiles* into *rdst* as a copy of changeset *ctx*, then
    record the source-node -> destination-node mapping in *revmap*."""
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    newnode = rdst.commitctx(mctx)
    rdst.setparents(newnode)
    revmap[ctx.node()] = rdst.changelog.tip()
273 274
def _getchangedfiles(ctx, parents):
    """Return the set of files to commit for *ctx*, given its converted
    destination *parents* (merge-aware)."""
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # files present in either parent but gone from this manifest
        changed |= (set(mp1) | set(mp2)) - set(mc)
        # files whose content differs from either parent
        changed.update(f for f in mc
                       if mc[f] != mp1.get(f, None)
                       or mc[f] != mp2.get(f, None))
    return changed
286 287
287 288 # Convert src parents to dst parents
288 289 def _convertparents(ctx, revmap):
289 290 parents = []
290 291 for p in ctx.parents():
291 292 parents.append(revmap[p.node()])
292 293 while len(parents) < 2:
293 294 parents.append(node.nullid)
294 295 return parents
295 296
def _getnormalcontext(repo, ctx, f, revmap):
    """Build a memfilectx for normal (non-largefile) file *f* of *ctx*,
    remapping .hgtags contents through *revmap*."""
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        # absent from the manifest: removed/renamed in this revision
        raise IOError
    copysource = fctx.renamed()
    if copysource:
        copysource = copysource[0]

    data = fctx.data()
    if f == '.hgtags':
        # tag files reference source nodes; rewrite them to dest nodes
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), copysource)
311 312
312 313 # Remap tag data using a revision map
313 314 def _converttags(ui, revmap, data):
314 315 newdata = []
315 316 for line in data.splitlines():
316 317 try:
317 318 id, name = line.split(' ', 1)
318 319 except ValueError:
319 320 ui.warn(_('skipping incorrectly formatted tag %s\n')
320 321 % line)
321 322 continue
322 323 try:
323 324 newid = node.bin(id)
324 325 except TypeError:
325 326 ui.warn(_('skipping incorrectly formatted id %s\n')
326 327 % id)
327 328 continue
328 329 try:
329 330 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
330 331 name))
331 332 except KeyError:
332 333 ui.warn(_('no mapping for id %s\n') % id)
333 334 continue
334 335 return ''.join(newdata)
335 336
336 337 def _islfile(file, ctx, matcher, size):
337 338 '''Return true if file should be considered a largefile, i.e.
338 339 matcher matches it or it is larger than size.'''
339 340 # never store special .hg* files as largefiles
340 341 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
341 342 return False
342 343 if matcher and matcher(file):
343 344 return True
344 345 try:
345 346 return ctx.filectx(file).size() >= size * 1024 * 1024
346 347 except error.LookupError:
347 348 return False
348 349
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''
    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    # ask the store once which hashes it already has, then upload the rest
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    missing = [h for h in files if not retval[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(missing))

    for at, fhash in enumerate(missing):
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=len(missing))
        source = lfutil.findfile(rsrc, fhash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % fhash)
        # XXX check for errors here
        store.put(source, fhash)
    ui.progress(_('uploading largefiles'), None)
374 375
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository.'''
    revs = ['.']
    if all:
        # Pass a list to the function rather than an iterator because we know
        # a list will work.
        revs = range(len(repo))

    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
390 391
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        # restrict to the requested subset of largefiles
        lfiles = set(lfiles) & set(filelist)
    toget = []

    for lfile in lfiles:
        try:
            # the standin's content is the expected SHA-1 of the largefile
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        except IOError, err:
            if err.errno == errno.ENOENT:
                continue # node must be None and standin wasn't found in wctx
            raise
        # only fetch largefiles not already present in the local store
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])
419 420
def downloadlfiles(ui, repo, rev=None):
    """Fetch into the local cache the largefiles referenced by the given
    revisions; returns (cached, missing) counts."""
    matchfn = scmutil.match(repo[None],
                            [repo.wjoin(lfutil.shortname)], {})

    def prepare(ctx, fns):
        pass

    fetched = 0
    failed = 0
    # walkchangerevs on an empty rev list would walk every revision
    if rev != []:
        for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
                                          prepare):
            cached, missing = cachelfiles(ui, repo, ctx.node())
            fetched += len(cached)
            failed += len(missing)
    ui.status(_("%d additional largefiles cached\n") % fetched)
    if failed > 0:
        ui.status(_("%d largefiles failed to download\n") % failed)
    return fetched, failed
437 438
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    # Bring working-directory largefiles in sync with their standins:
    # fetch changed largefiles from the cache, remove orphaned ones, and
    # synchronize the largefiles dirstate with the repository dirstate.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        # lfile -> expected hash, for largefiles that need their content
        # refreshed from the cache in the second pass below
        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                # a pending .orig standin means the largefile content is the
                # pre-update one: preserve it as the largefile's .orig
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are REMOVED.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            if printmessage:
                ui.status(_('getting changed largefiles\n'))
            # make sure everything we are about to copy is in the cache
            cachelfiles(ui, repo, None, lfiles)

            for lfile in lfiles:
                update1 = 0

                expecthash = update.get(lfile)
                if expecthash:
                    if not lfutil.copyfromcache(repo, expecthash, lfile):
                        # failed ... but already removed and set to normallookup
                        continue
                    # Synchronize largefile dirstate to the last modified
                    # time of the file
                    lfdirstate.normal(lfile)
                    update1 = 1

                # copy the state of largefile standin from the repository's
                # dirstate to its state in the lfdirstate.
                abslfile = repo.wjoin(lfile)
                absstandin = repo.wjoin(lfutil.standin(lfile))
                if os.path.exists(absstandin):
                    mode = os.stat(absstandin).st_mode
                    if mode != os.stat(abslfile).st_mode:
                        os.chmod(abslfile, mode)
                        update1 = 1

                updated += update1

                state = repo.dirstate[lfutil.standin(lfile)]
                if state == 'n':
                    # When rebasing, we need to synchronize the standin and the
                    # largefile, because otherwise the largefile will get
                    # reverted.  But for commit's sake, we have to mark the
                    # file as unclean.
                    if getattr(repo, "_isrebasing", False):
                        lfdirstate.normallookup(lfile)
                    else:
                        lfdirstate.normal(lfile)
                elif state == 'r':
                    lfdirstate.remove(lfile)
                elif state == 'a':
                    lfdirstate.add(lfile)
                elif state == '?':
                    lfdirstate.drop(lfile)

        lfdirstate.write()
        if printmessage and lfiles:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
534 535
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    repo.lfpullsource = source

    revspec = opts.get('rev', [])
    if not revspec:
        raise util.Abort(_('no revisions specified'))

    # resolve the revsets, then cache the largefiles of each revision
    total = 0
    for rev in scmutil.revrange(repo, revspec):
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        cached, missing = cachelfiles(ui, repo, rev)
        total += len(cached)
    ui.status(_("%d largefiles cached\n") % total)
General Comments 0
You need to be logged in to leave comments. Login now