##// END OF EJS Templates
largefiles: specify unit for ui.progress when operating on files...
av6 -
r28463:19b4a208 default
parent child Browse files
Show More
@@ -1,221 +1,221 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''base class for store implementations and store-related utility code'''
10 10
11 11 import re
12 12
13 13 from mercurial import util, node, hg, error
14 14 from mercurial.i18n import _
15 15
16 16 import lfutil
17 17
class StoreError(Exception):
    '''Raised when there is a problem getting files from or putting
    files to a central store.'''
    def __init__(self, filename, hash, url, detail):
        # remember everything needed to build a useful error report later
        self.detail = detail
        self.url = url
        self.hash = hash
        self.filename = filename

    def longmessage(self):
        # multi-line, user-facing report; passwords are stripped from the URL
        safeurl = util.hidepassword(self.url)
        return (_("error getting id %s from url %s for file %s: %s\n")
                % (self.hash, safeurl, self.filename, self.detail))

    def __str__(self):
        # short form used when the exception is printed directly
        safeurl = util.hidepassword(self.url)
        return "%s: %s" % (safeurl, self.detail)
34 34
class basestore(object):
    '''Abstract base class for largefile stores.

    A store maps largefile hashes to file contents.  Concrete
    subclasses implement put(), exists(), _getfile() and _verifyfile().
    '''
    def __init__(self, ui, repo, url):
        self.ui = ui
        self.repo = repo
        self.url = url

    def put(self, source, hash):
        '''Put source file into the store so it can be retrieved by hash.'''
        raise NotImplementedError('abstract method')

    def exists(self, hashes):
        '''Check to see if the store contains the given hashes. Given an
        iterable of hashes it returns a mapping from hash to bool.'''
        raise NotImplementedError('abstract method')

    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root. files is a list of (filename, hash)
        tuples. Return (success, missing), lists of files successfully
        downloaded and those not found in the store. success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get. (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        at = 0
        # query all hashes in one batch up front; a single exists() call is
        # far cheaper for remote stores than probing one file at a time
        available = self.exists(set(hash for (_filename, hash) in files))
        for filename, hash in files:
            # unit is marked for translation so localized output uses the
            # translated word for 'files'
            ui.progress(_('getting largefiles'), at, unit=_('files'),
                        total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            if not available.get(hash):
                ui.warn(_('%s: largefile %s not available from %s\n')
                        % (filename, hash, util.hidepassword(self.url)))
                missing.append(filename)
                continue

            if self._gethash(filename, hash):
                success.append((filename, hash))
            else:
                missing.append(filename)

        # pos=None ends the progress topic
        ui.progress(_('getting largefiles'), None)
        return (success, missing)

    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        Returns True on success; False when the download failed or the
        downloaded content does not match the expected hash.
        """
        util.makedirs(lfutil.storepath(self.repo, ''))
        storefilename = lfutil.storepath(self.repo, hash)

        # download into a temporary file so that an interrupted or corrupt
        # transfer never leaves a bad entry under the final store name
        tmpname = storefilename + '.tmp'
        tmpfile = util.atomictempfile(tmpname,
                                      createmode=self.repo.store.createmode)

        try:
            gothash = self._getfile(tmpfile, filename, hash)
        except StoreError as err:
            self.ui.warn(err.longmessage())
            gothash = ""
        tmpfile.close()

        if gothash != hash:
            # gothash == "" means the download itself failed (already
            # reported above); anything else is a content mismatch
            if gothash != "":
                self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                             % (filename, hash, gothash))
            util.unlink(tmpname)
            return False

        # content verified: move into place and share via the usercache
        util.rename(tmpname, storefilename)
        lfutil.linktousercache(self.repo, hash)
        return True

    def verify(self, revs, contents=False):
        '''Verify the existence (and, optionally, contents) of every big
        file revision referenced by every changeset in revs.
        Return 0 if all is well, non-zero on any errors.'''
        failed = False

        self.ui.status(_('searching %d changesets for largefiles\n') %
                       len(revs))
        verified = set()                # set of (filename, filenode) tuples

        for rev in revs:
            cctx = self.repo[rev]
            cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

            for standin in cctx:
                # _verifyfile returns true when the file IS a standin AND
                # has problems (see its docstring below)
                if self._verifyfile(cctx, cset, contents, standin, verified):
                    failed = True

        numrevs = len(verified)
        numlfiles = len(set([fname for (fname, fnode) in verified]))
        if contents:
            self.ui.status(
                _('verified contents of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles))
        else:
            self.ui.status(
                _('verified existence of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles))
        return int(failed)

    def _getfile(self, tmpfile, filename, hash):
        '''Fetch one revision of one file from the store and write it
        to tmpfile. Compute the hash of the file on-the-fly as it
        downloads and return the hash. Close tmpfile. Raise
        StoreError if unable to download the file (e.g. it does not
        exist in the store).'''
        raise NotImplementedError('abstract method')

    def _verifyfile(self, cctx, cset, contents, standin, verified):
        '''Perform the actual verification of a file in the store.
        'cset' is only used in warnings.
        'contents' controls verification of content hash.
        'standin' is the standin path of the largefile to verify.
        'verified' is maintained as a set of already verified files.
        Returns _true_ if it is a standin and any problems are found!
        '''
        raise NotImplementedError('abstract method')
162 162
# NOTE(review): imported here rather than at the top, presumably to avoid a
# circular import (the store modules build on this one) -- confirm before moving
import localstore, wirestore

# map a URL scheme to the store implementations to try for it, in order
_storeprovider = {
    'file': [localstore.localstore],
    'http': [wirestore.wirestore],
    'https': [wirestore.wirestore],
    'ssh': [wirestore.wirestore],
}

# matches the scheme portion of a URL, e.g. 'https' in 'https://host/repo'
# NOTE(review): '+-.' inside the class is a character RANGE ('+' through '.')
# which also matches ','; likely meant as the literals +, -, . -- confirm
_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
173 173
# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
def _openstore(repo, remote=None, put=False):
    '''Return a largefile store object for repo.

    When 'remote' is not given it is derived from repo.lfpullsource or,
    failing that, from the configured 'default-push'/'default' paths
    ('put' selects the push path).  Raises error.Abort for unsupported
    URL schemes or when no provider accepts the remote.
    '''
    ui = repo.ui

    if not remote:
        lfpullsource = getattr(repo, 'lfpullsource', None)
        if lfpullsource:
            path = ui.expandpath(lfpullsource)
        elif put:
            path = ui.expandpath('default-push', 'default')
        else:
            path = ui.expandpath('default')

        # ui.expandpath() leaves 'default-push' and 'default' alone if
        # they cannot be expanded: fallback to the empty string,
        # meaning the current directory.
        if path == 'default-push' or path == 'default':
            path = ''
            remote = repo
        else:
            path, _branches = hg.parseurl(path)
            remote = hg.peer(repo, {}, path)

    # The path could be a scheme so use Mercurial's normal functionality
    # to resolve the scheme to a repository and use its path
    path = util.safehasattr(remote, 'url') and remote.url() or remote.path

    match = _scheme_re.match(path)
    if not match: # regular filesystem path
        scheme = 'file'
    else:
        scheme = match.group(1)

    try:
        storeproviders = _storeprovider[scheme]
    except KeyError:
        raise error.Abort(_('unsupported URL scheme %r') % scheme)

    # try each provider in order until one accepts the remote's protocol
    for classobj in storeproviders:
        try:
            return classobj(ui, repo, remote)
        except lfutil.storeprotonotcapable:
            pass

    raise error.Abort(_('%s does not appear to be a largefile store') %
                      util.hidepassword(path))
@@ -1,544 +1,544 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import os, errno
12 12 import shutil
13 13
14 14 from mercurial import util, match as match_, hg, node, context, error, \
15 15 cmdutil, scmutil, commands
16 16 from mercurial.i18n import _
17 17 from mercurial.lock import release
18 18
19 19 from hgext.convert import convcmd
20 20 from hgext.convert import filemap
21 21
22 22 import lfutil
23 23 import basestore
24 24
# -- Commands ----------------------------------------------------------

# command registration table for this extension; the @command decorator
# below records each command's options and synopsis here
cmdtable = {}
command = cmdutil.command(cmdtable)
29 29
@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # size threshold only matters for the to-largefile direction
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do
        # this is to simply walk the changelog, using
        # changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted
            # to. Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # remove the leftover .hglf standin directory from the
            # destination working directory
            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            # --to-normal: drive the convert extension with a source that
            # substitutes the real largefile contents for each standin
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            # temporarily swap in our largefile-aware converter class
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest)
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
172 172
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one source changeset ctx into rdst, rewriting qualifying
    files into largefile standins.  lfiles/normalfiles/lfiletohash are
    caches shared across the whole conversion; revmap maps source nodes
    to the corresponding destination nodes and is updated on commit.'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        # classify f once, the first time any changeset touches it
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # symlinks cannot be largefiles
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink')
                                          % f)

                # largefile was modified, update standins
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # callback used by memctx to supply file contents at commit time
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
250 250
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    """Commit a memory context mirroring ctx into rdst and record the
    resulting node in revmap."""
    memctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                            getfilectx, ctx.user(), ctx.date(), ctx.extra())
    newnode = rdst.commitctx(memctx)
    # make sure the new largefiles land in the destination store too
    lfutil.copyalltostore(rdst, newnode)
    rdst.setparents(newnode)
    revmap[ctx.node()] = rdst.changelog.tip()
258 258
def _getchangedfiles(ctx, parents):
    """Return the set of files changed in ctx, merge-aware."""
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # files present in either parent but dropped by the merge
        changed |= (set(mp1) | set(mp2)) - set(mc)
        # files whose content differs from either parent
        changed.update(f for f in mc
                       if mc[f] != mp1.get(f, None)
                       or mc[f] != mp2.get(f, None))
    return changed
271 271
def _convertparents(ctx, revmap):
    """Map ctx's parents through revmap, padded with nullid to exactly
    two entries (the form memctx expects)."""
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents
280 280
def _getnormalcontext(repo, ctx, f, revmap):
    """Build a memfilectx for a normal (non-largefile) file; return None
    when f is absent from ctx (removed or renamed away)."""
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        # tag contents reference changeset ids, which must be remapped
        data = _converttags(repo.ui, revmap, data)
    islink = 'l' in fctx.flags()
    isexec = 'x' in fctx.flags()
    return context.memfilectx(repo, f, data, islink, isexec, renamed)
296 296
def _converttags(ui, revmap, data):
    """Rewrite .hgtags data so tag ids point at converted changesets;
    malformed or unmapped lines are warned about and dropped."""
    newlines = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                % id)
            continue
        try:
            newlines.append('%s %s\n' % (node.hex(revmap[newid]),
                name))
        except KeyError:
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(newlines)
320 320
def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file in ('.hgtags', '.hgignore', '.hgsigs'):
        return False
    if matcher and matcher(file):
        return True
    try:
        # size is in MB; compare against the first revision's byte count
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        return False
333 333
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store

    'files' is an iterable of largefile hashes; hashes the remote store
    already has are skipped.  Raises error.Abort when a needed largefile
    is missing from the local store.'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    at = 0
    # one batched statlfile round-trip to learn what the remote already has
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    # (Python 2: filter returns a list, so the len() calls below are fine)
    files = filter(lambda h: not retval[h], files)
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for hash in files:
        # unit marked for translation so localized output shows 'files'
        ui.progress(_('uploading largefiles'), at, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    # pos=None ends the progress topic
    ui.progress(_('uploading largefiles'), None)
359 359
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    revs = repo.revs('all()') if all else ['.']
    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
373 373
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        # restrict to the requested subset
        lfiles = set(lfiles) & set(filelist)
    toget = []

    for lfile in lfiles:
        try:
            # the standin's content is the expected largefile hash
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        except IOError as err:
            if err.errno == errno.ENOENT:
                continue # node must be None and standin wasn't found in wctx
            raise
        if not lfutil.findfile(repo, expectedhash):
            # not in the local store or usercache: fetch it from the store
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])
402 402
def downloadlfiles(ui, repo, rev=None):
    '''Cache the largefiles referenced by the changesets in rev (a list
    of revision specs).  Returns (totalsuccess, totalmissing) counts
    summed over all visited changesets.'''
    matchfn = scmutil.match(repo[None],
                            [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        # no per-changeset preparation needed; we only want the iteration
        pass
    totalsuccess = 0
    totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
420 420
def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}                     # lfile -> expected hash
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            abslfileorig = scmutil.origpath(ui, repo, abslfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            absstandinorig = scmutil.origpath(ui, repo, absstandin)
            if os.path.exists(absstandin):
                if (os.path.exists(absstandinorig) and
                        os.path.exists(abslfile)):
                    # keep a .orig backup of the largefile to match the
                    # standin's .orig, then drop the standin backup
                    shutil.copyfile(abslfile, abslfileorig)
                    util.unlinkpath(absstandinorig)
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                        repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    # propagate permission changes (e.g. the exec bit)
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
                         removed))
506 506
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    # record the source so _openstore() (reached via cachelfiles) pulls
    # from it rather than from the configured default path
    repo.lfpullsource = source

    revs = opts.get('rev', [])
    if not revs:
        raise error.Abort(_('no revisions specified'))
    revs = scmutil.revrange(repo, revs)

    numcached = 0
    for rev in revs:
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        (cached, missing) = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_("%d largefiles cached\n") % numcached)
@@ -1,309 +1,309 b''
1 1 This file contains testcases that tend to be related to the wire protocol part
2 2 of largefiles.
3 3
4 4 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
5 5 $ mkdir "${USERCACHE}"
6 6 $ cat >> $HGRCPATH <<EOF
7 7 > [extensions]
8 8 > largefiles=
9 9 > purge=
10 10 > rebase=
11 11 > transplant=
12 12 > [phases]
13 13 > publish=False
14 14 > [largefiles]
15 15 > minsize=2
16 16 > patterns=glob:**.dat
17 17 > usercache=${USERCACHE}
18 18 > [web]
19 19 > allow_archive = zip
20 20 > [hooks]
21 21 > precommit=sh -c "echo \\"Invoking status precommit hook\\"; hg status"
22 22 > EOF
23 23
24 24
25 25 #if serve
26 26 vanilla clients not locked out from largefiles servers on vanilla repos
27 27 $ mkdir r1
28 28 $ cd r1
29 29 $ hg init
30 30 $ echo c1 > f1
31 31 $ hg add f1
32 32 $ hg commit -m "m1"
33 33 Invoking status precommit hook
34 34 A f1
35 35 $ cd ..
36 36 $ hg serve -R r1 -d -p $HGPORT --pid-file hg.pid
37 37 $ cat hg.pid >> $DAEMON_PIDS
38 38 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT r2
39 39 requesting all changes
40 40 adding changesets
41 41 adding manifests
42 42 adding file changes
43 43 added 1 changesets with 1 changes to 1 files
44 44 updating to branch default
45 45 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 46
47 47 largefiles clients still work with vanilla servers
48 48 $ hg --config extensions.largefiles=! serve -R r1 -d -p $HGPORT1 --pid-file hg.pid
49 49 $ cat hg.pid >> $DAEMON_PIDS
50 50 $ hg clone http://localhost:$HGPORT1 r3
51 51 requesting all changes
52 52 adding changesets
53 53 adding manifests
54 54 adding file changes
55 55 added 1 changesets with 1 changes to 1 files
56 56 updating to branch default
57 57 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 58 #endif
59 59
60 60 vanilla clients locked out from largefiles http repos
61 61 $ mkdir r4
62 62 $ cd r4
63 63 $ hg init
64 64 $ echo c1 > f1
65 65 $ hg add --large f1
66 66 $ hg commit -m "m1"
67 67 Invoking status precommit hook
68 68 A f1
69 69 $ cd ..
70 70
71 71 largefiles can be pushed locally (issue3583)
72 72 $ hg init dest
73 73 $ cd r4
74 74 $ hg outgoing ../dest
75 75 comparing with ../dest
76 76 searching for changes
77 77 changeset: 0:639881c12b4c
78 78 tag: tip
79 79 user: test
80 80 date: Thu Jan 01 00:00:00 1970 +0000
81 81 summary: m1
82 82
83 83 $ hg push ../dest
84 84 pushing to ../dest
85 85 searching for changes
86 86 adding changesets
87 87 adding manifests
88 88 adding file changes
89 89 added 1 changesets with 1 changes to 1 files
90 90
91 91 exit code with nothing outgoing (issue3611)
92 92 $ hg outgoing ../dest
93 93 comparing with ../dest
94 94 searching for changes
95 95 no changes found
96 96 [1]
97 97 $ cd ..
98 98
99 99 #if serve
100 100 $ hg serve -R r4 -d -p $HGPORT2 --pid-file hg.pid
101 101 $ cat hg.pid >> $DAEMON_PIDS
102 102 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT2 r5
103 103 abort: remote error:
104 104
105 105 This repository uses the largefiles extension.
106 106
107 107 Please enable it in your Mercurial config file.
108 108 [255]
109 109
110 110 used all HGPORTs, kill all daemons
111 111 $ killdaemons.py
112 112 #endif
113 113
114 114 vanilla clients locked out from largefiles ssh repos
115 115 $ hg --config extensions.largefiles=! clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
116 116 remote:
117 117 remote: This repository uses the largefiles extension.
118 118 remote:
119 119 remote: Please enable it in your Mercurial config file.
120 120 remote:
121 121 remote: -
122 122 abort: remote error
123 123 (check previous remote output)
124 124 [255]
125 125
126 126 #if serve
127 127
128 128 largefiles clients refuse to push largefiles repos to vanilla servers
129 129 $ mkdir r6
130 130 $ cd r6
131 131 $ hg init
132 132 $ echo c1 > f1
133 133 $ hg add f1
134 134 $ hg commit -m "m1"
135 135 Invoking status precommit hook
136 136 A f1
137 137 $ cat >> .hg/hgrc <<!
138 138 > [web]
139 139 > push_ssl = false
140 140 > allow_push = *
141 141 > !
142 142 $ cd ..
143 143 $ hg clone r6 r7
144 144 updating to branch default
145 145 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
146 146 $ cd r7
147 147 $ echo c2 > f2
148 148 $ hg add --large f2
149 149 $ hg commit -m "m2"
150 150 Invoking status precommit hook
151 151 A f2
152 152 $ hg --config extensions.largefiles=! -R ../r6 serve -d -p $HGPORT --pid-file ../hg.pid
153 153 $ cat ../hg.pid >> $DAEMON_PIDS
154 154 $ hg push http://localhost:$HGPORT
155 155 pushing to http://localhost:$HGPORT/
156 156 searching for changes
157 157 abort: http://localhost:$HGPORT/ does not appear to be a largefile store
158 158 [255]
159 159 $ cd ..
160 160
161 161 putlfile errors are shown (issue3123)
162 162 Corrupt the cached largefile in r7 and move it out of the servers usercache
163 163 $ mv r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 .
164 164 $ echo 'client side corruption' > r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
165 165 $ rm "$USERCACHE/4cdac4d8b084d0b599525cf732437fb337d422a8"
166 166 $ hg init empty
167 167 $ hg serve -R empty -d -p $HGPORT1 --pid-file hg.pid \
168 168 > --config 'web.allow_push=*' --config web.push_ssl=False
169 169 $ cat hg.pid >> $DAEMON_PIDS
170 170 $ hg push -R r7 http://localhost:$HGPORT1
171 171 pushing to http://localhost:$HGPORT1/
172 172 searching for changes
173 173 remote: largefiles: failed to put 4cdac4d8b084d0b599525cf732437fb337d422a8 into store: largefile contents do not match hash
174 174 abort: remotestore: could not put $TESTTMP/r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 to remote store http://localhost:$HGPORT1/ (glob)
175 175 [255]
176 176 $ mv 4cdac4d8b084d0b599525cf732437fb337d422a8 r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
177 177 Push of file that exists on server but is corrupted - magic healing would be nice ... but too magic
178 178 $ echo "server side corruption" > empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
179 179 $ hg push -R r7 http://localhost:$HGPORT1
180 180 pushing to http://localhost:$HGPORT1/
181 181 searching for changes
182 182 remote: adding changesets
183 183 remote: adding manifests
184 184 remote: adding file changes
185 185 remote: added 2 changesets with 2 changes to 2 files
186 186 $ cat empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
187 187 server side corruption
188 188 $ rm -rf empty
189 189
190 190 Push a largefiles repository to a served empty repository
191 191 $ hg init r8
192 192 $ echo c3 > r8/f1
193 193 $ hg add --large r8/f1 -R r8
194 194 $ hg commit -m "m1" -R r8
195 195 Invoking status precommit hook
196 196 A f1
197 197 $ hg init empty
198 198 $ hg serve -R empty -d -p $HGPORT2 --pid-file hg.pid \
199 199 > --config 'web.allow_push=*' --config web.push_ssl=False
200 200 $ cat hg.pid >> $DAEMON_PIDS
201 201 $ rm "${USERCACHE}"/*
202 202 $ hg push -R r8 http://localhost:$HGPORT2/#default
203 203 pushing to http://localhost:$HGPORT2/
204 204 searching for changes
205 205 remote: adding changesets
206 206 remote: adding manifests
207 207 remote: adding file changes
208 208 remote: added 1 changesets with 1 changes to 1 files
209 209 $ [ -f "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
210 210 $ [ -f empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
211 211
212 212 Clone over http, no largefiles pulled on clone.
213 213
214 214 $ hg clone http://localhost:$HGPORT2/#default http-clone -U
215 215 adding changesets
216 216 adding manifests
217 217 adding file changes
218 218 added 1 changesets with 1 changes to 1 files
219 219
220 220 Archive contains largefiles
221 221 >>> import urllib2, os
222 222 >>> u = 'http://localhost:%s/archive/default.zip' % os.environ['HGPORT2']
223 223 >>> with open('archive.zip', 'w') as f:
224 224 ... f.write(urllib2.urlopen(u).read())
225 225 $ unzip -t archive.zip
226 226 Archive: archive.zip
227 227 testing: empty-default/.hg_archival.txt OK
228 228 testing: empty-default/f1 OK
229 229 No errors detected in compressed data of archive.zip.
230 230
231 231 test 'verify' with remotestore:
232 232
233 233 $ rm "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90
234 234 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
235 235 $ hg -R http-clone verify --large --lfa
236 236 checking changesets
237 237 checking manifests
238 238 crosschecking files in changesets and manifests
239 239 checking files
240 240 1 files, 1 changesets, 1 total revisions
241 241 searching 1 changesets for largefiles
242 242 changeset 0:cf03e5bb9936: f1 missing
243 243 verified existence of 1 revisions of 1 largefiles
244 244 [1]
245 245 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
246 246 $ hg -R http-clone -q verify --large --lfa
247 247
248 248 largefiles pulled on update - a largefile missing on the server:
249 249 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
250 250 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
251 251 getting changed largefiles
252 252 f1: largefile 02a439e5c31c526465ab1a0ca1f431f76b827b90 not available from http://localhost:$HGPORT2/
253 253 0 largefiles updated, 0 removed
254 254 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
255 255 $ hg -R http-clone st
256 256 ! f1
257 257 $ hg -R http-clone up -Cqr null
258 258
259 259 largefiles pulled on update - a largefile corrupted on the server:
260 260 $ echo corruption > empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90
261 261 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
262 262 getting changed largefiles
263 263 f1: data corruption (expected 02a439e5c31c526465ab1a0ca1f431f76b827b90, got 6a7bb2556144babe3899b25e5428123735bb1e27)
264 264 0 largefiles updated, 0 removed
265 265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 266 $ hg -R http-clone st
267 267 ! f1
268 268 $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
269 269 $ [ ! -f http-clone/f1 ]
270 270 $ [ ! -f http-clone-usercache ]
271 271 $ hg -R http-clone verify --large --lfc
272 272 checking changesets
273 273 checking manifests
274 274 crosschecking files in changesets and manifests
275 275 checking files
276 276 1 files, 1 changesets, 1 total revisions
277 277 searching 1 changesets for largefiles
278 278 verified contents of 1 revisions of 1 largefiles
279 279 $ hg -R http-clone up -Cqr null
280 280
281 281 largefiles pulled on update - no server side problems:
282 282 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
283 283 $ hg -R http-clone --debug up --config largefiles.usercache=http-clone-usercache --config progress.debug=true
284 284 resolving manifests
285 285 branchmerge: False, force: False, partial: False
286 286 ancestor: 000000000000, local: 000000000000+, remote: cf03e5bb9936
287 287 .hglf/f1: remote created -> g
288 288 getting .hglf/f1
289 289 updating: .hglf/f1 1/1 files (100.00%)
290 290 getting changed largefiles
291 291 using http://localhost:$HGPORT2/
292 292 sending capabilities command
293 293 sending batch command
294 getting largefiles: 0/1 lfile (0.00%)
294 getting largefiles: 0/1 files (0.00%)
295 295 getting f1:02a439e5c31c526465ab1a0ca1f431f76b827b90
296 296 sending getlfile command
297 297 found 02a439e5c31c526465ab1a0ca1f431f76b827b90 in store
298 298 1 largefiles updated, 0 removed
299 299 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
300 300
301 301 $ ls http-clone-usercache/*
302 302 http-clone-usercache/02a439e5c31c526465ab1a0ca1f431f76b827b90
303 303
304 304 $ rm -rf empty http-clone*
305 305
306 306 used all HGPORTs, kill all daemons
307 307 $ killdaemons.py
308 308
309 309 #endif
General Comments 0
You need to be logged in to leave comments. Login now