##// END OF EJS Templates
lfs: use the local store method for opening a blob...
Matt Harbison -
r35544:e8f80529 default
parent child Browse files
Show More
@@ -1,442 +1,442 b''
1 1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11 import json
12 12 import os
13 13 import re
14 14 import socket
15 15
16 16 from mercurial.i18n import _
17 17
18 18 from mercurial import (
19 19 error,
20 20 pathutil,
21 21 url as urlmod,
22 22 util,
23 23 vfs as vfsmod,
24 24 worker,
25 25 )
26 26
27 27 from ..largefiles import lfutil
28 28
# An lfs blob oid is the SHA-256 digest rendered as 64 lowercase hex
# characters (the digest itself is 32 bytes).
_lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
31 31
class lfsvfs(vfsmod.vfs):
    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if _lfsre.match(path):
            return super(lfsvfs, self).join(path[0:2], path[2:])
        # Anything that is not a well-formed oid is a caller bug.
        raise error.ProgrammingError('unexpected lfs path: %s' % path)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        root = os.path.normpath(self.base)
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        start = self.reljoin(self.base, path or '')
        oids = []

        for dirpath, dirs, files in os.walk(start, onerror=onerror):
            reldir = dirpath[prefixlen:]

            # Silently skip unexpected files and directories: blobs only
            # live in the two-character fan-out directories.
            if len(reldir) == 2:
                oids.extend(reldir + name for name in files
                            if _lfsre.match(reldir + name))

        yield ('', [], oids)
60 60
class filewithprogress(object):
    """a file-like object that supports __len__ and read.

    Useful to provide progress information for how many bytes are read.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback  # func(readsize), or None for no progress
        # Determine the total size once by seeking to the end, then rewind.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        fp = self._fp
        if fp is None:
            return b''
        data = fp.read(size)
        if not data:
            # Exhausted: release the underlying file immediately.
            fp.close()
            self._fp = None
            return data
        if self._callback:
            self._callback(len(data))
        return data
88 88
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        # Blobs live under .hg/store/lfs/objects; a second copy is kept in
        # the per-user cache directory shared across repositories.
        storepath = repo.svfs.join('lfs/objects')
        self.vfs = lfsvfs(storepath)
        self.cachevfs = lfsvfs(lfutil._usercachedir(repo.ui, 'lfs'))
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        source = self.cachevfs if self.cachevfs.exists(oid) else self.vfs
        return source(oid, 'rb')

    def write(self, oid, data, verify=True):
        """Write blob to local blobstore."""
        if verify:
            _verify(oid, data)

        with self.vfs(oid, 'wb', atomictemp=True) as blobfile:
            blobfile.write(data)

        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid):
            # Only propagate data known to be good into the usercache.
            if verify or hashlib.sha256(data).hexdigest() == oid:
                self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
                lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if self.vfs.exists(oid):
            self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
            return self._read(self.vfs, oid, verify)

        blob = self._read(self.cachevfs, oid, verify)

        # Even if revlog will verify the content, it needs to be verified
        # now before making the hardlink to avoid propagating corrupt blobs.
        # Don't abort if corruption is detected, because `hg verify` will
        # give more useful info about the corruption- simply don't add the
        # hardlink.
        if verify or hashlib.sha256(blob).hexdigest() == oid:
            self.ui.note(_('lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
155 155
class _gitlfsremote(object):
    """Remote blobstore speaking the Git-LFS protocol over HTTP(S).

    Transfers go through the batch API first (metadata for all requested
    objects), then each object is moved individually with the basic
    transfer protocol.
    """

    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip('/')
        useragent = repo.ui.config('experimental', 'lfs.user-agent')
        if not useragent:
            # NOTE(review): the git/x.y.z suffix presumably satisfies servers
            # that check for a git client in the user agent - confirm.
            useragent = 'mercurial/%s git/2.15.1' % util.version()
        self.urlopener = urlmod.opener(ui, authinfo, useragent)
        # Number of retries for transient (socket-level) transfer failures.
        self.retry = ui.configint('lfs', 'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(pointers, fromstore, 'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blobstore."""
        self._batch(pointers, tostore, 'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
        requestdata = json.dumps({
            'objects': objects,
            'operation': action,
        })
        batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
                                       data=requestdata)
        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
        try:
            rawjson = self.urlopener.open(batchreq).read()
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
                                 % (ex, action))
        try:
            response = json.loads(rawjson)
        except ValueError:
            raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
                                 % rawjson)
        return response

    def _checkforservererror(self, pointers, responses):
        """Scans errors from objects

        Raises LfsRemoteError if any object has an error"""
        for response in responses:
            error = response.get('error')
            if error:
                # Map the failing oid back to its pointer so a 404 can name
                # the repository file the missing blob belongs to.
                ptrmap = {p.oid(): p for p in pointers}
                p = ptrmap.get(response['oid'], None)
                if error['code'] == 404 and p:
                    filename = getattr(p, 'filename', 'unknown')
                    raise LfsRemoteError(
                        _(('LFS server error. Remote object '
                          'for file %s not found: %r')) % (filename, response))
                raise LfsRemoteError(_('LFS server error: %r') % response)

    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get('objects', [])
        self._checkforservererror(pointers, objects)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist in the server.
        filteredobjects = [o for o in objects if action in o.get('actions', [])]
        # But for downloading, we want all objects. Therefore missing objects
        # should be considered an error.
        if action == 'download':
            if len(filteredobjects) < len(objects):
                missing = [o.get('oid', '?')
                           for o in objects
                           if action not in o.get('actions', [])]
                raise LfsRemoteError(
                    _('LFS server claims required objects do not exist:\n%s')
                    % '\n'.join(missing))

        return filteredobjects

    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
        """
        oid = str(obj['oid'])

        href = str(obj['actions'][action].get('href'))
        headers = obj['actions'][action].get('header', {}).items()

        request = util.urlreq.request(href)
        if action == 'upload':
            # If uploading blobs, read data from local blobstore.
            # localstore.open() falls back to the usercache when the blob is
            # not in the local store; verify before transmitting so corrupt
            # blobs are never propagated to the server.
            with localstore.open(oid) as fp:
                _verifyfile(oid, fp)
            request.data = filewithprogress(localstore.open(oid), None)
            request.get_method = lambda: 'PUT'

        for k, v in headers:
            request.add_header(k, v)

        response = b''
        try:
            req = self.urlopener.open(request)
            # Accumulate the body in 1 MiB chunks.
            while True:
                data = req.read(1048576)
                if not data:
                    break
                response += data
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
                                 % (ex, oid, action))

        if action == 'download':
            # If downloading blobs, store downloaded data to local blobstore
            localstore.write(oid, response, verify=True)

    def _batch(self, pointers, localstore, action):
        """Transfer all pointed-to objects for action, with progress output.

        Fans the per-object transfers out through worker.worker(), retrying
        each object up to self.retry times on socket errors.
        """
        if action not in ['upload', 'download']:
            raise error.ProgrammingError('invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get('size', 0) for x in objects)
        # oid -> size, so progress can be updated as each oid completes
        sizes = {}
        for obj in objects:
            sizes[obj.get('oid')] = obj.get('size', 0)
        topic = {'upload': _('lfs uploading'),
                 'download': _('lfs downloading')}[action]
        if len(objects) > 1:
            self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
                         % (len(objects), util.bytecount(total)))
        self.ui.progress(topic, 0, total=total)
        def transfer(chunk):
            # Generator consumed by worker.worker(); yields (1, oid) per
            # completed object.
            for obj in chunk:
                objsize = obj.get('size', 0)
                if self.ui.verbose:
                    if action == 'download':
                        msg = _('lfs: downloading %s (%s)\n')
                    elif action == 'upload':
                        msg = _('lfs: uploading %s (%s)\n')
                    self.ui.note(msg % (obj.get('oid'),
                                 util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get('oid')
                        break
                    except socket.error as ex:
                        # Only socket-level failures are retried; HTTP and
                        # verification errors propagate immediately.
                        if retry > 0:
                            self.ui.note(
                                _('lfs: failed: %r (remaining retry %d)\n')
                                % (ex, retry))
                            retry -= 1
                            continue
                        raise

        oids = worker.worker(self.ui, 0.1, transfer, (),
                             sorted(objects, key=lambda o: o.get('oid')))
        processed = 0
        for _one, oid in oids:
            processed += sizes[oid]
            self.ui.progress(topic, processed, total=total)
            self.ui.note(_('lfs: processed: %s\n') % oid)
        self.ui.progress(topic, pos=None, total=total)

    def __del__(self):
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda : None)()
346 346
class _dummyremote(object):
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        # Blobs land under .hg/lfs/<url.path> inside the repository.
        self.vfs = lfsvfs(repo.vfs.join('lfs', url.path))

    def writebatch(self, pointers, fromstore):
        for pointer in pointers:
            blob = fromstore.read(pointer.oid(), verify=True)
            with self.vfs(pointer.oid(), 'wb', atomictemp=True) as out:
                out.write(blob)

    def readbatch(self, pointers, tostore):
        for pointer in pointers:
            blob = self.vfs.read(pointer.oid())
            tostore.write(pointer.oid(), blob, verify=True)
364 364
class _nullremote(object):
    """Null store storing blobs to /dev/null."""

    def __init__(self, repo, url):
        # Nothing to set up; this store intentionally does nothing.
        pass

    def writebatch(self, pointers, fromstore):
        # Discard all uploads.
        pass

    def readbatch(self, pointers, tostore):
        # Download nothing.
        pass
376 376
class _promptremote(object):
    """Prompt user to set lfs.url when accessed."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()

    def _prompt(self):
        # Any transfer attempt without a configured lfs.url is a hard error.
        raise error.Abort(_('lfs.url needs to be configured'))
391 391
# Map a url scheme to the remotestore class handling it; the None key
# covers the case where no lfs.url is configured at all.
_storemap = {
    'https': _gitlfsremote,
    'http': _gitlfsremote,
    'file': _dummyremote,
    'null': _nullremote,
    None: _promptremote,
}
399 399
400 400 def _verify(oid, content):
401 401 realoid = hashlib.sha256(content).hexdigest()
402 402 if realoid != oid:
403 403 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
404 404 hint=_('run hg verify'))
405 405
406 406 def _verifyfile(oid, fp):
407 407 sha256 = hashlib.sha256()
408 408 while True:
409 409 data = fp.read(1024 * 1024)
410 410 if not data:
411 411 break
412 412 sha256.update(data)
413 413 realoid = sha256.hexdigest()
414 414 if realoid != oid:
415 415 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
416 416 hint=_('run hg verify'))
417 417
def remote(repo):
    """remotestore factory. return a store in _storemap depending on config"""
    ui = repo.ui

    # convert deprecated configs to the new url. TODO: remove this if other
    # places are migrated to the new url config.
    # deprecated config: lfs.remotestore
    deprecatedstore = ui.config('lfs', 'remotestore')
    if deprecatedstore == 'dummy':
        # deprecated config: lfs.remotepath
        defaulturl = 'file://' + ui.config('lfs', 'remotepath')
    elif deprecatedstore == 'git-lfs':
        # deprecated config: lfs.remoteurl
        defaulturl = ui.config('lfs', 'remoteurl')
    elif deprecatedstore == 'null':
        defaulturl = 'null://'
    else:
        defaulturl = ''

    url = util.url(ui.config('lfs', 'url', defaulturl))
    try:
        storecls = _storemap[url.scheme]
    except KeyError:
        raise error.Abort(_('lfs: unknown url scheme: %s') % url.scheme)
    return storecls(repo, url)
440 440
class LfsRemoteError(error.RevlogError):
    """Raised when the remote LFS server reports or causes a failure."""
    pass
@@ -1,188 +1,193 b''
1 1 #require lfs-test-server
2 2
3 3 $ LFS_LISTEN="tcp://:$HGPORT"
4 4 $ LFS_HOST="localhost:$HGPORT"
5 5 $ LFS_PUBLIC=1
6 6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
7 7 #if no-windows
8 8 $ lfs-test-server &> lfs-server.log &
9 9 $ echo $! >> $DAEMON_PIDS
10 10 #else
11 11 $ cat >> $TESTTMP/spawn.py <<EOF
12 12 > import os
13 13 > import subprocess
14 14 > import sys
15 15 >
16 16 > for path in os.environ["PATH"].split(os.pathsep):
17 17 > exe = os.path.join(path, 'lfs-test-server.exe')
18 18 > if os.path.exists(exe):
19 19 > with open('lfs-server.log', 'wb') as out:
20 20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
21 21 > sys.stdout.write('%s\n' % p.pid)
22 22 > sys.exit(0)
23 23 > sys.exit(1)
24 24 > EOF
25 25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
26 26 #endif
27 27
28 28 $ cat >> $HGRCPATH <<EOF
29 29 > [extensions]
30 30 > lfs=
31 31 > [lfs]
32 32 > url=http://foo:bar@$LFS_HOST/
33 33 > threshold=1
34 34 > EOF
35 35
36 36 $ hg init repo1
37 37 $ cd repo1
38 38 $ echo THIS-IS-LFS > a
39 39 $ hg commit -m a -A a
40 40
41 A push can be serviced directly from the usercache if it isn't in the local
42 store.
43
41 44 $ hg init ../repo2
45 $ mv .hg/store/lfs .hg/store/lfs_
42 46 $ hg push ../repo2 -v
43 47 pushing to ../repo2
44 48 searching for changes
45 49 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
46 50 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
47 51 1 changesets found
48 52 uncompressed size of bundle content:
49 53 * (changelog) (glob)
50 54 * (manifests) (glob)
51 55 * a (glob)
52 56 adding changesets
53 57 adding manifests
54 58 adding file changes
55 59 added 1 changesets with 1 changes to 1 files
56 60 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
61 $ mv .hg/store/lfs_ .hg/store/lfs
57 62
58 63 Clear the cache to force a download
59 64 $ rm -rf `hg config lfs.usercache`
60 65 $ cd ../repo2
61 66 $ hg update tip -v
62 67 resolving manifests
63 68 getting a
64 69 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
65 70 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
66 71 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
67 72 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
68 73 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 74
70 75 When the server has some blobs already
71 76
72 77 $ hg mv a b
73 78 $ echo ANOTHER-LARGE-FILE > c
74 79 $ echo ANOTHER-LARGE-FILE2 > d
75 80 $ hg commit -m b-and-c -A b c d
76 81 $ hg push ../repo1 -v | grep -v '^ '
77 82 pushing to ../repo1
78 83 searching for changes
79 84 lfs: need to transfer 2 objects (39 bytes)
80 85 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
81 86 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
82 87 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
83 88 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
84 89 1 changesets found
85 90 uncompressed size of bundle content:
86 91 adding changesets
87 92 adding manifests
88 93 adding file changes
89 94 added 1 changesets with 3 changes to 3 files
90 95
91 96 Clear the cache to force a download
92 97 $ rm -rf `hg config lfs.usercache`
93 98 $ hg --repo ../repo1 update tip -v
94 99 resolving manifests
95 100 getting b
96 101 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
97 102 getting c
98 103 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
99 104 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
100 105 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
101 106 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
102 107 getting d
103 108 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
104 109 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
105 110 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
106 111 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
107 112 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
108 113
109 114 Test a corrupt file download, but clear the cache first to force a download.
110 115
111 116 $ rm -rf `hg config lfs.usercache`
112 117 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
113 118 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
114 119 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
115 120 $ rm ../repo1/*
116 121
117 122 XXX: suggesting `hg verify` won't help with a corrupt file on the lfs server.
118 123 $ hg --repo ../repo1 update -C tip -v
119 124 resolving manifests
120 125 getting a
121 126 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
122 127 getting b
123 128 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
124 129 getting c
125 130 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
126 131 abort: detected corrupt lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
127 132 (run hg verify)
128 133 [255]
129 134
130 135 The corrupted blob is not added to the usercache or local store
131 136
132 137 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
133 138 [1]
134 139 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
135 140 [1]
136 141 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
137 142
138 143 Test a corrupted file upload
139 144
140 145 $ echo 'another lfs blob' > b
141 146 $ hg ci -m 'another blob'
142 147 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
143 148 $ hg push -v ../repo1
144 149 pushing to ../repo1
145 150 searching for changes
146 151 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
147 152 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
148 153 (run hg verify)
149 154 [255]
150 155
151 156 Check error message when the remote missed a blob:
152 157
153 158 $ echo FFFFF > b
154 159 $ hg commit -m b -A b
155 160 $ echo FFFFF >> b
156 161 $ hg commit -m b b
157 162 $ rm -rf .hg/store/lfs
158 163 $ rm -rf `hg config lfs.usercache`
159 164 $ hg update -C '.^'
160 165 abort: LFS server claims required objects do not exist:
161 166 8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13!
162 167 [255]
163 168
164 169 Check error message when object does not exist:
165 170
166 171 $ hg init test && cd test
167 172 $ echo "[extensions]" >> .hg/hgrc
168 173 $ echo "lfs=" >> .hg/hgrc
169 174 $ echo "[lfs]" >> .hg/hgrc
170 175 $ echo "threshold=1" >> .hg/hgrc
171 176 $ echo a > a
172 177 $ hg add a
173 178 $ hg commit -m 'test'
174 179 $ echo aaaaa > a
175 180 $ hg commit -m 'largefile'
176 181 $ hg debugdata .hg/store/data/a.i 1 # verify this is not the file content but includes "oid", the LFS "pointer".
177 182 version https://git-lfs.github.com/spec/v1
178 183 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
179 184 size 6
180 185 x-is-binary 0
181 186 $ cd ..
182 187 $ rm -rf `hg config lfs.usercache`
183 188 $ hg --config 'lfs.url=https://dewey-lfs.vip.facebook.com/lfs' clone test test2
184 189 updating to branch default
185 190 abort: LFS server error. Remote object for file data/a.i not found:(.*)! (re)
186 191 [255]
187 192
188 193 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
General Comments 0
You need to be logged in to leave comments. Login now