##// END OF EJS Templates
lfs: remove internal url in test...
Jun Wu -
r35684:2c6ebd0c default
parent child Browse files
Show More
@@ -1,455 +1,452 b''
1 1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11 import json
12 12 import os
13 13 import re
14 14 import socket
15 15
16 16 from mercurial.i18n import _
17 17
18 18 from mercurial import (
19 19 error,
20 20 pathutil,
21 21 url as urlmod,
22 22 util,
23 23 vfs as vfsmod,
24 24 worker,
25 25 )
26 26
27 27 from ..largefiles import lfutil
28 28
29 29 # 64 bytes for SHA256
30 30 _lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
31 31
32 32 class lfsvfs(vfsmod.vfs):
33 33 def join(self, path):
34 34 """split the path at first two characters, like: XX/XXXXX..."""
35 35 if not _lfsre.match(path):
36 36 raise error.ProgrammingError('unexpected lfs path: %s' % path)
37 37 return super(lfsvfs, self).join(path[0:2], path[2:])
38 38
39 39 def walk(self, path=None, onerror=None):
40 40 """Yield (dirpath, [], oids) tuple for blobs under path
41 41
42 42 Oids only exist in the root of this vfs, so dirpath is always ''.
43 43 """
44 44 root = os.path.normpath(self.base)
45 45 # when dirpath == root, dirpath[prefixlen:] becomes empty
46 46 # because len(dirpath) < prefixlen.
47 47 prefixlen = len(pathutil.normasprefix(root))
48 48 oids = []
49 49
50 50 for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''),
51 51 onerror=onerror):
52 52 dirpath = dirpath[prefixlen:]
53 53
54 54 # Silently skip unexpected files and directories
55 55 if len(dirpath) == 2:
56 56 oids.extend([dirpath + f for f in files
57 57 if _lfsre.match(dirpath + f)])
58 58
59 59 yield ('', [], oids)
60 60
61 61 class filewithprogress(object):
62 62 """a file-like object that supports __len__ and read.
63 63
64 64 Useful to provide progress information for how many bytes are read.
65 65 """
66 66
67 67 def __init__(self, fp, callback):
68 68 self._fp = fp
69 69 self._callback = callback # func(readsize)
70 70 fp.seek(0, os.SEEK_END)
71 71 self._len = fp.tell()
72 72 fp.seek(0)
73 73
74 74 def __len__(self):
75 75 return self._len
76 76
77 77 def read(self, size):
78 78 if self._fp is None:
79 79 return b''
80 80 data = self._fp.read(size)
81 81 if data:
82 82 if self._callback:
83 83 self._callback(len(data))
84 84 else:
85 85 self._fp.close()
86 86 self._fp = None
87 87 return data
88 88
89 89 class local(object):
90 90 """Local blobstore for large file contents.
91 91
92 92 This blobstore is used both as a cache and as a staging area for large blobs
93 93 to be uploaded to the remote blobstore.
94 94 """
95 95
96 96 def __init__(self, repo):
97 97 fullpath = repo.svfs.join('lfs/objects')
98 98 self.vfs = lfsvfs(fullpath)
99 99 usercache = lfutil._usercachedir(repo.ui, 'lfs')
100 100 self.cachevfs = lfsvfs(usercache)
101 101 self.ui = repo.ui
102 102
103 103 def open(self, oid):
104 104 """Open a read-only file descriptor to the named blob, in either the
105 105 usercache or the local store."""
106 106 # The usercache is the most likely place to hold the file. Commit will
107 107 # write to both it and the local store, as will anything that downloads
108 108 # the blobs. However, things like clone without an update won't
109 109 # populate the local store. For an init + push of a local clone,
110 110 # the usercache is the only place it _could_ be. If not present, the
111 111 # missing file msg here will indicate the local repo, not the usercache.
112 112 if self.cachevfs.exists(oid):
113 113 return self.cachevfs(oid, 'rb')
114 114
115 115 return self.vfs(oid, 'rb')
116 116
117 117 def download(self, oid, src):
118 118 """Read the blob from the remote source in chunks, verify the content,
119 119 and write to this local blobstore."""
120 120 sha256 = hashlib.sha256()
121 121
122 122 with self.vfs(oid, 'wb', atomictemp=True) as fp:
123 123 for chunk in util.filechunkiter(src, size=1048576):
124 124 fp.write(chunk)
125 125 sha256.update(chunk)
126 126
127 127 realoid = sha256.hexdigest()
128 128 if realoid != oid:
129 129 raise error.Abort(_('corrupt remote lfs object: %s') % oid)
130 130
131 131 # XXX: should we verify the content of the cache, and hardlink back to
132 132 # the local store on success, but truncate, write and link on failure?
133 133 if not self.cachevfs.exists(oid):
134 134 self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
135 135 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
136 136
137 137 def write(self, oid, data):
138 138 """Write blob to local blobstore.
139 139
140 140 This should only be called from the filelog during a commit or similar.
141 141 As such, there is no need to verify the data. Imports from a remote
142 142 store must use ``download()`` instead."""
143 143 with self.vfs(oid, 'wb', atomictemp=True) as fp:
144 144 fp.write(data)
145 145
146 146 # XXX: should we verify the content of the cache, and hardlink back to
147 147 # the local store on success, but truncate, write and link on failure?
148 148 if not self.cachevfs.exists(oid):
149 149 self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
150 150 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
151 151
152 152 def read(self, oid, verify=True):
153 153 """Read blob from local blobstore."""
154 154 if not self.vfs.exists(oid):
155 155 blob = self._read(self.cachevfs, oid, verify)
156 156
157 157 # Even if revlog will verify the content, it needs to be verified
158 158 # now before making the hardlink to avoid propagating corrupt blobs.
159 159 # Don't abort if corruption is detected, because `hg verify` will
160 160 # give more useful info about the corruption- simply don't add the
161 161 # hardlink.
162 162 if verify or hashlib.sha256(blob).hexdigest() == oid:
163 163 self.ui.note(_('lfs: found %s in the usercache\n') % oid)
164 164 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
165 165 else:
166 166 self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
167 167 blob = self._read(self.vfs, oid, verify)
168 168 return blob
169 169
170 170 def _read(self, vfs, oid, verify):
171 171 """Read blob (after verifying) from the given store"""
172 172 blob = vfs.read(oid)
173 173 if verify:
174 174 _verify(oid, blob)
175 175 return blob
176 176
177 177 def has(self, oid):
178 178 """Returns True if the local blobstore contains the requested blob,
179 179 False otherwise."""
180 180 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
181 181
182 182 class _gitlfsremote(object):
183 183
184 184 def __init__(self, repo, url):
185 185 ui = repo.ui
186 186 self.ui = ui
187 187 baseurl, authinfo = url.authinfo()
188 188 self.baseurl = baseurl.rstrip('/')
189 189 useragent = repo.ui.config('experimental', 'lfs.user-agent')
190 190 if not useragent:
191 191 useragent = 'mercurial/%s git/2.15.1' % util.version()
192 192 self.urlopener = urlmod.opener(ui, authinfo, useragent)
193 193 self.retry = ui.configint('lfs', 'retry')
194 194
195 195 def writebatch(self, pointers, fromstore):
196 196 """Batch upload from local to remote blobstore."""
197 197 self._batch(pointers, fromstore, 'upload')
198 198
199 199 def readbatch(self, pointers, tostore):
 200 200 """Batch download from remote to local blobstore."""
201 201 self._batch(pointers, tostore, 'download')
202 202
203 203 def _batchrequest(self, pointers, action):
204 204 """Get metadata about objects pointed by pointers for given action
205 205
206 206 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
207 207 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
208 208 """
209 209 objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
210 210 requestdata = json.dumps({
211 211 'objects': objects,
212 212 'operation': action,
213 213 })
214 214 batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
215 215 data=requestdata)
216 216 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
217 217 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
218 218 try:
219 219 rawjson = self.urlopener.open(batchreq).read()
220 220 except util.urlerr.httperror as ex:
221 221 raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
222 222 % (ex, action))
223 223 try:
224 224 response = json.loads(rawjson)
225 225 except ValueError:
226 226 raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
227 227 % rawjson)
228 228 return response
229 229
230 def _checkforservererror(self, pointers, responses):
230 def _checkforservererror(self, pointers, responses, action):
231 231 """Scans errors from objects
232 232
233 233 Returns LfsRemoteError if any objects has an error"""
234 234 for response in responses:
235 error = response.get('error')
236 if error:
235 # The server should return 404 when objects cannot be found. Some
 236 # server implementations (ex. lfs-test-server) do not set "error"
237 # but just removes "download" from "actions". Treat that case
238 # as the same as 404 error.
239 notfound = (response.get('error', {}).get('code') == 404
240 or (action == 'download'
241 and action not in response.get('actions', [])))
242 if notfound:
237 243 ptrmap = {p.oid(): p for p in pointers}
238 244 p = ptrmap.get(response['oid'], None)
239 if error['code'] == 404 and p:
245 if p:
240 246 filename = getattr(p, 'filename', 'unknown')
241 247 raise LfsRemoteError(
242 248 _(('LFS server error. Remote object '
243 249 'for "%s" not found: %r')) % (filename, response))
250 if 'error' in response:
244 251 raise LfsRemoteError(_('LFS server error: %r') % response)
245 252
246 253 def _extractobjects(self, response, pointers, action):
247 254 """extract objects from response of the batch API
248 255
249 256 response: parsed JSON object returned by batch API
250 257 return response['objects'] filtered by action
251 258 raise if any object has an error
252 259 """
253 260 # Scan errors from objects - fail early
254 261 objects = response.get('objects', [])
255 self._checkforservererror(pointers, objects)
262 self._checkforservererror(pointers, objects, action)
256 263
257 264 # Filter objects with given action. Practically, this skips uploading
258 265 # objects which exist in the server.
259 266 filteredobjects = [o for o in objects if action in o.get('actions', [])]
260 # But for downloading, we want all objects. Therefore missing objects
261 # should be considered an error.
262 if action == 'download':
263 if len(filteredobjects) < len(objects):
264 missing = [o.get('oid', '?')
265 for o in objects
266 if action not in o.get('actions', [])]
267 raise LfsRemoteError(
268 _('LFS server claims required objects do not exist:\n%s')
269 % '\n'.join(missing))
270 267
271 268 return filteredobjects
272 269
273 270 def _basictransfer(self, obj, action, localstore):
274 271 """Download or upload a single object using basic transfer protocol
275 272
276 273 obj: dict, an object description returned by batch API
277 274 action: string, one of ['upload', 'download']
278 275 localstore: blobstore.local
279 276
280 277 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
281 278 basic-transfers.md
282 279 """
283 280 oid = str(obj['oid'])
284 281
285 282 href = str(obj['actions'][action].get('href'))
286 283 headers = obj['actions'][action].get('header', {}).items()
287 284
288 285 request = util.urlreq.request(href)
289 286 if action == 'upload':
290 287 # If uploading blobs, read data from local blobstore.
291 288 with localstore.open(oid) as fp:
292 289 _verifyfile(oid, fp)
293 290 request.data = filewithprogress(localstore.open(oid), None)
294 291 request.get_method = lambda: 'PUT'
295 292
296 293 for k, v in headers:
297 294 request.add_header(k, v)
298 295
299 296 response = b''
300 297 try:
301 298 req = self.urlopener.open(request)
302 299 if action == 'download':
303 300 # If downloading blobs, store downloaded data to local blobstore
304 301 localstore.download(oid, req)
305 302 else:
306 303 while True:
307 304 data = req.read(1048576)
308 305 if not data:
309 306 break
310 307 response += data
311 308 if response:
312 309 self.ui.debug('lfs %s response: %s' % (action, response))
313 310 except util.urlerr.httperror as ex:
314 311 raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
315 312 % (ex, oid, action))
316 313
317 314 def _batch(self, pointers, localstore, action):
318 315 if action not in ['upload', 'download']:
319 316 raise error.ProgrammingError('invalid Git-LFS action: %s' % action)
320 317
321 318 response = self._batchrequest(pointers, action)
322 319 objects = self._extractobjects(response, pointers, action)
323 320 total = sum(x.get('size', 0) for x in objects)
324 321 sizes = {}
325 322 for obj in objects:
326 323 sizes[obj.get('oid')] = obj.get('size', 0)
327 324 topic = {'upload': _('lfs uploading'),
328 325 'download': _('lfs downloading')}[action]
329 326 if len(objects) > 1:
330 327 self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
331 328 % (len(objects), util.bytecount(total)))
332 329 self.ui.progress(topic, 0, total=total)
333 330 def transfer(chunk):
334 331 for obj in chunk:
335 332 objsize = obj.get('size', 0)
336 333 if self.ui.verbose:
337 334 if action == 'download':
338 335 msg = _('lfs: downloading %s (%s)\n')
339 336 elif action == 'upload':
340 337 msg = _('lfs: uploading %s (%s)\n')
341 338 self.ui.note(msg % (obj.get('oid'),
342 339 util.bytecount(objsize)))
343 340 retry = self.retry
344 341 while True:
345 342 try:
346 343 self._basictransfer(obj, action, localstore)
347 344 yield 1, obj.get('oid')
348 345 break
349 346 except socket.error as ex:
350 347 if retry > 0:
351 348 self.ui.note(
352 349 _('lfs: failed: %r (remaining retry %d)\n')
353 350 % (ex, retry))
354 351 retry -= 1
355 352 continue
356 353 raise
357 354
358 355 oids = worker.worker(self.ui, 0.1, transfer, (),
359 356 sorted(objects, key=lambda o: o.get('oid')))
360 357 processed = 0
361 358 for _one, oid in oids:
362 359 processed += sizes[oid]
363 360 self.ui.progress(topic, processed, total=total)
364 361 self.ui.note(_('lfs: processed: %s\n') % oid)
365 362 self.ui.progress(topic, pos=None, total=total)
366 363
367 364 def __del__(self):
368 365 # copied from mercurial/httppeer.py
369 366 urlopener = getattr(self, 'urlopener', None)
370 367 if urlopener:
371 368 for h in urlopener.handlers:
372 369 h.close()
373 370 getattr(h, "close_all", lambda : None)()
374 371
375 372 class _dummyremote(object):
376 373 """Dummy store storing blobs to temp directory."""
377 374
378 375 def __init__(self, repo, url):
379 376 fullpath = repo.vfs.join('lfs', url.path)
380 377 self.vfs = lfsvfs(fullpath)
381 378
382 379 def writebatch(self, pointers, fromstore):
383 380 for p in pointers:
384 381 content = fromstore.read(p.oid(), verify=True)
385 382 with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
386 383 fp.write(content)
387 384
388 385 def readbatch(self, pointers, tostore):
389 386 for p in pointers:
390 387 with self.vfs(p.oid(), 'rb') as fp:
391 388 tostore.download(p.oid(), fp)
392 389
393 390 class _nullremote(object):
394 391 """Null store storing blobs to /dev/null."""
395 392
396 393 def __init__(self, repo, url):
397 394 pass
398 395
399 396 def writebatch(self, pointers, fromstore):
400 397 pass
401 398
402 399 def readbatch(self, pointers, tostore):
403 400 pass
404 401
405 402 class _promptremote(object):
406 403 """Prompt user to set lfs.url when accessed."""
407 404
408 405 def __init__(self, repo, url):
409 406 pass
410 407
411 408 def writebatch(self, pointers, fromstore, ui=None):
412 409 self._prompt()
413 410
414 411 def readbatch(self, pointers, tostore, ui=None):
415 412 self._prompt()
416 413
417 414 def _prompt(self):
418 415 raise error.Abort(_('lfs.url needs to be configured'))
419 416
420 417 _storemap = {
421 418 'https': _gitlfsremote,
422 419 'http': _gitlfsremote,
423 420 'file': _dummyremote,
424 421 'null': _nullremote,
425 422 None: _promptremote,
426 423 }
427 424
428 425 def _verify(oid, content):
429 426 realoid = hashlib.sha256(content).hexdigest()
430 427 if realoid != oid:
431 428 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
432 429 hint=_('run hg verify'))
433 430
434 431 def _verifyfile(oid, fp):
435 432 sha256 = hashlib.sha256()
436 433 while True:
437 434 data = fp.read(1024 * 1024)
438 435 if not data:
439 436 break
440 437 sha256.update(data)
441 438 realoid = sha256.hexdigest()
442 439 if realoid != oid:
443 440 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
444 441 hint=_('run hg verify'))
445 442
446 443 def remote(repo):
447 444 """remotestore factory. return a store in _storemap depending on config"""
448 445 url = util.url(repo.ui.config('lfs', 'url') or '')
449 446 scheme = url.scheme
450 447 if scheme not in _storemap:
451 448 raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
452 449 return _storemap[scheme](repo, url)
453 450
454 451 class LfsRemoteError(error.RevlogError):
455 452 pass
@@ -1,191 +1,206 b''
1 1 #require lfs-test-server
2 2
3 3 $ LFS_LISTEN="tcp://:$HGPORT"
4 4 $ LFS_HOST="localhost:$HGPORT"
5 5 $ LFS_PUBLIC=1
6 6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
7 7 #if no-windows
8 8 $ lfs-test-server &> lfs-server.log &
9 9 $ echo $! >> $DAEMON_PIDS
10 10 #else
11 11 $ cat >> $TESTTMP/spawn.py <<EOF
12 12 > import os
13 13 > import subprocess
14 14 > import sys
15 15 >
16 16 > for path in os.environ["PATH"].split(os.pathsep):
17 17 > exe = os.path.join(path, 'lfs-test-server.exe')
18 18 > if os.path.exists(exe):
19 19 > with open('lfs-server.log', 'wb') as out:
20 20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
21 21 > sys.stdout.write('%s\n' % p.pid)
22 22 > sys.exit(0)
23 23 > sys.exit(1)
24 24 > EOF
25 25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
26 26 #endif
27 27
28 28 $ cat >> $HGRCPATH <<EOF
29 29 > [extensions]
30 30 > lfs=
31 31 > [lfs]
32 32 > url=http://foo:bar@$LFS_HOST/
33 33 > track=all()
34 34 > EOF
35 35
36 36 $ hg init repo1
37 37 $ cd repo1
38 38 $ echo THIS-IS-LFS > a
39 39 $ hg commit -m a -A a
40 40
41 41 A push can be serviced directly from the usercache if it isn't in the local
42 42 store.
43 43
44 44 $ hg init ../repo2
45 45 $ mv .hg/store/lfs .hg/store/lfs_
46 46 $ hg push ../repo2 -v
47 47 pushing to ../repo2
48 48 searching for changes
49 49 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
50 50 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
51 51 1 changesets found
52 52 uncompressed size of bundle content:
53 53 * (changelog) (glob)
54 54 * (manifests) (glob)
55 55 * a (glob)
56 56 adding changesets
57 57 adding manifests
58 58 adding file changes
59 59 added 1 changesets with 1 changes to 1 files
60 60 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
61 61 $ mv .hg/store/lfs_ .hg/store/lfs
62 62
63 63 Clear the cache to force a download
64 64 $ rm -rf `hg config lfs.usercache`
65 65 $ cd ../repo2
66 66 $ hg update tip -v
67 67 resolving manifests
68 68 getting a
69 69 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
70 70 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
71 71 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
72 72 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
73 73 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
74 74
75 75 When the server has some blobs already
76 76
77 77 $ hg mv a b
78 78 $ echo ANOTHER-LARGE-FILE > c
79 79 $ echo ANOTHER-LARGE-FILE2 > d
80 80 $ hg commit -m b-and-c -A b c d
81 81 $ hg push ../repo1 -v | grep -v '^ '
82 82 pushing to ../repo1
83 83 searching for changes
84 84 lfs: need to transfer 2 objects (39 bytes)
85 85 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
86 86 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
87 87 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
88 88 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
89 89 1 changesets found
90 90 uncompressed size of bundle content:
91 91 adding changesets
92 92 adding manifests
93 93 adding file changes
94 94 added 1 changesets with 3 changes to 3 files
95 95
96 96 Clear the cache to force a download
97 97 $ rm -rf `hg config lfs.usercache`
98 98 $ hg --repo ../repo1 update tip -v
99 99 resolving manifests
100 100 getting b
101 101 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
102 102 getting c
103 103 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
104 104 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
105 105 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
106 106 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
107 107 getting d
108 108 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
109 109 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
110 110 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
111 111 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
112 112 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 113
114 114 Test a corrupt file download, but clear the cache first to force a download.
115 115
116 116 $ rm -rf `hg config lfs.usercache`
117 117 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
118 118 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
119 119 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
120 120 $ rm ../repo1/*
121 121
122 122 $ hg --repo ../repo1 update -C tip -v
123 123 resolving manifests
124 124 getting a
125 125 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
126 126 getting b
127 127 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
128 128 getting c
129 129 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
130 130 abort: corrupt remote lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
131 131 [255]
132 132
133 133 The corrupted blob is not added to the usercache or local store
134 134
135 135 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
136 136 [1]
137 137 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
138 138 [1]
139 139 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
140 140
141 141 Test a corrupted file upload
142 142
143 143 $ echo 'another lfs blob' > b
144 144 $ hg ci -m 'another blob'
145 145 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
146 146 $ hg push -v ../repo1
147 147 pushing to ../repo1
148 148 searching for changes
149 149 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
150 150 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
151 151 (run hg verify)
152 152 [255]
153 153
154 154 Check error message when the remote missed a blob:
155 155
156 156 $ echo FFFFF > b
157 157 $ hg commit -m b -A b
158 158 $ echo FFFFF >> b
159 159 $ hg commit -m b b
160 160 $ rm -rf .hg/store/lfs
161 161 $ rm -rf `hg config lfs.usercache`
162 162 $ hg update -C '.^'
163 abort: LFS server claims required objects do not exist:
164 8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13!
163 abort: LFS server error. Remote object for "b" not found:(.*)! (re)
165 164 [255]
166 165
167 166 Check error message when object does not exist:
168 167
168 $ cd $TESTTMP
169 169 $ hg init test && cd test
170 170 $ echo "[extensions]" >> .hg/hgrc
171 171 $ echo "lfs=" >> .hg/hgrc
172 172 $ echo "[lfs]" >> .hg/hgrc
173 173 $ echo "threshold=1" >> .hg/hgrc
174 174 $ echo a > a
175 175 $ hg add a
176 176 $ hg commit -m 'test'
177 177 $ echo aaaaa > a
178 178 $ hg commit -m 'largefile'
 179 179 $ hg debugdata .hg/store/data/a.i 1 # verify this is not the file content but includes "oid", the LFS "pointer".
180 180 version https://git-lfs.github.com/spec/v1
181 181 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
182 182 size 6
183 183 x-is-binary 0
184 184 $ cd ..
185 185 $ rm -rf `hg config lfs.usercache`
186 $ hg --config 'lfs.url=https://dewey-lfs.vip.facebook.com/lfs' clone test test2
186
187 (Restart the server in a different location so it no longer has the content)
188
189 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
190 $ rm $DAEMON_PIDS
191 $ mkdir $TESTTMP/lfs-server2
192 $ cd $TESTTMP/lfs-server2
193 #if no-windows
194 $ lfs-test-server &> lfs-server.log &
195 $ echo $! >> $DAEMON_PIDS
196 #else
197 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
198 #endif
199
200 $ cd $TESTTMP
201 $ hg clone test test2
187 202 updating to branch default
188 203 abort: LFS server error. Remote object for "a" not found:(.*)! (re)
189 204 [255]
190 205
191 206 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
General Comments 0
You need to be logged in to leave comments. Login now