##// END OF EJS Templates
lfs: deduplicate oids in the transfer...
Matt Harbison -
r35945:9b413478 default
parent child Browse files
Show More
@@ -1,474 +1,481 b''
1 1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11 import json
12 12 import os
13 13 import re
14 14 import socket
15 15
16 16 from mercurial.i18n import _
17 17
18 18 from mercurial import (
19 19 error,
20 20 pathutil,
21 21 url as urlmod,
22 22 util,
23 23 vfs as vfsmod,
24 24 worker,
25 25 )
26 26
27 27 from ..largefiles import lfutil
28 28
29 29 # 64 bytes for SHA256
30 30 _lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
31 31
class lfsvfs(vfsmod.vfs):
    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if _lfsre.match(path) is None:
            raise error.ProgrammingError('unexpected lfs path: %s' % path)
        # Blobs are stored sharded by the first byte of the oid: XX/XXXX...
        head, tail = path[0:2], path[2:]
        return super(lfsvfs, self).join(head, tail)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        root = os.path.normpath(self.base)
        # When a walked directory equals root, slicing with prefixlen
        # yields '' because the walked path is shorter than the prefix.
        prefixlen = len(pathutil.normasprefix(root))
        found = []

        start = self.reljoin(self.base, path or '')
        for walkroot, subdirs, names in os.walk(start, onerror=onerror):
            walkroot = walkroot[prefixlen:]

            # Silently skip unexpected files and directories
            if len(walkroot) == 2:
                found.extend(walkroot + name for name in names
                             if _lfsre.match(walkroot + name))

        yield ('', [], found)
60 60
class filewithprogress(object):
    """A file-like object supporting __len__ and read.

    Useful to provide progress information for how many bytes are read.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback  # invoked as callback(readsize)
        # Determine the total length up front by seeking to the end.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        fp = self._fp
        if fp is None:
            # Already exhausted and closed on a previous read.
            return b''
        chunk = fp.read(size)
        if not chunk:
            # EOF: close eagerly and drop the reference so later reads
            # short-circuit.
            fp.close()
            self._fp = None
            return chunk
        if self._callback:
            self._callback(len(chunk))
        return chunk
88 88
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        # Repo-local blob store, under .hg/store/lfs/objects
        fullpath = repo.svfs.join('lfs/objects')
        self.vfs = lfsvfs(fullpath)
        # Per-user cache shared across repositories, resolved the same way
        # largefiles resolves its cache directory
        usercache = lfutil._usercachedir(repo.ui, 'lfs')
        self.cachevfs = lfsvfs(usercache)
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file.  Commit will
        # write to both it and the local store, as will anything that downloads
        # the blobs.  However, things like clone without an update won't
        # populate the local store.  For an init + push of a local clone,
        # the usercache is the only place it _could_ be.  If not present, the
        # missing file msg here will indicate the local repo, not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, 'rb')

        return self.vfs(oid, 'rb')

    def download(self, oid, src):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore."""
        sha256 = hashlib.sha256()

        # atomictemp ensures a partially-downloaded or corrupt blob never
        # becomes visible under its oid name
        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                sha256.update(chunk)

        realoid = sha256.hexdigest()
        if realoid != oid:
            raise error.Abort(_('corrupt remote lfs object: %s') % oid)

        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid):
            self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or similar.
        As such, there is no need to verify the data.  Imports from a remote
        store must use ``download()`` instead."""
        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            fp.write(data)

        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid):
            self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore.

        Falls back to the usercache when the blob is not in the local store,
        and hardlinks a (verified) usercache hit back into the local store.
        """
        if not self.vfs.exists(oid):
            blob = self._read(self.cachevfs, oid, verify)

            # Even if revlog will verify the content, it needs to be verified
            # now before making the hardlink to avoid propagating corrupt blobs.
            # Don't abort if corruption is detected, because `hg verify` will
            # give more useful info about the corruption- simply don't add the
            # hardlink.
            # (When verify=True, _read above already aborted on corruption, so
            # reaching this point means the hash is known good.)
            if verify or hashlib.sha256(blob).hexdigest() == oid:
                self.ui.note(_('lfs: found %s in the usercache\n') % oid)
                lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        else:
            self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
            blob = self._read(self.vfs, oid, verify)
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
181 181
class _gitlfsremote(object):
    """Remote blobstore speaking the Git-LFS protocol over HTTP(S)."""

    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip('/')
        # Some servers reject unknown clients; default to mimicking git-lfs
        useragent = repo.ui.config('experimental', 'lfs.user-agent')
        if not useragent:
            useragent = 'git-lfs/2.3.4 (Mercurial %s)' % util.version()
        self.urlopener = urlmod.opener(ui, authinfo, useragent)
        # number of retries per object on socket errors (see _batch)
        self.retry = ui.configint('lfs', 'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(_deduplicate(pointers), fromstore, 'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blobstore."""
        self._batch(_deduplicate(pointers), tostore, 'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
        requestdata = json.dumps({
            'objects': objects,
            'operation': action,
        })
        batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
                                       data=requestdata)
        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
        try:
            rawjson = self.urlopener.open(batchreq).read()
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
                                 % (ex, action))
        try:
            response = json.loads(rawjson)
        except ValueError:
            raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
                                 % rawjson)
        return response

    def _checkforservererror(self, pointers, responses, action):
        """Scans errors from objects

        Raises LfsRemoteError if any objects have an error"""
        for response in responses:
            # The server should return 404 when objects cannot be found. Some
            # server implementation (ex. lfs-test-server)  does not set "error"
            # but just removes "download" from "actions". Treat that case
            # as the same as 404 error.
            notfound = (response.get('error', {}).get('code') == 404
                        or (action == 'download'
                            and action not in response.get('actions', [])))
            if notfound:
                ptrmap = {p.oid(): p for p in pointers}
                p = ptrmap.get(response['oid'], None)
                if p:
                    filename = getattr(p, 'filename', 'unknown')
                    raise LfsRemoteError(
                        _(('LFS server error. Remote object '
                          'for "%s" not found: %r')) % (filename, response))
                else:
                    # a response for an oid we never asked about
                    raise LfsRemoteError(
                        _('LFS server error. Unsolicited response for oid %s')
                        % response['oid'])
            if 'error' in response:
                raise LfsRemoteError(_('LFS server error: %r') % response)

    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get('objects', [])
        self._checkforservererror(pointers, objects, action)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist in the server.
        filteredobjects = [o for o in objects if action in o.get('actions', [])]

        return filteredobjects

    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
        basic-transfers.md
        """
        oid = str(obj['oid'])

        # The batch API hands back a per-object href and headers to use
        href = str(obj['actions'][action].get('href'))
        headers = obj['actions'][action].get('header', {}).items()

        request = util.urlreq.request(href)
        if action == 'upload':
            # If uploading blobs, read data from local blobstore.
            # Verify first to avoid propagating a corrupt local blob.
            with localstore.open(oid) as fp:
                _verifyfile(oid, fp)
            request.data = filewithprogress(localstore.open(oid), None)
            request.get_method = lambda: 'PUT'

        for k, v in headers:
            request.add_header(k, v)

        response = b''
        try:
            req = self.urlopener.open(request)
            if action == 'download':
                # If downloading blobs, store downloaded data to local blobstore
                localstore.download(oid, req)
            else:
                # drain (and log) whatever body the server sent back
                while True:
                    data = req.read(1048576)
                    if not data:
                        break
                    response += data
                if response:
                    self.ui.debug('lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            if self.ui.debugflag:
                self.ui.debug('%s: %s\n' % (oid, ex.read()))
            raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
                                 % (ex, oid, action))

    def _batch(self, pointers, localstore, action):
        """Transfer all blobs for the given pointers in the given direction.

        Queries the batch API for transfer metadata, then moves each object
        with the basic transfer protocol (optionally via the worker pool),
        reporting progress along the way.
        """
        if action not in ['upload', 'download']:
            raise error.ProgrammingError('invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get('size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get('oid')] = obj.get('size', 0)
        topic = {'upload': _('lfs uploading'),
                 'download': _('lfs downloading')}[action]
        if len(objects) > 1:
            self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
                         % (len(objects), util.bytecount(total)))
        self.ui.progress(topic, 0, total=total)
        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get('size', 0)
                if self.ui.verbose:
                    if action == 'download':
                        msg = _('lfs: downloading %s (%s)\n')
                    elif action == 'upload':
                        msg = _('lfs: uploading %s (%s)\n')
                    self.ui.note(msg % (obj.get('oid'),
                                 util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get('oid')
                        break
                    except socket.error as ex:
                        # retry only transient socket failures; other errors
                        # (e.g. HTTP errors) propagate immediately
                        if retry > 0:
                            self.ui.note(
                                _('lfs: failed: %r (remaining retry %d)\n')
                                % (ex, retry))
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool('experimental', 'lfs.worker-enable'):
            oids = worker.worker(self.ui, 0.1, transfer, (),
                                 sorted(objects, key=lambda o: o.get('oid')))
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get('oid')))

        processed = 0
        blobs = 0
        for _one, oid in oids:
            processed += sizes[oid]
            blobs += 1
            self.ui.progress(topic, processed, total=total)
            self.ui.note(_('lfs: processed: %s\n') % oid)
        self.ui.progress(topic, pos=None, total=total)

        if blobs > 0:
            if action == 'upload':
                self.ui.status(_('lfs: uploaded %d files (%s)\n')
                               % (blobs, util.bytecount(processed)))
            # TODO: coalesce the download requests, and comment this in
            #elif action == 'download':
            #    self.ui.status(_('lfs: downloaded %d files (%s)\n')
            #                   % (blobs, util.bytecount(processed)))

    def __del__(self):
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda : None)()
393 393
class _dummyremote(object):
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        fullpath = repo.vfs.join('lfs', url.path)
        self.vfs = lfsvfs(fullpath)

    def writebatch(self, pointers, fromstore):
        # Copy each unique blob out of the source store, verified.
        for pointer in _deduplicate(pointers):
            oid = pointer.oid()
            content = fromstore.read(oid, verify=True)
            with self.vfs(oid, 'wb', atomictemp=True) as fp:
                fp.write(content)

    def readbatch(self, pointers, tostore):
        # Stream each unique blob into the destination store.
        for pointer in _deduplicate(pointers):
            oid = pointer.oid()
            with self.vfs(oid, 'rb') as fp:
                tostore.download(oid, fp)
411 411
class _nullremote(object):
    """Null store storing blobs to /dev/null."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore):
        # Intentionally a no-op: uploaded blobs are discarded.
        pass

    def readbatch(self, pointers, tostore):
        # Intentionally a no-op: nothing is ever downloaded.
        pass
423 423
class _promptremote(object):
    """Prompt user to set lfs.url when accessed."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()

    def _prompt(self):
        # Any transfer attempt aborts, pointing the user at the missing config.
        raise error.Abort(_('lfs.url needs to be configured'))
438 438
# Map each supported lfs.url scheme to the store class handling it.  The
# `None` key covers the case where no lfs.url is configured at all.
_storemap = {
    'https': _gitlfsremote,
    'http': _gitlfsremote,
    'file': _dummyremote,
    'null': _nullremote,
    None: _promptremote,
}
446 446
def _deduplicate(pointers):
    """Remove any duplicate oids that exist in the list"""
    # A sortdict keeps first-insertion order while letting a later
    # duplicate pointer replace the earlier one for the same oid.
    seen = util.sortdict()
    for pointer in pointers:
        seen[pointer.oid()] = pointer
    return seen.values()
453
def _verify(oid, content):
    """Abort if ``content`` does not hash to the expected ``oid``."""
    actual = hashlib.sha256(content).hexdigest()
    if actual != oid:
        raise error.Abort(_('detected corrupt lfs object: %s') % oid,
                          hint=_('run hg verify'))
452 459
def _verifyfile(oid, fp):
    """Hash the file-like ``fp`` in 1MB chunks; abort on oid mismatch."""
    hasher = hashlib.sha256()
    while True:
        chunk = fp.read(1024 * 1024)
        if not chunk:
            break
        hasher.update(chunk)
    if hasher.hexdigest() != oid:
        raise error.Abort(_('detected corrupt lfs object: %s') % oid,
                          hint=_('run hg verify'))
464 471
def remote(repo):
    """remotestore factory. return a store in _storemap depending on config"""
    # An unset lfs.url yields scheme None, which maps to the prompting store.
    url = util.url(repo.ui.config('lfs', 'url') or '')
    scheme = url.scheme
    if scheme not in _storemap:
        raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
    return _storemap[scheme](repo, url)
472 479
class LfsRemoteError(error.RevlogError):
    """Raised when the remote LFS server reports or returns invalid data."""
    pass
@@ -1,279 +1,273 b''
1 1 #require lfs-test-server
2 2
3 3 $ LFS_LISTEN="tcp://:$HGPORT"
4 4 $ LFS_HOST="localhost:$HGPORT"
5 5 $ LFS_PUBLIC=1
6 6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
7 7 #if no-windows
8 8 $ lfs-test-server &> lfs-server.log &
9 9 $ echo $! >> $DAEMON_PIDS
10 10 #else
11 11 $ cat >> $TESTTMP/spawn.py <<EOF
12 12 > import os
13 13 > import subprocess
14 14 > import sys
15 15 >
16 16 > for path in os.environ["PATH"].split(os.pathsep):
17 17 > exe = os.path.join(path, 'lfs-test-server.exe')
18 18 > if os.path.exists(exe):
19 19 > with open('lfs-server.log', 'wb') as out:
20 20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
21 21 > sys.stdout.write('%s\n' % p.pid)
22 22 > sys.exit(0)
23 23 > sys.exit(1)
24 24 > EOF
25 25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
26 26 #endif
27 27
28 28 $ cat >> $HGRCPATH <<EOF
29 29 > [extensions]
30 30 > lfs=
31 31 > [lfs]
32 32 > url=http://foo:bar@$LFS_HOST/
33 33 > track=all()
34 34 > EOF
35 35
36 36 $ hg init repo1
37 37 $ cd repo1
38 38 $ echo THIS-IS-LFS > a
39 39 $ hg commit -m a -A a
40 40
41 41 A push can be serviced directly from the usercache if it isn't in the local
42 42 store.
43 43
44 44 $ hg init ../repo2
45 45 $ mv .hg/store/lfs .hg/store/lfs_
46 46 $ hg push ../repo2 -v
47 47 pushing to ../repo2
48 48 searching for changes
49 49 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
50 50 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
51 51 lfs: uploaded 1 files (12 bytes)
52 52 1 changesets found
53 53 uncompressed size of bundle content:
54 54 * (changelog) (glob)
55 55 * (manifests) (glob)
56 56 * a (glob)
57 57 adding changesets
58 58 adding manifests
59 59 adding file changes
60 60 added 1 changesets with 1 changes to 1 files
61 61 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
62 62 $ mv .hg/store/lfs_ .hg/store/lfs
63 63
64 64 Clear the cache to force a download
65 65 $ rm -rf `hg config lfs.usercache`
66 66 $ cd ../repo2
67 67 $ hg update tip -v
68 68 resolving manifests
69 69 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
70 70 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
71 71 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
72 72 getting a
73 73 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
74 74 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 75
76 76 When the server has some blobs already
77 77
78 78 $ hg mv a b
79 79 $ echo ANOTHER-LARGE-FILE > c
80 80 $ echo ANOTHER-LARGE-FILE2 > d
81 81 $ hg commit -m b-and-c -A b c d
82 82 $ hg push ../repo1 -v | grep -v '^ '
83 83 pushing to ../repo1
84 84 searching for changes
85 85 lfs: need to transfer 2 objects (39 bytes)
86 86 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
87 87 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
88 88 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
89 89 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
90 90 lfs: uploaded 2 files (39 bytes)
91 91 1 changesets found
92 92 uncompressed size of bundle content:
93 93 adding changesets
94 94 adding manifests
95 95 adding file changes
96 96 added 1 changesets with 3 changes to 3 files
97 97
98 98 Clear the cache to force a download
99 99 $ rm -rf `hg config lfs.usercache`
100 100 $ hg --repo ../repo1 update tip -v
101 101 resolving manifests
102 102 lfs: need to transfer 2 objects (39 bytes)
103 103 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
104 104 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
105 105 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
106 106 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
107 107 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
108 108 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
109 109 getting b
110 110 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
111 111 getting c
112 112 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
113 113 getting d
114 114 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
115 115 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
116 116
117 117 Test a corrupt file download, but clear the cache first to force a download.
118 118
119 119 $ rm -rf `hg config lfs.usercache`
120 120 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
121 121 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
122 122 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
123 123 $ rm ../repo1/*
124 124
125 125 $ hg --repo ../repo1 update -C tip -v
126 126 resolving manifests
127 127 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
128 128 abort: corrupt remote lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
129 129 [255]
130 130
131 131 The corrupted blob is not added to the usercache or local store
132 132
133 133 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
134 134 [1]
135 135 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
136 136 [1]
137 137 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
138 138
139 139 Test a corrupted file upload
140 140
141 141 $ echo 'another lfs blob' > b
142 142 $ hg ci -m 'another blob'
143 143 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
144 144 $ hg push -v ../repo1
145 145 pushing to ../repo1
146 146 searching for changes
147 147 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
148 148 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
149 149 (run hg verify)
150 150 [255]
151 151
152 152 Archive will prefetch blobs in a group
153 153
154 154 $ rm -rf .hg/store/lfs `hg config lfs.usercache`
155 155 $ hg archive -vr 1 ../archive
156 lfs: need to transfer 4 objects (63 bytes)
156 lfs: need to transfer 3 objects (51 bytes)
157 157 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
158 158 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
159 159 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
160 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
161 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
162 160 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
163 161 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
164 162 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
165 163 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
166 164 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
167 165 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
168 166 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
169 167 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
170 168 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
171 169 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
172 170 $ find ../archive | sort
173 171 ../archive
174 172 ../archive/.hg_archival.txt
175 173 ../archive/a
176 174 ../archive/b
177 175 ../archive/c
178 176 ../archive/d
179 177
180 178 Cat will prefetch blobs in a group
181 179
182 180 $ rm -rf .hg/store/lfs `hg config lfs.usercache`
183 181 $ hg cat -vr 1 a b c
184 lfs: need to transfer 3 objects (43 bytes)
182 lfs: need to transfer 2 objects (31 bytes)
185 183 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
186 184 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
187 185 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
188 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
189 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
190 186 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
191 187 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
192 188 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
193 189 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
194 190 THIS-IS-LFS
195 191 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
196 192 THIS-IS-LFS
197 193 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
198 194 ANOTHER-LARGE-FILE
199 195
200 196 Revert will prefetch blobs in a group
201 197
202 198 $ rm -rf .hg/store/lfs
203 199 $ rm -rf `hg config lfs.usercache`
204 200 $ rm *
205 201 $ hg revert --all -r 1 -v
206 202 adding a
207 203 reverting b
208 204 reverting c
209 205 reverting d
210 lfs: need to transfer 4 objects (63 bytes)
206 lfs: need to transfer 3 objects (51 bytes)
211 207 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
212 208 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
213 209 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
214 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
215 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
216 210 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
217 211 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
218 212 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
219 213 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
220 214 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
221 215 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
222 216 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
223 217 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
224 218 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
225 219 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
226 220
227 221 Check error message when the remote missed a blob:
228 222
229 223 $ echo FFFFF > b
230 224 $ hg commit -m b -A b
231 225 $ echo FFFFF >> b
232 226 $ hg commit -m b b
233 227 $ rm -rf .hg/store/lfs
234 228 $ rm -rf `hg config lfs.usercache`
235 229 $ hg update -C '.^'
236 230 abort: LFS server error. Remote object for "b" not found:(.*)! (re)
237 231 [255]
238 232
239 233 Check error message when object does not exist:
240 234
241 235 $ cd $TESTTMP
242 236 $ hg init test && cd test
243 237 $ echo "[extensions]" >> .hg/hgrc
244 238 $ echo "lfs=" >> .hg/hgrc
245 239 $ echo "[lfs]" >> .hg/hgrc
246 240 $ echo "threshold=1" >> .hg/hgrc
247 241 $ echo a > a
248 242 $ hg add a
249 243 $ hg commit -m 'test'
250 244 $ echo aaaaa > a
251 245 $ hg commit -m 'largefile'
252 246 $ hg debugdata .hg/store/data/a.i 1 # verify this is not the file content but includes "oid", the LFS "pointer".
253 247 version https://git-lfs.github.com/spec/v1
254 248 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
255 249 size 6
256 250 x-is-binary 0
257 251 $ cd ..
258 252 $ rm -rf `hg config lfs.usercache`
259 253
260 254 (Restart the server in a different location so it no longer has the content)
261 255
262 256 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
263 257 $ rm $DAEMON_PIDS
264 258 $ mkdir $TESTTMP/lfs-server2
265 259 $ cd $TESTTMP/lfs-server2
266 260 #if no-windows
267 261 $ lfs-test-server &> lfs-server.log &
268 262 $ echo $! >> $DAEMON_PIDS
269 263 #else
270 264 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
271 265 #endif
272 266
273 267 $ cd $TESTTMP
274 268 $ hg clone test test2
275 269 updating to branch default
276 270 abort: LFS server error. Remote object for "a" not found:(.*)! (re)
277 271 [255]
278 272
279 273 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
General Comments 0
You need to be logged in to leave comments. Login now