##// END OF EJS Templates
lfs: add "bytes" as the unit to the upload/download progress bar...
Matt Harbison -
r44534:05881d00 default
parent child Browse files
Show More
@@ -1,746 +1,748 b''
1 1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import hashlib
13 13 import json
14 14 import os
15 15 import re
16 16 import socket
17 17
18 18 from mercurial.i18n import _
19 19 from mercurial.pycompat import getattr
20 20
21 21 from mercurial import (
22 22 encoding,
23 23 error,
24 24 node,
25 25 pathutil,
26 26 pycompat,
27 27 url as urlmod,
28 28 util,
29 29 vfs as vfsmod,
30 30 worker,
31 31 )
32 32
33 33 from mercurial.utils import stringutil
34 34
35 35 from ..largefiles import lfutil
36 36
# An LFS object id (oid) is the SHA-256 digest of the blob rendered as
# lowercase hex, i.e. exactly 64 hex characters (64 bytes for SHA256).
# Anchored with \A/\Z so partial matches are rejected.
_lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
40 40
class lfsvfs(vfsmod.vfs):
    """A vfs that fans blobs out into two-character subdirectories by oid."""

    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if _lfsre.match(path) is None:
            raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
        fanout, remainder = path[0:2], path[2:]
        return super(lfsvfs, self).join(fanout, remainder)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        root = os.path.normpath(self.base)
        # Length of the normalized root including its trailing separator;
        # slicing each dirpath with it yields the path relative to the root
        # (empty for the root itself, since len(dirpath) < prefixlen there).
        prefixlen = len(pathutil.normasprefix(root))
        start = self.reljoin(self.base, path or b'')

        found = []
        for dirpath, subdirs, files in os.walk(start, onerror=onerror):
            reldir = dirpath[prefixlen:]

            # Only the two-character fanout directories can hold blobs;
            # silently skip unexpected files and directories.
            if len(reldir) == 2:
                for name in files:
                    candidate = reldir + name
                    if _lfsre.match(candidate):
                        found.append(candidate)

        yield (b'', [], found)
72 72
class nullvfs(lfsvfs):
    """A black-hole store used when the user cache is disabled: it reports
    every blob as absent and silently discards writes."""

    def __init__(self):
        # Deliberately skip lfsvfs/vfs.__init__: there is no backing
        # directory for this store.
        pass

    def exists(self, oid):
        # Nothing is ever stored, so nothing ever exists.
        return False

    def read(self, oid):
        # store.read() calls into here if the blob doesn't exist in its
        # self.vfs. Raise the same error as a normal vfs when asked to read a
        # file that doesn't exist. The only difference is the full file path
        # isn't available in the error.
        raise IOError(
            errno.ENOENT,
            pycompat.sysstr(b'%s: No such file or directory' % oid),
        )

    def walk(self, path=None, onerror=None):
        # Same shape as lfsvfs.walk(), but always empty.
        return (b'', [], [])

    def write(self, oid, data):
        # Writes are intentionally dropped.
        pass
96 96
class filewithprogress(object):
    """a file-like object that supports __len__ and read.

    Useful to provide progress information for how many bytes are read.
    """

    def __init__(self, fp, callback):
        self._file = fp
        self._progressfn = callback  # invoked as callback(readsize)
        # Determine the total size up front by seeking to the end, then
        # rewind so read() starts at the beginning.
        fp.seek(0, os.SEEK_END)
        self._size = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._size

    def read(self, size):
        fp = self._file
        if fp is None:
            # Already exhausted and closed; keep returning EOF.
            return b''
        data = fp.read(size)
        if not data:
            # EOF: close eagerly and drop the reference so later reads do
            # not touch a closed file.
            fp.close()
            self._file = None
            return data
        if self._progressfn:
            self._progressfn(len(data))
        return data
124 124
125 125
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        """Create the store under ``.hg/store/lfs/objects``, plus a
        user-level cache (replaced by a write-discarding ``nullvfs`` when
        ``experimental.lfs.disableusercache`` is set)."""
        fullpath = repo.svfs.join(b'lfs/objects')
        self.vfs = lfsvfs(fullpath)

        if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
            self.cachevfs = nullvfs()
        else:
            usercache = lfutil._usercachedir(repo.ui, b'lfs')
            self.cachevfs = lfsvfs(usercache)
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file. Commit will
        # write to both it and the local store, as will anything that downloads
        # the blobs. However, things like clone without an update won't
        # populate the local store. For an init + push of a local clone,
        # the usercache is the only place it _could_ be. If not present, the
        # missing file msg here will indicate the local repo, not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, b'rb')

        return self.vfs(oid, b'rb')

    def download(self, oid, src):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore."""
        sha256 = hashlib.sha256()

        # atomictemp ensures a partially written blob never becomes visible
        # under its oid: raising inside the with block discards the temp file.
        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                sha256.update(chunk)

            # Verify before the temp file is renamed into place so a corrupt
            # download is thrown away rather than stored.
            realoid = node.hex(sha256.digest())
            if realoid != oid:
                raise LfsCorruptionError(
                    _(b'corrupt remote lfs object: %s') % oid
                )

        self._linktousercache(oid)

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or similar.
        As such, there is no need to verify the data. Imports from a remote
        store must use ``download()`` instead."""
        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            fp.write(data)

        self._linktousercache(oid)

    def linkfromusercache(self, oid):
        """Link blobs found in the user cache into this store.

        The server module needs to do this when it lets the client know not to
        upload the blob, to ensure it is always available in this store.
        Normally this is done implicitly when the client reads or writes the
        blob, but that doesn't happen when the server tells the client that it
        already has the blob.
        """
        if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
            self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))

    def _linktousercache(self, oid):
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid) and not isinstance(
            self.cachevfs, nullvfs
        ):
            self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if not self.vfs.exists(oid):
            blob = self._read(self.cachevfs, oid, verify)

            # Even if revlog will verify the content, it needs to be verified
            # now before making the hardlink to avoid propagating corrupt blobs.
            # Don't abort if corruption is detected, because `hg verify` will
            # give more useful info about the corruption- simply don't add the
            # hardlink.
            # (When ``verify`` is True, _read() above already raised on a bad
            # blob, so reaching this point means the content checked out.)
            if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
                self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
                lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        else:
            self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
            blob = self._read(self.vfs, oid, verify)
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def verify(self, oid):
        """Indicate whether or not the hash of the underlying file matches its
        name."""
        sha256 = hashlib.sha256()

        # Hash in 1MB chunks to keep memory bounded for large blobs.
        with self.open(oid) as fp:
            for chunk in util.filechunkiter(fp, size=1048576):
                sha256.update(chunk)

        return oid == node.hex(sha256.digest())

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
249 249
250 250
def _urlerrorreason(urlerror):
    '''Create a friendly message for the given URLError to be used in an
    LfsRemoteError message.
    '''
    # Prefer the wrapped exception when the URLError carries one.
    cause = urlerror
    if isinstance(urlerror.reason, Exception):
        cause = urlerror.reason

    if util.safehasattr(cause, b'reason'):
        try:
            # usually it is in the form (errno, strerror)
            reason = cause.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = cause.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        return reason

    strerror = getattr(cause, "strerror", None)
    if strerror:
        return encoding.strtolocal(strerror)

    return stringutil.forcebytestr(urlerror)
274 274
275 275
class lfsauthhandler(util.urlreq.basehandler):
    # Run before HTTPDigestAuthHandler (whose handler_order is 490) so a
    # non-Basic challenge is rejected instead of negotiated.
    handler_order = 480

    def http_error_401(self, req, fp, code, msg, headers):
        """Enforces that any authentication performed is HTTP Basic
        Authentication. No authentication is also acceptable.
        """
        challenge = headers.get('www-authenticate', None)
        if not challenge:
            # No challenge at all is fine.
            return None

        scheme = challenge.split()[0]
        if scheme.lower() != 'basic':
            errmsg = _(b'the server must support Basic Authentication')
            raise util.urlerr.httperror(
                req.get_full_url(),
                code,
                encoding.strfromlocal(errmsg),
                headers,
                fp,
            )
        return None
297 297
298 298
class _gitlfsremote(object):
    """Remote blobstore speaking the Git-LFS batch and basic transfer
    protocols over http(s).

    See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
    """

    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip(b'/')
        useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
        if not useragent:
            # Impersonate the git-lfs client by default; overridable via
            # experimental.lfs.user-agent.
            useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
        self.urlopener = urlmod.opener(ui, authinfo, useragent)
        self.urlopener.add_handler(lfsauthhandler())
        self.retry = ui.configint(b'lfs', b'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(_deduplicate(pointers), fromstore, b'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blobstore."""
        self._batch(_deduplicate(pointers), tostore, b'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        objects = [
            {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
            for p in pointers
        ]
        requestdata = pycompat.bytesurl(
            json.dumps(
                {'objects': objects, 'operation': pycompat.strurl(action),}
            )
        )
        url = b'%s/objects/batch' % self.baseurl
        batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
        try:
            with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
                rawjson = rsp.read()
        except util.urlerr.httperror as ex:
            hints = {
                400: _(
                    b'check that lfs serving is enabled on %s and "%s" is '
                    b'supported'
                )
                % (self.baseurl, action),
                404: _(b'the "lfs.url" config may be used to override %s')
                % self.baseurl,
            }
            hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
            raise LfsRemoteError(
                _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
                hint=hint,
            )
        except util.urlerr.urlerror as ex:
            hint = (
                _(b'the "lfs.url" config may be used to override %s')
                % self.baseurl
            )
            raise LfsRemoteError(
                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
            )
        try:
            response = pycompat.json_loads(rawjson)
        except ValueError:
            # ``rawjson`` is bytes (from rsp.read()), which has no encode()
            # method on Python 3 -- the previous ``rawjson.encode("utf-8")``
            # made this error path raise AttributeError instead of the
            # intended message.  forcebytestr() safely embeds it as bytes.
            raise LfsRemoteError(
                _(b'LFS server returns invalid JSON: %s')
                % stringutil.forcebytestr(rawjson)
            )

        if self.ui.debugflag:
            self.ui.debug(b'Status: %d\n' % rsp.status)
            # lfs-test-server and hg serve return headers in different order
            headers = pycompat.bytestr(rsp.info()).strip()
            self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))

            # Sort objects so the debug output is stable across servers.
            if 'objects' in response:
                response['objects'] = sorted(
                    response['objects'], key=lambda p: p['oid']
                )
            self.ui.debug(
                b'%s\n'
                % pycompat.bytesurl(
                    json.dumps(
                        response,
                        indent=2,
                        separators=('', ': '),
                        sort_keys=True,
                    )
                )
            )

        def encodestr(x):
            if isinstance(x, pycompat.unicode):
                return x.encode('utf-8')
            return x

        # Recursively convert all unicode leaves back to bytes.
        return pycompat.rapply(encodestr, response)

    def _checkforservererror(self, pointers, responses, action):
        """Scans errors from objects

        Raises LfsRemoteError if any objects have an error"""
        for response in responses:
            # The server should return 404 when objects cannot be found. Some
            # server implementation (ex. lfs-test-server) does not set "error"
            # but just removes "download" from "actions". Treat that case
            # as the same as 404 error.
            if b'error' not in response:
                if action == b'download' and action not in response.get(
                    b'actions', []
                ):
                    code = 404
                else:
                    continue
            else:
                # An error dict without a code doesn't make much sense, so
                # treat as a server error.
                code = response.get(b'error').get(b'code', 500)

            ptrmap = {p.oid(): p for p in pointers}
            p = ptrmap.get(response[b'oid'], None)
            if p:
                filename = getattr(p, 'filename', b'unknown')
                errors = {
                    404: b'The object does not exist',
                    410: b'The object was removed by the owner',
                    422: b'Validation error',
                    500: b'Internal server error',
                }
                msg = errors.get(code, b'status code %d' % code)
                raise LfsRemoteError(
                    _(b'LFS server error for "%s": %s') % (filename, msg)
                )
            else:
                raise LfsRemoteError(
                    _(b'LFS server error. Unsolicited response for oid %s')
                    % response[b'oid']
                )

    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get(b'objects', [])
        self._checkforservererror(pointers, objects, action)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist in the server.
        filteredobjects = [
            o for o in objects if action in o.get(b'actions', [])
        ]

        return filteredobjects

    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
        """
        oid = obj[b'oid']
        href = obj[b'actions'][action].get(b'href')
        headers = obj[b'actions'][action].get(b'header', {}).items()

        request = util.urlreq.request(pycompat.strurl(href))
        if action == b'upload':
            # If uploading blobs, read data from local blobstore.
            if not localstore.verify(oid):
                raise error.Abort(
                    _(b'detected corrupt lfs object: %s') % oid,
                    hint=_(b'run hg verify'),
                )
            request.data = filewithprogress(localstore.open(oid), None)
            request.get_method = lambda: r'PUT'
            request.add_header('Content-Type', 'application/octet-stream')
            request.add_header('Content-Length', len(request.data))

        for k, v in headers:
            request.add_header(pycompat.strurl(k), pycompat.strurl(v))

        response = b''
        try:
            with contextlib.closing(self.urlopener.open(request)) as req:
                ui = self.ui  # Shorten debug lines
                if self.ui.debugflag:
                    ui.debug(b'Status: %d\n' % req.status)
                    # lfs-test-server and hg serve return headers in different
                    # order
                    headers = pycompat.bytestr(req.info()).strip()
                    ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))

                if action == b'download':
                    # If downloading blobs, store downloaded data to local
                    # blobstore
                    localstore.download(oid, req)
                else:
                    # Drain the upload response in 1MB chunks for debugging.
                    while True:
                        data = req.read(1048576)
                        if not data:
                            break
                        response += data
                    if response:
                        ui.debug(b'lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            if self.ui.debugflag:
                self.ui.debug(
                    b'%s: %s\n' % (oid, ex.read())
                )  # XXX: also bytes?
            raise LfsRemoteError(
                _(b'LFS HTTP error: %s (oid=%s, action=%s)')
                % (stringutil.forcebytestr(ex), oid, action)
            )
        except util.urlerr.urlerror as ex:
            hint = _(b'attempted connection to %s') % pycompat.bytesurl(
                util.urllibcompat.getfullurl(request)
            )
            raise LfsRemoteError(
                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
            )

    def _batch(self, pointers, localstore, action):
        """Transfer the given pointers between the remote and ``localstore``,
        with byte-granular progress reporting and per-object retry on socket
        errors."""
        if action not in [b'upload', b'download']:
            raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get(b'size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get(b'oid')] = obj.get(b'size', 0)
        topic = {
            b'upload': _(b'lfs uploading'),
            b'download': _(b'lfs downloading'),
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _(b'lfs: need to transfer %d objects (%s)\n')
                % (len(objects), util.bytecount(total))
            )

        def transfer(chunk):
            # Generator yielding (1, oid) per completed object; shaped this
            # way so it can also be driven by worker.worker() below.
            for obj in chunk:
                objsize = obj.get(b'size', 0)
                if self.ui.verbose:
                    if action == b'download':
                        msg = _(b'lfs: downloading %s (%s)\n')
                    elif action == b'upload':
                        msg = _(b'lfs: uploading %s (%s)\n')
                    self.ui.note(
                        msg % (obj.get(b'oid'), util.bytecount(objsize))
                    )
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get(b'oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _(b'lfs: failed: %r (remaining retry %d)\n')
                                % (stringutil.forcebytestr(ex), retry)
                            )
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get(b'oid')),
            )
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))

        with self.ui.makeprogress(
            topic, unit=_(b"bytes"), total=total
        ) as progress:
            progress.update(0)
            processed = 0
            blobs = 0
            for _one, oid in oids:
                processed += sizes[oid]
                blobs += 1
                progress.update(processed)
                self.ui.note(_(b'lfs: processed: %s\n') % oid)

        if blobs > 0:
            if action == b'upload':
                self.ui.status(
                    _(b'lfs: uploaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
            elif action == b'download':
                self.ui.status(
                    _(b'lfs: downloaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )

    def __del__(self):
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda: None)()
620 622
621 623
class _dummyremote(object):
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        fullpath = repo.vfs.join(b'lfs', url.path)
        self.vfs = lfsvfs(fullpath)

    def writebatch(self, pointers, fromstore):
        # Copy each unique blob out of the source store, verifying on read.
        for pointer in _deduplicate(pointers):
            blob = fromstore.read(pointer.oid(), verify=True)
            with self.vfs(pointer.oid(), b'wb', atomictemp=True) as fh:
                fh.write(blob)

    def readbatch(self, pointers, tostore):
        # Feed each unique blob into the destination store's download(),
        # which is responsible for verification.
        for pointer in _deduplicate(pointers):
            with self.vfs(pointer.oid(), b'rb') as fh:
                tostore.download(pointer.oid(), fh)
639 641
640 642
class _nullremote(object):
    """Null store storing blobs to /dev/null."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore):
        # Uploads are deliberately discarded.
        pass

    def readbatch(self, pointers, tostore):
        # Nothing is ever available to download.
        pass
652 654
653 655
class _promptremote(object):
    """Prompt user to set lfs.url when accessed."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()

    def _prompt(self):
        # Any attempted transfer is an error until lfs.url is configured.
        raise error.Abort(_(b'lfs.url needs to be configured'))
668 670
669 671
# Map a URL scheme to the remote store class handling it.  The ``None`` key
# covers the case where no lfs.url is configured and no endpoint could be
# inferred, in which case the user is prompted to set one.
_storemap = {
    b'https': _gitlfsremote,
    b'http': _gitlfsremote,
    b'file': _dummyremote,
    b'null': _nullremote,
    None: _promptremote,
}
677 679
678 680
def _deduplicate(pointers):
    """Remove any duplicate oids that exist in the list"""
    # Key by oid so a later pointer for the same oid replaces the earlier
    # one; util.sortdict keeps a deterministic ordering of the values.
    unique = util.sortdict()
    for pointer in pointers:
        unique[pointer.oid()] = pointer
    return unique.values()
685 687
686 688
def _verify(oid, content):
    """Raise LfsCorruptionError unless ``content`` hashes to ``oid``."""
    actual = node.hex(hashlib.sha256(content).digest())
    if actual == oid:
        return
    raise LfsCorruptionError(
        _(b'detected corrupt lfs object: %s') % oid,
        hint=_(b'run hg verify'),
    )
694 696
695 697
def remote(repo, remote=None):
    """remotestore factory. return a store in _storemap depending on config

    If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
    infer the endpoint, based on the remote repository using the same path
    adjustments as git. As an extension, 'http' is supported as well so that
    ``hg serve`` works out of the box.

    https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
    """
    lfsurl = repo.ui.config(b'lfs', b'url')
    url = util.url(lfsurl or b'')
    if lfsurl is None:
        if remote:
            path = remote
        elif util.safehasattr(repo, b'_subtoppath'):
            # The pull command sets this during the optional update phase, which
            # tells exactly where the pull originated, whether 'paths.default'
            # or explicit.
            path = repo._subtoppath
        else:
            # TODO: investigate 'paths.remote:lfsurl' style path customization,
            # and fall back to inferring from 'paths.remote' if unspecified.
            path = repo.ui.config(b'paths', b'default') or b''

        defaulturl = util.url(path)

        # TODO: support local paths as well.
        # TODO: consider the ssh -> https transformation that git applies
        if defaulturl.scheme in (b'http', b'https'):
            # Ensure exactly one trailing slash before appending the
            # git-style endpoint suffix.  (The previous ``path[:-1]`` check
            # compared everything *except* the last byte against b'/', so it
            # double-slashed paths that already ended in '/'.)
            if defaulturl.path and defaulturl.path[-1:] != b'/':
                defaulturl.path += b'/'
            defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'

            url = util.url(bytes(defaulturl))
            repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)

    scheme = url.scheme
    if scheme not in _storemap:
        raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
    return _storemap[scheme](repo, url)
737 739
738 740
class LfsRemoteError(error.StorageError):
    """Raised for errors reported by, or while communicating with, a remote
    LFS store (HTTP/URL failures, invalid JSON, per-object server errors)."""

    pass
741 743
742 744
class LfsCorruptionError(error.Abort):
    """Raised when a corrupt blob is detected, aborting an operation

    It exists to allow specialized handling on the server side.  Raised by
    ``local.download()`` and ``_verify()`` when a blob's content does not
    hash to its oid."""
General Comments 0
You need to be logged in to leave comments. Login now