##// END OF EJS Templates
lfs: drop an unnecessary r'' prefix...
Matt Harbison -
r44598:b2408aca default
parent child Browse files
Show More
@@ -1,763 +1,763 b''
1 1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import hashlib
13 13 import json
14 14 import os
15 15 import re
16 16 import socket
17 17
18 18 from mercurial.i18n import _
19 19 from mercurial.pycompat import getattr
20 20
21 21 from mercurial import (
22 22 encoding,
23 23 error,
24 24 node,
25 25 pathutil,
26 26 pycompat,
27 27 url as urlmod,
28 28 util,
29 29 vfs as vfsmod,
30 30 worker,
31 31 )
32 32
33 33 from mercurial.utils import stringutil
34 34
35 35 from ..largefiles import lfutil
36 36
37 37 # 64 bytes for SHA256
38 38 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
39 39
40 40
41 41 class lfsvfs(vfsmod.vfs):
42 42 def join(self, path):
43 43 """split the path at first two characters, like: XX/XXXXX..."""
44 44 if not _lfsre.match(path):
45 45 raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
46 46 return super(lfsvfs, self).join(path[0:2], path[2:])
47 47
48 48 def walk(self, path=None, onerror=None):
49 49 """Yield (dirpath, [], oids) tuple for blobs under path
50 50
51 51 Oids only exist in the root of this vfs, so dirpath is always ''.
52 52 """
53 53 root = os.path.normpath(self.base)
54 54 # when dirpath == root, dirpath[prefixlen:] becomes empty
55 55 # because len(dirpath) < prefixlen.
56 56 prefixlen = len(pathutil.normasprefix(root))
57 57 oids = []
58 58
59 59 for dirpath, dirs, files in os.walk(
60 60 self.reljoin(self.base, path or b''), onerror=onerror
61 61 ):
62 62 dirpath = dirpath[prefixlen:]
63 63
64 64 # Silently skip unexpected files and directories
65 65 if len(dirpath) == 2:
66 66 oids.extend(
67 67 [dirpath + f for f in files if _lfsre.match(dirpath + f)]
68 68 )
69 69
70 70 yield (b'', [], oids)
71 71
72 72
73 73 class nullvfs(lfsvfs):
74 74 def __init__(self):
75 75 pass
76 76
77 77 def exists(self, oid):
78 78 return False
79 79
80 80 def read(self, oid):
81 81 # store.read() calls into here if the blob doesn't exist in its
82 82 # self.vfs. Raise the same error as a normal vfs when asked to read a
83 83 # file that doesn't exist. The only difference is the full file path
84 84 # isn't available in the error.
85 85 raise IOError(
86 86 errno.ENOENT,
87 87 pycompat.sysstr(b'%s: No such file or directory' % oid),
88 88 )
89 89
90 90 def walk(self, path=None, onerror=None):
91 91 return (b'', [], [])
92 92
93 93 def write(self, oid, data):
94 94 pass
95 95
96 96
97 97 class lfsuploadfile(object):
98 98 """a file-like object that supports __len__ and read.
99 99 """
100 100
101 101 def __init__(self, fp):
102 102 self._fp = fp
103 103 fp.seek(0, os.SEEK_END)
104 104 self._len = fp.tell()
105 105 fp.seek(0)
106 106
107 107 def __len__(self):
108 108 return self._len
109 109
110 110 def read(self, size):
111 111 if self._fp is None:
112 112 return b''
113 113 return self._fp.read(size)
114 114
115 115 def close(self):
116 116 if self._fp is not None:
117 117 self._fp.close()
118 118 self._fp = None
119 119
120 120
121 121 class local(object):
122 122 """Local blobstore for large file contents.
123 123
124 124 This blobstore is used both as a cache and as a staging area for large blobs
125 125 to be uploaded to the remote blobstore.
126 126 """
127 127
128 128 def __init__(self, repo):
129 129 fullpath = repo.svfs.join(b'lfs/objects')
130 130 self.vfs = lfsvfs(fullpath)
131 131
132 132 if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
133 133 self.cachevfs = nullvfs()
134 134 else:
135 135 usercache = lfutil._usercachedir(repo.ui, b'lfs')
136 136 self.cachevfs = lfsvfs(usercache)
137 137 self.ui = repo.ui
138 138
139 139 def open(self, oid):
140 140 """Open a read-only file descriptor to the named blob, in either the
141 141 usercache or the local store."""
142 142 # The usercache is the most likely place to hold the file. Commit will
143 143 # write to both it and the local store, as will anything that downloads
144 144 # the blobs. However, things like clone without an update won't
145 145 # populate the local store. For an init + push of a local clone,
146 146 # the usercache is the only place it _could_ be. If not present, the
147 147 # missing file msg here will indicate the local repo, not the usercache.
148 148 if self.cachevfs.exists(oid):
149 149 return self.cachevfs(oid, b'rb')
150 150
151 151 return self.vfs(oid, b'rb')
152 152
153 153 def download(self, oid, src, content_length):
154 154 """Read the blob from the remote source in chunks, verify the content,
155 155 and write to this local blobstore."""
156 156 sha256 = hashlib.sha256()
157 157 size = 0
158 158
159 159 with self.vfs(oid, b'wb', atomictemp=True) as fp:
160 160 for chunk in util.filechunkiter(src, size=1048576):
161 161 fp.write(chunk)
162 162 sha256.update(chunk)
163 163 size += len(chunk)
164 164
165 165 # If the server advertised a length longer than what we actually
166 166 # received, then we should expect that the server crashed while
167 167 # producing the response (but the server has no way of telling us
168 168 # that), and we really don't need to try to write the response to
169 169 # the localstore, because it's not going to match the expected.
170 170 if content_length is not None and int(content_length) != size:
171 171 msg = (
172 172 b"Response length (%s) does not match Content-Length "
173 173 b"header (%d): likely server-side crash"
174 174 )
175 175 raise LfsRemoteError(_(msg) % (size, int(content_length)))
176 176
177 177 realoid = node.hex(sha256.digest())
178 178 if realoid != oid:
179 179 raise LfsCorruptionError(
180 180 _(b'corrupt remote lfs object: %s') % oid
181 181 )
182 182
183 183 self._linktousercache(oid)
184 184
185 185 def write(self, oid, data):
186 186 """Write blob to local blobstore.
187 187
188 188 This should only be called from the filelog during a commit or similar.
189 189 As such, there is no need to verify the data. Imports from a remote
190 190 store must use ``download()`` instead."""
191 191 with self.vfs(oid, b'wb', atomictemp=True) as fp:
192 192 fp.write(data)
193 193
194 194 self._linktousercache(oid)
195 195
196 196 def linkfromusercache(self, oid):
197 197 """Link blobs found in the user cache into this store.
198 198
199 199 The server module needs to do this when it lets the client know not to
200 200 upload the blob, to ensure it is always available in this store.
201 201 Normally this is done implicitly when the client reads or writes the
202 202 blob, but that doesn't happen when the server tells the client that it
203 203 already has the blob.
204 204 """
205 205 if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
206 206 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
207 207 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
208 208
209 209 def _linktousercache(self, oid):
210 210 # XXX: should we verify the content of the cache, and hardlink back to
211 211 # the local store on success, but truncate, write and link on failure?
212 212 if not self.cachevfs.exists(oid) and not isinstance(
213 213 self.cachevfs, nullvfs
214 214 ):
215 215 self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
216 216 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
217 217
218 218 def read(self, oid, verify=True):
219 219 """Read blob from local blobstore."""
220 220 if not self.vfs.exists(oid):
221 221 blob = self._read(self.cachevfs, oid, verify)
222 222
223 223 # Even if revlog will verify the content, it needs to be verified
224 224 # now before making the hardlink to avoid propagating corrupt blobs.
225 225 # Don't abort if corruption is detected, because `hg verify` will
226 226 # give more useful info about the corruption- simply don't add the
227 227 # hardlink.
228 228 if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
229 229 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
230 230 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
231 231 else:
232 232 self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
233 233 blob = self._read(self.vfs, oid, verify)
234 234 return blob
235 235
236 236 def _read(self, vfs, oid, verify):
237 237 """Read blob (after verifying) from the given store"""
238 238 blob = vfs.read(oid)
239 239 if verify:
240 240 _verify(oid, blob)
241 241 return blob
242 242
243 243 def verify(self, oid):
244 244 """Indicate whether or not the hash of the underlying file matches its
245 245 name."""
246 246 sha256 = hashlib.sha256()
247 247
248 248 with self.open(oid) as fp:
249 249 for chunk in util.filechunkiter(fp, size=1048576):
250 250 sha256.update(chunk)
251 251
252 252 return oid == node.hex(sha256.digest())
253 253
254 254 def has(self, oid):
255 255 """Returns True if the local blobstore contains the requested blob,
256 256 False otherwise."""
257 257 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
258 258
259 259
260 260 def _urlerrorreason(urlerror):
261 261 '''Create a friendly message for the given URLError to be used in an
262 262 LfsRemoteError message.
263 263 '''
264 264 inst = urlerror
265 265
266 266 if isinstance(urlerror.reason, Exception):
267 267 inst = urlerror.reason
268 268
269 269 if util.safehasattr(inst, b'reason'):
270 270 try: # usually it is in the form (errno, strerror)
271 271 reason = inst.reason.args[1]
272 272 except (AttributeError, IndexError):
273 273 # it might be anything, for example a string
274 274 reason = inst.reason
275 275 if isinstance(reason, pycompat.unicode):
276 276 # SSLError of Python 2.7.9 contains a unicode
277 277 reason = encoding.unitolocal(reason)
278 278 return reason
279 279 elif getattr(inst, "strerror", None):
280 280 return encoding.strtolocal(inst.strerror)
281 281 else:
282 282 return stringutil.forcebytestr(urlerror)
283 283
284 284
285 285 class lfsauthhandler(util.urlreq.basehandler):
286 286 handler_order = 480 # Before HTTPDigestAuthHandler (== 490)
287 287
288 288 def http_error_401(self, req, fp, code, msg, headers):
289 289 """Enforces that any authentication performed is HTTP Basic
290 290 Authentication. No authentication is also acceptable.
291 291 """
292 292 authreq = headers.get('www-authenticate', None)
293 293 if authreq:
294 294 scheme = authreq.split()[0]
295 295
296 296 if scheme.lower() != 'basic':
297 297 msg = _(b'the server must support Basic Authentication')
298 298 raise util.urlerr.httperror(
299 299 req.get_full_url(),
300 300 code,
301 301 encoding.strfromlocal(msg),
302 302 headers,
303 303 fp,
304 304 )
305 305 return None
306 306
307 307
308 308 class _gitlfsremote(object):
309 309 def __init__(self, repo, url):
310 310 ui = repo.ui
311 311 self.ui = ui
312 312 baseurl, authinfo = url.authinfo()
313 313 self.baseurl = baseurl.rstrip(b'/')
314 314 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
315 315 if not useragent:
316 316 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
317 317 self.urlopener = urlmod.opener(ui, authinfo, useragent)
318 318 self.urlopener.add_handler(lfsauthhandler())
319 319 self.retry = ui.configint(b'lfs', b'retry')
320 320
321 321 def writebatch(self, pointers, fromstore):
322 322 """Batch upload from local to remote blobstore."""
323 323 self._batch(_deduplicate(pointers), fromstore, b'upload')
324 324
325 325 def readbatch(self, pointers, tostore):
326 326 """Batch download from remote to local blostore."""
327 327 self._batch(_deduplicate(pointers), tostore, b'download')
328 328
329 329 def _batchrequest(self, pointers, action):
330 330 """Get metadata about objects pointed by pointers for given action
331 331
332 332 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
333 333 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
334 334 """
335 335 objects = [
336 336 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
337 337 for p in pointers
338 338 ]
339 339 requestdata = pycompat.bytesurl(
340 340 json.dumps(
341 341 {'objects': objects, 'operation': pycompat.strurl(action),}
342 342 )
343 343 )
344 344 url = b'%s/objects/batch' % self.baseurl
345 345 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
346 346 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
347 347 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
348 348 try:
349 349 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
350 350 rawjson = rsp.read()
351 351 except util.urlerr.httperror as ex:
352 352 hints = {
353 353 400: _(
354 354 b'check that lfs serving is enabled on %s and "%s" is '
355 355 b'supported'
356 356 )
357 357 % (self.baseurl, action),
358 358 404: _(b'the "lfs.url" config may be used to override %s')
359 359 % self.baseurl,
360 360 }
361 361 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
362 362 raise LfsRemoteError(
363 363 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
364 364 hint=hint,
365 365 )
366 366 except util.urlerr.urlerror as ex:
367 367 hint = (
368 368 _(b'the "lfs.url" config may be used to override %s')
369 369 % self.baseurl
370 370 )
371 371 raise LfsRemoteError(
372 372 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
373 373 )
374 374 try:
375 375 response = pycompat.json_loads(rawjson)
376 376 except ValueError:
377 377 raise LfsRemoteError(
378 378 _(b'LFS server returns invalid JSON: %s')
379 379 % rawjson.encode("utf-8")
380 380 )
381 381
382 382 if self.ui.debugflag:
383 383 self.ui.debug(b'Status: %d\n' % rsp.status)
384 384 # lfs-test-server and hg serve return headers in different order
385 385 headers = pycompat.bytestr(rsp.info()).strip()
386 386 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
387 387
388 388 if 'objects' in response:
389 389 response['objects'] = sorted(
390 390 response['objects'], key=lambda p: p['oid']
391 391 )
392 392 self.ui.debug(
393 393 b'%s\n'
394 394 % pycompat.bytesurl(
395 395 json.dumps(
396 396 response,
397 397 indent=2,
398 398 separators=('', ': '),
399 399 sort_keys=True,
400 400 )
401 401 )
402 402 )
403 403
404 404 def encodestr(x):
405 405 if isinstance(x, pycompat.unicode):
406 406 return x.encode('utf-8')
407 407 return x
408 408
409 409 return pycompat.rapply(encodestr, response)
410 410
411 411 def _checkforservererror(self, pointers, responses, action):
412 412 """Scans errors from objects
413 413
414 414 Raises LfsRemoteError if any objects have an error"""
415 415 for response in responses:
416 416 # The server should return 404 when objects cannot be found. Some
417 417 # server implementation (ex. lfs-test-server) does not set "error"
418 418 # but just removes "download" from "actions". Treat that case
419 419 # as the same as 404 error.
420 420 if b'error' not in response:
421 421 if action == b'download' and action not in response.get(
422 422 b'actions', []
423 423 ):
424 424 code = 404
425 425 else:
426 426 continue
427 427 else:
428 428 # An error dict without a code doesn't make much sense, so
429 429 # treat as a server error.
430 430 code = response.get(b'error').get(b'code', 500)
431 431
432 432 ptrmap = {p.oid(): p for p in pointers}
433 433 p = ptrmap.get(response[b'oid'], None)
434 434 if p:
435 435 filename = getattr(p, 'filename', b'unknown')
436 436 errors = {
437 437 404: b'The object does not exist',
438 438 410: b'The object was removed by the owner',
439 439 422: b'Validation error',
440 440 500: b'Internal server error',
441 441 }
442 442 msg = errors.get(code, b'status code %d' % code)
443 443 raise LfsRemoteError(
444 444 _(b'LFS server error for "%s": %s') % (filename, msg)
445 445 )
446 446 else:
447 447 raise LfsRemoteError(
448 448 _(b'LFS server error. Unsolicited response for oid %s')
449 449 % response[b'oid']
450 450 )
451 451
452 452 def _extractobjects(self, response, pointers, action):
453 453 """extract objects from response of the batch API
454 454
455 455 response: parsed JSON object returned by batch API
456 456 return response['objects'] filtered by action
457 457 raise if any object has an error
458 458 """
459 459 # Scan errors from objects - fail early
460 460 objects = response.get(b'objects', [])
461 461 self._checkforservererror(pointers, objects, action)
462 462
463 463 # Filter objects with given action. Practically, this skips uploading
464 464 # objects which exist in the server.
465 465 filteredobjects = [
466 466 o for o in objects if action in o.get(b'actions', [])
467 467 ]
468 468
469 469 return filteredobjects
470 470
471 471 def _basictransfer(self, obj, action, localstore):
472 472 """Download or upload a single object using basic transfer protocol
473 473
474 474 obj: dict, an object description returned by batch API
475 475 action: string, one of ['upload', 'download']
476 476 localstore: blobstore.local
477 477
478 478 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
479 479 basic-transfers.md
480 480 """
481 481 oid = obj[b'oid']
482 482 href = obj[b'actions'][action].get(b'href')
483 483 headers = obj[b'actions'][action].get(b'header', {}).items()
484 484
485 485 request = util.urlreq.request(pycompat.strurl(href))
486 486 if action == b'upload':
487 487 # If uploading blobs, read data from local blobstore.
488 488 if not localstore.verify(oid):
489 489 raise error.Abort(
490 490 _(b'detected corrupt lfs object: %s') % oid,
491 491 hint=_(b'run hg verify'),
492 492 )
493 493 request.data = lfsuploadfile(localstore.open(oid))
494 request.get_method = lambda: r'PUT'
494 request.get_method = lambda: 'PUT'
495 495 request.add_header('Content-Type', 'application/octet-stream')
496 496 request.add_header('Content-Length', len(request.data))
497 497
498 498 for k, v in headers:
499 499 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
500 500
501 501 try:
502 502 with contextlib.closing(self.urlopener.open(request)) as res:
503 503 contentlength = res.info().get(b"content-length")
504 504 ui = self.ui # Shorten debug lines
505 505 if self.ui.debugflag:
506 506 ui.debug(b'Status: %d\n' % res.status)
507 507 # lfs-test-server and hg serve return headers in different
508 508 # order
509 509 headers = pycompat.bytestr(res.info()).strip()
510 510 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
511 511
512 512 if action == b'download':
513 513 # If downloading blobs, store downloaded data to local
514 514 # blobstore
515 515 localstore.download(oid, res, contentlength)
516 516 else:
517 517 blocks = []
518 518 while True:
519 519 data = res.read(1048576)
520 520 if not data:
521 521 break
522 522 blocks.append(data)
523 523
524 524 response = b"".join(blocks)
525 525 if response:
526 526 ui.debug(b'lfs %s response: %s' % (action, response))
527 527 except util.urlerr.httperror as ex:
528 528 if self.ui.debugflag:
529 529 self.ui.debug(
530 530 b'%s: %s\n' % (oid, ex.read())
531 531 ) # XXX: also bytes?
532 532 raise LfsRemoteError(
533 533 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
534 534 % (stringutil.forcebytestr(ex), oid, action)
535 535 )
536 536 except util.urlerr.urlerror as ex:
537 537 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
538 538 util.urllibcompat.getfullurl(request)
539 539 )
540 540 raise LfsRemoteError(
541 541 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
542 542 )
543 543 finally:
544 544 if request.data:
545 545 request.data.close()
546 546
547 547 def _batch(self, pointers, localstore, action):
548 548 if action not in [b'upload', b'download']:
549 549 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
550 550
551 551 response = self._batchrequest(pointers, action)
552 552 objects = self._extractobjects(response, pointers, action)
553 553 total = sum(x.get(b'size', 0) for x in objects)
554 554 sizes = {}
555 555 for obj in objects:
556 556 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
557 557 topic = {
558 558 b'upload': _(b'lfs uploading'),
559 559 b'download': _(b'lfs downloading'),
560 560 }[action]
561 561 if len(objects) > 1:
562 562 self.ui.note(
563 563 _(b'lfs: need to transfer %d objects (%s)\n')
564 564 % (len(objects), util.bytecount(total))
565 565 )
566 566
567 567 def transfer(chunk):
568 568 for obj in chunk:
569 569 objsize = obj.get(b'size', 0)
570 570 if self.ui.verbose:
571 571 if action == b'download':
572 572 msg = _(b'lfs: downloading %s (%s)\n')
573 573 elif action == b'upload':
574 574 msg = _(b'lfs: uploading %s (%s)\n')
575 575 self.ui.note(
576 576 msg % (obj.get(b'oid'), util.bytecount(objsize))
577 577 )
578 578 retry = self.retry
579 579 while True:
580 580 try:
581 581 self._basictransfer(obj, action, localstore)
582 582 yield 1, obj.get(b'oid')
583 583 break
584 584 except socket.error as ex:
585 585 if retry > 0:
586 586 self.ui.note(
587 587 _(b'lfs: failed: %r (remaining retry %d)\n')
588 588 % (stringutil.forcebytestr(ex), retry)
589 589 )
590 590 retry -= 1
591 591 continue
592 592 raise
593 593
594 594 # Until https multiplexing gets sorted out
595 595 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
596 596 oids = worker.worker(
597 597 self.ui,
598 598 0.1,
599 599 transfer,
600 600 (),
601 601 sorted(objects, key=lambda o: o.get(b'oid')),
602 602 )
603 603 else:
604 604 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
605 605
606 606 with self.ui.makeprogress(
607 607 topic, unit=_(b"bytes"), total=total
608 608 ) as progress:
609 609 progress.update(0)
610 610 processed = 0
611 611 blobs = 0
612 612 for _one, oid in oids:
613 613 processed += sizes[oid]
614 614 blobs += 1
615 615 progress.update(processed)
616 616 self.ui.note(_(b'lfs: processed: %s\n') % oid)
617 617
618 618 if blobs > 0:
619 619 if action == b'upload':
620 620 self.ui.status(
621 621 _(b'lfs: uploaded %d files (%s)\n')
622 622 % (blobs, util.bytecount(processed))
623 623 )
624 624 elif action == b'download':
625 625 self.ui.status(
626 626 _(b'lfs: downloaded %d files (%s)\n')
627 627 % (blobs, util.bytecount(processed))
628 628 )
629 629
630 630 def __del__(self):
631 631 # copied from mercurial/httppeer.py
632 632 urlopener = getattr(self, 'urlopener', None)
633 633 if urlopener:
634 634 for h in urlopener.handlers:
635 635 h.close()
636 636 getattr(h, "close_all", lambda: None)()
637 637
638 638
639 639 class _dummyremote(object):
640 640 """Dummy store storing blobs to temp directory."""
641 641
642 642 def __init__(self, repo, url):
643 643 fullpath = repo.vfs.join(b'lfs', url.path)
644 644 self.vfs = lfsvfs(fullpath)
645 645
646 646 def writebatch(self, pointers, fromstore):
647 647 for p in _deduplicate(pointers):
648 648 content = fromstore.read(p.oid(), verify=True)
649 649 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
650 650 fp.write(content)
651 651
652 652 def readbatch(self, pointers, tostore):
653 653 for p in _deduplicate(pointers):
654 654 with self.vfs(p.oid(), b'rb') as fp:
655 655 tostore.download(p.oid(), fp, None)
656 656
657 657
658 658 class _nullremote(object):
659 659 """Null store storing blobs to /dev/null."""
660 660
661 661 def __init__(self, repo, url):
662 662 pass
663 663
664 664 def writebatch(self, pointers, fromstore):
665 665 pass
666 666
667 667 def readbatch(self, pointers, tostore):
668 668 pass
669 669
670 670
671 671 class _promptremote(object):
672 672 """Prompt user to set lfs.url when accessed."""
673 673
674 674 def __init__(self, repo, url):
675 675 pass
676 676
677 677 def writebatch(self, pointers, fromstore, ui=None):
678 678 self._prompt()
679 679
680 680 def readbatch(self, pointers, tostore, ui=None):
681 681 self._prompt()
682 682
683 683 def _prompt(self):
684 684 raise error.Abort(_(b'lfs.url needs to be configured'))
685 685
686 686
687 687 _storemap = {
688 688 b'https': _gitlfsremote,
689 689 b'http': _gitlfsremote,
690 690 b'file': _dummyremote,
691 691 b'null': _nullremote,
692 692 None: _promptremote,
693 693 }
694 694
695 695
696 696 def _deduplicate(pointers):
697 697 """Remove any duplicate oids that exist in the list"""
698 698 reduced = util.sortdict()
699 699 for p in pointers:
700 700 reduced[p.oid()] = p
701 701 return reduced.values()
702 702
703 703
704 704 def _verify(oid, content):
705 705 realoid = node.hex(hashlib.sha256(content).digest())
706 706 if realoid != oid:
707 707 raise LfsCorruptionError(
708 708 _(b'detected corrupt lfs object: %s') % oid,
709 709 hint=_(b'run hg verify'),
710 710 )
711 711
712 712
713 713 def remote(repo, remote=None):
714 714 """remotestore factory. return a store in _storemap depending on config
715 715
716 716 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
717 717 infer the endpoint, based on the remote repository using the same path
718 718 adjustments as git. As an extension, 'http' is supported as well so that
719 719 ``hg serve`` works out of the box.
720 720
721 721 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
722 722 """
723 723 lfsurl = repo.ui.config(b'lfs', b'url')
724 724 url = util.url(lfsurl or b'')
725 725 if lfsurl is None:
726 726 if remote:
727 727 path = remote
728 728 elif util.safehasattr(repo, b'_subtoppath'):
729 729 # The pull command sets this during the optional update phase, which
730 730 # tells exactly where the pull originated, whether 'paths.default'
731 731 # or explicit.
732 732 path = repo._subtoppath
733 733 else:
734 734 # TODO: investigate 'paths.remote:lfsurl' style path customization,
735 735 # and fall back to inferring from 'paths.remote' if unspecified.
736 736 path = repo.ui.config(b'paths', b'default') or b''
737 737
738 738 defaulturl = util.url(path)
739 739
740 740 # TODO: support local paths as well.
741 741 # TODO: consider the ssh -> https transformation that git applies
742 742 if defaulturl.scheme in (b'http', b'https'):
743 743 if defaulturl.path and defaulturl.path[:-1] != b'/':
744 744 defaulturl.path += b'/'
745 745 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
746 746
747 747 url = util.url(bytes(defaulturl))
748 748 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
749 749
750 750 scheme = url.scheme
751 751 if scheme not in _storemap:
752 752 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
753 753 return _storemap[scheme](repo, url)
754 754
755 755
756 756 class LfsRemoteError(error.StorageError):
757 757 pass
758 758
759 759
760 760 class LfsCorruptionError(error.Abort):
761 761 """Raised when a corrupt blob is detected, aborting an operation
762 762
763 763 It exists to allow specialized handling on the server side."""
General Comments 0
You need to be logged in to leave comments. Login now