##// END OF EJS Templates
lfs: avoid quadratic performance in processing server responses...
Matt Harbison -
r44545:ffac09da default
parent child Browse files
Show More
@@ -1,763 +1,765 b''
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import json
13 import json
14 import os
14 import os
15 import re
15 import re
16 import socket
16 import socket
17
17
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19 from mercurial.pycompat import getattr
19 from mercurial.pycompat import getattr
20
20
21 from mercurial import (
21 from mercurial import (
22 encoding,
22 encoding,
23 error,
23 error,
24 node,
24 node,
25 pathutil,
25 pathutil,
26 pycompat,
26 pycompat,
27 url as urlmod,
27 url as urlmod,
28 util,
28 util,
29 vfs as vfsmod,
29 vfs as vfsmod,
30 worker,
30 worker,
31 )
31 )
32
32
33 from mercurial.utils import stringutil
33 from mercurial.utils import stringutil
34
34
35 from ..largefiles import lfutil
35 from ..largefiles import lfutil
36
36
37 # 64 bytes for SHA256
37 # 64 bytes for SHA256
38 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
38 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
39
39
40
40
class lfsvfs(vfsmod.vfs):
    """vfs storing blobs keyed by sha256 oid, fanned out as XX/XXXX..."""

    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[0:2], path[2:])

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        root = os.path.normpath(self.base)
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        oids = []

        start = self.reljoin(self.base, path or b'')
        for dirpath, dirs, files in os.walk(start, onerror=onerror):
            dirpath = dirpath[prefixlen:]

            # Only the two-character fanout directories hold blobs; anything
            # else (including unexpected files) is silently skipped.
            if len(dirpath) == 2:
                oids.extend(
                    dirpath + f for f in files if _lfsre.match(dirpath + f)
                )

        yield (b'', [], oids)
71
71
72
72
class nullvfs(lfsvfs):
    """A blob store that holds nothing — used when the usercache is disabled."""

    def __init__(self):
        # Deliberately skip lfsvfs.__init__: there is no backing directory.
        pass

    def exists(self, oid):
        return False

    def read(self, oid):
        # store.read() falls through to here when the blob is missing from
        # its self.vfs.  Mirror the error a normal vfs raises for a missing
        # file; the only difference is that the full path is unavailable.
        raise IOError(
            errno.ENOENT,
            pycompat.sysstr(b'%s: No such file or directory' % oid),
        )

    def walk(self, path=None, onerror=None):
        return (b'', [], [])

    def write(self, oid, data):
        pass
95
95
96
96
class filewithprogress(object):
    """a file-like object that supports __len__ and read.

    Useful to provide progress information for how many bytes are read.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback  # func(readsize)
        # Measure the total size once, then rewind to the start.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        if self._fp is None:
            return b''
        data = self._fp.read(size)
        if not data:
            # EOF: release the underlying file immediately.
            self._fp.close()
            self._fp = None
        elif self._callback:
            self._callback(len(data))
        return data
124
124
125
125
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        fullpath = repo.svfs.join(b'lfs/objects')
        self.vfs = lfsvfs(fullpath)

        if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
            self.cachevfs = nullvfs()
        else:
            usercache = lfutil._usercachedir(repo.ui, b'lfs')
            self.cachevfs = lfsvfs(usercache)
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file: commit
        # writes to both it and the local store, as does anything that
        # downloads blobs.  But e.g. a clone without an update won't populate
        # the local store, and for an init + push of a local clone the
        # usercache is the only place the blob _could_ be.  If not present,
        # the missing-file message names the local repo, not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, b'rb')
        return self.vfs(oid, b'rb')

    def download(self, oid, src, content_length):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore."""
        sha256 = hashlib.sha256()
        size = 0

        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                sha256.update(chunk)
                size += len(chunk)

            # A server that crashes mid-response has no way to signal it
            # other than sending fewer bytes than Content-Length advertised.
            # Raising inside the `with` discards the atomictemp file, so the
            # truncated data never reaches the local store.
            if content_length is not None and int(content_length) != size:
                msg = (
                    b"Response length (%s) does not match Content-Length "
                    b"header (%d): likely server-side crash"
                )
                raise LfsRemoteError(_(msg) % (size, int(content_length)))

            realoid = node.hex(sha256.digest())
            if realoid != oid:
                raise LfsCorruptionError(
                    _(b'corrupt remote lfs object: %s') % oid
                )

        self._linktousercache(oid)

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or similar.
        As such, there is no need to verify the data.  Imports from a remote
        store must use ``download()`` instead."""
        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            fp.write(data)

        self._linktousercache(oid)

    def linkfromusercache(self, oid):
        """Link blobs found in the user cache into this store.

        The server module needs to do this when it lets the client know not to
        upload the blob, to ensure it is always available in this store.
        Normally this is done implicitly when the client reads or writes the
        blob, but that doesn't happen when the server tells the client that it
        already has the blob.
        """
        if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
            self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))

    def _linktousercache(self, oid):
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid) and not isinstance(
            self.cachevfs, nullvfs
        ):
            self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if self.vfs.exists(oid):
            self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
            return self._read(self.vfs, oid, verify)

        blob = self._read(self.cachevfs, oid, verify)

        # Even if revlog will verify the content, it needs to be verified
        # now before making the hardlink to avoid propagating corrupt blobs.
        # Don't abort if corruption is detected, because `hg verify` will
        # give more useful info about the corruption- simply don't add the
        # hardlink.  (When verify is True, _read already raised on mismatch.)
        if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
            self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def verify(self, oid):
        """Indicate whether or not the hash of the underlying file matches its
        name."""
        sha256 = hashlib.sha256()

        with self.open(oid) as fp:
            for chunk in util.filechunkiter(fp, size=1048576):
                sha256.update(chunk)

        return oid == node.hex(sha256.digest())

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
263
263
264
264
def _urlerrorreason(urlerror):
    '''Create a friendly message for the given URLError to be used in an
    LfsRemoteError message.
    '''
    inst = urlerror

    # URLError often wraps the real exception in .reason; prefer that.
    if isinstance(urlerror.reason, Exception):
        inst = urlerror.reason

    if util.safehasattr(inst, b'reason'):
        try:
            # usually of the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # may be anything, e.g. a plain string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        return reason

    if getattr(inst, "strerror", None):
        return encoding.strtolocal(inst.strerror)

    return stringutil.forcebytestr(urlerror)
288
288
289
289
class lfsauthhandler(util.urlreq.basehandler):
    handler_order = 480  # Before HTTPDigestAuthHandler (== 490)

    def http_error_401(self, req, fp, code, msg, headers):
        """Enforces that any authentication performed is HTTP Basic
        Authentication.  No authentication is also acceptable.
        """
        authreq = headers.get('www-authenticate', None)
        if not authreq:
            return None

        scheme = authreq.split()[0]
        if scheme.lower() != 'basic':
            # Anything other than Basic (e.g. Digest) is rejected outright.
            msg = _(b'the server must support Basic Authentication')
            raise util.urlerr.httperror(
                req.get_full_url(),
                code,
                encoding.strfromlocal(msg),
                headers,
                fp,
            )
        return None
311
311
312
312
313 class _gitlfsremote(object):
313 class _gitlfsremote(object):
314 def __init__(self, repo, url):
314 def __init__(self, repo, url):
315 ui = repo.ui
315 ui = repo.ui
316 self.ui = ui
316 self.ui = ui
317 baseurl, authinfo = url.authinfo()
317 baseurl, authinfo = url.authinfo()
318 self.baseurl = baseurl.rstrip(b'/')
318 self.baseurl = baseurl.rstrip(b'/')
319 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
319 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
320 if not useragent:
320 if not useragent:
321 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
321 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
322 self.urlopener = urlmod.opener(ui, authinfo, useragent)
322 self.urlopener = urlmod.opener(ui, authinfo, useragent)
323 self.urlopener.add_handler(lfsauthhandler())
323 self.urlopener.add_handler(lfsauthhandler())
324 self.retry = ui.configint(b'lfs', b'retry')
324 self.retry = ui.configint(b'lfs', b'retry')
325
325
326 def writebatch(self, pointers, fromstore):
326 def writebatch(self, pointers, fromstore):
327 """Batch upload from local to remote blobstore."""
327 """Batch upload from local to remote blobstore."""
328 self._batch(_deduplicate(pointers), fromstore, b'upload')
328 self._batch(_deduplicate(pointers), fromstore, b'upload')
329
329
330 def readbatch(self, pointers, tostore):
330 def readbatch(self, pointers, tostore):
331 """Batch download from remote to local blostore."""
331 """Batch download from remote to local blostore."""
332 self._batch(_deduplicate(pointers), tostore, b'download')
332 self._batch(_deduplicate(pointers), tostore, b'download')
333
333
334 def _batchrequest(self, pointers, action):
334 def _batchrequest(self, pointers, action):
335 """Get metadata about objects pointed by pointers for given action
335 """Get metadata about objects pointed by pointers for given action
336
336
337 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
337 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
338 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
338 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
339 """
339 """
340 objects = [
340 objects = [
341 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
341 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
342 for p in pointers
342 for p in pointers
343 ]
343 ]
344 requestdata = pycompat.bytesurl(
344 requestdata = pycompat.bytesurl(
345 json.dumps(
345 json.dumps(
346 {'objects': objects, 'operation': pycompat.strurl(action),}
346 {'objects': objects, 'operation': pycompat.strurl(action),}
347 )
347 )
348 )
348 )
349 url = b'%s/objects/batch' % self.baseurl
349 url = b'%s/objects/batch' % self.baseurl
350 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
350 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
351 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
351 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
352 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
352 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
353 try:
353 try:
354 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
354 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
355 rawjson = rsp.read()
355 rawjson = rsp.read()
356 except util.urlerr.httperror as ex:
356 except util.urlerr.httperror as ex:
357 hints = {
357 hints = {
358 400: _(
358 400: _(
359 b'check that lfs serving is enabled on %s and "%s" is '
359 b'check that lfs serving is enabled on %s and "%s" is '
360 b'supported'
360 b'supported'
361 )
361 )
362 % (self.baseurl, action),
362 % (self.baseurl, action),
363 404: _(b'the "lfs.url" config may be used to override %s')
363 404: _(b'the "lfs.url" config may be used to override %s')
364 % self.baseurl,
364 % self.baseurl,
365 }
365 }
366 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
366 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
367 raise LfsRemoteError(
367 raise LfsRemoteError(
368 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
368 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
369 hint=hint,
369 hint=hint,
370 )
370 )
371 except util.urlerr.urlerror as ex:
371 except util.urlerr.urlerror as ex:
372 hint = (
372 hint = (
373 _(b'the "lfs.url" config may be used to override %s')
373 _(b'the "lfs.url" config may be used to override %s')
374 % self.baseurl
374 % self.baseurl
375 )
375 )
376 raise LfsRemoteError(
376 raise LfsRemoteError(
377 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
377 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
378 )
378 )
379 try:
379 try:
380 response = pycompat.json_loads(rawjson)
380 response = pycompat.json_loads(rawjson)
381 except ValueError:
381 except ValueError:
382 raise LfsRemoteError(
382 raise LfsRemoteError(
383 _(b'LFS server returns invalid JSON: %s')
383 _(b'LFS server returns invalid JSON: %s')
384 % rawjson.encode("utf-8")
384 % rawjson.encode("utf-8")
385 )
385 )
386
386
387 if self.ui.debugflag:
387 if self.ui.debugflag:
388 self.ui.debug(b'Status: %d\n' % rsp.status)
388 self.ui.debug(b'Status: %d\n' % rsp.status)
389 # lfs-test-server and hg serve return headers in different order
389 # lfs-test-server and hg serve return headers in different order
390 headers = pycompat.bytestr(rsp.info()).strip()
390 headers = pycompat.bytestr(rsp.info()).strip()
391 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
391 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
392
392
393 if 'objects' in response:
393 if 'objects' in response:
394 response['objects'] = sorted(
394 response['objects'] = sorted(
395 response['objects'], key=lambda p: p['oid']
395 response['objects'], key=lambda p: p['oid']
396 )
396 )
397 self.ui.debug(
397 self.ui.debug(
398 b'%s\n'
398 b'%s\n'
399 % pycompat.bytesurl(
399 % pycompat.bytesurl(
400 json.dumps(
400 json.dumps(
401 response,
401 response,
402 indent=2,
402 indent=2,
403 separators=('', ': '),
403 separators=('', ': '),
404 sort_keys=True,
404 sort_keys=True,
405 )
405 )
406 )
406 )
407 )
407 )
408
408
409 def encodestr(x):
409 def encodestr(x):
410 if isinstance(x, pycompat.unicode):
410 if isinstance(x, pycompat.unicode):
411 return x.encode('utf-8')
411 return x.encode('utf-8')
412 return x
412 return x
413
413
414 return pycompat.rapply(encodestr, response)
414 return pycompat.rapply(encodestr, response)
415
415
416 def _checkforservererror(self, pointers, responses, action):
416 def _checkforservererror(self, pointers, responses, action):
417 """Scans errors from objects
417 """Scans errors from objects
418
418
419 Raises LfsRemoteError if any objects have an error"""
419 Raises LfsRemoteError if any objects have an error"""
420 for response in responses:
420 for response in responses:
421 # The server should return 404 when objects cannot be found. Some
421 # The server should return 404 when objects cannot be found. Some
422 # server implementation (ex. lfs-test-server) does not set "error"
422 # server implementation (ex. lfs-test-server) does not set "error"
423 # but just removes "download" from "actions". Treat that case
423 # but just removes "download" from "actions". Treat that case
424 # as the same as 404 error.
424 # as the same as 404 error.
425 if b'error' not in response:
425 if b'error' not in response:
426 if action == b'download' and action not in response.get(
426 if action == b'download' and action not in response.get(
427 b'actions', []
427 b'actions', []
428 ):
428 ):
429 code = 404
429 code = 404
430 else:
430 else:
431 continue
431 continue
432 else:
432 else:
433 # An error dict without a code doesn't make much sense, so
433 # An error dict without a code doesn't make much sense, so
434 # treat as a server error.
434 # treat as a server error.
435 code = response.get(b'error').get(b'code', 500)
435 code = response.get(b'error').get(b'code', 500)
436
436
437 ptrmap = {p.oid(): p for p in pointers}
437 ptrmap = {p.oid(): p for p in pointers}
438 p = ptrmap.get(response[b'oid'], None)
438 p = ptrmap.get(response[b'oid'], None)
439 if p:
439 if p:
440 filename = getattr(p, 'filename', b'unknown')
440 filename = getattr(p, 'filename', b'unknown')
441 errors = {
441 errors = {
442 404: b'The object does not exist',
442 404: b'The object does not exist',
443 410: b'The object was removed by the owner',
443 410: b'The object was removed by the owner',
444 422: b'Validation error',
444 422: b'Validation error',
445 500: b'Internal server error',
445 500: b'Internal server error',
446 }
446 }
447 msg = errors.get(code, b'status code %d' % code)
447 msg = errors.get(code, b'status code %d' % code)
448 raise LfsRemoteError(
448 raise LfsRemoteError(
449 _(b'LFS server error for "%s": %s') % (filename, msg)
449 _(b'LFS server error for "%s": %s') % (filename, msg)
450 )
450 )
451 else:
451 else:
452 raise LfsRemoteError(
452 raise LfsRemoteError(
453 _(b'LFS server error. Unsolicited response for oid %s')
453 _(b'LFS server error. Unsolicited response for oid %s')
454 % response[b'oid']
454 % response[b'oid']
455 )
455 )
456
456
457 def _extractobjects(self, response, pointers, action):
457 def _extractobjects(self, response, pointers, action):
458 """extract objects from response of the batch API
458 """extract objects from response of the batch API
459
459
460 response: parsed JSON object returned by batch API
460 response: parsed JSON object returned by batch API
461 return response['objects'] filtered by action
461 return response['objects'] filtered by action
462 raise if any object has an error
462 raise if any object has an error
463 """
463 """
464 # Scan errors from objects - fail early
464 # Scan errors from objects - fail early
465 objects = response.get(b'objects', [])
465 objects = response.get(b'objects', [])
466 self._checkforservererror(pointers, objects, action)
466 self._checkforservererror(pointers, objects, action)
467
467
468 # Filter objects with given action. Practically, this skips uploading
468 # Filter objects with given action. Practically, this skips uploading
469 # objects which exist in the server.
469 # objects which exist in the server.
470 filteredobjects = [
470 filteredobjects = [
471 o for o in objects if action in o.get(b'actions', [])
471 o for o in objects if action in o.get(b'actions', [])
472 ]
472 ]
473
473
474 return filteredobjects
474 return filteredobjects
475
475
476 def _basictransfer(self, obj, action, localstore):
476 def _basictransfer(self, obj, action, localstore):
477 """Download or upload a single object using basic transfer protocol
477 """Download or upload a single object using basic transfer protocol
478
478
479 obj: dict, an object description returned by batch API
479 obj: dict, an object description returned by batch API
480 action: string, one of ['upload', 'download']
480 action: string, one of ['upload', 'download']
481 localstore: blobstore.local
481 localstore: blobstore.local
482
482
483 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
483 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
484 basic-transfers.md
484 basic-transfers.md
485 """
485 """
486 oid = obj[b'oid']
486 oid = obj[b'oid']
487 href = obj[b'actions'][action].get(b'href')
487 href = obj[b'actions'][action].get(b'href')
488 headers = obj[b'actions'][action].get(b'header', {}).items()
488 headers = obj[b'actions'][action].get(b'header', {}).items()
489
489
490 request = util.urlreq.request(pycompat.strurl(href))
490 request = util.urlreq.request(pycompat.strurl(href))
491 if action == b'upload':
491 if action == b'upload':
492 # If uploading blobs, read data from local blobstore.
492 # If uploading blobs, read data from local blobstore.
493 if not localstore.verify(oid):
493 if not localstore.verify(oid):
494 raise error.Abort(
494 raise error.Abort(
495 _(b'detected corrupt lfs object: %s') % oid,
495 _(b'detected corrupt lfs object: %s') % oid,
496 hint=_(b'run hg verify'),
496 hint=_(b'run hg verify'),
497 )
497 )
498 request.data = filewithprogress(localstore.open(oid), None)
498 request.data = filewithprogress(localstore.open(oid), None)
499 request.get_method = lambda: r'PUT'
499 request.get_method = lambda: r'PUT'
500 request.add_header('Content-Type', 'application/octet-stream')
500 request.add_header('Content-Type', 'application/octet-stream')
501 request.add_header('Content-Length', len(request.data))
501 request.add_header('Content-Length', len(request.data))
502
502
503 for k, v in headers:
503 for k, v in headers:
504 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
504 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
505
505
506 response = b''
507 try:
506 try:
508 with contextlib.closing(self.urlopener.open(request)) as res:
507 with contextlib.closing(self.urlopener.open(request)) as res:
509 contentlength = res.info().get(b"content-length")
508 contentlength = res.info().get(b"content-length")
510 ui = self.ui # Shorten debug lines
509 ui = self.ui # Shorten debug lines
511 if self.ui.debugflag:
510 if self.ui.debugflag:
512 ui.debug(b'Status: %d\n' % res.status)
511 ui.debug(b'Status: %d\n' % res.status)
513 # lfs-test-server and hg serve return headers in different
512 # lfs-test-server and hg serve return headers in different
514 # order
513 # order
515 headers = pycompat.bytestr(res.info()).strip()
514 headers = pycompat.bytestr(res.info()).strip()
516 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
515 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
517
516
518 if action == b'download':
517 if action == b'download':
519 # If downloading blobs, store downloaded data to local
518 # If downloading blobs, store downloaded data to local
520 # blobstore
519 # blobstore
521 localstore.download(oid, res, contentlength)
520 localstore.download(oid, res, contentlength)
522 else:
521 else:
522 blocks = []
523 while True:
523 while True:
524 data = res.read(1048576)
524 data = res.read(1048576)
525 if not data:
525 if not data:
526 break
526 break
527 response += data
527 blocks.append(data)
528
529 response = b"".join(blocks)
528 if response:
530 if response:
529 ui.debug(b'lfs %s response: %s' % (action, response))
531 ui.debug(b'lfs %s response: %s' % (action, response))
530 except util.urlerr.httperror as ex:
532 except util.urlerr.httperror as ex:
531 if self.ui.debugflag:
533 if self.ui.debugflag:
532 self.ui.debug(
534 self.ui.debug(
533 b'%s: %s\n' % (oid, ex.read())
535 b'%s: %s\n' % (oid, ex.read())
534 ) # XXX: also bytes?
536 ) # XXX: also bytes?
535 raise LfsRemoteError(
537 raise LfsRemoteError(
536 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
538 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
537 % (stringutil.forcebytestr(ex), oid, action)
539 % (stringutil.forcebytestr(ex), oid, action)
538 )
540 )
539 except util.urlerr.urlerror as ex:
541 except util.urlerr.urlerror as ex:
540 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
542 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
541 util.urllibcompat.getfullurl(request)
543 util.urllibcompat.getfullurl(request)
542 )
544 )
543 raise LfsRemoteError(
545 raise LfsRemoteError(
544 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
546 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
545 )
547 )
546
548
547 def _batch(self, pointers, localstore, action):
549 def _batch(self, pointers, localstore, action):
548 if action not in [b'upload', b'download']:
550 if action not in [b'upload', b'download']:
549 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
551 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
550
552
551 response = self._batchrequest(pointers, action)
553 response = self._batchrequest(pointers, action)
552 objects = self._extractobjects(response, pointers, action)
554 objects = self._extractobjects(response, pointers, action)
553 total = sum(x.get(b'size', 0) for x in objects)
555 total = sum(x.get(b'size', 0) for x in objects)
554 sizes = {}
556 sizes = {}
555 for obj in objects:
557 for obj in objects:
556 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
558 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
557 topic = {
559 topic = {
558 b'upload': _(b'lfs uploading'),
560 b'upload': _(b'lfs uploading'),
559 b'download': _(b'lfs downloading'),
561 b'download': _(b'lfs downloading'),
560 }[action]
562 }[action]
561 if len(objects) > 1:
563 if len(objects) > 1:
562 self.ui.note(
564 self.ui.note(
563 _(b'lfs: need to transfer %d objects (%s)\n')
565 _(b'lfs: need to transfer %d objects (%s)\n')
564 % (len(objects), util.bytecount(total))
566 % (len(objects), util.bytecount(total))
565 )
567 )
566
568
567 def transfer(chunk):
569 def transfer(chunk):
568 for obj in chunk:
570 for obj in chunk:
569 objsize = obj.get(b'size', 0)
571 objsize = obj.get(b'size', 0)
570 if self.ui.verbose:
572 if self.ui.verbose:
571 if action == b'download':
573 if action == b'download':
572 msg = _(b'lfs: downloading %s (%s)\n')
574 msg = _(b'lfs: downloading %s (%s)\n')
573 elif action == b'upload':
575 elif action == b'upload':
574 msg = _(b'lfs: uploading %s (%s)\n')
576 msg = _(b'lfs: uploading %s (%s)\n')
575 self.ui.note(
577 self.ui.note(
576 msg % (obj.get(b'oid'), util.bytecount(objsize))
578 msg % (obj.get(b'oid'), util.bytecount(objsize))
577 )
579 )
578 retry = self.retry
580 retry = self.retry
579 while True:
581 while True:
580 try:
582 try:
581 self._basictransfer(obj, action, localstore)
583 self._basictransfer(obj, action, localstore)
582 yield 1, obj.get(b'oid')
584 yield 1, obj.get(b'oid')
583 break
585 break
584 except socket.error as ex:
586 except socket.error as ex:
585 if retry > 0:
587 if retry > 0:
586 self.ui.note(
588 self.ui.note(
587 _(b'lfs: failed: %r (remaining retry %d)\n')
589 _(b'lfs: failed: %r (remaining retry %d)\n')
588 % (stringutil.forcebytestr(ex), retry)
590 % (stringutil.forcebytestr(ex), retry)
589 )
591 )
590 retry -= 1
592 retry -= 1
591 continue
593 continue
592 raise
594 raise
593
595
594 # Until https multiplexing gets sorted out
596 # Until https multiplexing gets sorted out
595 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
597 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
596 oids = worker.worker(
598 oids = worker.worker(
597 self.ui,
599 self.ui,
598 0.1,
600 0.1,
599 transfer,
601 transfer,
600 (),
602 (),
601 sorted(objects, key=lambda o: o.get(b'oid')),
603 sorted(objects, key=lambda o: o.get(b'oid')),
602 )
604 )
603 else:
605 else:
604 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
606 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
605
607
606 with self.ui.makeprogress(
608 with self.ui.makeprogress(
607 topic, unit=_(b"bytes"), total=total
609 topic, unit=_(b"bytes"), total=total
608 ) as progress:
610 ) as progress:
609 progress.update(0)
611 progress.update(0)
610 processed = 0
612 processed = 0
611 blobs = 0
613 blobs = 0
612 for _one, oid in oids:
614 for _one, oid in oids:
613 processed += sizes[oid]
615 processed += sizes[oid]
614 blobs += 1
616 blobs += 1
615 progress.update(processed)
617 progress.update(processed)
616 self.ui.note(_(b'lfs: processed: %s\n') % oid)
618 self.ui.note(_(b'lfs: processed: %s\n') % oid)
617
619
618 if blobs > 0:
620 if blobs > 0:
619 if action == b'upload':
621 if action == b'upload':
620 self.ui.status(
622 self.ui.status(
621 _(b'lfs: uploaded %d files (%s)\n')
623 _(b'lfs: uploaded %d files (%s)\n')
622 % (blobs, util.bytecount(processed))
624 % (blobs, util.bytecount(processed))
623 )
625 )
624 elif action == b'download':
626 elif action == b'download':
625 self.ui.status(
627 self.ui.status(
626 _(b'lfs: downloaded %d files (%s)\n')
628 _(b'lfs: downloaded %d files (%s)\n')
627 % (blobs, util.bytecount(processed))
629 % (blobs, util.bytecount(processed))
628 )
630 )
629
631
630 def __del__(self):
632 def __del__(self):
631 # copied from mercurial/httppeer.py
633 # copied from mercurial/httppeer.py
632 urlopener = getattr(self, 'urlopener', None)
634 urlopener = getattr(self, 'urlopener', None)
633 if urlopener:
635 if urlopener:
634 for h in urlopener.handlers:
636 for h in urlopener.handlers:
635 h.close()
637 h.close()
636 getattr(h, "close_all", lambda: None)()
638 getattr(h, "close_all", lambda: None)()
637
639
638
640
639 class _dummyremote(object):
641 class _dummyremote(object):
640 """Dummy store storing blobs to temp directory."""
642 """Dummy store storing blobs to temp directory."""
641
643
642 def __init__(self, repo, url):
644 def __init__(self, repo, url):
643 fullpath = repo.vfs.join(b'lfs', url.path)
645 fullpath = repo.vfs.join(b'lfs', url.path)
644 self.vfs = lfsvfs(fullpath)
646 self.vfs = lfsvfs(fullpath)
645
647
646 def writebatch(self, pointers, fromstore):
648 def writebatch(self, pointers, fromstore):
647 for p in _deduplicate(pointers):
649 for p in _deduplicate(pointers):
648 content = fromstore.read(p.oid(), verify=True)
650 content = fromstore.read(p.oid(), verify=True)
649 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
651 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
650 fp.write(content)
652 fp.write(content)
651
653
652 def readbatch(self, pointers, tostore):
654 def readbatch(self, pointers, tostore):
653 for p in _deduplicate(pointers):
655 for p in _deduplicate(pointers):
654 with self.vfs(p.oid(), b'rb') as fp:
656 with self.vfs(p.oid(), b'rb') as fp:
655 tostore.download(p.oid(), fp, None)
657 tostore.download(p.oid(), fp, None)
656
658
657
659
658 class _nullremote(object):
660 class _nullremote(object):
659 """Null store storing blobs to /dev/null."""
661 """Null store storing blobs to /dev/null."""
660
662
661 def __init__(self, repo, url):
663 def __init__(self, repo, url):
662 pass
664 pass
663
665
664 def writebatch(self, pointers, fromstore):
666 def writebatch(self, pointers, fromstore):
665 pass
667 pass
666
668
667 def readbatch(self, pointers, tostore):
669 def readbatch(self, pointers, tostore):
668 pass
670 pass
669
671
670
672
671 class _promptremote(object):
673 class _promptremote(object):
672 """Prompt user to set lfs.url when accessed."""
674 """Prompt user to set lfs.url when accessed."""
673
675
674 def __init__(self, repo, url):
676 def __init__(self, repo, url):
675 pass
677 pass
676
678
677 def writebatch(self, pointers, fromstore, ui=None):
679 def writebatch(self, pointers, fromstore, ui=None):
678 self._prompt()
680 self._prompt()
679
681
680 def readbatch(self, pointers, tostore, ui=None):
682 def readbatch(self, pointers, tostore, ui=None):
681 self._prompt()
683 self._prompt()
682
684
683 def _prompt(self):
685 def _prompt(self):
684 raise error.Abort(_(b'lfs.url needs to be configured'))
686 raise error.Abort(_(b'lfs.url needs to be configured'))
685
687
686
688
687 _storemap = {
689 _storemap = {
688 b'https': _gitlfsremote,
690 b'https': _gitlfsremote,
689 b'http': _gitlfsremote,
691 b'http': _gitlfsremote,
690 b'file': _dummyremote,
692 b'file': _dummyremote,
691 b'null': _nullremote,
693 b'null': _nullremote,
692 None: _promptremote,
694 None: _promptremote,
693 }
695 }
694
696
695
697
696 def _deduplicate(pointers):
698 def _deduplicate(pointers):
697 """Remove any duplicate oids that exist in the list"""
699 """Remove any duplicate oids that exist in the list"""
698 reduced = util.sortdict()
700 reduced = util.sortdict()
699 for p in pointers:
701 for p in pointers:
700 reduced[p.oid()] = p
702 reduced[p.oid()] = p
701 return reduced.values()
703 return reduced.values()
702
704
703
705
704 def _verify(oid, content):
706 def _verify(oid, content):
705 realoid = node.hex(hashlib.sha256(content).digest())
707 realoid = node.hex(hashlib.sha256(content).digest())
706 if realoid != oid:
708 if realoid != oid:
707 raise LfsCorruptionError(
709 raise LfsCorruptionError(
708 _(b'detected corrupt lfs object: %s') % oid,
710 _(b'detected corrupt lfs object: %s') % oid,
709 hint=_(b'run hg verify'),
711 hint=_(b'run hg verify'),
710 )
712 )
711
713
712
714
713 def remote(repo, remote=None):
715 def remote(repo, remote=None):
714 """remotestore factory. return a store in _storemap depending on config
716 """remotestore factory. return a store in _storemap depending on config
715
717
716 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
718 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
717 infer the endpoint, based on the remote repository using the same path
719 infer the endpoint, based on the remote repository using the same path
718 adjustments as git. As an extension, 'http' is supported as well so that
720 adjustments as git. As an extension, 'http' is supported as well so that
719 ``hg serve`` works out of the box.
721 ``hg serve`` works out of the box.
720
722
721 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
723 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
722 """
724 """
723 lfsurl = repo.ui.config(b'lfs', b'url')
725 lfsurl = repo.ui.config(b'lfs', b'url')
724 url = util.url(lfsurl or b'')
726 url = util.url(lfsurl or b'')
725 if lfsurl is None:
727 if lfsurl is None:
726 if remote:
728 if remote:
727 path = remote
729 path = remote
728 elif util.safehasattr(repo, b'_subtoppath'):
730 elif util.safehasattr(repo, b'_subtoppath'):
729 # The pull command sets this during the optional update phase, which
731 # The pull command sets this during the optional update phase, which
730 # tells exactly where the pull originated, whether 'paths.default'
732 # tells exactly where the pull originated, whether 'paths.default'
731 # or explicit.
733 # or explicit.
732 path = repo._subtoppath
734 path = repo._subtoppath
733 else:
735 else:
734 # TODO: investigate 'paths.remote:lfsurl' style path customization,
736 # TODO: investigate 'paths.remote:lfsurl' style path customization,
735 # and fall back to inferring from 'paths.remote' if unspecified.
737 # and fall back to inferring from 'paths.remote' if unspecified.
736 path = repo.ui.config(b'paths', b'default') or b''
738 path = repo.ui.config(b'paths', b'default') or b''
737
739
738 defaulturl = util.url(path)
740 defaulturl = util.url(path)
739
741
740 # TODO: support local paths as well.
742 # TODO: support local paths as well.
741 # TODO: consider the ssh -> https transformation that git applies
743 # TODO: consider the ssh -> https transformation that git applies
742 if defaulturl.scheme in (b'http', b'https'):
744 if defaulturl.scheme in (b'http', b'https'):
743 if defaulturl.path and defaulturl.path[:-1] != b'/':
745 if defaulturl.path and defaulturl.path[:-1] != b'/':
744 defaulturl.path += b'/'
746 defaulturl.path += b'/'
745 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
747 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
746
748
747 url = util.url(bytes(defaulturl))
749 url = util.url(bytes(defaulturl))
748 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
750 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
749
751
750 scheme = url.scheme
752 scheme = url.scheme
751 if scheme not in _storemap:
753 if scheme not in _storemap:
752 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
754 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
753 return _storemap[scheme](repo, url)
755 return _storemap[scheme](repo, url)
754
756
755
757
756 class LfsRemoteError(error.StorageError):
758 class LfsRemoteError(error.StorageError):
757 pass
759 pass
758
760
759
761
760 class LfsCorruptionError(error.Abort):
762 class LfsCorruptionError(error.Abort):
761 """Raised when a corrupt blob is detected, aborting an operation
763 """Raised when a corrupt blob is detected, aborting an operation
762
764
763 It exists to allow specialized handling on the server side."""
765 It exists to allow specialized handling on the server side."""
General Comments 0
You need to be logged in to leave comments. Login now