##// END OF EJS Templates
lfs: drop an unnecessary r'' prefix...
Matt Harbison -
r44598:b2408aca default
parent child Browse files
Show More
@@ -1,763 +1,763 b''
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import json
13 import json
14 import os
14 import os
15 import re
15 import re
16 import socket
16 import socket
17
17
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19 from mercurial.pycompat import getattr
19 from mercurial.pycompat import getattr
20
20
21 from mercurial import (
21 from mercurial import (
22 encoding,
22 encoding,
23 error,
23 error,
24 node,
24 node,
25 pathutil,
25 pathutil,
26 pycompat,
26 pycompat,
27 url as urlmod,
27 url as urlmod,
28 util,
28 util,
29 vfs as vfsmod,
29 vfs as vfsmod,
30 worker,
30 worker,
31 )
31 )
32
32
33 from mercurial.utils import stringutil
33 from mercurial.utils import stringutil
34
34
35 from ..largefiles import lfutil
35 from ..largefiles import lfutil
36
36
# 64 bytes for SHA256: an lfs oid is the full lowercase-hex SHA-256 digest
# of the blob contents, so exactly 64 hex characters and nothing else.
_lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
39
39
40
40
class lfsvfs(vfsmod.vfs):
    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        # Only well-formed oids (64 lowercase hex chars) are valid paths here.
        if _lfsre.match(path) is None:
            raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[:2], path[2:])

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        root = os.path.normpath(self.base)
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        top = self.reljoin(self.base, path or b'')

        oids = []
        for dirpath, dirs, files in os.walk(top, onerror=onerror):
            shard = dirpath[prefixlen:]

            # Silently skip unexpected files and directories; blobs live only
            # in two-hex-character shard directories.
            if len(shard) != 2:
                continue
            for name in files:
                candidate = shard + name
                if _lfsre.match(candidate):
                    oids.append(candidate)

        yield (b'', [], oids)
71
71
72
72
class nullvfs(lfsvfs):
    """A blob store that holds nothing and silently discards writes."""

    def __init__(self):
        pass

    def exists(self, oid):
        return False

    def read(self, oid):
        # store.read() falls through to here when the blob is missing from
        # its self.vfs.  Mimic a regular vfs's error for a nonexistent file;
        # the only difference is that the full path is unavailable here.
        msg = b'%s: No such file or directory' % oid
        raise IOError(errno.ENOENT, pycompat.sysstr(msg))

    def walk(self, path=None, onerror=None):
        # Nothing is ever stored, so there is never anything to enumerate.
        return (b'', [], [])

    def write(self, oid, data):
        pass
95
95
96
96
class lfsuploadfile(object):
    """a file-like object that supports __len__ and read.

    Wraps an open file object, measuring its total size up front so the
    HTTP layer can send a Content-Length header.
    """

    def __init__(self, fp):
        # Measure the full length, then rewind so reads start at the top.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)
        self._fp = fp

    def __len__(self):
        return self._len

    def read(self, size):
        # After close(), behave like an exhausted stream.
        return b'' if self._fp is None else self._fp.read(size)

    def close(self):
        # Idempotent: drop the reference first, then close the old handle.
        fp, self._fp = self._fp, None
        if fp is not None:
            fp.close()
119
119
120
120
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        # Blobs live under .hg/store/lfs/objects, sharded into two-hex-digit
        # subdirectories by lfsvfs.join().
        fullpath = repo.svfs.join(b'lfs/objects')
        self.vfs = lfsvfs(fullpath)

        if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
            # nullvfs discards writes and reports every blob as missing.
            self.cachevfs = nullvfs()
        else:
            usercache = lfutil._usercachedir(repo.ui, b'lfs')
            self.cachevfs = lfsvfs(usercache)
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file. Commit will
        # write to both it and the local store, as will anything that downloads
        # the blobs. However, things like clone without an update won't
        # populate the local store. For an init + push of a local clone,
        # the usercache is the only place it _could_ be. If not present, the
        # missing file msg here will indicate the local repo, not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, b'rb')

        return self.vfs(oid, b'rb')

    def download(self, oid, src, content_length):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore.

        Raises LfsRemoteError on a short read, and LfsCorruptionError when the
        received content does not hash to ``oid``.  The atomictemp write means
        nothing is committed to the store in either failure case.
        """
        sha256 = hashlib.sha256()
        size = 0

        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                sha256.update(chunk)
                size += len(chunk)

            # If the server advertised a length longer than what we actually
            # received, then we should expect that the server crashed while
            # producing the response (but the server has no way of telling us
            # that), and we really don't need to try to write the response to
            # the localstore, because it's not going to match the expected.
            if content_length is not None and int(content_length) != size:
                msg = (
                    b"Response length (%s) does not match Content-Length "
                    b"header (%d): likely server-side crash"
                )
                raise LfsRemoteError(_(msg) % (size, int(content_length)))

            realoid = node.hex(sha256.digest())
            if realoid != oid:
                raise LfsCorruptionError(
                    _(b'corrupt remote lfs object: %s') % oid
                )

        self._linktousercache(oid)

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or similar.
        As such, there is no need to verify the data. Imports from a remote
        store must use ``download()`` instead."""
        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            fp.write(data)

        self._linktousercache(oid)

    def linkfromusercache(self, oid):
        """Link blobs found in the user cache into this store.

        The server module needs to do this when it lets the client know not to
        upload the blob, to ensure it is always available in this store.
        Normally this is done implicitly when the client reads or writes the
        blob, but that doesn't happen when the server tells the client that it
        already has the blob.
        """
        if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
            self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))

    def _linktousercache(self, oid):
        """Hardlink the named blob from the local store into the usercache."""
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid) and not isinstance(
            self.cachevfs, nullvfs
        ):
            self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore.

        Falls back to the usercache when the blob is missing from the local
        store, hardlinking it into the store after a successful (or skipped)
        verification."""
        if not self.vfs.exists(oid):
            blob = self._read(self.cachevfs, oid, verify)

            # Even if revlog will verify the content, it needs to be verified
            # now before making the hardlink to avoid propagating corrupt blobs.
            # Don't abort if corruption is detected, because `hg verify` will
            # give more useful info about the corruption- simply don't add the
            # hardlink.
            if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
                self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
                lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        else:
            self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
            blob = self._read(self.vfs, oid, verify)
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            # _verify() aborts with a hint when the content doesn't match oid.
            _verify(oid, blob)
        return blob

    def verify(self, oid):
        """Indicate whether or not the hash of the underlying file matches its
        name."""
        sha256 = hashlib.sha256()

        # Hash in 1MB chunks so huge blobs aren't loaded into memory at once.
        with self.open(oid) as fp:
            for chunk in util.filechunkiter(fp, size=1048576):
                sha256.update(chunk)

        return oid == node.hex(sha256.digest())

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
258
258
259
259
def _urlerrorreason(urlerror):
    '''Create a friendly message for the given URLError to be used in an
    LfsRemoteError message.
    '''
    # Prefer the wrapped exception when the URLError carries one.
    if isinstance(urlerror.reason, Exception):
        inst = urlerror.reason
    else:
        inst = urlerror

    if util.safehasattr(inst, b'reason'):
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        return reason

    if getattr(inst, "strerror", None):
        return encoding.strtolocal(inst.strerror)

    # Last resort: stringify the original error.
    return stringutil.forcebytestr(urlerror)
283
283
284
284
class lfsauthhandler(util.urlreq.basehandler):
    handler_order = 480  # Before HTTPDigestAuthHandler (== 490)

    def http_error_401(self, req, fp, code, msg, headers):
        """Enforces that any authentication performed is HTTP Basic
        Authentication. No authentication is also acceptable.
        """
        authreq = headers.get('www-authenticate', None)
        if not authreq:
            # No challenge at all is fine.
            return None

        scheme = authreq.split()[0]
        if scheme.lower() == 'basic':
            return None

        # Any other scheme (Digest, NTLM, ...) is unsupported: refuse.
        errmsg = _(b'the server must support Basic Authentication')
        raise util.urlerr.httperror(
            req.get_full_url(),
            code,
            encoding.strfromlocal(errmsg),
            headers,
            fp,
        )
306
306
307
307
308 class _gitlfsremote(object):
308 class _gitlfsremote(object):
309 def __init__(self, repo, url):
309 def __init__(self, repo, url):
310 ui = repo.ui
310 ui = repo.ui
311 self.ui = ui
311 self.ui = ui
312 baseurl, authinfo = url.authinfo()
312 baseurl, authinfo = url.authinfo()
313 self.baseurl = baseurl.rstrip(b'/')
313 self.baseurl = baseurl.rstrip(b'/')
314 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
314 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
315 if not useragent:
315 if not useragent:
316 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
316 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
317 self.urlopener = urlmod.opener(ui, authinfo, useragent)
317 self.urlopener = urlmod.opener(ui, authinfo, useragent)
318 self.urlopener.add_handler(lfsauthhandler())
318 self.urlopener.add_handler(lfsauthhandler())
319 self.retry = ui.configint(b'lfs', b'retry')
319 self.retry = ui.configint(b'lfs', b'retry')
320
320
321 def writebatch(self, pointers, fromstore):
321 def writebatch(self, pointers, fromstore):
322 """Batch upload from local to remote blobstore."""
322 """Batch upload from local to remote blobstore."""
323 self._batch(_deduplicate(pointers), fromstore, b'upload')
323 self._batch(_deduplicate(pointers), fromstore, b'upload')
324
324
325 def readbatch(self, pointers, tostore):
325 def readbatch(self, pointers, tostore):
326 """Batch download from remote to local blostore."""
326 """Batch download from remote to local blostore."""
327 self._batch(_deduplicate(pointers), tostore, b'download')
327 self._batch(_deduplicate(pointers), tostore, b'download')
328
328
329 def _batchrequest(self, pointers, action):
329 def _batchrequest(self, pointers, action):
330 """Get metadata about objects pointed by pointers for given action
330 """Get metadata about objects pointed by pointers for given action
331
331
332 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
332 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
333 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
333 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
334 """
334 """
335 objects = [
335 objects = [
336 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
336 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
337 for p in pointers
337 for p in pointers
338 ]
338 ]
339 requestdata = pycompat.bytesurl(
339 requestdata = pycompat.bytesurl(
340 json.dumps(
340 json.dumps(
341 {'objects': objects, 'operation': pycompat.strurl(action),}
341 {'objects': objects, 'operation': pycompat.strurl(action),}
342 )
342 )
343 )
343 )
344 url = b'%s/objects/batch' % self.baseurl
344 url = b'%s/objects/batch' % self.baseurl
345 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
345 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
346 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
346 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
347 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
347 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
348 try:
348 try:
349 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
349 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
350 rawjson = rsp.read()
350 rawjson = rsp.read()
351 except util.urlerr.httperror as ex:
351 except util.urlerr.httperror as ex:
352 hints = {
352 hints = {
353 400: _(
353 400: _(
354 b'check that lfs serving is enabled on %s and "%s" is '
354 b'check that lfs serving is enabled on %s and "%s" is '
355 b'supported'
355 b'supported'
356 )
356 )
357 % (self.baseurl, action),
357 % (self.baseurl, action),
358 404: _(b'the "lfs.url" config may be used to override %s')
358 404: _(b'the "lfs.url" config may be used to override %s')
359 % self.baseurl,
359 % self.baseurl,
360 }
360 }
361 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
361 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
362 raise LfsRemoteError(
362 raise LfsRemoteError(
363 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
363 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
364 hint=hint,
364 hint=hint,
365 )
365 )
366 except util.urlerr.urlerror as ex:
366 except util.urlerr.urlerror as ex:
367 hint = (
367 hint = (
368 _(b'the "lfs.url" config may be used to override %s')
368 _(b'the "lfs.url" config may be used to override %s')
369 % self.baseurl
369 % self.baseurl
370 )
370 )
371 raise LfsRemoteError(
371 raise LfsRemoteError(
372 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
372 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
373 )
373 )
374 try:
374 try:
375 response = pycompat.json_loads(rawjson)
375 response = pycompat.json_loads(rawjson)
376 except ValueError:
376 except ValueError:
377 raise LfsRemoteError(
377 raise LfsRemoteError(
378 _(b'LFS server returns invalid JSON: %s')
378 _(b'LFS server returns invalid JSON: %s')
379 % rawjson.encode("utf-8")
379 % rawjson.encode("utf-8")
380 )
380 )
381
381
382 if self.ui.debugflag:
382 if self.ui.debugflag:
383 self.ui.debug(b'Status: %d\n' % rsp.status)
383 self.ui.debug(b'Status: %d\n' % rsp.status)
384 # lfs-test-server and hg serve return headers in different order
384 # lfs-test-server and hg serve return headers in different order
385 headers = pycompat.bytestr(rsp.info()).strip()
385 headers = pycompat.bytestr(rsp.info()).strip()
386 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
386 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
387
387
388 if 'objects' in response:
388 if 'objects' in response:
389 response['objects'] = sorted(
389 response['objects'] = sorted(
390 response['objects'], key=lambda p: p['oid']
390 response['objects'], key=lambda p: p['oid']
391 )
391 )
392 self.ui.debug(
392 self.ui.debug(
393 b'%s\n'
393 b'%s\n'
394 % pycompat.bytesurl(
394 % pycompat.bytesurl(
395 json.dumps(
395 json.dumps(
396 response,
396 response,
397 indent=2,
397 indent=2,
398 separators=('', ': '),
398 separators=('', ': '),
399 sort_keys=True,
399 sort_keys=True,
400 )
400 )
401 )
401 )
402 )
402 )
403
403
404 def encodestr(x):
404 def encodestr(x):
405 if isinstance(x, pycompat.unicode):
405 if isinstance(x, pycompat.unicode):
406 return x.encode('utf-8')
406 return x.encode('utf-8')
407 return x
407 return x
408
408
409 return pycompat.rapply(encodestr, response)
409 return pycompat.rapply(encodestr, response)
410
410
411 def _checkforservererror(self, pointers, responses, action):
411 def _checkforservererror(self, pointers, responses, action):
412 """Scans errors from objects
412 """Scans errors from objects
413
413
414 Raises LfsRemoteError if any objects have an error"""
414 Raises LfsRemoteError if any objects have an error"""
415 for response in responses:
415 for response in responses:
416 # The server should return 404 when objects cannot be found. Some
416 # The server should return 404 when objects cannot be found. Some
417 # server implementation (ex. lfs-test-server) does not set "error"
417 # server implementation (ex. lfs-test-server) does not set "error"
418 # but just removes "download" from "actions". Treat that case
418 # but just removes "download" from "actions". Treat that case
419 # as the same as 404 error.
419 # as the same as 404 error.
420 if b'error' not in response:
420 if b'error' not in response:
421 if action == b'download' and action not in response.get(
421 if action == b'download' and action not in response.get(
422 b'actions', []
422 b'actions', []
423 ):
423 ):
424 code = 404
424 code = 404
425 else:
425 else:
426 continue
426 continue
427 else:
427 else:
428 # An error dict without a code doesn't make much sense, so
428 # An error dict without a code doesn't make much sense, so
429 # treat as a server error.
429 # treat as a server error.
430 code = response.get(b'error').get(b'code', 500)
430 code = response.get(b'error').get(b'code', 500)
431
431
432 ptrmap = {p.oid(): p for p in pointers}
432 ptrmap = {p.oid(): p for p in pointers}
433 p = ptrmap.get(response[b'oid'], None)
433 p = ptrmap.get(response[b'oid'], None)
434 if p:
434 if p:
435 filename = getattr(p, 'filename', b'unknown')
435 filename = getattr(p, 'filename', b'unknown')
436 errors = {
436 errors = {
437 404: b'The object does not exist',
437 404: b'The object does not exist',
438 410: b'The object was removed by the owner',
438 410: b'The object was removed by the owner',
439 422: b'Validation error',
439 422: b'Validation error',
440 500: b'Internal server error',
440 500: b'Internal server error',
441 }
441 }
442 msg = errors.get(code, b'status code %d' % code)
442 msg = errors.get(code, b'status code %d' % code)
443 raise LfsRemoteError(
443 raise LfsRemoteError(
444 _(b'LFS server error for "%s": %s') % (filename, msg)
444 _(b'LFS server error for "%s": %s') % (filename, msg)
445 )
445 )
446 else:
446 else:
447 raise LfsRemoteError(
447 raise LfsRemoteError(
448 _(b'LFS server error. Unsolicited response for oid %s')
448 _(b'LFS server error. Unsolicited response for oid %s')
449 % response[b'oid']
449 % response[b'oid']
450 )
450 )
451
451
452 def _extractobjects(self, response, pointers, action):
452 def _extractobjects(self, response, pointers, action):
453 """extract objects from response of the batch API
453 """extract objects from response of the batch API
454
454
455 response: parsed JSON object returned by batch API
455 response: parsed JSON object returned by batch API
456 return response['objects'] filtered by action
456 return response['objects'] filtered by action
457 raise if any object has an error
457 raise if any object has an error
458 """
458 """
459 # Scan errors from objects - fail early
459 # Scan errors from objects - fail early
460 objects = response.get(b'objects', [])
460 objects = response.get(b'objects', [])
461 self._checkforservererror(pointers, objects, action)
461 self._checkforservererror(pointers, objects, action)
462
462
463 # Filter objects with given action. Practically, this skips uploading
463 # Filter objects with given action. Practically, this skips uploading
464 # objects which exist in the server.
464 # objects which exist in the server.
465 filteredobjects = [
465 filteredobjects = [
466 o for o in objects if action in o.get(b'actions', [])
466 o for o in objects if action in o.get(b'actions', [])
467 ]
467 ]
468
468
469 return filteredobjects
469 return filteredobjects
470
470
471 def _basictransfer(self, obj, action, localstore):
    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        Raises LfsRemoteError on HTTP or connection failure; uploads of
        locally-corrupt blobs abort before any network traffic.

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
        """
        oid = obj[b'oid']
        href = obj[b'actions'][action].get(b'href')
        headers = obj[b'actions'][action].get(b'header', {}).items()

        request = util.urlreq.request(pycompat.strurl(href))
        if action == b'upload':
            # If uploading blobs, read data from local blobstore.
            if not localstore.verify(oid):
                raise error.Abort(
                    _(b'detected corrupt lfs object: %s') % oid,
                    hint=_(b'run hg verify'),
                )
            # The opened blob file is closed in the finally block below, so
            # it is released on both the success and the error paths.
            request.data = lfsuploadfile(localstore.open(oid))
            request.get_method = lambda: 'PUT'
            request.add_header('Content-Type', 'application/octet-stream')
            request.add_header('Content-Length', len(request.data))

        # Propagate any extra headers the batch API asked for on this action.
        for k, v in headers:
            request.add_header(pycompat.strurl(k), pycompat.strurl(v))

        try:
            with contextlib.closing(self.urlopener.open(request)) as res:
                contentlength = res.info().get(b"content-length")
                ui = self.ui  # Shorten debug lines
                if self.ui.debugflag:
                    ui.debug(b'Status: %d\n' % res.status)
                    # lfs-test-server and hg serve return headers in different
                    # order
                    headers = pycompat.bytestr(res.info()).strip()
                    ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))

                if action == b'download':
                    # If downloading blobs, store downloaded data to local
                    # blobstore
                    localstore.download(oid, res, contentlength)
                else:
                    # Drain the server reply in 1MB chunks; a non-empty body
                    # after an upload is only of interest for debugging.
                    blocks = []
                    while True:
                        data = res.read(1048576)
                        if not data:
                            break
                        blocks.append(data)

                    response = b"".join(blocks)
                    if response:
                        ui.debug(b'lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            if self.ui.debugflag:
                self.ui.debug(
                    b'%s: %s\n' % (oid, ex.read())
                )  # XXX: also bytes?
            raise LfsRemoteError(
                _(b'LFS HTTP error: %s (oid=%s, action=%s)')
                % (stringutil.forcebytestr(ex), oid, action)
            )
        except util.urlerr.urlerror as ex:
            hint = _(b'attempted connection to %s') % pycompat.bytesurl(
                util.urllibcompat.getfullurl(request)
            )
            raise LfsRemoteError(
                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
            )
        finally:
            # Close the upload file object (set above for b'upload' only).
            if request.data:
                request.data.close()
546
546
    def _batch(self, pointers, localstore, action):
        """Transfer the blobs named by ``pointers`` to or from the remote.

        pointers: list of pointer objects describing the blobs
        localstore: blobstore.local, the source (upload) or destination
            (download) of blob data
        action: bytes, b'upload' or b'download'; anything else is a
            ProgrammingError

        A single batch API request announces all objects; each object is then
        transferred individually via _basictransfer(), optionally fanned out
        through worker.worker().  Progress and a summary line are reported on
        self.ui.
        """
        if action not in [b'upload', b'download']:
            raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get(b'size', 0) for x in objects)
        # oid -> size, used to advance the progress bar after each transfer.
        sizes = {}
        for obj in objects:
            sizes[obj.get(b'oid')] = obj.get(b'size', 0)
        topic = {
            b'upload': _(b'lfs uploading'),
            b'download': _(b'lfs downloading'),
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _(b'lfs: need to transfer %d objects (%s)\n')
                % (len(objects), util.bytecount(total))
            )

        def transfer(chunk):
            # Generator yielding (1, oid) per completed object.  Transient
            # socket errors are retried up to self.retry times per object.
            for obj in chunk:
                objsize = obj.get(b'size', 0)
                if self.ui.verbose:
                    if action == b'download':
                        msg = _(b'lfs: downloading %s (%s)\n')
                    elif action == b'upload':
                        msg = _(b'lfs: uploading %s (%s)\n')
                    self.ui.note(
                        msg % (obj.get(b'oid'), util.bytecount(objsize))
                    )
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get(b'oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _(b'lfs: failed: %r (remaining retry %d)\n')
                                % (stringutil.forcebytestr(ex), retry)
                            )
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get(b'oid')),
            )
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))

        # Consuming the (lazy) oids generator here is what actually drives
        # the transfers; the progress bar advances by blob size.
        with self.ui.makeprogress(
            topic, unit=_(b"bytes"), total=total
        ) as progress:
            progress.update(0)
            processed = 0
            blobs = 0
            for _one, oid in oids:
                processed += sizes[oid]
                blobs += 1
                progress.update(processed)
                self.ui.note(_(b'lfs: processed: %s\n') % oid)

        if blobs > 0:
            if action == b'upload':
                self.ui.status(
                    _(b'lfs: uploaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
            elif action == b'download':
                self.ui.status(
                    _(b'lfs: downloaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
629
629
630 def __del__(self):
630 def __del__(self):
631 # copied from mercurial/httppeer.py
631 # copied from mercurial/httppeer.py
632 urlopener = getattr(self, 'urlopener', None)
632 urlopener = getattr(self, 'urlopener', None)
633 if urlopener:
633 if urlopener:
634 for h in urlopener.handlers:
634 for h in urlopener.handlers:
635 h.close()
635 h.close()
636 getattr(h, "close_all", lambda: None)()
636 getattr(h, "close_all", lambda: None)()
637
637
638
638
class _dummyremote(object):
    """File-based store keeping blobs under a directory inside repo.vfs."""

    def __init__(self, repo, url):
        # Blobs are laid out by lfsvfs under .hg/lfs/<url.path>.
        self.vfs = lfsvfs(repo.vfs.join(b'lfs', url.path))

    def writebatch(self, pointers, fromstore):
        """Copy each (deduplicated) blob from ``fromstore`` into this store."""
        for pointer in _deduplicate(pointers):
            blob = fromstore.read(pointer.oid(), verify=True)
            with self.vfs(pointer.oid(), b'wb', atomictemp=True) as fobj:
                fobj.write(blob)

    def readbatch(self, pointers, tostore):
        """Stream each (deduplicated) blob from this store into ``tostore``."""
        for pointer in _deduplicate(pointers):
            with self.vfs(pointer.oid(), b'rb') as fobj:
                tostore.download(pointer.oid(), fobj, None)
656
656
657
657
658 class _nullremote(object):
658 class _nullremote(object):
659 """Null store storing blobs to /dev/null."""
659 """Null store storing blobs to /dev/null."""
660
660
661 def __init__(self, repo, url):
661 def __init__(self, repo, url):
662 pass
662 pass
663
663
664 def writebatch(self, pointers, fromstore):
664 def writebatch(self, pointers, fromstore):
665 pass
665 pass
666
666
667 def readbatch(self, pointers, tostore):
667 def readbatch(self, pointers, tostore):
668 pass
668 pass
669
669
670
670
class _promptremote(object):
    """Stand-in store used when no LFS endpoint is known; any batch
    operation aborts, telling the user to set ``lfs.url``."""

    def __init__(self, repo, url):
        pass

    def _prompt(self):
        # Despite the class name, this does not interactively prompt; it
        # aborts with a configuration hint.
        raise error.Abort(_(b'lfs.url needs to be configured'))

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()
685
685
686
686
# Map from an LFS url scheme to the class implementing its remote store.
# The ``None`` key covers the case where no endpoint could be determined;
# _promptremote then aborts asking the user to configure lfs.url.
_storemap = {
    b'https': _gitlfsremote,
    b'http': _gitlfsremote,
    b'file': _dummyremote,
    b'null': _nullremote,
    None: _promptremote,
}
694
694
695
695
def _deduplicate(pointers):
    """Remove any duplicate oids that exist in the list"""
    # Key by oid in an ordered mapping so a later duplicate replaces an
    # earlier one while overall ordering is retained.
    unique = util.sortdict()
    for pointer in pointers:
        unique[pointer.oid()] = pointer
    return unique.values()
702
702
703
703
def _verify(oid, content):
    """Raise LfsCorruptionError if sha256(``content``) doesn't match ``oid``."""
    actual = node.hex(hashlib.sha256(content).digest())
    if actual == oid:
        return
    raise LfsCorruptionError(
        _(b'detected corrupt lfs object: %s') % oid,
        hint=_(b'run hg verify'),
    )
711
711
712
712
def remote(repo, remote=None):
    """remotestore factory. return a store in _storemap depending on config

    If ``lfs.url`` is specified, use that remote endpoint.  Otherwise, try to
    infer the endpoint, based on the remote repository using the same path
    adjustments as git.  As an extension, 'http' is supported as well so that
    ``hg serve`` works out of the box.

    Raises error.Abort if the resulting scheme is not in _storemap.

    https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
    """
    lfsurl = repo.ui.config(b'lfs', b'url')
    url = util.url(lfsurl or b'')
    if lfsurl is None:
        if remote:
            path = remote
        elif util.safehasattr(repo, b'_subtoppath'):
            # The pull command sets this during the optional update phase, which
            # tells exactly where the pull originated, whether 'paths.default'
            # or explicit.
            path = repo._subtoppath
        else:
            # TODO: investigate 'paths.remote:lfsurl' style path customization,
            # and fall back to inferring from 'paths.remote' if unspecified.
            path = repo.ui.config(b'paths', b'default') or b''

        defaulturl = util.url(path)

        # TODO: support local paths as well.
        # TODO: consider the ssh -> https transformation that git applies
        if defaulturl.scheme in (b'http', b'https'):
            # Append a trailing '/' only when it is actually missing.  The
            # previous check used ``path[:-1]`` (everything *except* the last
            # byte), so a path already ending in '/' grew a double slash and
            # a bare b'/' became b'//'.
            if defaulturl.path and defaulturl.path[-1:] != b'/':
                defaulturl.path += b'/'
            defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'

            url = util.url(bytes(defaulturl))
            repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)

    scheme = url.scheme
    if scheme not in _storemap:
        raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
    return _storemap[scheme](repo, url)
754
754
755
755
class LfsRemoteError(error.StorageError):
    """Raised when a request to the remote LFS server fails or the
    connection cannot be established."""

    pass
758
758
759
759
class LfsCorruptionError(error.Abort):
    """Raised when a corrupt blob is detected, aborting an operation

    It exists to allow specialized handling on the server side.  See
    ``_verify()``, which raises it when a blob's sha256 digest doesn't
    match its expected oid."""
General Comments 0
You need to be logged in to leave comments. Login now