##// END OF EJS Templates
lfs: explicitly close the file handle for the blob being uploaded...
Matt Harbison -
r44597:2ad4e8ae default
parent child Browse files
Show More
@@ -1,759 +1,763 b''
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import json
13 import json
14 import os
14 import os
15 import re
15 import re
16 import socket
16 import socket
17
17
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19 from mercurial.pycompat import getattr
19 from mercurial.pycompat import getattr
20
20
21 from mercurial import (
21 from mercurial import (
22 encoding,
22 encoding,
23 error,
23 error,
24 node,
24 node,
25 pathutil,
25 pathutil,
26 pycompat,
26 pycompat,
27 url as urlmod,
27 url as urlmod,
28 util,
28 util,
29 vfs as vfsmod,
29 vfs as vfsmod,
30 worker,
30 worker,
31 )
31 )
32
32
33 from mercurial.utils import stringutil
33 from mercurial.utils import stringutil
34
34
35 from ..largefiles import lfutil
35 from ..largefiles import lfutil
36
36
37 # 64 bytes for SHA256
37 # 64 bytes for SHA256
38 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
38 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
39
39
40
40
class lfsvfs(vfsmod.vfs):
    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[:2], path[2:])

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        root = os.path.normpath(self.base)
        # when dirpath == root, dirpath[prefixlen:] becomes empty because
        # len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        oids = []

        start = self.reljoin(self.base, path or b'')
        for dirpath, dirs, files in os.walk(start, onerror=onerror):
            dirpath = dirpath[prefixlen:]

            # Silently skip unexpected files and directories
            if len(dirpath) == 2:
                oids.extend(
                    dirpath + f for f in files if _lfsre.match(dirpath + f)
                )

        yield (b'', [], oids)
71
71
72
72
class nullvfs(lfsvfs):
    """A blob store that holds nothing; selected when the usercache is
    disabled via ``experimental.lfs.disableusercache``."""

    def __init__(self):
        # Deliberately skip lfsvfs/vfs initialization: there is no backing
        # directory for this store.
        pass

    def exists(self, oid):
        return False

    def read(self, oid):
        # store.read() calls into here if the blob doesn't exist in its
        # self.vfs.  Raise the same error as a normal vfs when asked to read
        # a file that doesn't exist.  The only difference is the full file
        # path isn't available in the error.
        raise IOError(
            errno.ENOENT,
            pycompat.sysstr(b'%s: No such file or directory' % oid),
        )

    def walk(self, path=None, onerror=None):
        return (b'', [], [])

    def write(self, oid, data):
        pass
95
95
96
96
class lfsuploadfile(object):
    """a file-like object that supports __len__ and read, suitable for
    passing as the ``data`` of an upload request.

    The caller owns the handle's lifetime and must call ``close()`` once the
    upload finishes (or fails), so the descriptor is released promptly.
    """

    def __init__(self, fp):
        self._fp = fp
        # Determine the blob length by seeking to the end, then rewind so
        # read() starts from the beginning.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        # After close(), act like an exhausted stream rather than raising.
        if self._fp is None:
            return b''
        return self._fp.read(size)

    def close(self):
        # Explicitly close the underlying handle.  The previous version
        # closed it inside read() only when EOF was reached, which leaked
        # the handle whenever an upload aborted partway.  Idempotent.
        if self._fp is not None:
            self._fp.close()
            self._fp = None
118
119
119
120
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        fullpath = repo.svfs.join(b'lfs/objects')
        self.vfs = lfsvfs(fullpath)

        if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
            self.cachevfs = nullvfs()
        else:
            usercache = lfutil._usercachedir(repo.ui, b'lfs')
            self.cachevfs = lfsvfs(usercache)
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file.  Commit
        # writes to both it and the local store, as does anything that
        # downloads the blobs.  However, things like clone without an update
        # won't populate the local store.  For an init + push of a local
        # clone, the usercache is the only place it _could_ be.  If not
        # present, the missing file msg here will indicate the local repo,
        # not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, b'rb')

        return self.vfs(oid, b'rb')

    def download(self, oid, src, content_length):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore."""
        sha256 = hashlib.sha256()
        size = 0

        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                sha256.update(chunk)
                size += len(chunk)

            # If the server advertised a length longer than what we actually
            # received, then we should expect that the server crashed while
            # producing the response (but the server has no way of telling us
            # that), and we really don't need to try to write the response to
            # the localstore, because it's not going to match the expected.
            if content_length is not None and int(content_length) != size:
                msg = (
                    b"Response length (%s) does not match Content-Length "
                    b"header (%d): likely server-side crash"
                )
                raise LfsRemoteError(_(msg) % (size, int(content_length)))

            realoid = node.hex(sha256.digest())
            if realoid != oid:
                raise LfsCorruptionError(
                    _(b'corrupt remote lfs object: %s') % oid
                )

        self._linktousercache(oid)

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or similar.
        As such, there is no need to verify the data.  Imports from a remote
        store must use ``download()`` instead."""
        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            fp.write(data)

        self._linktousercache(oid)

    def linkfromusercache(self, oid):
        """Link blobs found in the user cache into this store.

        The server module needs to do this when it lets the client know not to
        upload the blob, to ensure it is always available in this store.
        Normally this is done implicitly when the client reads or writes the
        blob, but that doesn't happen when the server tells the client that it
        already has the blob.
        """
        # Nothing to do when the usercache is disabled or the blob is here.
        if isinstance(self.cachevfs, nullvfs) or self.vfs.exists(oid):
            return
        self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
        lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))

    def _linktousercache(self, oid):
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if self.cachevfs.exists(oid) or isinstance(self.cachevfs, nullvfs):
            return
        self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
        lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if self.vfs.exists(oid):
            self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
            return self._read(self.vfs, oid, verify)

        blob = self._read(self.cachevfs, oid, verify)

        # Even if revlog will verify the content, it needs to be verified
        # now before making the hardlink to avoid propagating corrupt blobs.
        # Don't abort if corruption is detected, because `hg verify` will
        # give more useful info about the corruption- simply don't add the
        # hardlink.
        if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
            self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def verify(self, oid):
        """Indicate whether or not the hash of the underlying file matches its
        name."""
        sha256 = hashlib.sha256()

        with self.open(oid) as fp:
            for chunk in util.filechunkiter(fp, size=1048576):
                sha256.update(chunk)

        return oid == node.hex(sha256.digest())

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
257
258
258
259
def _urlerrorreason(urlerror):
    '''Create a friendly message for the given URLError to be used in an
    LfsRemoteError message.
    '''
    # Prefer the wrapped exception when the URLError carries one.
    inst = urlerror
    if isinstance(urlerror.reason, Exception):
        inst = urlerror.reason

    if util.safehasattr(inst, b'reason'):
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        return reason

    if getattr(inst, "strerror", None):
        return encoding.strtolocal(inst.strerror)

    return stringutil.forcebytestr(urlerror)
282
283
283
284
class lfsauthhandler(util.urlreq.basehandler):
    handler_order = 480  # Before HTTPDigestAuthHandler (== 490)

    def http_error_401(self, req, fp, code, msg, headers):
        """Enforces that any authentication performed is HTTP Basic
        Authentication.  No authentication is also acceptable.
        """
        authreq = headers.get('www-authenticate', None)
        if not authreq:
            return None

        scheme = authreq.split()[0]
        if scheme.lower() != 'basic':
            msg = _(b'the server must support Basic Authentication')
            raise util.urlerr.httperror(
                req.get_full_url(),
                code,
                encoding.strfromlocal(msg),
                headers,
                fp,
            )
        return None
305
306
306
307
307 class _gitlfsremote(object):
308 class _gitlfsremote(object):
308 def __init__(self, repo, url):
309 def __init__(self, repo, url):
309 ui = repo.ui
310 ui = repo.ui
310 self.ui = ui
311 self.ui = ui
311 baseurl, authinfo = url.authinfo()
312 baseurl, authinfo = url.authinfo()
312 self.baseurl = baseurl.rstrip(b'/')
313 self.baseurl = baseurl.rstrip(b'/')
313 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
314 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
314 if not useragent:
315 if not useragent:
315 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
316 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
316 self.urlopener = urlmod.opener(ui, authinfo, useragent)
317 self.urlopener = urlmod.opener(ui, authinfo, useragent)
317 self.urlopener.add_handler(lfsauthhandler())
318 self.urlopener.add_handler(lfsauthhandler())
318 self.retry = ui.configint(b'lfs', b'retry')
319 self.retry = ui.configint(b'lfs', b'retry')
319
320
320 def writebatch(self, pointers, fromstore):
321 def writebatch(self, pointers, fromstore):
321 """Batch upload from local to remote blobstore."""
322 """Batch upload from local to remote blobstore."""
322 self._batch(_deduplicate(pointers), fromstore, b'upload')
323 self._batch(_deduplicate(pointers), fromstore, b'upload')
323
324
324 def readbatch(self, pointers, tostore):
325 def readbatch(self, pointers, tostore):
325 """Batch download from remote to local blostore."""
326 """Batch download from remote to local blostore."""
326 self._batch(_deduplicate(pointers), tostore, b'download')
327 self._batch(_deduplicate(pointers), tostore, b'download')
327
328
328 def _batchrequest(self, pointers, action):
329 def _batchrequest(self, pointers, action):
329 """Get metadata about objects pointed by pointers for given action
330 """Get metadata about objects pointed by pointers for given action
330
331
331 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
332 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
332 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
333 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
333 """
334 """
334 objects = [
335 objects = [
335 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
336 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
336 for p in pointers
337 for p in pointers
337 ]
338 ]
338 requestdata = pycompat.bytesurl(
339 requestdata = pycompat.bytesurl(
339 json.dumps(
340 json.dumps(
340 {'objects': objects, 'operation': pycompat.strurl(action),}
341 {'objects': objects, 'operation': pycompat.strurl(action),}
341 )
342 )
342 )
343 )
343 url = b'%s/objects/batch' % self.baseurl
344 url = b'%s/objects/batch' % self.baseurl
344 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
345 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
345 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
346 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
346 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
347 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
347 try:
348 try:
348 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
349 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
349 rawjson = rsp.read()
350 rawjson = rsp.read()
350 except util.urlerr.httperror as ex:
351 except util.urlerr.httperror as ex:
351 hints = {
352 hints = {
352 400: _(
353 400: _(
353 b'check that lfs serving is enabled on %s and "%s" is '
354 b'check that lfs serving is enabled on %s and "%s" is '
354 b'supported'
355 b'supported'
355 )
356 )
356 % (self.baseurl, action),
357 % (self.baseurl, action),
357 404: _(b'the "lfs.url" config may be used to override %s')
358 404: _(b'the "lfs.url" config may be used to override %s')
358 % self.baseurl,
359 % self.baseurl,
359 }
360 }
360 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
361 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
361 raise LfsRemoteError(
362 raise LfsRemoteError(
362 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
363 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
363 hint=hint,
364 hint=hint,
364 )
365 )
365 except util.urlerr.urlerror as ex:
366 except util.urlerr.urlerror as ex:
366 hint = (
367 hint = (
367 _(b'the "lfs.url" config may be used to override %s')
368 _(b'the "lfs.url" config may be used to override %s')
368 % self.baseurl
369 % self.baseurl
369 )
370 )
370 raise LfsRemoteError(
371 raise LfsRemoteError(
371 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
372 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
372 )
373 )
373 try:
374 try:
374 response = pycompat.json_loads(rawjson)
375 response = pycompat.json_loads(rawjson)
375 except ValueError:
376 except ValueError:
376 raise LfsRemoteError(
377 raise LfsRemoteError(
377 _(b'LFS server returns invalid JSON: %s')
378 _(b'LFS server returns invalid JSON: %s')
378 % rawjson.encode("utf-8")
379 % rawjson.encode("utf-8")
379 )
380 )
380
381
381 if self.ui.debugflag:
382 if self.ui.debugflag:
382 self.ui.debug(b'Status: %d\n' % rsp.status)
383 self.ui.debug(b'Status: %d\n' % rsp.status)
383 # lfs-test-server and hg serve return headers in different order
384 # lfs-test-server and hg serve return headers in different order
384 headers = pycompat.bytestr(rsp.info()).strip()
385 headers = pycompat.bytestr(rsp.info()).strip()
385 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
386 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
386
387
387 if 'objects' in response:
388 if 'objects' in response:
388 response['objects'] = sorted(
389 response['objects'] = sorted(
389 response['objects'], key=lambda p: p['oid']
390 response['objects'], key=lambda p: p['oid']
390 )
391 )
391 self.ui.debug(
392 self.ui.debug(
392 b'%s\n'
393 b'%s\n'
393 % pycompat.bytesurl(
394 % pycompat.bytesurl(
394 json.dumps(
395 json.dumps(
395 response,
396 response,
396 indent=2,
397 indent=2,
397 separators=('', ': '),
398 separators=('', ': '),
398 sort_keys=True,
399 sort_keys=True,
399 )
400 )
400 )
401 )
401 )
402 )
402
403
403 def encodestr(x):
404 def encodestr(x):
404 if isinstance(x, pycompat.unicode):
405 if isinstance(x, pycompat.unicode):
405 return x.encode('utf-8')
406 return x.encode('utf-8')
406 return x
407 return x
407
408
408 return pycompat.rapply(encodestr, response)
409 return pycompat.rapply(encodestr, response)
409
410
410 def _checkforservererror(self, pointers, responses, action):
411 def _checkforservererror(self, pointers, responses, action):
411 """Scans errors from objects
412 """Scans errors from objects
412
413
413 Raises LfsRemoteError if any objects have an error"""
414 Raises LfsRemoteError if any objects have an error"""
414 for response in responses:
415 for response in responses:
415 # The server should return 404 when objects cannot be found. Some
416 # The server should return 404 when objects cannot be found. Some
416 # server implementation (ex. lfs-test-server) does not set "error"
417 # server implementation (ex. lfs-test-server) does not set "error"
417 # but just removes "download" from "actions". Treat that case
418 # but just removes "download" from "actions". Treat that case
418 # as the same as 404 error.
419 # as the same as 404 error.
419 if b'error' not in response:
420 if b'error' not in response:
420 if action == b'download' and action not in response.get(
421 if action == b'download' and action not in response.get(
421 b'actions', []
422 b'actions', []
422 ):
423 ):
423 code = 404
424 code = 404
424 else:
425 else:
425 continue
426 continue
426 else:
427 else:
427 # An error dict without a code doesn't make much sense, so
428 # An error dict without a code doesn't make much sense, so
428 # treat as a server error.
429 # treat as a server error.
429 code = response.get(b'error').get(b'code', 500)
430 code = response.get(b'error').get(b'code', 500)
430
431
431 ptrmap = {p.oid(): p for p in pointers}
432 ptrmap = {p.oid(): p for p in pointers}
432 p = ptrmap.get(response[b'oid'], None)
433 p = ptrmap.get(response[b'oid'], None)
433 if p:
434 if p:
434 filename = getattr(p, 'filename', b'unknown')
435 filename = getattr(p, 'filename', b'unknown')
435 errors = {
436 errors = {
436 404: b'The object does not exist',
437 404: b'The object does not exist',
437 410: b'The object was removed by the owner',
438 410: b'The object was removed by the owner',
438 422: b'Validation error',
439 422: b'Validation error',
439 500: b'Internal server error',
440 500: b'Internal server error',
440 }
441 }
441 msg = errors.get(code, b'status code %d' % code)
442 msg = errors.get(code, b'status code %d' % code)
442 raise LfsRemoteError(
443 raise LfsRemoteError(
443 _(b'LFS server error for "%s": %s') % (filename, msg)
444 _(b'LFS server error for "%s": %s') % (filename, msg)
444 )
445 )
445 else:
446 else:
446 raise LfsRemoteError(
447 raise LfsRemoteError(
447 _(b'LFS server error. Unsolicited response for oid %s')
448 _(b'LFS server error. Unsolicited response for oid %s')
448 % response[b'oid']
449 % response[b'oid']
449 )
450 )
450
451
451 def _extractobjects(self, response, pointers, action):
452 def _extractobjects(self, response, pointers, action):
452 """extract objects from response of the batch API
453 """extract objects from response of the batch API
453
454
454 response: parsed JSON object returned by batch API
455 response: parsed JSON object returned by batch API
455 return response['objects'] filtered by action
456 return response['objects'] filtered by action
456 raise if any object has an error
457 raise if any object has an error
457 """
458 """
458 # Scan errors from objects - fail early
459 # Scan errors from objects - fail early
459 objects = response.get(b'objects', [])
460 objects = response.get(b'objects', [])
460 self._checkforservererror(pointers, objects, action)
461 self._checkforservererror(pointers, objects, action)
461
462
462 # Filter objects with given action. Practically, this skips uploading
463 # Filter objects with given action. Practically, this skips uploading
463 # objects which exist in the server.
464 # objects which exist in the server.
464 filteredobjects = [
465 filteredobjects = [
465 o for o in objects if action in o.get(b'actions', [])
466 o for o in objects if action in o.get(b'actions', [])
466 ]
467 ]
467
468
468 return filteredobjects
469 return filteredobjects
469
470
    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
        basic-transfers.md
        """
        oid = obj[b'oid']
        href = obj[b'actions'][action].get(b'href')
        headers = obj[b'actions'][action].get(b'header', {}).items()

        request = util.urlreq.request(pycompat.strurl(href))
        if action == b'upload':
            # If uploading blobs, read data from local blobstore.
            if not localstore.verify(oid):
                raise error.Abort(
                    _(b'detected corrupt lfs object: %s') % oid,
                    hint=_(b'run hg verify'),
                )
            # lfsuploadfile wraps an open file handle; it is closed in the
            # ``finally`` below so the handle doesn't linger (or keep the
            # file locked on Windows) after the transfer.
            request.data = lfsuploadfile(localstore.open(oid))
            request.get_method = lambda: r'PUT'
            request.add_header('Content-Type', 'application/octet-stream')
            # NOTE: relies on lfsuploadfile supporting len() — presumably it
            # implements __len__ over the blob size; confirm at its definition.
            request.add_header('Content-Length', len(request.data))

        # Apply any transfer-specific headers the batch API handed back
        # (e.g. authorization tokens).
        for k, v in headers:
            request.add_header(pycompat.strurl(k), pycompat.strurl(v))

        try:
            with contextlib.closing(self.urlopener.open(request)) as res:
                contentlength = res.info().get(b"content-length")
                ui = self.ui  # Shorten debug lines
                if self.ui.debugflag:
                    ui.debug(b'Status: %d\n' % res.status)
                    # lfs-test-server and hg serve return headers in different
                    # order
                    headers = pycompat.bytestr(res.info()).strip()
                    ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))

                if action == b'download':
                    # If downloading blobs, store downloaded data to local
                    # blobstore
                    localstore.download(oid, res, contentlength)
                else:
                    # Drain the server's reply in 1MB chunks; a non-empty
                    # upload response is unexpected, so surface it in debug
                    # output rather than ignoring it silently.
                    blocks = []
                    while True:
                        data = res.read(1048576)
                        if not data:
                            break
                        blocks.append(data)

                    response = b"".join(blocks)
                    if response:
                        ui.debug(b'lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            if self.ui.debugflag:
                self.ui.debug(
                    b'%s: %s\n' % (oid, ex.read())
                )  # XXX: also bytes?
            raise LfsRemoteError(
                _(b'LFS HTTP error: %s (oid=%s, action=%s)')
                % (stringutil.forcebytestr(ex), oid, action)
            )
        except util.urlerr.urlerror as ex:
            hint = _(b'attempted connection to %s') % pycompat.bytesurl(
                util.urllibcompat.getfullurl(request)
            )
            raise LfsRemoteError(
                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
            )
        finally:
            # Explicitly close the blob's file handle on every exit path;
            # downloads leave request.data unset, so guard on truthiness.
            if request.data:
                request.data.close()
542
546
    def _batch(self, pointers, localstore, action):
        """Transfer all ``pointers`` in one batch, with progress reporting.

        pointers: iterable of pointer objects describing the blobs
        localstore: blobstore.local to read uploads from / write downloads to
        action: one of b'upload' or b'download'

        Raises ProgrammingError on an unknown action and lets transfer
        errors from _basictransfer propagate.
        """
        if action not in [b'upload', b'download']:
            raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)

        # Ask the server which objects actually need transferring, and how.
        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get(b'size', 0) for x in objects)
        # oid -> size map so progress can be advanced per completed blob.
        sizes = {}
        for obj in objects:
            sizes[obj.get(b'oid')] = obj.get(b'size', 0)
        topic = {
            b'upload': _(b'lfs uploading'),
            b'download': _(b'lfs downloading'),
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _(b'lfs: need to transfer %d objects (%s)\n')
                % (len(objects), util.bytecount(total))
            )

        def transfer(chunk):
            # Generator used both directly and as a worker function: yields
            # (1, oid) per completed blob, retrying transient socket errors
            # up to self.retry times per object.
            for obj in chunk:
                objsize = obj.get(b'size', 0)
                if self.ui.verbose:
                    if action == b'download':
                        msg = _(b'lfs: downloading %s (%s)\n')
                    elif action == b'upload':
                        msg = _(b'lfs: uploading %s (%s)\n')
                    self.ui.note(
                        msg % (obj.get(b'oid'), util.bytecount(objsize))
                    )
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get(b'oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _(b'lfs: failed: %r (remaining retry %d)\n')
                                % (stringutil.forcebytestr(ex), retry)
                            )
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get(b'oid')),
            )
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))

        # The transfers actually run here, as ``oids`` is consumed lazily.
        with self.ui.makeprogress(
            topic, unit=_(b"bytes"), total=total
        ) as progress:
            progress.update(0)
            processed = 0
            blobs = 0
            for _one, oid in oids:
                processed += sizes[oid]
                blobs += 1
                progress.update(processed)
                self.ui.note(_(b'lfs: processed: %s\n') % oid)

        if blobs > 0:
            if action == b'upload':
                self.ui.status(
                    _(b'lfs: uploaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
            elif action == b'download':
                self.ui.status(
                    _(b'lfs: downloaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
625
629
626 def __del__(self):
630 def __del__(self):
627 # copied from mercurial/httppeer.py
631 # copied from mercurial/httppeer.py
628 urlopener = getattr(self, 'urlopener', None)
632 urlopener = getattr(self, 'urlopener', None)
629 if urlopener:
633 if urlopener:
630 for h in urlopener.handlers:
634 for h in urlopener.handlers:
631 h.close()
635 h.close()
632 getattr(h, "close_all", lambda: None)()
636 getattr(h, "close_all", lambda: None)()
633
637
634
638
class _dummyremote(object):
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        # Blobs live under .hg/lfs/<url.path> in the repo's vfs.
        self.vfs = lfsvfs(repo.vfs.join(b'lfs', url.path))

    def writebatch(self, pointers, fromstore):
        # Copy each unique blob out of the local store; reading with
        # verify=True re-hashes the content before it leaves the store.
        for pointer in _deduplicate(pointers):
            blob = fromstore.read(pointer.oid(), verify=True)
            with self.vfs(pointer.oid(), b'wb', atomictemp=True) as fp:
                fp.write(blob)

    def readbatch(self, pointers, tostore):
        # Stream each unique blob from this store into the local one.
        for pointer in _deduplicate(pointers):
            with self.vfs(pointer.oid(), b'rb') as fp:
                tostore.download(pointer.oid(), fp, None)
653
657
class _nullremote(object):
    """Null store storing blobs to /dev/null."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore):
        # Intentionally discard everything pushed to this store.
        pass

    def readbatch(self, pointers, tostore):
        # Nothing was ever stored, so there is nothing to hand back.
        pass
666
670
class _promptremote(object):
    """Prompt user to set lfs.url when accessed."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()

    def _prompt(self):
        # Any attempt to transfer blobs without a configured endpoint is a
        # hard error rather than a silent no-op.
        raise error.Abort(_(b'lfs.url needs to be configured'))
682
686
# Dispatch table mapping a ``lfs.url`` scheme to the blobstore class that
# handles it.  The ``None`` key is used when no endpoint could be determined,
# and yields a store that aborts with a configuration prompt on first use.
_storemap = {
    b'https': _gitlfsremote,
    b'http': _gitlfsremote,
    b'file': _dummyremote,
    b'null': _nullremote,
    None: _promptremote,
}
690
694
691
695
def _deduplicate(pointers):
    """Remove any duplicate oids that exist in the list"""
    # Build an insertion-ordered mapping keyed by oid: the last pointer for
    # a given oid wins, while the oid keeps its first-seen position — the
    # same semantics as assigning into the dict one entry at a time.
    unique = util.sortdict((p.oid(), p) for p in pointers)
    return unique.values()
698
702
699
703
def _verify(oid, content):
    """Raise LfsCorruptionError unless *content* hashes to *oid* (sha256)."""
    actual = node.hex(hashlib.sha256(content).digest())
    if actual == oid:
        return
    raise LfsCorruptionError(
        _(b'detected corrupt lfs object: %s') % oid,
        hint=_(b'run hg verify'),
    )
707
711
708
712
def remote(repo, remote=None):
    """remotestore factory. return a store in _storemap depending on config

    If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
    infer the endpoint, based on the remote repository using the same path
    adjustments as git. As an extension, 'http' is supported as well so that
    ``hg serve`` works out of the box.

    https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
    """
    lfsurl = repo.ui.config(b'lfs', b'url')
    url = util.url(lfsurl or b'')
    if lfsurl is None:
        if remote:
            path = remote
        elif util.safehasattr(repo, b'_subtoppath'):
            # The pull command sets this during the optional update phase, which
            # tells exactly where the pull originated, whether 'paths.default'
            # or explicit.
            path = repo._subtoppath
        else:
            # TODO: investigate 'paths.remote:lfsurl' style path customization,
            # and fall back to inferring from 'paths.remote' if unspecified.
            path = repo.ui.config(b'paths', b'default') or b''

        defaulturl = util.url(path)

        # TODO: support local paths as well.
        # TODO: consider the ssh -> https transformation that git applies
        if defaulturl.scheme in (b'http', b'https'):
            # Ensure exactly one trailing slash before appending the git-lfs
            # suffix.  BUG FIX: this previously tested ``path[:-1] != b'/'``
            # (everything *but* the last byte), which is true for almost any
            # path, so paths already ending in '/' grew a second slash
            # (e.g. b'/repo/' -> b'/repo//.git/info/lfs').  ``path[-1:]``
            # inspects the final byte, which is the intended check.
            if defaulturl.path and defaulturl.path[-1:] != b'/':
                defaulturl.path += b'/'
            defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'

            url = util.url(bytes(defaulturl))
            repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)

    scheme = url.scheme
    if scheme not in _storemap:
        raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
    return _storemap[scheme](repo, url)
750
754
751
755
class LfsRemoteError(error.StorageError):
    """Raised when talking to the remote LFS endpoint fails (HTTP errors,
    connection failures, or an error reported per-object by the server)."""

    pass
754
758
755
759
# Raised by _verify() above when a blob's sha256 doesn't match its oid.
class LfsCorruptionError(error.Abort):
    """Raised when a corrupt blob is detected, aborting an operation

    It exists to allow specialized handling on the server side."""
General Comments 0
You need to be logged in to leave comments. Login now