##// END OF EJS Templates
lfs: check content length after downloading content...
Matt Harbison -
r44929:0ee0a3f6 default
parent child Browse files
Show More
@@ -1,748 +1,763 b''
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import json
13 import json
14 import os
14 import os
15 import re
15 import re
16 import socket
16 import socket
17
17
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19 from mercurial.pycompat import getattr
19 from mercurial.pycompat import getattr
20
20
21 from mercurial import (
21 from mercurial import (
22 encoding,
22 encoding,
23 error,
23 error,
24 node,
24 node,
25 pathutil,
25 pathutil,
26 pycompat,
26 pycompat,
27 url as urlmod,
27 url as urlmod,
28 util,
28 util,
29 vfs as vfsmod,
29 vfs as vfsmod,
30 worker,
30 worker,
31 )
31 )
32
32
33 from mercurial.utils import stringutil
33 from mercurial.utils import stringutil
34
34
35 from ..largefiles import lfutil
35 from ..largefiles import lfutil
36
36
37 # 64 bytes for SHA256
37 # 64 bytes for SHA256
38 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
38 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
39
39
40
40
class lfsvfs(vfsmod.vfs):
    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[0:2], path[2:])

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        base = os.path.normpath(self.base)
        # when dirpath == base, dirpath[striplen:] becomes empty
        # because len(dirpath) < striplen.
        striplen = len(pathutil.normasprefix(base))
        oids = []

        walkroot = self.reljoin(self.base, path or b'')
        for dirpath, _dirs, files in os.walk(walkroot, onerror=onerror):
            dirpath = dirpath[striplen:]

            # Silently skip unexpected files and directories: a valid oid
            # lives exactly one level down, in a two-hex-char directory.
            if len(dirpath) == 2:
                oids.extend(
                    dirpath + f for f in files if _lfsre.match(dirpath + f)
                )

        yield (b'', [], oids)
72
72
class nullvfs(lfsvfs):
    """A no-op blob store used when the user cache is disabled."""

    def __init__(self):
        # Intentionally skip lfsvfs/vfs initialization: there is no backing
        # directory for this store.
        pass

    def exists(self, oid):
        return False

    def read(self, oid):
        # store.read() calls into here if the blob doesn't exist in its
        # self.vfs.  Raise the same error as a normal vfs when asked to read a
        # file that doesn't exist.  The only difference is the full file path
        # isn't available in the error.
        raise IOError(
            errno.ENOENT,
            pycompat.sysstr(b'%s: No such file or directory' % oid),
        )

    def walk(self, path=None, onerror=None):
        # Same shape as lfsvfs.walk(), but there is never anything to yield.
        return (b'', [], [])

    def write(self, oid, data):
        # Writes are silently discarded.
        pass
95
95
96
96
class filewithprogress(object):
    """a file-like object that supports __len__ and read.

    Useful to provide progress information for how many bytes are read.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback  # func(readsize)
        # Determine the total length up front so __len__ is O(1).
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        # After EOF the wrapped file is closed and dropped; behave like an
        # exhausted stream from then on.
        if self._fp is None:
            return b''
        data = self._fp.read(size)
        if not data:
            self._fp.close()
            self._fp = None
            return data
        if self._callback:
            self._callback(len(data))
        return data
124
124
125
125
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large
    blobs to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        fullpath = repo.svfs.join(b'lfs/objects')
        self.vfs = lfsvfs(fullpath)

        if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
            self.cachevfs = nullvfs()
        else:
            usercache = lfutil._usercachedir(repo.ui, b'lfs')
            self.cachevfs = lfsvfs(usercache)
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file.  Commit will
        # write to both it and the local store, as will anything that downloads
        # the blobs.  However, things like clone without an update won't
        # populate the local store.  For an init + push of a local clone,
        # the usercache is the only place it _could_ be.  If not present, the
        # missing file msg here will indicate the local repo, not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, b'rb')

        return self.vfs(oid, b'rb')

    def download(self, oid, src, content_length):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore."""
        sha256 = hashlib.sha256()
        size = 0

        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                sha256.update(chunk)
                size += len(chunk)

            # If the server advertised a length longer than what we actually
            # received, then we should expect that the server crashed while
            # producing the response (but the server has no way of telling us
            # that), and we really don't need to try to write the response to
            # the localstore, because it's not going to match the expected.
            if content_length is not None and int(content_length) != size:
                msg = (
                    b"Response length (%s) does not match Content-Length "
                    b"header (%d): likely server-side crash"
                )
                raise LfsRemoteError(_(msg) % (size, int(content_length)))

            realoid = node.hex(sha256.digest())
            if realoid != oid:
                raise LfsCorruptionError(
                    _(b'corrupt remote lfs object: %s') % oid
                )

        self._linktousercache(oid)

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or similar.
        As such, there is no need to verify the data.  Imports from a remote
        store must use ``download()`` instead."""
        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            fp.write(data)

        self._linktousercache(oid)

    def linkfromusercache(self, oid):
        """Link blobs found in the user cache into this store.

        The server module needs to do this when it lets the client know not to
        upload the blob, to ensure it is always available in this store.
        Normally this is done implicitly when the client reads or writes the
        blob, but that doesn't happen when the server tells the client that it
        already has the blob.
        """
        if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
            self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))

    def _linktousercache(self, oid):
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid) and not isinstance(
            self.cachevfs, nullvfs
        ):
            self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if not self.vfs.exists(oid):
            blob = self._read(self.cachevfs, oid, verify)

            # Even if revlog will verify the content, it needs to be verified
            # now before making the hardlink to avoid propagating corrupt blobs.
            # Don't abort if corruption is detected, because `hg verify` will
            # give more useful info about the corruption- simply don't add the
            # hardlink.
            if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
                self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
                lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        else:
            self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
            blob = self._read(self.vfs, oid, verify)
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def verify(self, oid):
        """Indicate whether or not the hash of the underlying file matches its
        name."""
        sha256 = hashlib.sha256()

        with self.open(oid) as fp:
            for chunk in util.filechunkiter(fp, size=1048576):
                sha256.update(chunk)

        return oid == node.hex(sha256.digest())

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
249
263
250
264
def _urlerrorreason(urlerror):
    '''Create a friendly message for the given URLError to be used in an
    LfsRemoteError message.
    '''
    inst = urlerror

    # Prefer the wrapped exception when URLError carries one.
    if isinstance(urlerror.reason, Exception):
        inst = urlerror.reason

    if util.safehasattr(inst, b'reason'):
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        return reason

    if getattr(inst, "strerror", None):
        return encoding.strtolocal(inst.strerror)

    return stringutil.forcebytestr(urlerror)
274
288
275
289
class lfsauthhandler(util.urlreq.basehandler):
    handler_order = 480  # Before HTTPDigestAuthHandler (== 490)

    def http_error_401(self, req, fp, code, msg, headers):
        """Enforces that any authentication performed is HTTP Basic
        Authentication.  No authentication is also acceptable.
        """
        authreq = headers.get('www-authenticate', None)
        if authreq:
            scheme = authreq.split()[0]

            # Anything other than Basic (e.g. Digest) is rejected outright.
            if scheme.lower() != 'basic':
                msg = _(b'the server must support Basic Authentication')
                raise util.urlerr.httperror(
                    req.get_full_url(),
                    code,
                    encoding.strfromlocal(msg),
                    headers,
                    fp,
                )
        # Returning None lets other handlers (or the default failure) proceed.
        return None
297
311
298
312
299 class _gitlfsremote(object):
313 class _gitlfsremote(object):
300 def __init__(self, repo, url):
314 def __init__(self, repo, url):
301 ui = repo.ui
315 ui = repo.ui
302 self.ui = ui
316 self.ui = ui
303 baseurl, authinfo = url.authinfo()
317 baseurl, authinfo = url.authinfo()
304 self.baseurl = baseurl.rstrip(b'/')
318 self.baseurl = baseurl.rstrip(b'/')
305 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
319 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
306 if not useragent:
320 if not useragent:
307 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
321 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
308 self.urlopener = urlmod.opener(ui, authinfo, useragent)
322 self.urlopener = urlmod.opener(ui, authinfo, useragent)
309 self.urlopener.add_handler(lfsauthhandler())
323 self.urlopener.add_handler(lfsauthhandler())
310 self.retry = ui.configint(b'lfs', b'retry')
324 self.retry = ui.configint(b'lfs', b'retry')
311
325
312 def writebatch(self, pointers, fromstore):
326 def writebatch(self, pointers, fromstore):
313 """Batch upload from local to remote blobstore."""
327 """Batch upload from local to remote blobstore."""
314 self._batch(_deduplicate(pointers), fromstore, b'upload')
328 self._batch(_deduplicate(pointers), fromstore, b'upload')
315
329
316 def readbatch(self, pointers, tostore):
330 def readbatch(self, pointers, tostore):
317 """Batch download from remote to local blostore."""
331 """Batch download from remote to local blostore."""
318 self._batch(_deduplicate(pointers), tostore, b'download')
332 self._batch(_deduplicate(pointers), tostore, b'download')
319
333
320 def _batchrequest(self, pointers, action):
334 def _batchrequest(self, pointers, action):
321 """Get metadata about objects pointed by pointers for given action
335 """Get metadata about objects pointed by pointers for given action
322
336
323 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
337 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
324 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
338 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
325 """
339 """
326 objects = [
340 objects = [
327 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
341 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
328 for p in pointers
342 for p in pointers
329 ]
343 ]
330 requestdata = pycompat.bytesurl(
344 requestdata = pycompat.bytesurl(
331 json.dumps(
345 json.dumps(
332 {'objects': objects, 'operation': pycompat.strurl(action),}
346 {'objects': objects, 'operation': pycompat.strurl(action),}
333 )
347 )
334 )
348 )
335 url = b'%s/objects/batch' % self.baseurl
349 url = b'%s/objects/batch' % self.baseurl
336 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
350 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
337 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
351 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
338 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
352 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
339 try:
353 try:
340 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
354 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
341 rawjson = rsp.read()
355 rawjson = rsp.read()
342 except util.urlerr.httperror as ex:
356 except util.urlerr.httperror as ex:
343 hints = {
357 hints = {
344 400: _(
358 400: _(
345 b'check that lfs serving is enabled on %s and "%s" is '
359 b'check that lfs serving is enabled on %s and "%s" is '
346 b'supported'
360 b'supported'
347 )
361 )
348 % (self.baseurl, action),
362 % (self.baseurl, action),
349 404: _(b'the "lfs.url" config may be used to override %s')
363 404: _(b'the "lfs.url" config may be used to override %s')
350 % self.baseurl,
364 % self.baseurl,
351 }
365 }
352 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
366 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
353 raise LfsRemoteError(
367 raise LfsRemoteError(
354 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
368 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
355 hint=hint,
369 hint=hint,
356 )
370 )
357 except util.urlerr.urlerror as ex:
371 except util.urlerr.urlerror as ex:
358 hint = (
372 hint = (
359 _(b'the "lfs.url" config may be used to override %s')
373 _(b'the "lfs.url" config may be used to override %s')
360 % self.baseurl
374 % self.baseurl
361 )
375 )
362 raise LfsRemoteError(
376 raise LfsRemoteError(
363 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
377 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
364 )
378 )
365 try:
379 try:
366 response = pycompat.json_loads(rawjson)
380 response = pycompat.json_loads(rawjson)
367 except ValueError:
381 except ValueError:
368 raise LfsRemoteError(
382 raise LfsRemoteError(
369 _(b'LFS server returns invalid JSON: %s')
383 _(b'LFS server returns invalid JSON: %s')
370 % rawjson.encode("utf-8")
384 % rawjson.encode("utf-8")
371 )
385 )
372
386
373 if self.ui.debugflag:
387 if self.ui.debugflag:
374 self.ui.debug(b'Status: %d\n' % rsp.status)
388 self.ui.debug(b'Status: %d\n' % rsp.status)
375 # lfs-test-server and hg serve return headers in different order
389 # lfs-test-server and hg serve return headers in different order
376 headers = pycompat.bytestr(rsp.info()).strip()
390 headers = pycompat.bytestr(rsp.info()).strip()
377 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
391 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
378
392
379 if 'objects' in response:
393 if 'objects' in response:
380 response['objects'] = sorted(
394 response['objects'] = sorted(
381 response['objects'], key=lambda p: p['oid']
395 response['objects'], key=lambda p: p['oid']
382 )
396 )
383 self.ui.debug(
397 self.ui.debug(
384 b'%s\n'
398 b'%s\n'
385 % pycompat.bytesurl(
399 % pycompat.bytesurl(
386 json.dumps(
400 json.dumps(
387 response,
401 response,
388 indent=2,
402 indent=2,
389 separators=('', ': '),
403 separators=('', ': '),
390 sort_keys=True,
404 sort_keys=True,
391 )
405 )
392 )
406 )
393 )
407 )
394
408
395 def encodestr(x):
409 def encodestr(x):
396 if isinstance(x, pycompat.unicode):
410 if isinstance(x, pycompat.unicode):
397 return x.encode('utf-8')
411 return x.encode('utf-8')
398 return x
412 return x
399
413
400 return pycompat.rapply(encodestr, response)
414 return pycompat.rapply(encodestr, response)
401
415
402 def _checkforservererror(self, pointers, responses, action):
416 def _checkforservererror(self, pointers, responses, action):
403 """Scans errors from objects
417 """Scans errors from objects
404
418
405 Raises LfsRemoteError if any objects have an error"""
419 Raises LfsRemoteError if any objects have an error"""
406 for response in responses:
420 for response in responses:
407 # The server should return 404 when objects cannot be found. Some
421 # The server should return 404 when objects cannot be found. Some
408 # server implementation (ex. lfs-test-server) does not set "error"
422 # server implementation (ex. lfs-test-server) does not set "error"
409 # but just removes "download" from "actions". Treat that case
423 # but just removes "download" from "actions". Treat that case
410 # as the same as 404 error.
424 # as the same as 404 error.
411 if b'error' not in response:
425 if b'error' not in response:
412 if action == b'download' and action not in response.get(
426 if action == b'download' and action not in response.get(
413 b'actions', []
427 b'actions', []
414 ):
428 ):
415 code = 404
429 code = 404
416 else:
430 else:
417 continue
431 continue
418 else:
432 else:
419 # An error dict without a code doesn't make much sense, so
433 # An error dict without a code doesn't make much sense, so
420 # treat as a server error.
434 # treat as a server error.
421 code = response.get(b'error').get(b'code', 500)
435 code = response.get(b'error').get(b'code', 500)
422
436
423 ptrmap = {p.oid(): p for p in pointers}
437 ptrmap = {p.oid(): p for p in pointers}
424 p = ptrmap.get(response[b'oid'], None)
438 p = ptrmap.get(response[b'oid'], None)
425 if p:
439 if p:
426 filename = getattr(p, 'filename', b'unknown')
440 filename = getattr(p, 'filename', b'unknown')
427 errors = {
441 errors = {
428 404: b'The object does not exist',
442 404: b'The object does not exist',
429 410: b'The object was removed by the owner',
443 410: b'The object was removed by the owner',
430 422: b'Validation error',
444 422: b'Validation error',
431 500: b'Internal server error',
445 500: b'Internal server error',
432 }
446 }
433 msg = errors.get(code, b'status code %d' % code)
447 msg = errors.get(code, b'status code %d' % code)
434 raise LfsRemoteError(
448 raise LfsRemoteError(
435 _(b'LFS server error for "%s": %s') % (filename, msg)
449 _(b'LFS server error for "%s": %s') % (filename, msg)
436 )
450 )
437 else:
451 else:
438 raise LfsRemoteError(
452 raise LfsRemoteError(
439 _(b'LFS server error. Unsolicited response for oid %s')
453 _(b'LFS server error. Unsolicited response for oid %s')
440 % response[b'oid']
454 % response[b'oid']
441 )
455 )
442
456
443 def _extractobjects(self, response, pointers, action):
457 def _extractobjects(self, response, pointers, action):
444 """extract objects from response of the batch API
458 """extract objects from response of the batch API
445
459
446 response: parsed JSON object returned by batch API
460 response: parsed JSON object returned by batch API
447 return response['objects'] filtered by action
461 return response['objects'] filtered by action
448 raise if any object has an error
462 raise if any object has an error
449 """
463 """
450 # Scan errors from objects - fail early
464 # Scan errors from objects - fail early
451 objects = response.get(b'objects', [])
465 objects = response.get(b'objects', [])
452 self._checkforservererror(pointers, objects, action)
466 self._checkforservererror(pointers, objects, action)
453
467
454 # Filter objects with given action. Practically, this skips uploading
468 # Filter objects with given action. Practically, this skips uploading
455 # objects which exist in the server.
469 # objects which exist in the server.
456 filteredobjects = [
470 filteredobjects = [
457 o for o in objects if action in o.get(b'actions', [])
471 o for o in objects if action in o.get(b'actions', [])
458 ]
472 ]
459
473
460 return filteredobjects
474 return filteredobjects
461
475
462 def _basictransfer(self, obj, action, localstore):
476 def _basictransfer(self, obj, action, localstore):
463 """Download or upload a single object using basic transfer protocol
477 """Download or upload a single object using basic transfer protocol
464
478
465 obj: dict, an object description returned by batch API
479 obj: dict, an object description returned by batch API
466 action: string, one of ['upload', 'download']
480 action: string, one of ['upload', 'download']
467 localstore: blobstore.local
481 localstore: blobstore.local
468
482
469 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
483 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
470 basic-transfers.md
484 basic-transfers.md
471 """
485 """
472 oid = obj[b'oid']
486 oid = obj[b'oid']
473 href = obj[b'actions'][action].get(b'href')
487 href = obj[b'actions'][action].get(b'href')
474 headers = obj[b'actions'][action].get(b'header', {}).items()
488 headers = obj[b'actions'][action].get(b'header', {}).items()
475
489
476 request = util.urlreq.request(pycompat.strurl(href))
490 request = util.urlreq.request(pycompat.strurl(href))
477 if action == b'upload':
491 if action == b'upload':
478 # If uploading blobs, read data from local blobstore.
492 # If uploading blobs, read data from local blobstore.
479 if not localstore.verify(oid):
493 if not localstore.verify(oid):
480 raise error.Abort(
494 raise error.Abort(
481 _(b'detected corrupt lfs object: %s') % oid,
495 _(b'detected corrupt lfs object: %s') % oid,
482 hint=_(b'run hg verify'),
496 hint=_(b'run hg verify'),
483 )
497 )
484 request.data = filewithprogress(localstore.open(oid), None)
498 request.data = filewithprogress(localstore.open(oid), None)
485 request.get_method = lambda: r'PUT'
499 request.get_method = lambda: r'PUT'
486 request.add_header('Content-Type', 'application/octet-stream')
500 request.add_header('Content-Type', 'application/octet-stream')
487 request.add_header('Content-Length', len(request.data))
501 request.add_header('Content-Length', len(request.data))
488
502
489 for k, v in headers:
503 for k, v in headers:
490 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
504 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
491
505
492 response = b''
506 response = b''
493 try:
507 try:
494 with contextlib.closing(self.urlopener.open(request)) as res:
508 with contextlib.closing(self.urlopener.open(request)) as res:
509 contentlength = res.info().get(b"content-length")
495 ui = self.ui # Shorten debug lines
510 ui = self.ui # Shorten debug lines
496 if self.ui.debugflag:
511 if self.ui.debugflag:
497 ui.debug(b'Status: %d\n' % res.status)
512 ui.debug(b'Status: %d\n' % res.status)
498 # lfs-test-server and hg serve return headers in different
513 # lfs-test-server and hg serve return headers in different
499 # order
514 # order
500 headers = pycompat.bytestr(res.info()).strip()
515 headers = pycompat.bytestr(res.info()).strip()
501 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
516 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
502
517
503 if action == b'download':
518 if action == b'download':
504 # If downloading blobs, store downloaded data to local
519 # If downloading blobs, store downloaded data to local
505 # blobstore
520 # blobstore
506 localstore.download(oid, res)
521 localstore.download(oid, res, contentlength)
507 else:
522 else:
508 while True:
523 while True:
509 data = res.read(1048576)
524 data = res.read(1048576)
510 if not data:
525 if not data:
511 break
526 break
512 response += data
527 response += data
513 if response:
528 if response:
514 ui.debug(b'lfs %s response: %s' % (action, response))
529 ui.debug(b'lfs %s response: %s' % (action, response))
515 except util.urlerr.httperror as ex:
530 except util.urlerr.httperror as ex:
516 if self.ui.debugflag:
531 if self.ui.debugflag:
517 self.ui.debug(
532 self.ui.debug(
518 b'%s: %s\n' % (oid, ex.read())
533 b'%s: %s\n' % (oid, ex.read())
519 ) # XXX: also bytes?
534 ) # XXX: also bytes?
520 raise LfsRemoteError(
535 raise LfsRemoteError(
521 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
536 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
522 % (stringutil.forcebytestr(ex), oid, action)
537 % (stringutil.forcebytestr(ex), oid, action)
523 )
538 )
524 except util.urlerr.urlerror as ex:
539 except util.urlerr.urlerror as ex:
525 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
540 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
526 util.urllibcompat.getfullurl(request)
541 util.urllibcompat.getfullurl(request)
527 )
542 )
528 raise LfsRemoteError(
543 raise LfsRemoteError(
529 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
544 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
530 )
545 )
531
546
532 def _batch(self, pointers, localstore, action):
547 def _batch(self, pointers, localstore, action):
533 if action not in [b'upload', b'download']:
548 if action not in [b'upload', b'download']:
534 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
549 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
535
550
536 response = self._batchrequest(pointers, action)
551 response = self._batchrequest(pointers, action)
537 objects = self._extractobjects(response, pointers, action)
552 objects = self._extractobjects(response, pointers, action)
538 total = sum(x.get(b'size', 0) for x in objects)
553 total = sum(x.get(b'size', 0) for x in objects)
539 sizes = {}
554 sizes = {}
540 for obj in objects:
555 for obj in objects:
541 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
556 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
542 topic = {
557 topic = {
543 b'upload': _(b'lfs uploading'),
558 b'upload': _(b'lfs uploading'),
544 b'download': _(b'lfs downloading'),
559 b'download': _(b'lfs downloading'),
545 }[action]
560 }[action]
546 if len(objects) > 1:
561 if len(objects) > 1:
547 self.ui.note(
562 self.ui.note(
548 _(b'lfs: need to transfer %d objects (%s)\n')
563 _(b'lfs: need to transfer %d objects (%s)\n')
549 % (len(objects), util.bytecount(total))
564 % (len(objects), util.bytecount(total))
550 )
565 )
551
566
552 def transfer(chunk):
567 def transfer(chunk):
553 for obj in chunk:
568 for obj in chunk:
554 objsize = obj.get(b'size', 0)
569 objsize = obj.get(b'size', 0)
555 if self.ui.verbose:
570 if self.ui.verbose:
556 if action == b'download':
571 if action == b'download':
557 msg = _(b'lfs: downloading %s (%s)\n')
572 msg = _(b'lfs: downloading %s (%s)\n')
558 elif action == b'upload':
573 elif action == b'upload':
559 msg = _(b'lfs: uploading %s (%s)\n')
574 msg = _(b'lfs: uploading %s (%s)\n')
560 self.ui.note(
575 self.ui.note(
561 msg % (obj.get(b'oid'), util.bytecount(objsize))
576 msg % (obj.get(b'oid'), util.bytecount(objsize))
562 )
577 )
563 retry = self.retry
578 retry = self.retry
564 while True:
579 while True:
565 try:
580 try:
566 self._basictransfer(obj, action, localstore)
581 self._basictransfer(obj, action, localstore)
567 yield 1, obj.get(b'oid')
582 yield 1, obj.get(b'oid')
568 break
583 break
569 except socket.error as ex:
584 except socket.error as ex:
570 if retry > 0:
585 if retry > 0:
571 self.ui.note(
586 self.ui.note(
572 _(b'lfs: failed: %r (remaining retry %d)\n')
587 _(b'lfs: failed: %r (remaining retry %d)\n')
573 % (stringutil.forcebytestr(ex), retry)
588 % (stringutil.forcebytestr(ex), retry)
574 )
589 )
575 retry -= 1
590 retry -= 1
576 continue
591 continue
577 raise
592 raise
578
593
579 # Until https multiplexing gets sorted out
594 # Until https multiplexing gets sorted out
580 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
595 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
581 oids = worker.worker(
596 oids = worker.worker(
582 self.ui,
597 self.ui,
583 0.1,
598 0.1,
584 transfer,
599 transfer,
585 (),
600 (),
586 sorted(objects, key=lambda o: o.get(b'oid')),
601 sorted(objects, key=lambda o: o.get(b'oid')),
587 )
602 )
588 else:
603 else:
589 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
604 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
590
605
591 with self.ui.makeprogress(
606 with self.ui.makeprogress(
592 topic, unit=_(b"bytes"), total=total
607 topic, unit=_(b"bytes"), total=total
593 ) as progress:
608 ) as progress:
594 progress.update(0)
609 progress.update(0)
595 processed = 0
610 processed = 0
596 blobs = 0
611 blobs = 0
597 for _one, oid in oids:
612 for _one, oid in oids:
598 processed += sizes[oid]
613 processed += sizes[oid]
599 blobs += 1
614 blobs += 1
600 progress.update(processed)
615 progress.update(processed)
601 self.ui.note(_(b'lfs: processed: %s\n') % oid)
616 self.ui.note(_(b'lfs: processed: %s\n') % oid)
602
617
603 if blobs > 0:
618 if blobs > 0:
604 if action == b'upload':
619 if action == b'upload':
605 self.ui.status(
620 self.ui.status(
606 _(b'lfs: uploaded %d files (%s)\n')
621 _(b'lfs: uploaded %d files (%s)\n')
607 % (blobs, util.bytecount(processed))
622 % (blobs, util.bytecount(processed))
608 )
623 )
609 elif action == b'download':
624 elif action == b'download':
610 self.ui.status(
625 self.ui.status(
611 _(b'lfs: downloaded %d files (%s)\n')
626 _(b'lfs: downloaded %d files (%s)\n')
612 % (blobs, util.bytecount(processed))
627 % (blobs, util.bytecount(processed))
613 )
628 )
614
629
615 def __del__(self):
630 def __del__(self):
616 # copied from mercurial/httppeer.py
631 # copied from mercurial/httppeer.py
617 urlopener = getattr(self, 'urlopener', None)
632 urlopener = getattr(self, 'urlopener', None)
618 if urlopener:
633 if urlopener:
619 for h in urlopener.handlers:
634 for h in urlopener.handlers:
620 h.close()
635 h.close()
621 getattr(h, "close_all", lambda: None)()
636 getattr(h, "close_all", lambda: None)()
622
637
623
638
624 class _dummyremote(object):
639 class _dummyremote(object):
625 """Dummy store storing blobs to temp directory."""
640 """Dummy store storing blobs to temp directory."""
626
641
627 def __init__(self, repo, url):
642 def __init__(self, repo, url):
628 fullpath = repo.vfs.join(b'lfs', url.path)
643 fullpath = repo.vfs.join(b'lfs', url.path)
629 self.vfs = lfsvfs(fullpath)
644 self.vfs = lfsvfs(fullpath)
630
645
631 def writebatch(self, pointers, fromstore):
646 def writebatch(self, pointers, fromstore):
632 for p in _deduplicate(pointers):
647 for p in _deduplicate(pointers):
633 content = fromstore.read(p.oid(), verify=True)
648 content = fromstore.read(p.oid(), verify=True)
634 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
649 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
635 fp.write(content)
650 fp.write(content)
636
651
637 def readbatch(self, pointers, tostore):
652 def readbatch(self, pointers, tostore):
638 for p in _deduplicate(pointers):
653 for p in _deduplicate(pointers):
639 with self.vfs(p.oid(), b'rb') as fp:
654 with self.vfs(p.oid(), b'rb') as fp:
640 tostore.download(p.oid(), fp)
655 tostore.download(p.oid(), fp, None)
641
656
642
657
643 class _nullremote(object):
658 class _nullremote(object):
644 """Null store storing blobs to /dev/null."""
659 """Null store storing blobs to /dev/null."""
645
660
646 def __init__(self, repo, url):
661 def __init__(self, repo, url):
647 pass
662 pass
648
663
649 def writebatch(self, pointers, fromstore):
664 def writebatch(self, pointers, fromstore):
650 pass
665 pass
651
666
652 def readbatch(self, pointers, tostore):
667 def readbatch(self, pointers, tostore):
653 pass
668 pass
654
669
655
670
656 class _promptremote(object):
671 class _promptremote(object):
657 """Prompt user to set lfs.url when accessed."""
672 """Prompt user to set lfs.url when accessed."""
658
673
659 def __init__(self, repo, url):
674 def __init__(self, repo, url):
660 pass
675 pass
661
676
662 def writebatch(self, pointers, fromstore, ui=None):
677 def writebatch(self, pointers, fromstore, ui=None):
663 self._prompt()
678 self._prompt()
664
679
665 def readbatch(self, pointers, tostore, ui=None):
680 def readbatch(self, pointers, tostore, ui=None):
666 self._prompt()
681 self._prompt()
667
682
668 def _prompt(self):
683 def _prompt(self):
669 raise error.Abort(_(b'lfs.url needs to be configured'))
684 raise error.Abort(_(b'lfs.url needs to be configured'))
670
685
671
686
672 _storemap = {
687 _storemap = {
673 b'https': _gitlfsremote,
688 b'https': _gitlfsremote,
674 b'http': _gitlfsremote,
689 b'http': _gitlfsremote,
675 b'file': _dummyremote,
690 b'file': _dummyremote,
676 b'null': _nullremote,
691 b'null': _nullremote,
677 None: _promptremote,
692 None: _promptremote,
678 }
693 }
679
694
680
695
681 def _deduplicate(pointers):
696 def _deduplicate(pointers):
682 """Remove any duplicate oids that exist in the list"""
697 """Remove any duplicate oids that exist in the list"""
683 reduced = util.sortdict()
698 reduced = util.sortdict()
684 for p in pointers:
699 for p in pointers:
685 reduced[p.oid()] = p
700 reduced[p.oid()] = p
686 return reduced.values()
701 return reduced.values()
687
702
688
703
689 def _verify(oid, content):
704 def _verify(oid, content):
690 realoid = node.hex(hashlib.sha256(content).digest())
705 realoid = node.hex(hashlib.sha256(content).digest())
691 if realoid != oid:
706 if realoid != oid:
692 raise LfsCorruptionError(
707 raise LfsCorruptionError(
693 _(b'detected corrupt lfs object: %s') % oid,
708 _(b'detected corrupt lfs object: %s') % oid,
694 hint=_(b'run hg verify'),
709 hint=_(b'run hg verify'),
695 )
710 )
696
711
697
712
698 def remote(repo, remote=None):
713 def remote(repo, remote=None):
699 """remotestore factory. return a store in _storemap depending on config
714 """remotestore factory. return a store in _storemap depending on config
700
715
701 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
716 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
702 infer the endpoint, based on the remote repository using the same path
717 infer the endpoint, based on the remote repository using the same path
703 adjustments as git. As an extension, 'http' is supported as well so that
718 adjustments as git. As an extension, 'http' is supported as well so that
704 ``hg serve`` works out of the box.
719 ``hg serve`` works out of the box.
705
720
706 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
721 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
707 """
722 """
708 lfsurl = repo.ui.config(b'lfs', b'url')
723 lfsurl = repo.ui.config(b'lfs', b'url')
709 url = util.url(lfsurl or b'')
724 url = util.url(lfsurl or b'')
710 if lfsurl is None:
725 if lfsurl is None:
711 if remote:
726 if remote:
712 path = remote
727 path = remote
713 elif util.safehasattr(repo, b'_subtoppath'):
728 elif util.safehasattr(repo, b'_subtoppath'):
714 # The pull command sets this during the optional update phase, which
729 # The pull command sets this during the optional update phase, which
715 # tells exactly where the pull originated, whether 'paths.default'
730 # tells exactly where the pull originated, whether 'paths.default'
716 # or explicit.
731 # or explicit.
717 path = repo._subtoppath
732 path = repo._subtoppath
718 else:
733 else:
719 # TODO: investigate 'paths.remote:lfsurl' style path customization,
734 # TODO: investigate 'paths.remote:lfsurl' style path customization,
720 # and fall back to inferring from 'paths.remote' if unspecified.
735 # and fall back to inferring from 'paths.remote' if unspecified.
721 path = repo.ui.config(b'paths', b'default') or b''
736 path = repo.ui.config(b'paths', b'default') or b''
722
737
723 defaulturl = util.url(path)
738 defaulturl = util.url(path)
724
739
725 # TODO: support local paths as well.
740 # TODO: support local paths as well.
726 # TODO: consider the ssh -> https transformation that git applies
741 # TODO: consider the ssh -> https transformation that git applies
727 if defaulturl.scheme in (b'http', b'https'):
742 if defaulturl.scheme in (b'http', b'https'):
728 if defaulturl.path and defaulturl.path[:-1] != b'/':
743 if defaulturl.path and defaulturl.path[:-1] != b'/':
729 defaulturl.path += b'/'
744 defaulturl.path += b'/'
730 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
745 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
731
746
732 url = util.url(bytes(defaulturl))
747 url = util.url(bytes(defaulturl))
733 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
748 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
734
749
735 scheme = url.scheme
750 scheme = url.scheme
736 if scheme not in _storemap:
751 if scheme not in _storemap:
737 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
752 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
738 return _storemap[scheme](repo, url)
753 return _storemap[scheme](repo, url)
739
754
740
755
741 class LfsRemoteError(error.StorageError):
756 class LfsRemoteError(error.StorageError):
742 pass
757 pass
743
758
744
759
745 class LfsCorruptionError(error.Abort):
760 class LfsCorruptionError(error.Abort):
746 """Raised when a corrupt blob is detected, aborting an operation
761 """Raised when a corrupt blob is detected, aborting an operation
747
762
748 It exists to allow specialized handling on the server side."""
763 It exists to allow specialized handling on the server side."""
@@ -1,370 +1,370 b''
1 # wireprotolfsserver.py - lfs protocol server side implementation
1 # wireprotolfsserver.py - lfs protocol server side implementation
2 #
2 #
3 # Copyright 2018 Matt Harbison <matt_harbison@yahoo.com>
3 # Copyright 2018 Matt Harbison <matt_harbison@yahoo.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import datetime
10 import datetime
11 import errno
11 import errno
12 import json
12 import json
13 import traceback
13 import traceback
14
14
15 from mercurial.hgweb import common as hgwebcommon
15 from mercurial.hgweb import common as hgwebcommon
16
16
17 from mercurial import (
17 from mercurial import (
18 exthelper,
18 exthelper,
19 pycompat,
19 pycompat,
20 util,
20 util,
21 wireprotoserver,
21 wireprotoserver,
22 )
22 )
23
23
24 from . import blobstore
24 from . import blobstore
25
25
26 HTTP_OK = hgwebcommon.HTTP_OK
26 HTTP_OK = hgwebcommon.HTTP_OK
27 HTTP_CREATED = hgwebcommon.HTTP_CREATED
27 HTTP_CREATED = hgwebcommon.HTTP_CREATED
28 HTTP_BAD_REQUEST = hgwebcommon.HTTP_BAD_REQUEST
28 HTTP_BAD_REQUEST = hgwebcommon.HTTP_BAD_REQUEST
29 HTTP_NOT_FOUND = hgwebcommon.HTTP_NOT_FOUND
29 HTTP_NOT_FOUND = hgwebcommon.HTTP_NOT_FOUND
30 HTTP_METHOD_NOT_ALLOWED = hgwebcommon.HTTP_METHOD_NOT_ALLOWED
30 HTTP_METHOD_NOT_ALLOWED = hgwebcommon.HTTP_METHOD_NOT_ALLOWED
31 HTTP_NOT_ACCEPTABLE = hgwebcommon.HTTP_NOT_ACCEPTABLE
31 HTTP_NOT_ACCEPTABLE = hgwebcommon.HTTP_NOT_ACCEPTABLE
32 HTTP_UNSUPPORTED_MEDIA_TYPE = hgwebcommon.HTTP_UNSUPPORTED_MEDIA_TYPE
32 HTTP_UNSUPPORTED_MEDIA_TYPE = hgwebcommon.HTTP_UNSUPPORTED_MEDIA_TYPE
33
33
34 eh = exthelper.exthelper()
34 eh = exthelper.exthelper()
35
35
36
36
37 @eh.wrapfunction(wireprotoserver, b'handlewsgirequest')
37 @eh.wrapfunction(wireprotoserver, b'handlewsgirequest')
38 def handlewsgirequest(orig, rctx, req, res, checkperm):
38 def handlewsgirequest(orig, rctx, req, res, checkperm):
39 """Wrap wireprotoserver.handlewsgirequest() to possibly process an LFS
39 """Wrap wireprotoserver.handlewsgirequest() to possibly process an LFS
40 request if it is left unprocessed by the wrapped method.
40 request if it is left unprocessed by the wrapped method.
41 """
41 """
42 if orig(rctx, req, res, checkperm):
42 if orig(rctx, req, res, checkperm):
43 return True
43 return True
44
44
45 if not rctx.repo.ui.configbool(b'experimental', b'lfs.serve'):
45 if not rctx.repo.ui.configbool(b'experimental', b'lfs.serve'):
46 return False
46 return False
47
47
48 if not util.safehasattr(rctx.repo.svfs, 'lfslocalblobstore'):
48 if not util.safehasattr(rctx.repo.svfs, 'lfslocalblobstore'):
49 return False
49 return False
50
50
51 if not req.dispatchpath:
51 if not req.dispatchpath:
52 return False
52 return False
53
53
54 try:
54 try:
55 if req.dispatchpath == b'.git/info/lfs/objects/batch':
55 if req.dispatchpath == b'.git/info/lfs/objects/batch':
56 checkperm(rctx, req, b'pull')
56 checkperm(rctx, req, b'pull')
57 return _processbatchrequest(rctx.repo, req, res)
57 return _processbatchrequest(rctx.repo, req, res)
58 # TODO: reserve and use a path in the proposed http wireprotocol /api/
58 # TODO: reserve and use a path in the proposed http wireprotocol /api/
59 # namespace?
59 # namespace?
60 elif req.dispatchpath.startswith(b'.hg/lfs/objects'):
60 elif req.dispatchpath.startswith(b'.hg/lfs/objects'):
61 return _processbasictransfer(
61 return _processbasictransfer(
62 rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm)
62 rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm)
63 )
63 )
64 return False
64 return False
65 except hgwebcommon.ErrorResponse as e:
65 except hgwebcommon.ErrorResponse as e:
66 # XXX: copied from the handler surrounding wireprotoserver._callhttp()
66 # XXX: copied from the handler surrounding wireprotoserver._callhttp()
67 # in the wrapped function. Should this be moved back to hgweb to
67 # in the wrapped function. Should this be moved back to hgweb to
68 # be a common handler?
68 # be a common handler?
69 for k, v in e.headers:
69 for k, v in e.headers:
70 res.headers[k] = v
70 res.headers[k] = v
71 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
71 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
72 res.setbodybytes(b'0\n%s\n' % pycompat.bytestr(e))
72 res.setbodybytes(b'0\n%s\n' % pycompat.bytestr(e))
73 return True
73 return True
74
74
75
75
76 def _sethttperror(res, code, message=None):
76 def _sethttperror(res, code, message=None):
77 res.status = hgwebcommon.statusmessage(code, message=message)
77 res.status = hgwebcommon.statusmessage(code, message=message)
78 res.headers[b'Content-Type'] = b'text/plain; charset=utf-8'
78 res.headers[b'Content-Type'] = b'text/plain; charset=utf-8'
79 res.setbodybytes(b'')
79 res.setbodybytes(b'')
80
80
81
81
82 def _logexception(req):
82 def _logexception(req):
83 """Write information about the current exception to wsgi.errors."""
83 """Write information about the current exception to wsgi.errors."""
84 tb = pycompat.sysbytes(traceback.format_exc())
84 tb = pycompat.sysbytes(traceback.format_exc())
85 errorlog = req.rawenv[b'wsgi.errors']
85 errorlog = req.rawenv[b'wsgi.errors']
86
86
87 uri = b''
87 uri = b''
88 if req.apppath:
88 if req.apppath:
89 uri += req.apppath
89 uri += req.apppath
90 uri += b'/' + req.dispatchpath
90 uri += b'/' + req.dispatchpath
91
91
92 errorlog.write(
92 errorlog.write(
93 b"Exception happened while processing request '%s':\n%s" % (uri, tb)
93 b"Exception happened while processing request '%s':\n%s" % (uri, tb)
94 )
94 )
95
95
96
96
97 def _processbatchrequest(repo, req, res):
97 def _processbatchrequest(repo, req, res):
98 """Handle a request for the Batch API, which is the gateway to granting file
98 """Handle a request for the Batch API, which is the gateway to granting file
99 access.
99 access.
100
100
101 https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
101 https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
102 """
102 """
103
103
104 # Mercurial client request:
104 # Mercurial client request:
105 #
105 #
106 # HOST: localhost:$HGPORT
106 # HOST: localhost:$HGPORT
107 # ACCEPT: application/vnd.git-lfs+json
107 # ACCEPT: application/vnd.git-lfs+json
108 # ACCEPT-ENCODING: identity
108 # ACCEPT-ENCODING: identity
109 # USER-AGENT: git-lfs/2.3.4 (Mercurial 4.5.2+1114-f48b9754f04c+20180316)
109 # USER-AGENT: git-lfs/2.3.4 (Mercurial 4.5.2+1114-f48b9754f04c+20180316)
110 # Content-Length: 125
110 # Content-Length: 125
111 # Content-Type: application/vnd.git-lfs+json
111 # Content-Type: application/vnd.git-lfs+json
112 #
112 #
113 # {
113 # {
114 # "objects": [
114 # "objects": [
115 # {
115 # {
116 # "oid": "31cf...8e5b"
116 # "oid": "31cf...8e5b"
117 # "size": 12
117 # "size": 12
118 # }
118 # }
119 # ]
119 # ]
120 # "operation": "upload"
120 # "operation": "upload"
121 # }
121 # }
122
122
123 if req.method != b'POST':
123 if req.method != b'POST':
124 _sethttperror(res, HTTP_METHOD_NOT_ALLOWED)
124 _sethttperror(res, HTTP_METHOD_NOT_ALLOWED)
125 return True
125 return True
126
126
127 if req.headers[b'Content-Type'] != b'application/vnd.git-lfs+json':
127 if req.headers[b'Content-Type'] != b'application/vnd.git-lfs+json':
128 _sethttperror(res, HTTP_UNSUPPORTED_MEDIA_TYPE)
128 _sethttperror(res, HTTP_UNSUPPORTED_MEDIA_TYPE)
129 return True
129 return True
130
130
131 if req.headers[b'Accept'] != b'application/vnd.git-lfs+json':
131 if req.headers[b'Accept'] != b'application/vnd.git-lfs+json':
132 _sethttperror(res, HTTP_NOT_ACCEPTABLE)
132 _sethttperror(res, HTTP_NOT_ACCEPTABLE)
133 return True
133 return True
134
134
135 # XXX: specify an encoding?
135 # XXX: specify an encoding?
136 lfsreq = pycompat.json_loads(req.bodyfh.read())
136 lfsreq = pycompat.json_loads(req.bodyfh.read())
137
137
138 # If no transfer handlers are explicitly requested, 'basic' is assumed.
138 # If no transfer handlers are explicitly requested, 'basic' is assumed.
139 if 'basic' not in lfsreq.get('transfers', ['basic']):
139 if 'basic' not in lfsreq.get('transfers', ['basic']):
140 _sethttperror(
140 _sethttperror(
141 res,
141 res,
142 HTTP_BAD_REQUEST,
142 HTTP_BAD_REQUEST,
143 b'Only the basic LFS transfer handler is supported',
143 b'Only the basic LFS transfer handler is supported',
144 )
144 )
145 return True
145 return True
146
146
147 operation = lfsreq.get('operation')
147 operation = lfsreq.get('operation')
148 operation = pycompat.bytestr(operation)
148 operation = pycompat.bytestr(operation)
149
149
150 if operation not in (b'upload', b'download'):
150 if operation not in (b'upload', b'download'):
151 _sethttperror(
151 _sethttperror(
152 res,
152 res,
153 HTTP_BAD_REQUEST,
153 HTTP_BAD_REQUEST,
154 b'Unsupported LFS transfer operation: %s' % operation,
154 b'Unsupported LFS transfer operation: %s' % operation,
155 )
155 )
156 return True
156 return True
157
157
158 localstore = repo.svfs.lfslocalblobstore
158 localstore = repo.svfs.lfslocalblobstore
159
159
160 objects = [
160 objects = [
161 p
161 p
162 for p in _batchresponseobjects(
162 for p in _batchresponseobjects(
163 req, lfsreq.get('objects', []), operation, localstore
163 req, lfsreq.get('objects', []), operation, localstore
164 )
164 )
165 ]
165 ]
166
166
167 rsp = {
167 rsp = {
168 'transfer': 'basic',
168 'transfer': 'basic',
169 'objects': objects,
169 'objects': objects,
170 }
170 }
171
171
172 res.status = hgwebcommon.statusmessage(HTTP_OK)
172 res.status = hgwebcommon.statusmessage(HTTP_OK)
173 res.headers[b'Content-Type'] = b'application/vnd.git-lfs+json'
173 res.headers[b'Content-Type'] = b'application/vnd.git-lfs+json'
174 res.setbodybytes(pycompat.bytestr(json.dumps(rsp)))
174 res.setbodybytes(pycompat.bytestr(json.dumps(rsp)))
175
175
176 return True
176 return True
177
177
178
178
179 def _batchresponseobjects(req, objects, action, store):
179 def _batchresponseobjects(req, objects, action, store):
180 """Yield one dictionary of attributes for the Batch API response for each
180 """Yield one dictionary of attributes for the Batch API response for each
181 object in the list.
181 object in the list.
182
182
183 req: The parsedrequest for the Batch API request
183 req: The parsedrequest for the Batch API request
184 objects: The list of objects in the Batch API object request list
184 objects: The list of objects in the Batch API object request list
185 action: 'upload' or 'download'
185 action: 'upload' or 'download'
186 store: The local blob store for servicing requests"""
186 store: The local blob store for servicing requests"""
187
187
188 # Successful lfs-test-server response to solict an upload:
188 # Successful lfs-test-server response to solict an upload:
189 # {
189 # {
190 # u'objects': [{
190 # u'objects': [{
191 # u'size': 12,
191 # u'size': 12,
192 # u'oid': u'31cf...8e5b',
192 # u'oid': u'31cf...8e5b',
193 # u'actions': {
193 # u'actions': {
194 # u'upload': {
194 # u'upload': {
195 # u'href': u'http://localhost:$HGPORT/objects/31cf...8e5b',
195 # u'href': u'http://localhost:$HGPORT/objects/31cf...8e5b',
196 # u'expires_at': u'0001-01-01T00:00:00Z',
196 # u'expires_at': u'0001-01-01T00:00:00Z',
197 # u'header': {
197 # u'header': {
198 # u'Accept': u'application/vnd.git-lfs'
198 # u'Accept': u'application/vnd.git-lfs'
199 # }
199 # }
200 # }
200 # }
201 # }
201 # }
202 # }]
202 # }]
203 # }
203 # }
204
204
205 # TODO: Sort out the expires_at/expires_in/authenticated keys.
205 # TODO: Sort out the expires_at/expires_in/authenticated keys.
206
206
207 for obj in objects:
207 for obj in objects:
208 # Convert unicode to ASCII to create a filesystem path
208 # Convert unicode to ASCII to create a filesystem path
209 soid = obj.get('oid')
209 soid = obj.get('oid')
210 oid = soid.encode('ascii')
210 oid = soid.encode('ascii')
211 rsp = {
211 rsp = {
212 'oid': soid,
212 'oid': soid,
213 'size': obj.get('size'), # XXX: should this check the local size?
213 'size': obj.get('size'), # XXX: should this check the local size?
214 # 'authenticated': True,
214 # 'authenticated': True,
215 }
215 }
216
216
217 exists = True
217 exists = True
218 verifies = False
218 verifies = False
219
219
220 # Verify an existing file on the upload request, so that the client is
220 # Verify an existing file on the upload request, so that the client is
221 # solicited to re-upload if it corrupt locally. Download requests are
221 # solicited to re-upload if it corrupt locally. Download requests are
222 # also verified, so the error can be flagged in the Batch API response.
222 # also verified, so the error can be flagged in the Batch API response.
223 # (Maybe we can use this to short circuit the download for `hg verify`,
223 # (Maybe we can use this to short circuit the download for `hg verify`,
224 # IFF the client can assert that the remote end is an hg server.)
224 # IFF the client can assert that the remote end is an hg server.)
225 # Otherwise, it's potentially overkill on download, since it is also
225 # Otherwise, it's potentially overkill on download, since it is also
226 # verified as the file is streamed to the caller.
226 # verified as the file is streamed to the caller.
227 try:
227 try:
228 verifies = store.verify(oid)
228 verifies = store.verify(oid)
229 if verifies and action == b'upload':
229 if verifies and action == b'upload':
230 # The client will skip this upload, but make sure it remains
230 # The client will skip this upload, but make sure it remains
231 # available locally.
231 # available locally.
232 store.linkfromusercache(oid)
232 store.linkfromusercache(oid)
233 except IOError as inst:
233 except IOError as inst:
234 if inst.errno != errno.ENOENT:
234 if inst.errno != errno.ENOENT:
235 _logexception(req)
235 _logexception(req)
236
236
237 rsp['error'] = {
237 rsp['error'] = {
238 'code': 500,
238 'code': 500,
239 'message': inst.strerror or 'Internal Server Server',
239 'message': inst.strerror or 'Internal Server Server',
240 }
240 }
241 yield rsp
241 yield rsp
242 continue
242 continue
243
243
244 exists = False
244 exists = False
245
245
246 # Items are always listed for downloads. They are dropped for uploads
246 # Items are always listed for downloads. They are dropped for uploads
247 # IFF they already exist locally.
247 # IFF they already exist locally.
248 if action == b'download':
248 if action == b'download':
249 if not exists:
249 if not exists:
250 rsp['error'] = {
250 rsp['error'] = {
251 'code': 404,
251 'code': 404,
252 'message': "The object does not exist",
252 'message': "The object does not exist",
253 }
253 }
254 yield rsp
254 yield rsp
255 continue
255 continue
256
256
257 elif not verifies:
257 elif not verifies:
258 rsp['error'] = {
258 rsp['error'] = {
259 'code': 422, # XXX: is this the right code?
259 'code': 422, # XXX: is this the right code?
260 'message': "The object is corrupt",
260 'message': "The object is corrupt",
261 }
261 }
262 yield rsp
262 yield rsp
263 continue
263 continue
264
264
265 elif verifies:
265 elif verifies:
266 yield rsp # Skip 'actions': already uploaded
266 yield rsp # Skip 'actions': already uploaded
267 continue
267 continue
268
268
269 expiresat = datetime.datetime.now() + datetime.timedelta(minutes=10)
269 expiresat = datetime.datetime.now() + datetime.timedelta(minutes=10)
270
270
271 def _buildheader():
271 def _buildheader():
272 # The spec doesn't mention the Accept header here, but avoid
272 # The spec doesn't mention the Accept header here, but avoid
273 # a gratuitous deviation from lfs-test-server in the test
273 # a gratuitous deviation from lfs-test-server in the test
274 # output.
274 # output.
275 hdr = {'Accept': 'application/vnd.git-lfs'}
275 hdr = {'Accept': 'application/vnd.git-lfs'}
276
276
277 auth = req.headers.get(b'Authorization', b'')
277 auth = req.headers.get(b'Authorization', b'')
278 if auth.startswith(b'Basic '):
278 if auth.startswith(b'Basic '):
279 hdr['Authorization'] = pycompat.strurl(auth)
279 hdr['Authorization'] = pycompat.strurl(auth)
280
280
281 return hdr
281 return hdr
282
282
283 rsp['actions'] = {
283 rsp['actions'] = {
284 '%s'
284 '%s'
285 % pycompat.strurl(action): {
285 % pycompat.strurl(action): {
286 'href': pycompat.strurl(
286 'href': pycompat.strurl(
287 b'%s%s/.hg/lfs/objects/%s' % (req.baseurl, req.apppath, oid)
287 b'%s%s/.hg/lfs/objects/%s' % (req.baseurl, req.apppath, oid)
288 ),
288 ),
289 # datetime.isoformat() doesn't include the 'Z' suffix
289 # datetime.isoformat() doesn't include the 'Z' suffix
290 "expires_at": expiresat.strftime('%Y-%m-%dT%H:%M:%SZ'),
290 "expires_at": expiresat.strftime('%Y-%m-%dT%H:%M:%SZ'),
291 'header': _buildheader(),
291 'header': _buildheader(),
292 }
292 }
293 }
293 }
294
294
295 yield rsp
295 yield rsp
296
296
297
297
298 def _processbasictransfer(repo, req, res, checkperm):
298 def _processbasictransfer(repo, req, res, checkperm):
299 """Handle a single file upload (PUT) or download (GET) action for the Basic
299 """Handle a single file upload (PUT) or download (GET) action for the Basic
300 Transfer Adapter.
300 Transfer Adapter.
301
301
302 After determining if the request is for an upload or download, the access
302 After determining if the request is for an upload or download, the access
303 must be checked by calling ``checkperm()`` with either 'pull' or 'upload'
303 must be checked by calling ``checkperm()`` with either 'pull' or 'upload'
304 before accessing the files.
304 before accessing the files.
305
305
306 https://github.com/git-lfs/git-lfs/blob/master/docs/api/basic-transfers.md
306 https://github.com/git-lfs/git-lfs/blob/master/docs/api/basic-transfers.md
307 """
307 """
308
308
309 method = req.method
309 method = req.method
310 oid = req.dispatchparts[-1]
310 oid = req.dispatchparts[-1]
311 localstore = repo.svfs.lfslocalblobstore
311 localstore = repo.svfs.lfslocalblobstore
312
312
313 if len(req.dispatchparts) != 4:
313 if len(req.dispatchparts) != 4:
314 _sethttperror(res, HTTP_NOT_FOUND)
314 _sethttperror(res, HTTP_NOT_FOUND)
315 return True
315 return True
316
316
317 if method == b'PUT':
317 if method == b'PUT':
318 checkperm(b'upload')
318 checkperm(b'upload')
319
319
320 # TODO: verify Content-Type?
320 # TODO: verify Content-Type?
321
321
322 existed = localstore.has(oid)
322 existed = localstore.has(oid)
323
323
324 # TODO: how to handle timeouts? The body proxy handles limiting to
324 # TODO: how to handle timeouts? The body proxy handles limiting to
325 # Content-Length, but what happens if a client sends less than it
325 # Content-Length, but what happens if a client sends less than it
326 # says it will?
326 # says it will?
327
327
328 statusmessage = hgwebcommon.statusmessage
328 statusmessage = hgwebcommon.statusmessage
329 try:
329 try:
330 localstore.download(oid, req.bodyfh)
330 localstore.download(oid, req.bodyfh, req.headers[b'Content-Length'])
331 res.status = statusmessage(HTTP_OK if existed else HTTP_CREATED)
331 res.status = statusmessage(HTTP_OK if existed else HTTP_CREATED)
332 except blobstore.LfsCorruptionError:
332 except blobstore.LfsCorruptionError:
333 _logexception(req)
333 _logexception(req)
334
334
335 # XXX: Is this the right code?
335 # XXX: Is this the right code?
336 res.status = statusmessage(422, b'corrupt blob')
336 res.status = statusmessage(422, b'corrupt blob')
337
337
338 # There's no payload here, but this is the header that lfs-test-server
338 # There's no payload here, but this is the header that lfs-test-server
339 # sends back. This eliminates some gratuitous test output conditionals.
339 # sends back. This eliminates some gratuitous test output conditionals.
340 res.headers[b'Content-Type'] = b'text/plain; charset=utf-8'
340 res.headers[b'Content-Type'] = b'text/plain; charset=utf-8'
341 res.setbodybytes(b'')
341 res.setbodybytes(b'')
342
342
343 return True
343 return True
344 elif method == b'GET':
344 elif method == b'GET':
345 checkperm(b'pull')
345 checkperm(b'pull')
346
346
347 res.status = hgwebcommon.statusmessage(HTTP_OK)
347 res.status = hgwebcommon.statusmessage(HTTP_OK)
348 res.headers[b'Content-Type'] = b'application/octet-stream'
348 res.headers[b'Content-Type'] = b'application/octet-stream'
349
349
350 try:
350 try:
351 # TODO: figure out how to send back the file in chunks, instead of
351 # TODO: figure out how to send back the file in chunks, instead of
352 # reading the whole thing. (Also figure out how to send back
352 # reading the whole thing. (Also figure out how to send back
353 # an error status if an IOError occurs after a partial write
353 # an error status if an IOError occurs after a partial write
354 # in that case. Here, everything is read before starting.)
354 # in that case. Here, everything is read before starting.)
355 res.setbodybytes(localstore.read(oid))
355 res.setbodybytes(localstore.read(oid))
356 except blobstore.LfsCorruptionError:
356 except blobstore.LfsCorruptionError:
357 _logexception(req)
357 _logexception(req)
358
358
359 # XXX: Is this the right code?
359 # XXX: Is this the right code?
360 res.status = hgwebcommon.statusmessage(422, b'corrupt blob')
360 res.status = hgwebcommon.statusmessage(422, b'corrupt blob')
361 res.setbodybytes(b'')
361 res.setbodybytes(b'')
362
362
363 return True
363 return True
364 else:
364 else:
365 _sethttperror(
365 _sethttperror(
366 res,
366 res,
367 HTTP_METHOD_NOT_ALLOWED,
367 HTTP_METHOD_NOT_ALLOWED,
368 message=b'Unsupported LFS transfer method: %s' % method,
368 message=b'Unsupported LFS transfer method: %s' % method,
369 )
369 )
370 return True
370 return True
@@ -1,508 +1,508 b''
1 #require serve no-reposimplestore no-chg
1 #require serve no-reposimplestore no-chg
2
2
3 $ cat >> $HGRCPATH <<EOF
3 $ cat >> $HGRCPATH <<EOF
4 > [extensions]
4 > [extensions]
5 > lfs=
5 > lfs=
6 > [lfs]
6 > [lfs]
7 > track=all()
7 > track=all()
8 > [web]
8 > [web]
9 > push_ssl = False
9 > push_ssl = False
10 > allow-push = *
10 > allow-push = *
11 > EOF
11 > EOF
12
12
13 Serving LFS files can experimentally be turned off. The long term solution is
13 Serving LFS files can experimentally be turned off. The long term solution is
14 to support the 'verify' action in both client and server, so that the server can
14 to support the 'verify' action in both client and server, so that the server can
15 tell the client to store files elsewhere.
15 tell the client to store files elsewhere.
16
16
17 $ hg init server
17 $ hg init server
18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
19 > --config experimental.lfs.serve=False -R server serve -d \
19 > --config experimental.lfs.serve=False -R server serve -d \
20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
21 $ cat hg.pid >> $DAEMON_PIDS
21 $ cat hg.pid >> $DAEMON_PIDS
22
22
23 Uploads fail...
23 Uploads fail...
24
24
25 $ hg init client
25 $ hg init client
26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
27 $ hg -R client ci -Am 'initial commit'
27 $ hg -R client ci -Am 'initial commit'
28 adding lfs.bin
28 adding lfs.bin
29 $ hg -R client push http://localhost:$HGPORT
29 $ hg -R client push http://localhost:$HGPORT
30 pushing to http://localhost:$HGPORT/
30 pushing to http://localhost:$HGPORT/
31 searching for changes
31 searching for changes
32 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
32 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
33 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "upload" is supported)
33 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "upload" is supported)
34 [255]
34 [255]
35
35
36 ... so do a local push to make the data available. Remove the blob from the
36 ... so do a local push to make the data available. Remove the blob from the
37 default cache, so it attempts to download.
37 default cache, so it attempts to download.
38 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
38 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
39 > --config "lfs.url=null://" \
39 > --config "lfs.url=null://" \
40 > -R client push -q server
40 > -R client push -q server
41 $ mv `hg config lfs.usercache` $TESTTMP/servercache
41 $ mv `hg config lfs.usercache` $TESTTMP/servercache
42
42
43 Downloads fail...
43 Downloads fail...
44
44
45 $ hg clone http://localhost:$HGPORT httpclone
45 $ hg clone http://localhost:$HGPORT httpclone
46 (remote is using large file support (lfs); lfs will be enabled for this repository)
46 (remote is using large file support (lfs); lfs will be enabled for this repository)
47 requesting all changes
47 requesting all changes
48 adding changesets
48 adding changesets
49 adding manifests
49 adding manifests
50 adding file changes
50 adding file changes
51 added 1 changesets with 1 changes to 1 files
51 added 1 changesets with 1 changes to 1 files
52 new changesets 525251863cad
52 new changesets 525251863cad
53 updating to branch default
53 updating to branch default
54 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
54 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
55 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "download" is supported)
55 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "download" is supported)
56 [255]
56 [255]
57
57
58 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
58 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
59
59
60 $ cat $TESTTMP/access.log $TESTTMP/errors.log
60 $ cat $TESTTMP/access.log $TESTTMP/errors.log
61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
62 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
62 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
63 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
63 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
65 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
65 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
66 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
66 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
67 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
67 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
68 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
68 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
69 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
69 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
70
70
71 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
71 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
72 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
72 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
73 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
73 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
74 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
74 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
75 $ cat hg.pid >> $DAEMON_PIDS
75 $ cat hg.pid >> $DAEMON_PIDS
76
76
77 Reasonable hint for a misconfigured blob server
77 Reasonable hint for a misconfigured blob server
78
78
79 $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT/missing
79 $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT/missing
80 abort: LFS HTTP error: HTTP Error 404: Not Found!
80 abort: LFS HTTP error: HTTP Error 404: Not Found!
81 (the "lfs.url" config may be used to override http://localhost:$HGPORT/missing)
81 (the "lfs.url" config may be used to override http://localhost:$HGPORT/missing)
82 [255]
82 [255]
83
83
84 $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT2/missing
84 $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT2/missing
85 abort: LFS error: *onnection *refused*! (glob) (?)
85 abort: LFS error: *onnection *refused*! (glob) (?)
86 abort: LFS error: $EADDRNOTAVAIL$! (glob) (?)
86 abort: LFS error: $EADDRNOTAVAIL$! (glob) (?)
87 abort: LFS error: No route to host! (?)
87 abort: LFS error: No route to host! (?)
88 (the "lfs.url" config may be used to override http://localhost:$HGPORT2/missing)
88 (the "lfs.url" config may be used to override http://localhost:$HGPORT2/missing)
89 [255]
89 [255]
90
90
91 Blob URIs are correct when --prefix is used
91 Blob URIs are correct when --prefix is used
92
92
93 $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
93 $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
94 using http://localhost:$HGPORT/subdir/mount/point
94 using http://localhost:$HGPORT/subdir/mount/point
95 sending capabilities command
95 sending capabilities command
96 (remote is using large file support (lfs); lfs will be enabled for this repository)
96 (remote is using large file support (lfs); lfs will be enabled for this repository)
97 query 1; heads
97 query 1; heads
98 sending batch command
98 sending batch command
99 requesting all changes
99 requesting all changes
100 sending getbundle command
100 sending getbundle command
101 bundle2-input-bundle: with-transaction
101 bundle2-input-bundle: with-transaction
102 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
102 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
103 adding changesets
103 adding changesets
104 add changeset 525251863cad
104 add changeset 525251863cad
105 adding manifests
105 adding manifests
106 adding file changes
106 adding file changes
107 adding lfs.bin revisions
107 adding lfs.bin revisions
108 bundle2-input-part: total payload size 648
108 bundle2-input-part: total payload size 648
109 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
109 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
110 bundle2-input-part: "phase-heads" supported
110 bundle2-input-part: "phase-heads" supported
111 bundle2-input-part: total payload size 24
111 bundle2-input-part: total payload size 24
112 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
112 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
113 bundle2-input-part: total payload size 39
113 bundle2-input-part: total payload size 39
114 bundle2-input-bundle: 4 parts total
114 bundle2-input-bundle: 4 parts total
115 checking for updated bookmarks
115 checking for updated bookmarks
116 updating the branch cache
116 updating the branch cache
117 added 1 changesets with 1 changes to 1 files
117 added 1 changesets with 1 changes to 1 files
118 new changesets 525251863cad
118 new changesets 525251863cad
119 updating to branch default
119 updating to branch default
120 resolving manifests
120 resolving manifests
121 branchmerge: False, force: False, partial: False
121 branchmerge: False, force: False, partial: False
122 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
122 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
123 lfs: assuming remote store: http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs
123 lfs: assuming remote store: http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs
124 Status: 200
124 Status: 200
125 Content-Length: 371
125 Content-Length: 371
126 Content-Type: application/vnd.git-lfs+json
126 Content-Type: application/vnd.git-lfs+json
127 Date: $HTTP_DATE$
127 Date: $HTTP_DATE$
128 Server: testing stub value
128 Server: testing stub value
129 {
129 {
130 "objects": [
130 "objects": [
131 {
131 {
132 "actions": {
132 "actions": {
133 "download": {
133 "download": {
134 "expires_at": "$ISO_8601_DATE_TIME$"
134 "expires_at": "$ISO_8601_DATE_TIME$"
135 "header": {
135 "header": {
136 "Accept": "application/vnd.git-lfs"
136 "Accept": "application/vnd.git-lfs"
137 }
137 }
138 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
138 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
139 }
139 }
140 }
140 }
141 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
141 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
142 "size": 20
142 "size": 20
143 }
143 }
144 ]
144 ]
145 "transfer": "basic"
145 "transfer": "basic"
146 }
146 }
147 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
147 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
148 Status: 200
148 Status: 200
149 Content-Length: 20
149 Content-Length: 20
150 Content-Type: application/octet-stream
150 Content-Type: application/octet-stream
151 Date: $HTTP_DATE$
151 Date: $HTTP_DATE$
152 Server: testing stub value
152 Server: testing stub value
153 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
153 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
154 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
154 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
155 lfs: downloaded 1 files (20 bytes)
155 lfs: downloaded 1 files (20 bytes)
156 lfs.bin: remote created -> g
156 lfs.bin: remote created -> g
157 getting lfs.bin
157 getting lfs.bin
158 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
158 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
159 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
159 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
160 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
160 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
161
161
162 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
162 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
163
163
164 $ cat $TESTTMP/access.log $TESTTMP/errors.log
164 $ cat $TESTTMP/access.log $TESTTMP/errors.log
165 $LOCALIP - - [$LOGDATE$] "POST /missing/objects/batch HTTP/1.1" 404 - (glob)
165 $LOCALIP - - [$LOGDATE$] "POST /missing/objects/batch HTTP/1.1" 404 - (glob)
166 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
166 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
167 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
167 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
168 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
168 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
169 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
169 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
170 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
170 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
171
171
172 Blobs that already exist in the usercache are linked into the repo store, even
172 Blobs that already exist in the usercache are linked into the repo store, even
173 though the client doesn't send the blob.
173 though the client doesn't send the blob.
174
174
175 $ hg init server2
175 $ hg init server2
176 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server2 serve -d \
176 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server2 serve -d \
177 > -p $HGPORT --pid-file=hg.pid \
177 > -p $HGPORT --pid-file=hg.pid \
178 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
178 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
179 $ cat hg.pid >> $DAEMON_PIDS
179 $ cat hg.pid >> $DAEMON_PIDS
180
180
181 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R cloned2 --debug \
181 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R cloned2 --debug \
182 > push http://localhost:$HGPORT | grep '^[{} ]'
182 > push http://localhost:$HGPORT | grep '^[{} ]'
183 {
183 {
184 "objects": [
184 "objects": [
185 {
185 {
186 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
186 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
187 "size": 20
187 "size": 20
188 }
188 }
189 ]
189 ]
190 "transfer": "basic"
190 "transfer": "basic"
191 }
191 }
192 $ find server2/.hg/store/lfs/objects | sort
192 $ find server2/.hg/store/lfs/objects | sort
193 server2/.hg/store/lfs/objects
193 server2/.hg/store/lfs/objects
194 server2/.hg/store/lfs/objects/f0
194 server2/.hg/store/lfs/objects/f0
195 server2/.hg/store/lfs/objects/f0/3217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
195 server2/.hg/store/lfs/objects/f0/3217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
196 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
196 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
197 $ cat $TESTTMP/errors.log
197 $ cat $TESTTMP/errors.log
198
198
199 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
199 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
200 > import errno
200 > import errno
201 > from hgext.lfs import blobstore
201 > from hgext.lfs import blobstore
202 >
202 >
203 > _numverifies = 0
203 > _numverifies = 0
204 > _readerr = True
204 > _readerr = True
205 >
205 >
206 > def reposetup(ui, repo):
206 > def reposetup(ui, repo):
207 > # Nothing to do with a remote repo
207 > # Nothing to do with a remote repo
208 > if not repo.local():
208 > if not repo.local():
209 > return
209 > return
210 >
210 >
211 > store = repo.svfs.lfslocalblobstore
211 > store = repo.svfs.lfslocalblobstore
212 > class badstore(store.__class__):
212 > class badstore(store.__class__):
213 > def download(self, oid, src):
213 > def download(self, oid, src, contentlength):
214 > '''Called in the server to handle reading from the client in a
214 > '''Called in the server to handle reading from the client in a
215 > PUT request.'''
215 > PUT request.'''
216 > origread = src.read
216 > origread = src.read
217 > def _badread(nbytes):
217 > def _badread(nbytes):
218 > # Simulate bad data/checksum failure from the client
218 > # Simulate bad data/checksum failure from the client
219 > return b'0' * len(origread(nbytes))
219 > return b'0' * len(origread(nbytes))
220 > src.read = _badread
220 > src.read = _badread
221 > super(badstore, self).download(oid, src)
221 > super(badstore, self).download(oid, src, contentlength)
222 >
222 >
223 > def _read(self, vfs, oid, verify):
223 > def _read(self, vfs, oid, verify):
224 > '''Called in the server to read data for a GET request, and then
224 > '''Called in the server to read data for a GET request, and then
225 > calls self._verify() on it before returning.'''
225 > calls self._verify() on it before returning.'''
226 > global _readerr
226 > global _readerr
227 > # One time simulation of a read error
227 > # One time simulation of a read error
228 > if _readerr:
228 > if _readerr:
229 > _readerr = False
229 > _readerr = False
230 > raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
230 > raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
231 > # Simulate corrupt content on client download
231 > # Simulate corrupt content on client download
232 > blobstore._verify(oid, b'dummy content')
232 > blobstore._verify(oid, b'dummy content')
233 >
233 >
234 > def verify(self, oid):
234 > def verify(self, oid):
235 > '''Called in the server to populate the Batch API response,
235 > '''Called in the server to populate the Batch API response,
236 > letting the client re-upload if the file is corrupt.'''
236 > letting the client re-upload if the file is corrupt.'''
237 > # Fail verify in Batch API for one clone command and one push
237 > # Fail verify in Batch API for one clone command and one push
238 > # command with an IOError. Then let it through to access other
238 > # command with an IOError. Then let it through to access other
239 > # functions. Checksum failure is tested elsewhere.
239 > # functions. Checksum failure is tested elsewhere.
240 > global _numverifies
240 > global _numverifies
241 > _numverifies += 1
241 > _numverifies += 1
242 > if _numverifies <= 2:
242 > if _numverifies <= 2:
243 > raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
243 > raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
244 > return super(badstore, self).verify(oid)
244 > return super(badstore, self).verify(oid)
245 >
245 >
246 > store.__class__ = badstore
246 > store.__class__ = badstore
247 > EOF
247 > EOF
248
248
249 $ rm -rf `hg config lfs.usercache`
249 $ rm -rf `hg config lfs.usercache`
250 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
250 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
251 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
251 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
252 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
252 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
253 > -R server serve -d \
253 > -R server serve -d \
254 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
254 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
255 $ cat hg.pid >> $DAEMON_PIDS
255 $ cat hg.pid >> $DAEMON_PIDS
256
256
257 Test an I/O error in localstore.verify() (Batch API) with GET
257 Test an I/O error in localstore.verify() (Batch API) with GET
258
258
259 $ hg clone http://localhost:$HGPORT1 httpclone2
259 $ hg clone http://localhost:$HGPORT1 httpclone2
260 (remote is using large file support (lfs); lfs will be enabled for this repository)
260 (remote is using large file support (lfs); lfs will be enabled for this repository)
261 requesting all changes
261 requesting all changes
262 adding changesets
262 adding changesets
263 adding manifests
263 adding manifests
264 adding file changes
264 adding file changes
265 added 1 changesets with 1 changes to 1 files
265 added 1 changesets with 1 changes to 1 files
266 new changesets 525251863cad
266 new changesets 525251863cad
267 updating to branch default
267 updating to branch default
268 abort: LFS server error for "lfs.bin": Internal server error!
268 abort: LFS server error for "lfs.bin": Internal server error!
269 [255]
269 [255]
270
270
271 Test an I/O error in localstore.verify() (Batch API) with PUT
271 Test an I/O error in localstore.verify() (Batch API) with PUT
272
272
273 $ echo foo > client/lfs.bin
273 $ echo foo > client/lfs.bin
274 $ hg -R client ci -m 'mod lfs'
274 $ hg -R client ci -m 'mod lfs'
275 $ hg -R client push http://localhost:$HGPORT1
275 $ hg -R client push http://localhost:$HGPORT1
276 pushing to http://localhost:$HGPORT1/
276 pushing to http://localhost:$HGPORT1/
277 searching for changes
277 searching for changes
278 abort: LFS server error for "unknown": Internal server error!
278 abort: LFS server error for "unknown": Internal server error!
279 [255]
279 [255]
280 TODO: figure out how to associate the file name in the error above
280 TODO: figure out how to associate the file name in the error above
281
281
282 Test a bad checksum sent by the client in the transfer API
282 Test a bad checksum sent by the client in the transfer API
283
283
284 $ hg -R client push http://localhost:$HGPORT1
284 $ hg -R client push http://localhost:$HGPORT1
285 pushing to http://localhost:$HGPORT1/
285 pushing to http://localhost:$HGPORT1/
286 searching for changes
286 searching for changes
287 abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
287 abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
288 [255]
288 [255]
289
289
290 $ echo 'test lfs file' > server/lfs3.bin
290 $ echo 'test lfs file' > server/lfs3.bin
291 $ hg --config experimental.lfs.disableusercache=True \
291 $ hg --config experimental.lfs.disableusercache=True \
292 > -R server ci -Aqm 'another lfs file'
292 > -R server ci -Aqm 'another lfs file'
293 $ hg -R client pull -q http://localhost:$HGPORT1
293 $ hg -R client pull -q http://localhost:$HGPORT1
294
294
295 Test an I/O error during the processing of the GET request
295 Test an I/O error during the processing of the GET request
296
296
297 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
297 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
298 > -R client update -r tip
298 > -R client update -r tip
299 abort: LFS HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
299 abort: LFS HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
300 [255]
300 [255]
301
301
302 Test a checksum failure during the processing of the GET request
302 Test a checksum failure during the processing of the GET request
303
303
304 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
304 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
305 > -R client update -r tip
305 > -R client update -r tip
306 abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
306 abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
307 [255]
307 [255]
308
308
309 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
309 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
310
310
311 $ cat $TESTTMP/access.log
311 $ cat $TESTTMP/access.log
312 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
312 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
313 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
313 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
314 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
314 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
315 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
315 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
316 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
316 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
317 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
317 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
318 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
318 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
319 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
319 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
320 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
320 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
321 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
321 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
322 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
322 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
323 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
323 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
324 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
324 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
325 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
325 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
326 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
326 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
327 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
327 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
328 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
328 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
329 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
329 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
330 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
330 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
331 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
331 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
332 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
332 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
333 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
333 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
334 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
334 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
335 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
335 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
336 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
336 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
337 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 422 - (glob)
337 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 422 - (glob)
338
338
339 $ grep -v ' File "' $TESTTMP/errors.log
339 $ grep -v ' File "' $TESTTMP/errors.log
340 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
340 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
341 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
341 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
342 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
342 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
343 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
343 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
344 $LOCALIP - - [$ERRDATE$] HG error: *Error: [Errno *] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
344 $LOCALIP - - [$ERRDATE$] HG error: *Error: [Errno *] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
345 $LOCALIP - - [$ERRDATE$] HG error: (glob)
345 $LOCALIP - - [$ERRDATE$] HG error: (glob)
346 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
346 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
347 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
347 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
348 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
348 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
349 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
349 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
350 $LOCALIP - - [$ERRDATE$] HG error: *Error: [Errno *] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
350 $LOCALIP - - [$ERRDATE$] HG error: *Error: [Errno *] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
351 $LOCALIP - - [$ERRDATE$] HG error: (glob)
351 $LOCALIP - - [$ERRDATE$] HG error: (glob)
352 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
352 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
353 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
353 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
354 $LOCALIP - - [$ERRDATE$] HG error: localstore.download(oid, req.bodyfh) (glob)
354 $LOCALIP - - [$ERRDATE$] HG error: localstore.download(oid, req.bodyfh, req.headers[b'Content-Length'])
355 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src) (glob)
355 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src, contentlength)
356 $LOCALIP - - [$ERRDATE$] HG error: _(b'corrupt remote lfs object: %s') % oid (glob)
356 $LOCALIP - - [$ERRDATE$] HG error: _(b'corrupt remote lfs object: %s') % oid (glob)
357 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (no-py3 !)
357 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (no-py3 !)
358 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (py3 !)
358 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (py3 !)
359 $LOCALIP - - [$ERRDATE$] HG error: (glob)
359 $LOCALIP - - [$ERRDATE$] HG error: (glob)
360 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
360 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
361 Traceback (most recent call last):
361 Traceback (most recent call last):
362 self.do_write()
362 self.do_write()
363 self.do_hgweb()
363 self.do_hgweb()
364 for chunk in self.server.application(env, self._start_response):
364 for chunk in self.server.application(env, self._start_response):
365 for r in self._runwsgi(req, res, repo):
365 for r in self._runwsgi(req, res, repo):
366 rctx, req, res, self.check_perm
366 rctx, req, res, self.check_perm
367 return func(*(args + a), **kw) (no-py3 !)
367 return func(*(args + a), **kw) (no-py3 !)
368 rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm)
368 rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm)
369 res.setbodybytes(localstore.read(oid))
369 res.setbodybytes(localstore.read(oid))
370 blob = self._read(self.vfs, oid, verify)
370 blob = self._read(self.vfs, oid, verify)
371 raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
371 raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
372 *Error: [Errno *] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error (glob)
372 *Error: [Errno *] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error (glob)
373
373
374 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
374 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
375 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
375 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
376 $LOCALIP - - [$ERRDATE$] HG error: res.setbodybytes(localstore.read(oid)) (glob)
376 $LOCALIP - - [$ERRDATE$] HG error: res.setbodybytes(localstore.read(oid)) (glob)
377 $LOCALIP - - [$ERRDATE$] HG error: blob = self._read(self.vfs, oid, verify) (glob)
377 $LOCALIP - - [$ERRDATE$] HG error: blob = self._read(self.vfs, oid, verify) (glob)
378 $LOCALIP - - [$ERRDATE$] HG error: blobstore._verify(oid, b'dummy content') (glob)
378 $LOCALIP - - [$ERRDATE$] HG error: blobstore._verify(oid, b'dummy content') (glob)
379 $LOCALIP - - [$ERRDATE$] HG error: hint=_(b'run hg verify'), (glob)
379 $LOCALIP - - [$ERRDATE$] HG error: hint=_(b'run hg verify'), (glob)
380 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (no-py3 !)
380 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (no-py3 !)
381 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (py3 !)
381 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (py3 !)
382 $LOCALIP - - [$ERRDATE$] HG error: (glob)
382 $LOCALIP - - [$ERRDATE$] HG error: (glob)
383
383
384 Basic Authorization headers are returned by the Batch API, and sent back with
384 Basic Authorization headers are returned by the Batch API, and sent back with
385 the GET/PUT request.
385 the GET/PUT request.
386
386
387 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
387 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
388
388
389 $ cat >> $HGRCPATH << EOF
389 $ cat >> $HGRCPATH << EOF
390 > [experimental]
390 > [experimental]
391 > lfs.disableusercache = True
391 > lfs.disableusercache = True
392 > [auth]
392 > [auth]
393 > l.schemes=http
393 > l.schemes=http
394 > l.prefix=lo
394 > l.prefix=lo
395 > l.username=user
395 > l.username=user
396 > l.password=pass
396 > l.password=pass
397 > EOF
397 > EOF
398
398
399 $ hg --config extensions.x=$TESTDIR/httpserverauth.py \
399 $ hg --config extensions.x=$TESTDIR/httpserverauth.py \
400 > -R server serve -d -p $HGPORT1 --pid-file=hg.pid \
400 > -R server serve -d -p $HGPORT1 --pid-file=hg.pid \
401 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
401 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
402 $ mv hg.pid $DAEMON_PIDS
402 $ mv hg.pid $DAEMON_PIDS
403
403
404 $ hg clone --debug http://localhost:$HGPORT1 auth_clone | egrep '^[{}]| '
404 $ hg clone --debug http://localhost:$HGPORT1 auth_clone | egrep '^[{}]| '
405 {
405 {
406 "objects": [
406 "objects": [
407 {
407 {
408 "actions": {
408 "actions": {
409 "download": {
409 "download": {
410 "expires_at": "$ISO_8601_DATE_TIME$"
410 "expires_at": "$ISO_8601_DATE_TIME$"
411 "header": {
411 "header": {
412 "Accept": "application/vnd.git-lfs"
412 "Accept": "application/vnd.git-lfs"
413 "Authorization": "Basic dXNlcjpwYXNz"
413 "Authorization": "Basic dXNlcjpwYXNz"
414 }
414 }
415 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
415 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
416 }
416 }
417 }
417 }
418 "oid": "276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
418 "oid": "276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
419 "size": 14
419 "size": 14
420 }
420 }
421 ]
421 ]
422 "transfer": "basic"
422 "transfer": "basic"
423 }
423 }
424
424
425 $ echo 'another blob' > auth_clone/lfs.blob
425 $ echo 'another blob' > auth_clone/lfs.blob
426 $ hg -R auth_clone ci -Aqm 'add blob'
426 $ hg -R auth_clone ci -Aqm 'add blob'
427
427
428 $ cat > use_digests.py << EOF
428 $ cat > use_digests.py << EOF
429 > from mercurial import (
429 > from mercurial import (
430 > exthelper,
430 > exthelper,
431 > url,
431 > url,
432 > )
432 > )
433 >
433 >
434 > eh = exthelper.exthelper()
434 > eh = exthelper.exthelper()
435 > uisetup = eh.finaluisetup
435 > uisetup = eh.finaluisetup
436 >
436 >
437 > @eh.wrapfunction(url, 'opener')
437 > @eh.wrapfunction(url, 'opener')
438 > def urlopener(orig, *args, **kwargs):
438 > def urlopener(orig, *args, **kwargs):
439 > opener = orig(*args, **kwargs)
439 > opener = orig(*args, **kwargs)
440 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
440 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
441 > return opener
441 > return opener
442 > EOF
442 > EOF
443
443
444 Test that Digest Auth fails gracefully before testing the successful Basic Auth
444 Test that Digest Auth fails gracefully before testing the successful Basic Auth
445
445
446 $ hg -R auth_clone push --config extensions.x=use_digests.py
446 $ hg -R auth_clone push --config extensions.x=use_digests.py
447 pushing to http://localhost:$HGPORT1/
447 pushing to http://localhost:$HGPORT1/
448 searching for changes
448 searching for changes
449 abort: LFS HTTP error: HTTP Error 401: the server must support Basic Authentication!
449 abort: LFS HTTP error: HTTP Error 401: the server must support Basic Authentication!
450 (api=http://localhost:$HGPORT1/.git/info/lfs/objects/batch, action=upload)
450 (api=http://localhost:$HGPORT1/.git/info/lfs/objects/batch, action=upload)
451 [255]
451 [255]
452
452
453 $ hg -R auth_clone --debug push | egrep '^[{}]| '
453 $ hg -R auth_clone --debug push | egrep '^[{}]| '
454 {
454 {
455 "objects": [
455 "objects": [
456 {
456 {
457 "actions": {
457 "actions": {
458 "upload": {
458 "upload": {
459 "expires_at": "$ISO_8601_DATE_TIME$"
459 "expires_at": "$ISO_8601_DATE_TIME$"
460 "header": {
460 "header": {
461 "Accept": "application/vnd.git-lfs"
461 "Accept": "application/vnd.git-lfs"
462 "Authorization": "Basic dXNlcjpwYXNz"
462 "Authorization": "Basic dXNlcjpwYXNz"
463 }
463 }
464 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
464 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
465 }
465 }
466 }
466 }
467 "oid": "df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
467 "oid": "df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
468 "size": 13
468 "size": 13
469 }
469 }
470 ]
470 ]
471 "transfer": "basic"
471 "transfer": "basic"
472 }
472 }
473
473
474 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
474 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
475
475
476 $ cat $TESTTMP/access.log $TESTTMP/errors.log
476 $ cat $TESTTMP/access.log $TESTTMP/errors.log
477 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
477 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
478 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
478 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
479 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
479 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
480 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
480 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
481 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
481 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
482 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
482 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
483 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
483 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
484 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - x-hgtest-authtype:Digest (glob)
484 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - x-hgtest-authtype:Digest (glob)
485 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - x-hgtest-authtype:Digest (glob)
485 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - x-hgtest-authtype:Digest (glob)
486 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 401 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
486 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 401 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
487 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
487 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
488 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
488 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
489 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
489 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
490 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
490 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
491 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
491 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
492 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 401 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
492 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 401 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
493 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
493 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
494 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
494 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
495 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
495 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
496 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - x-hgtest-authtype:Digest (glob)
496 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - x-hgtest-authtype:Digest (glob)
497 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
497 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
498 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
498 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
499 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
499 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
500 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
500 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
501 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
501 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
502 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
502 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
503 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
503 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
504 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
504 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
505 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
505 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
506 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3 HTTP/1.1" 201 - (glob)
506 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3 HTTP/1.1" 201 - (glob)
507 $LOCALIP - - [$LOGDATE$] "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
507 $LOCALIP - - [$LOGDATE$] "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
508 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
508 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
General Comments 0
You need to be logged in to leave comments. Login now