##// END OF EJS Templates
lfs: improve the hints for common errors in the Batch API...
Matt Harbison -
r40696:9f78d107 default
parent child Browse files
Show More
@@ -1,596 +1,601
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import json
12 import json
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 from mercurial import (
19 from mercurial import (
20 error,
20 error,
21 pathutil,
21 pathutil,
22 pycompat,
22 pycompat,
23 url as urlmod,
23 url as urlmod,
24 util,
24 util,
25 vfs as vfsmod,
25 vfs as vfsmod,
26 worker,
26 worker,
27 )
27 )
28
28
29 from ..largefiles import lfutil
29 from ..largefiles import lfutil
30
30
31 # 64 bytes for SHA256
31 # 64 bytes for SHA256
32 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
32 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
33
33
class lfsvfs(vfsmod.vfs):
    def join(self, path):
        """Split the oid at its first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError('unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[0:2], path[2:])

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        base = os.path.normpath(self.base)
        # When dirpath == base, dirpath[striplen:] becomes empty because
        # len(dirpath) < striplen.
        striplen = len(pathutil.normasprefix(base))
        found = []

        walkroot = self.reljoin(self.base, path or '')
        for dirpath, dirs, files in os.walk(walkroot, onerror=onerror):
            dirpath = dirpath[striplen:]

            # Silently skip unexpected files and directories: only the
            # two-character prefix dirs can hold blobs.
            if len(dirpath) == 2:
                found.extend(dirpath + f for f in files
                             if _lfsre.match(dirpath + f))

        yield ('', [], found)
62
62
class nullvfs(lfsvfs):
    """A blackhole store: never holds anything, and silently discards
    all writes.  Used when the usercache is disabled."""

    def __init__(self):
        pass

    def exists(self, oid):
        # Nothing is ever stored here.
        return False

    def read(self, oid):
        # store.read() calls into here if the blob doesn't exist in its
        # self.vfs.  Raise the same error as a normal vfs when asked to read
        # a file that doesn't exist.  The only difference is the full file
        # path isn't available in the error.
        raise IOError(errno.ENOENT, '%s: No such file or directory' % oid)

    def walk(self, path=None, onerror=None):
        # Same shape as lfsvfs.walk(), but always empty.
        return ('', [], [])

    def write(self, oid, data):
        # Deliberately drop the data on the floor.
        pass
82
82
class filewithprogress(object):
    """A file-like object that supports __len__ and read.

    Wraps an open file and reports how many bytes have been handed out
    through the optional callback, which is useful for progress output.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback  # func(readsize)
        # Measure the total size up front, then rewind for reading.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        if self._fp is None:
            # Already exhausted and closed.
            return b''
        data = self._fp.read(size)
        if not data:
            # EOF: release the underlying file and remember we're done.
            self._fp.close()
            self._fp = None
        elif self._callback:
            self._callback(len(data))
        return data
110
110
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large
    blobs to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        storepath = repo.svfs.join('lfs/objects')
        self.vfs = lfsvfs(storepath)

        if repo.ui.configbool('experimental', 'lfs.disableusercache'):
            self.cachevfs = nullvfs()
        else:
            self.cachevfs = lfsvfs(lfutil._usercachedir(repo.ui, 'lfs'))
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file.  Commit
        # will write to both it and the local store, as will anything that
        # downloads the blobs.  However, things like clone without an update
        # won't populate the local store.  For an init + push of a local
        # clone, the usercache is the only place it _could_ be.  If not
        # present, the missing file msg here will indicate the local repo,
        # not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, 'rb')

        return self.vfs(oid, 'rb')

    def download(self, oid, src):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore."""
        hasher = hashlib.sha256()

        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                hasher.update(chunk)

        # The blob's name is its digest; a mismatch means the remote sent
        # corrupt (or wrong) content.
        if hasher.hexdigest() != oid:
            raise LfsCorruptionError(_('corrupt remote lfs object: %s')
                                     % oid)

        self._linktousercache(oid)

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or
        similar.  As such, there is no need to verify the data.  Imports from
        a remote store must use ``download()`` instead."""
        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            fp.write(data)

        self._linktousercache(oid)

    def linkfromusercache(self, oid):
        """Link blobs found in the user cache into this store.

        The server module needs to do this when it lets the client know not to
        upload the blob, to ensure it is always available in this store.
        Normally this is done implicitly when the client reads or writes the
        blob, but that doesn't happen when the server tells the client that it
        already has the blob.
        """
        if (not isinstance(self.cachevfs, nullvfs)
            and not self.vfs.exists(oid)):
            self.ui.note(_('lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))

    def _linktousercache(self, oid):
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if (not self.cachevfs.exists(oid)
            and not isinstance(self.cachevfs, nullvfs)):
            self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if self.vfs.exists(oid):
            self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
            return self._read(self.vfs, oid, verify)

        # Fall back to the usercache.
        blob = self._read(self.cachevfs, oid, verify)

        # Even if revlog will verify the content, it needs to be verified
        # now before making the hardlink to avoid propagating corrupt blobs.
        # Don't abort if corruption is detected, because `hg verify` will
        # give more useful info about the corruption- simply don't add the
        # hardlink.
        if verify or hashlib.sha256(blob).hexdigest() == oid:
            self.ui.note(_('lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def verify(self, oid):
        """Indicate whether or not the hash of the underlying file matches its
        name."""
        hasher = hashlib.sha256()

        with self.open(oid) as fp:
            for chunk in util.filechunkiter(fp, size=1048576):
                hasher.update(chunk)

        return hasher.hexdigest() == oid

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
233
233
class _gitlfsremote(object):
    """Remote blobstore speaking the Git-LFS Batch API over HTTP(S)."""

    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip('/')
        agent = repo.ui.config('experimental', 'lfs.user-agent')
        if not agent:
            agent = 'git-lfs/2.3.4 (Mercurial %s)' % util.version()
        self.urlopener = urlmod.opener(ui, authinfo, agent)
        self.retry = ui.configint('lfs', 'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(_deduplicate(pointers), fromstore, 'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blobstore."""
        self._batch(_deduplicate(pointers), tostore, 'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
        requestdata = json.dumps({'objects': objects, 'operation': action})
        url = '%s/objects/batch' % self.baseurl
        batchreq = util.urlreq.request(url, data=requestdata)
        for header in ('Accept', 'Content-Type'):
            batchreq.add_header(header, 'application/vnd.git-lfs+json')
        try:
            rsp = self.urlopener.open(batchreq)
            rawjson = rsp.read()
        except util.urlerr.httperror as ex:
            # Give targeted hints for the most common misconfigurations;
            # otherwise fall back to the raw api/action info.
            hints = {
                400: _('check that lfs serving is enabled on %s and "%s" is '
                       'supported') % (self.baseurl, action),
                404: _('the "lfs.url" config may be used to override %s')
                     % self.baseurl,
            }
            hint = hints.get(ex.code, _('api=%s, action=%s') % (url, action))
            raise LfsRemoteError(_('LFS HTTP error: %s') % ex, hint=hint)
        try:
            response = json.loads(rawjson)
        except ValueError:
            raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
                                 % rawjson)

        if self.ui.debugflag:
            self.ui.debug('Status: %d\n' % rsp.status)
            # lfs-test-server and hg serve return headers in different order
            headerdump = '\n'.join(sorted(str(rsp.info()).splitlines()))
            self.ui.debug('%s\n' % headerdump)

            if 'objects' in response:
                # Stable order for reproducible debug output.
                response['objects'] = sorted(response['objects'],
                                             key=lambda p: p['oid'])
            self.ui.debug('%s\n'
                          % json.dumps(response, indent=2,
                                       separators=('', ': '), sort_keys=True))

        return response

    def _checkforservererror(self, pointers, responses, action):
        """Scans errors from objects

        Raises LfsRemoteError if any objects have an error"""
        ptrmap = {p.oid(): p for p in pointers}
        for response in responses:
            # The server should return 404 when objects cannot be found. Some
            # server implementation (ex. lfs-test-server) does not set "error"
            # but just removes "download" from "actions". Treat that case
            # as the same as 404 error.
            if 'error' not in response:
                if (action == 'download'
                    and action not in response.get('actions', [])):
                    code = 404
                else:
                    continue
            else:
                # An error dict without a code doesn't make much sense, so
                # treat as a server error.
                code = response.get('error').get('code', 500)

            p = ptrmap.get(response['oid'], None)
            if not p:
                # The server reported on an oid we never asked about.
                raise LfsRemoteError(
                    _('LFS server error. Unsolicited response for oid %s')
                    % response['oid'])

            filename = getattr(p, 'filename', 'unknown')
            errors = {
                404: 'The object does not exist',
                410: 'The object was removed by the owner',
                422: 'Validation error',
                500: 'Internal server error',
            }
            msg = errors.get(code, 'status code %d' % code)
            raise LfsRemoteError(_('LFS server error for "%s": %s')
                                 % (filename, msg))

    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get('objects', [])
        self._checkforservererror(pointers, objects, action)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist in the server.
        return [o for o in objects if action in o.get('actions', [])]

    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
        """
        oid = pycompat.bytestr(obj['oid'])

        href = pycompat.bytestr(obj['actions'][action].get('href'))
        headers = obj['actions'][action].get('header', {}).items()

        httpreq = util.urlreq.request(href)
        if action == 'upload':
            # If uploading blobs, read data from local blobstore.
            if not localstore.verify(oid):
                raise error.Abort(_('detected corrupt lfs object: %s') % oid,
                                  hint=_('run hg verify'))
            httpreq.data = filewithprogress(localstore.open(oid), None)
            httpreq.get_method = lambda: 'PUT'
            httpreq.add_header('Content-Type', 'application/octet-stream')

        for k, v in headers:
            httpreq.add_header(k, v)

        response = b''
        try:
            res = self.urlopener.open(httpreq)

            if self.ui.debugflag:
                self.ui.debug('Status: %d\n' % res.status)
                # lfs-test-server and hg serve return headers in different
                # order
                self.ui.debug('%s\n'
                              % '\n'.join(sorted(str(res.info()).splitlines())))

            if action == 'download':
                # If downloading blobs, store downloaded data to local
                # blobstore
                localstore.download(oid, res)
            else:
                # Drain (and collect) whatever the server says about the
                # upload.
                while True:
                    data = res.read(1048576)
                    if not data:
                        break
                    response += data
                if response:
                    self.ui.debug('lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            if self.ui.debugflag:
                self.ui.debug('%s: %s\n' % (oid, ex.read()))
            raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
                                 % (ex, oid, action))

    def _batch(self, pointers, localstore, action):
        if action not in ['upload', 'download']:
            raise error.ProgrammingError('invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get('size', 0) for x in objects)
        sizes = {obj.get('oid'): obj.get('size', 0) for obj in objects}
        topic = {'upload': _('lfs uploading'),
                 'download': _('lfs downloading')}[action]
        if len(objects) > 1:
            self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
                         % (len(objects), util.bytecount(total)))

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get('size', 0)
                if self.ui.verbose:
                    msg = {'download': _('lfs: downloading %s (%s)\n'),
                           'upload': _('lfs: uploading %s (%s)\n')}[action]
                    self.ui.note(msg % (obj.get('oid'),
                                        util.bytecount(objsize)))
                remaining = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get('oid')
                        break
                    except socket.error as ex:
                        # Retry transient network failures a configurable
                        # number of times before giving up.
                        if remaining <= 0:
                            raise
                        self.ui.note(
                            _('lfs: failed: %r (remaining retry %d)\n')
                            % (ex, remaining))
                        remaining -= 1

        # Until https multiplexing gets sorted out
        if self.ui.configbool('experimental', 'lfs.worker-enable'):
            oids = worker.worker(self.ui, 0.1, transfer, (),
                                 sorted(objects, key=lambda o: o.get('oid')))
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get('oid')))

        with self.ui.makeprogress(topic, total=total) as progress:
            progress.update(0)
            processed = 0
            blobs = 0
            for _one, oid in oids:
                processed += sizes[oid]
                blobs += 1
                progress.update(processed)
                self.ui.note(_('lfs: processed: %s\n') % oid)

        if blobs > 0:
            if action == 'upload':
                self.ui.status(_('lfs: uploaded %d files (%s)\n')
                               % (blobs, util.bytecount(processed)))
            elif action == 'download':
                self.ui.status(_('lfs: downloaded %d files (%s)\n')
                               % (blobs, util.bytecount(processed)))

    def __del__(self):
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda: None)()
486
class _dummyremote(object):
    """Blob store backed by a directory under the repo vfs (``file://``).

    Blobs live under ``.hg/lfs/<url.path>``, addressed by oid.
    """

    def __init__(self, repo, url):
        storepath = repo.vfs.join('lfs', url.path)
        self.vfs = lfsvfs(storepath)

    def writebatch(self, pointers, fromstore):
        """Copy each deduplicated pointer's blob from ``fromstore`` here."""
        for pointer in _deduplicate(pointers):
            data = fromstore.read(pointer.oid(), verify=True)
            with self.vfs(pointer.oid(), 'wb', atomictemp=True) as fp:
                fp.write(data)

    def readbatch(self, pointers, tostore):
        """Stream each deduplicated pointer's blob into ``tostore``."""
        for pointer in _deduplicate(pointers):
            with self.vfs(pointer.oid(), 'rb') as fp:
                tostore.download(pointer.oid(), fp)
499
504
500 class _nullremote(object):
505 class _nullremote(object):
501 """Null store storing blobs to /dev/null."""
506 """Null store storing blobs to /dev/null."""
502
507
503 def __init__(self, repo, url):
508 def __init__(self, repo, url):
504 pass
509 pass
505
510
506 def writebatch(self, pointers, fromstore):
511 def writebatch(self, pointers, fromstore):
507 pass
512 pass
508
513
509 def readbatch(self, pointers, tostore):
514 def readbatch(self, pointers, tostore):
510 pass
515 pass
511
516
512 class _promptremote(object):
517 class _promptremote(object):
513 """Prompt user to set lfs.url when accessed."""
518 """Prompt user to set lfs.url when accessed."""
514
519
515 def __init__(self, repo, url):
520 def __init__(self, repo, url):
516 pass
521 pass
517
522
518 def writebatch(self, pointers, fromstore, ui=None):
523 def writebatch(self, pointers, fromstore, ui=None):
519 self._prompt()
524 self._prompt()
520
525
521 def readbatch(self, pointers, tostore, ui=None):
526 def readbatch(self, pointers, tostore, ui=None):
522 self._prompt()
527 self._prompt()
523
528
524 def _prompt(self):
529 def _prompt(self):
525 raise error.Abort(_('lfs.url needs to be configured'))
530 raise error.Abort(_('lfs.url needs to be configured'))
526
531
# Map a URL scheme to the remote-store class that handles it.  ``None``
# (no scheme configured or inferable) yields the store that prompts the
# user to set lfs.url.
_storemap = {
    'https': _gitlfsremote,
    'http': _gitlfsremote,
    'file': _dummyremote,
    'null': _nullremote,
    None: _promptremote,
}
534
539
def _deduplicate(pointers):
    """Remove any duplicate oids that exist in the list.

    Order is preserved; for a repeated oid the last pointer wins.
    """
    seen = util.sortdict()
    for pointer in pointers:
        seen[pointer.oid()] = pointer
    return seen.values()
541
546
542 def _verify(oid, content):
547 def _verify(oid, content):
543 realoid = hashlib.sha256(content).hexdigest()
548 realoid = hashlib.sha256(content).hexdigest()
544 if realoid != oid:
549 if realoid != oid:
545 raise LfsCorruptionError(_('detected corrupt lfs object: %s') % oid,
550 raise LfsCorruptionError(_('detected corrupt lfs object: %s') % oid,
546 hint=_('run hg verify'))
551 hint=_('run hg verify'))
547
552
def remote(repo, remote=None):
    """remotestore factory. return a store in _storemap depending on config

    If ``lfs.url`` is specified, use that remote endpoint.  Otherwise, try to
    infer the endpoint, based on the remote repository using the same path
    adjustments as git.  As an extension, 'http' is supported as well so that
    ``hg serve`` works out of the box.

    https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
    """
    lfsurl = repo.ui.config('lfs', 'url')
    url = util.url(lfsurl or '')
    if lfsurl is None:
        if remote:
            path = remote
        elif util.safehasattr(repo, '_subtoppath'):
            # The pull command sets this during the optional update phase, which
            # tells exactly where the pull originated, whether 'paths.default'
            # or explicit.
            path = repo._subtoppath
        else:
            # TODO: investigate 'paths.remote:lfsurl' style path customization,
            # and fall back to inferring from 'paths.remote' if unspecified.
            path = repo.ui.config('paths', 'default') or ''

        defaulturl = util.url(path)

        # TODO: support local paths as well.
        # TODO: consider the ssh -> https transformation that git applies
        if defaulturl.scheme in (b'http', b'https'):
            # Append a separator only when the path doesn't already end with
            # one.  (Bug fix: the previous code sliced with [:-1], comparing
            # everything *except* the final byte to b'/', which nearly always
            # differed and so appended a '/' even to paths already ending in
            # one.  [-1:] inspects the last byte, and is safe on empty bytes.)
            if defaulturl.path and defaulturl.path[-1:] != b'/':
                defaulturl.path += b'/'
            defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'

            url = util.url(bytes(defaulturl))
            repo.ui.note(_('lfs: assuming remote store: %s\n') % url)

    scheme = url.scheme
    if scheme not in _storemap:
        raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
    return _storemap[scheme](repo, url)
589
594
class LfsRemoteError(error.StorageError):
    """Raised for failures talking to the remote blob store."""
592
597
class LfsCorruptionError(error.Abort):
    """Abort raised when a corrupt blob is detected.

    Kept as a distinct type so the server side can give it specialized
    handling.
    """
@@ -1,467 +1,475
1 #require serve no-reposimplestore no-chg
1 #require serve no-reposimplestore no-chg
2
2
3 $ cat >> $HGRCPATH <<EOF
3 $ cat >> $HGRCPATH <<EOF
4 > [extensions]
4 > [extensions]
5 > lfs=
5 > lfs=
6 > [lfs]
6 > [lfs]
7 > track=all()
7 > track=all()
8 > [web]
8 > [web]
9 > push_ssl = False
9 > push_ssl = False
10 > allow-push = *
10 > allow-push = *
11 > EOF
11 > EOF
12
12
13 Serving LFS files can experimentally be turned off. The long term solution is
13 Serving LFS files can experimentally be turned off. The long term solution is
14 to support the 'verify' action in both client and server, so that the server can
14 to support the 'verify' action in both client and server, so that the server can
15 tell the client to store files elsewhere.
15 tell the client to store files elsewhere.
16
16
17 $ hg init server
17 $ hg init server
18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
19 > --config experimental.lfs.serve=False -R server serve -d \
19 > --config experimental.lfs.serve=False -R server serve -d \
20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
21 $ cat hg.pid >> $DAEMON_PIDS
21 $ cat hg.pid >> $DAEMON_PIDS
22
22
23 Uploads fail...
23 Uploads fail...
24
24
25 $ hg init client
25 $ hg init client
26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
27 $ hg -R client ci -Am 'initial commit'
27 $ hg -R client ci -Am 'initial commit'
28 adding lfs.bin
28 adding lfs.bin
29 $ hg -R client push http://localhost:$HGPORT
29 $ hg -R client push http://localhost:$HGPORT
30 pushing to http://localhost:$HGPORT/
30 pushing to http://localhost:$HGPORT/
31 searching for changes
31 searching for changes
32 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
32 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
33 (api=http://localhost:$HGPORT/.git/info/lfs/objects/batch, action=upload)
33 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "upload" is supported)
34 [255]
34 [255]
35
35
36 ... so do a local push to make the data available. Remove the blob from the
36 ... so do a local push to make the data available. Remove the blob from the
37 default cache, so it attempts to download.
37 default cache, so it attempts to download.
38 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
38 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
39 > --config "lfs.url=null://" \
39 > --config "lfs.url=null://" \
40 > -R client push -q server
40 > -R client push -q server
41 $ mv `hg config lfs.usercache` $TESTTMP/servercache
41 $ mv `hg config lfs.usercache` $TESTTMP/servercache
42
42
43 Downloads fail...
43 Downloads fail...
44
44
45 $ hg clone http://localhost:$HGPORT httpclone
45 $ hg clone http://localhost:$HGPORT httpclone
46 (remote is using large file support (lfs); lfs will be enabled for this repository)
46 (remote is using large file support (lfs); lfs will be enabled for this repository)
47 requesting all changes
47 requesting all changes
48 adding changesets
48 adding changesets
49 adding manifests
49 adding manifests
50 adding file changes
50 adding file changes
51 added 1 changesets with 1 changes to 1 files
51 added 1 changesets with 1 changes to 1 files
52 new changesets 525251863cad
52 new changesets 525251863cad
53 updating to branch default
53 updating to branch default
54 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
54 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
55 (api=http://localhost:$HGPORT/.git/info/lfs/objects/batch, action=download)
55 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "download" is supported)
56 [255]
56 [255]
57
57
58 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
58 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
59
59
60 $ cat $TESTTMP/access.log $TESTTMP/errors.log
60 $ cat $TESTTMP/access.log $TESTTMP/errors.log
61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
62 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
62 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
63 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
63 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
65 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
65 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
66 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
66 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
67 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
67 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
68 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
68 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
69 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
69 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
70
70
71 Blob URIs are correct when --prefix is used
72
73 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
71 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
74 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
72 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
75 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
73 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
76 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
74 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
77 $ cat hg.pid >> $DAEMON_PIDS
75 $ cat hg.pid >> $DAEMON_PIDS
78
76
77 Reasonable hint for a misconfigured blob server
78
79 $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT/missing
80 abort: LFS HTTP error: HTTP Error 404: Not Found!
81 (the "lfs.url" config may be used to override http://localhost:$HGPORT/missing)
82 [255]
83
84 Blob URIs are correct when --prefix is used
85
79 $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
86 $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
80 using http://localhost:$HGPORT/subdir/mount/point
87 using http://localhost:$HGPORT/subdir/mount/point
81 sending capabilities command
88 sending capabilities command
82 (remote is using large file support (lfs); lfs will be enabled for this repository)
89 (remote is using large file support (lfs); lfs will be enabled for this repository)
83 query 1; heads
90 query 1; heads
84 sending batch command
91 sending batch command
85 requesting all changes
92 requesting all changes
86 sending getbundle command
93 sending getbundle command
87 bundle2-input-bundle: with-transaction
94 bundle2-input-bundle: with-transaction
88 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
95 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
89 adding changesets
96 adding changesets
90 add changeset 525251863cad
97 add changeset 525251863cad
91 adding manifests
98 adding manifests
92 adding file changes
99 adding file changes
93 adding lfs.bin revisions
100 adding lfs.bin revisions
94 added 1 changesets with 1 changes to 1 files
101 added 1 changesets with 1 changes to 1 files
95 bundle2-input-part: total payload size 648
102 bundle2-input-part: total payload size 648
96 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
103 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
97 bundle2-input-part: "phase-heads" supported
104 bundle2-input-part: "phase-heads" supported
98 bundle2-input-part: total payload size 24
105 bundle2-input-part: total payload size 24
99 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
106 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
100 bundle2-input-part: total payload size 39
107 bundle2-input-part: total payload size 39
101 bundle2-input-bundle: 3 parts total
108 bundle2-input-bundle: 3 parts total
102 checking for updated bookmarks
109 checking for updated bookmarks
103 updating the branch cache
110 updating the branch cache
104 new changesets 525251863cad
111 new changesets 525251863cad
105 updating to branch default
112 updating to branch default
106 resolving manifests
113 resolving manifests
107 branchmerge: False, force: False, partial: False
114 branchmerge: False, force: False, partial: False
108 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
115 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
109 lfs: assuming remote store: http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs
116 lfs: assuming remote store: http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs
110 Status: 200
117 Status: 200
111 Content-Length: 371
118 Content-Length: 371
112 Content-Type: application/vnd.git-lfs+json
119 Content-Type: application/vnd.git-lfs+json
113 Date: $HTTP_DATE$
120 Date: $HTTP_DATE$
114 Server: testing stub value
121 Server: testing stub value
115 {
122 {
116 "objects": [
123 "objects": [
117 {
124 {
118 "actions": {
125 "actions": {
119 "download": {
126 "download": {
120 "expires_at": "$ISO_8601_DATE_TIME$"
127 "expires_at": "$ISO_8601_DATE_TIME$"
121 "header": {
128 "header": {
122 "Accept": "application/vnd.git-lfs"
129 "Accept": "application/vnd.git-lfs"
123 }
130 }
124 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
131 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
125 }
132 }
126 }
133 }
127 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
134 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
128 "size": 20
135 "size": 20
129 }
136 }
130 ]
137 ]
131 "transfer": "basic"
138 "transfer": "basic"
132 }
139 }
133 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
140 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
134 Status: 200
141 Status: 200
135 Content-Length: 20
142 Content-Length: 20
136 Content-Type: application/octet-stream
143 Content-Type: application/octet-stream
137 Date: $HTTP_DATE$
144 Date: $HTTP_DATE$
138 Server: testing stub value
145 Server: testing stub value
139 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
146 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
140 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
147 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
141 lfs: downloaded 1 files (20 bytes)
148 lfs: downloaded 1 files (20 bytes)
142 lfs.bin: remote created -> g
149 lfs.bin: remote created -> g
143 getting lfs.bin
150 getting lfs.bin
144 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
151 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
145 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
152 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
146 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
153 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
147
154
148 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
155 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
149
156
150 $ cat $TESTTMP/access.log $TESTTMP/errors.log
157 $ cat $TESTTMP/access.log $TESTTMP/errors.log
158 $LOCALIP - - [$LOGDATE$] "POST /missing/objects/batch HTTP/1.1" 404 - (glob)
151 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
159 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
152 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
160 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
153 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
161 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
154 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
162 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
155 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
163 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
156
164
157 Blobs that already exist in the usercache are linked into the repo store, even
165 Blobs that already exist in the usercache are linked into the repo store, even
158 though the client doesn't send the blob.
166 though the client doesn't send the blob.
159
167
160 $ hg init server2
168 $ hg init server2
161 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server2 serve -d \
169 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server2 serve -d \
162 > -p $HGPORT --pid-file=hg.pid \
170 > -p $HGPORT --pid-file=hg.pid \
163 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
171 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
164 $ cat hg.pid >> $DAEMON_PIDS
172 $ cat hg.pid >> $DAEMON_PIDS
165
173
166 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R cloned2 --debug \
174 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R cloned2 --debug \
167 > push http://localhost:$HGPORT | grep '^[{} ]'
175 > push http://localhost:$HGPORT | grep '^[{} ]'
168 {
176 {
169 "objects": [
177 "objects": [
170 {
178 {
171 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
179 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
172 "size": 20
180 "size": 20
173 }
181 }
174 ]
182 ]
175 "transfer": "basic"
183 "transfer": "basic"
176 }
184 }
177 $ find server2/.hg/store/lfs/objects | sort
185 $ find server2/.hg/store/lfs/objects | sort
178 server2/.hg/store/lfs/objects
186 server2/.hg/store/lfs/objects
179 server2/.hg/store/lfs/objects/f0
187 server2/.hg/store/lfs/objects/f0
180 server2/.hg/store/lfs/objects/f0/3217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
188 server2/.hg/store/lfs/objects/f0/3217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
181 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
189 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
182 $ cat $TESTTMP/errors.log
190 $ cat $TESTTMP/errors.log
183
191
184 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
192 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
185 > import errno
193 > import errno
186 > from hgext.lfs import blobstore
194 > from hgext.lfs import blobstore
187 >
195 >
188 > _numverifies = 0
196 > _numverifies = 0
189 > _readerr = True
197 > _readerr = True
190 >
198 >
191 > def reposetup(ui, repo):
199 > def reposetup(ui, repo):
192 > # Nothing to do with a remote repo
200 > # Nothing to do with a remote repo
193 > if not repo.local():
201 > if not repo.local():
194 > return
202 > return
195 >
203 >
196 > store = repo.svfs.lfslocalblobstore
204 > store = repo.svfs.lfslocalblobstore
197 > class badstore(store.__class__):
205 > class badstore(store.__class__):
198 > def download(self, oid, src):
206 > def download(self, oid, src):
199 > '''Called in the server to handle reading from the client in a
207 > '''Called in the server to handle reading from the client in a
200 > PUT request.'''
208 > PUT request.'''
201 > origread = src.read
209 > origread = src.read
202 > def _badread(nbytes):
210 > def _badread(nbytes):
203 > # Simulate bad data/checksum failure from the client
211 > # Simulate bad data/checksum failure from the client
204 > return b'0' * len(origread(nbytes))
212 > return b'0' * len(origread(nbytes))
205 > src.read = _badread
213 > src.read = _badread
206 > super(badstore, self).download(oid, src)
214 > super(badstore, self).download(oid, src)
207 >
215 >
208 > def _read(self, vfs, oid, verify):
216 > def _read(self, vfs, oid, verify):
209 > '''Called in the server to read data for a GET request, and then
217 > '''Called in the server to read data for a GET request, and then
210 > calls self._verify() on it before returning.'''
218 > calls self._verify() on it before returning.'''
211 > global _readerr
219 > global _readerr
212 > # One time simulation of a read error
220 > # One time simulation of a read error
213 > if _readerr:
221 > if _readerr:
214 > _readerr = False
222 > _readerr = False
215 > raise IOError(errno.EIO, '%s: I/O error' % oid)
223 > raise IOError(errno.EIO, '%s: I/O error' % oid)
216 > # Simulate corrupt content on client download
224 > # Simulate corrupt content on client download
217 > blobstore._verify(oid, 'dummy content')
225 > blobstore._verify(oid, 'dummy content')
218 >
226 >
219 > def verify(self, oid):
227 > def verify(self, oid):
220 > '''Called in the server to populate the Batch API response,
228 > '''Called in the server to populate the Batch API response,
221 > letting the client re-upload if the file is corrupt.'''
229 > letting the client re-upload if the file is corrupt.'''
222 > # Fail verify in Batch API for one clone command and one push
230 > # Fail verify in Batch API for one clone command and one push
223 > # command with an IOError. Then let it through to access other
231 > # command with an IOError. Then let it through to access other
224 > # functions. Checksum failure is tested elsewhere.
232 > # functions. Checksum failure is tested elsewhere.
225 > global _numverifies
233 > global _numverifies
226 > _numverifies += 1
234 > _numverifies += 1
227 > if _numverifies <= 2:
235 > if _numverifies <= 2:
228 > raise IOError(errno.EIO, '%s: I/O error' % oid)
236 > raise IOError(errno.EIO, '%s: I/O error' % oid)
229 > return super(badstore, self).verify(oid)
237 > return super(badstore, self).verify(oid)
230 >
238 >
231 > store.__class__ = badstore
239 > store.__class__ = badstore
232 > EOF
240 > EOF
233
241
234 $ rm -rf `hg config lfs.usercache`
242 $ rm -rf `hg config lfs.usercache`
235 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
243 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
236 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
244 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
237 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
245 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
238 > -R server serve -d \
246 > -R server serve -d \
239 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
247 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
240 $ cat hg.pid >> $DAEMON_PIDS
248 $ cat hg.pid >> $DAEMON_PIDS
241
249
242 Test an I/O error in localstore.verify() (Batch API) with GET
250 Test an I/O error in localstore.verify() (Batch API) with GET
243
251
244 $ hg clone http://localhost:$HGPORT1 httpclone2
252 $ hg clone http://localhost:$HGPORT1 httpclone2
245 (remote is using large file support (lfs); lfs will be enabled for this repository)
253 (remote is using large file support (lfs); lfs will be enabled for this repository)
246 requesting all changes
254 requesting all changes
247 adding changesets
255 adding changesets
248 adding manifests
256 adding manifests
249 adding file changes
257 adding file changes
250 added 1 changesets with 1 changes to 1 files
258 added 1 changesets with 1 changes to 1 files
251 new changesets 525251863cad
259 new changesets 525251863cad
252 updating to branch default
260 updating to branch default
253 abort: LFS server error for "lfs.bin": Internal server error!
261 abort: LFS server error for "lfs.bin": Internal server error!
254 [255]
262 [255]
255
263
256 Test an I/O error in localstore.verify() (Batch API) with PUT
264 Test an I/O error in localstore.verify() (Batch API) with PUT
257
265
258 $ echo foo > client/lfs.bin
266 $ echo foo > client/lfs.bin
259 $ hg -R client ci -m 'mod lfs'
267 $ hg -R client ci -m 'mod lfs'
260 $ hg -R client push http://localhost:$HGPORT1
268 $ hg -R client push http://localhost:$HGPORT1
261 pushing to http://localhost:$HGPORT1/
269 pushing to http://localhost:$HGPORT1/
262 searching for changes
270 searching for changes
263 abort: LFS server error for "unknown": Internal server error!
271 abort: LFS server error for "unknown": Internal server error!
264 [255]
272 [255]
265 TODO: figure out how to associate the file name in the error above
273 TODO: figure out how to associate the file name in the error above
266
274
267 Test a bad checksum sent by the client in the transfer API
275 Test a bad checksum sent by the client in the transfer API
268
276
269 $ hg -R client push http://localhost:$HGPORT1
277 $ hg -R client push http://localhost:$HGPORT1
270 pushing to http://localhost:$HGPORT1/
278 pushing to http://localhost:$HGPORT1/
271 searching for changes
279 searching for changes
272 abort: HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
280 abort: HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
273 [255]
281 [255]
274
282
275 $ echo 'test lfs file' > server/lfs3.bin
283 $ echo 'test lfs file' > server/lfs3.bin
276 $ hg --config experimental.lfs.disableusercache=True \
284 $ hg --config experimental.lfs.disableusercache=True \
277 > -R server ci -Aqm 'another lfs file'
285 > -R server ci -Aqm 'another lfs file'
278 $ hg -R client pull -q http://localhost:$HGPORT1
286 $ hg -R client pull -q http://localhost:$HGPORT1
279
287
280 Test an I/O error during the processing of the GET request
288 Test an I/O error during the processing of the GET request
281
289
282 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
290 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
283 > -R client update -r tip
291 > -R client update -r tip
284 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
292 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
285 [255]
293 [255]
286
294
287 Test a checksum failure during the processing of the GET request
295 Test a checksum failure during the processing of the GET request
288
296
289 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
297 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
290 > -R client update -r tip
298 > -R client update -r tip
291 abort: HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
299 abort: HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
292 [255]
300 [255]
293
301
294 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
302 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
295
303
296 $ cat $TESTTMP/access.log
304 $ cat $TESTTMP/access.log
297 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
305 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
298 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
306 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
299 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
307 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
300 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
308 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
301 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
309 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
302 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
310 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
303 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
311 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
304 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
312 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
305 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
313 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
306 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
314 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
307 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
315 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
308 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
316 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
309 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
317 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
310 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
318 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
311 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
319 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
312 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
320 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
313 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
321 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
314 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
322 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
315 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
323 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
316 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
324 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
317 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
325 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
318 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
326 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
319 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
327 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
320 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
328 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
321 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
329 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
322 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 422 - (glob)
330 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 422 - (glob)
323
331
324 $ grep -v ' File "' $TESTTMP/errors.log
332 $ grep -v ' File "' $TESTTMP/errors.log
325 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
333 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
326 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
334 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
327 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
335 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
328 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
336 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
329 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
337 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
330 $LOCALIP - - [$ERRDATE$] HG error: (glob)
338 $LOCALIP - - [$ERRDATE$] HG error: (glob)
331 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
339 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
332 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
340 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
333 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
341 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
334 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
342 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
335 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
343 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
336 $LOCALIP - - [$ERRDATE$] HG error: (glob)
344 $LOCALIP - - [$ERRDATE$] HG error: (glob)
337 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
345 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
338 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
346 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
339 $LOCALIP - - [$ERRDATE$] HG error: localstore.download(oid, req.bodyfh) (glob)
347 $LOCALIP - - [$ERRDATE$] HG error: localstore.download(oid, req.bodyfh) (glob)
340 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src) (glob)
348 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src) (glob)
341 $LOCALIP - - [$ERRDATE$] HG error: % oid) (glob)
349 $LOCALIP - - [$ERRDATE$] HG error: % oid) (glob)
342 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (glob)
350 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (glob)
343 $LOCALIP - - [$ERRDATE$] HG error: (glob)
351 $LOCALIP - - [$ERRDATE$] HG error: (glob)
344 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
352 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
345 Traceback (most recent call last):
353 Traceback (most recent call last):
346 self.do_write()
354 self.do_write()
347 self.do_hgweb()
355 self.do_hgweb()
348 for chunk in self.server.application(env, self._start_response):
356 for chunk in self.server.application(env, self._start_response):
349 for r in self._runwsgi(req, res, repo):
357 for r in self._runwsgi(req, res, repo):
350 rctx, req, res, self.check_perm)
358 rctx, req, res, self.check_perm)
351 return func(*(args + a), **kw)
359 return func(*(args + a), **kw)
352 lambda perm:
360 lambda perm:
353 res.setbodybytes(localstore.read(oid))
361 res.setbodybytes(localstore.read(oid))
354 blob = self._read(self.vfs, oid, verify)
362 blob = self._read(self.vfs, oid, verify)
355 raise IOError(errno.EIO, '%s: I/O error' % oid)
363 raise IOError(errno.EIO, '%s: I/O error' % oid)
356 IOError: [Errno 5] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error
364 IOError: [Errno 5] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error
357
365
358 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
366 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
359 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
367 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
360 $LOCALIP - - [$ERRDATE$] HG error: res.setbodybytes(localstore.read(oid)) (glob)
368 $LOCALIP - - [$ERRDATE$] HG error: res.setbodybytes(localstore.read(oid)) (glob)
361 $LOCALIP - - [$ERRDATE$] HG error: blob = self._read(self.vfs, oid, verify) (glob)
369 $LOCALIP - - [$ERRDATE$] HG error: blob = self._read(self.vfs, oid, verify) (glob)
362 $LOCALIP - - [$ERRDATE$] HG error: blobstore._verify(oid, 'dummy content') (glob)
370 $LOCALIP - - [$ERRDATE$] HG error: blobstore._verify(oid, 'dummy content') (glob)
363 $LOCALIP - - [$ERRDATE$] HG error: hint=_('run hg verify')) (glob)
371 $LOCALIP - - [$ERRDATE$] HG error: hint=_('run hg verify')) (glob)
364 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (glob)
372 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (glob)
365 $LOCALIP - - [$ERRDATE$] HG error: (glob)
373 $LOCALIP - - [$ERRDATE$] HG error: (glob)
366
374
367 Basic Authorization headers are returned by the Batch API, and sent back with
375 Basic Authorization headers are returned by the Batch API, and sent back with
368 the GET/PUT request.
376 the GET/PUT request.
369
377
370 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
378 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
371
379
372 $ cat >> $HGRCPATH << EOF
380 $ cat >> $HGRCPATH << EOF
373 > [experimental]
381 > [experimental]
374 > lfs.disableusercache = True
382 > lfs.disableusercache = True
375 > [auth]
383 > [auth]
376 > l.schemes=http
384 > l.schemes=http
377 > l.prefix=lo
385 > l.prefix=lo
378 > l.username=user
386 > l.username=user
379 > l.password=pass
387 > l.password=pass
380 > EOF
388 > EOF
381
389
382 $ cat << EOF > userpass.py
390 $ cat << EOF > userpass.py
383 > import base64
391 > import base64
384 > from mercurial.hgweb import common
392 > from mercurial.hgweb import common
385 > def perform_authentication(hgweb, req, op):
393 > def perform_authentication(hgweb, req, op):
386 > auth = req.headers.get(b'Authorization')
394 > auth = req.headers.get(b'Authorization')
387 > if not auth:
395 > if not auth:
388 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, b'who',
396 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, b'who',
389 > [(b'WWW-Authenticate', b'Basic Realm="mercurial"')])
397 > [(b'WWW-Authenticate', b'Basic Realm="mercurial"')])
390 > if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
398 > if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
391 > b'pass']:
399 > b'pass']:
392 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
400 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
393 > def extsetup():
401 > def extsetup():
394 > common.permhooks.insert(0, perform_authentication)
402 > common.permhooks.insert(0, perform_authentication)
395 > EOF
403 > EOF
396
404
397 $ hg --config extensions.x=$TESTTMP/userpass.py \
405 $ hg --config extensions.x=$TESTTMP/userpass.py \
398 > -R server serve -d -p $HGPORT1 --pid-file=hg.pid \
406 > -R server serve -d -p $HGPORT1 --pid-file=hg.pid \
399 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
407 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
400 $ mv hg.pid $DAEMON_PIDS
408 $ mv hg.pid $DAEMON_PIDS
401
409
402 $ hg clone --debug http://localhost:$HGPORT1 auth_clone | egrep '^[{}]| '
410 $ hg clone --debug http://localhost:$HGPORT1 auth_clone | egrep '^[{}]| '
403 {
411 {
404 "objects": [
412 "objects": [
405 {
413 {
406 "actions": {
414 "actions": {
407 "download": {
415 "download": {
408 "expires_at": "$ISO_8601_DATE_TIME$"
416 "expires_at": "$ISO_8601_DATE_TIME$"
409 "header": {
417 "header": {
410 "Accept": "application/vnd.git-lfs"
418 "Accept": "application/vnd.git-lfs"
411 "Authorization": "Basic dXNlcjpwYXNz"
419 "Authorization": "Basic dXNlcjpwYXNz"
412 }
420 }
413 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
421 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
414 }
422 }
415 }
423 }
416 "oid": "276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
424 "oid": "276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
417 "size": 14
425 "size": 14
418 }
426 }
419 ]
427 ]
420 "transfer": "basic"
428 "transfer": "basic"
421 }
429 }
422
430
423 $ echo 'another blob' > auth_clone/lfs.blob
431 $ echo 'another blob' > auth_clone/lfs.blob
424 $ hg -R auth_clone ci -Aqm 'add blob'
432 $ hg -R auth_clone ci -Aqm 'add blob'
425 $ hg -R auth_clone --debug push | egrep '^[{}]| '
433 $ hg -R auth_clone --debug push | egrep '^[{}]| '
426 {
434 {
427 "objects": [
435 "objects": [
428 {
436 {
429 "actions": {
437 "actions": {
430 "upload": {
438 "upload": {
431 "expires_at": "$ISO_8601_DATE_TIME$"
439 "expires_at": "$ISO_8601_DATE_TIME$"
432 "header": {
440 "header": {
433 "Accept": "application/vnd.git-lfs"
441 "Accept": "application/vnd.git-lfs"
434 "Authorization": "Basic dXNlcjpwYXNz"
442 "Authorization": "Basic dXNlcjpwYXNz"
435 }
443 }
436 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
444 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
437 }
445 }
438 }
446 }
439 "oid": "df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
447 "oid": "df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
440 "size": 13
448 "size": 13
441 }
449 }
442 ]
450 ]
443 "transfer": "basic"
451 "transfer": "basic"
444 }
452 }
445
453
446 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
454 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
447
455
448 $ cat $TESTTMP/access.log $TESTTMP/errors.log
456 $ cat $TESTTMP/access.log $TESTTMP/errors.log
449 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
457 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
450 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
458 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
451 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
459 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
452 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
460 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
453 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
461 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
454 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
462 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
455 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
463 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
456 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
464 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
457 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
465 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
458 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
466 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
459 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
467 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
460 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
468 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
461 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
469 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
462 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
470 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
463 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
471 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
464 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
472 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
465 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3 HTTP/1.1" 201 - (glob)
473 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3 HTTP/1.1" 201 - (glob)
466 $LOCALIP - - [$LOGDATE$] "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
474 $LOCALIP - - [$LOGDATE$] "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
467 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
475 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
General Comments 0
You need to be logged in to leave comments. Login now