##// END OF EJS Templates
lfs: fix the inferred remote store path when using a --prefix...
Matt Harbison -
r37709:d241e663 default
parent child Browse files
Show More
@@ -1,575 +1,575 b''
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import json
12 import json
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 from mercurial import (
19 from mercurial import (
20 error,
20 error,
21 pathutil,
21 pathutil,
22 pycompat,
22 pycompat,
23 url as urlmod,
23 url as urlmod,
24 util,
24 util,
25 vfs as vfsmod,
25 vfs as vfsmod,
26 worker,
26 worker,
27 )
27 )
28
28
29 from ..largefiles import lfutil
29 from ..largefiles import lfutil
30
30
# An lfs oid is a SHA-256 digest rendered as 64 lowercase hex characters.
_lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
33
33
class lfsvfs(vfsmod.vfs):
    """A vfs holding blobs named by oid, sharded into XX/XXXX... paths."""

    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError('unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[0:2], path[2:])

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        root = os.path.normpath(self.base)
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        oids = []

        for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''),
                                            onerror=onerror):
            # Reduce dirpath to the two-hex-character shard directory name.
            dirpath = dirpath[prefixlen:]

            # Silently skip unexpected files and directories
            if len(dirpath) == 2:
                # Reassemble the full oid (shard prefix + filename) and keep
                # only names that actually look like SHA-256 oids.
                oids.extend([dirpath + f for f in files
                             if _lfsre.match(dirpath + f)])

        yield ('', [], oids)
62
62
class nullvfs(lfsvfs):
    """A black-hole store: nothing exists, writes are silently discarded."""

    def __init__(self):
        # Deliberately skip the lfsvfs/vfs setup; there is no backing path.
        pass

    def exists(self, oid):
        return False

    def read(self, oid):
        # store.read() calls into here if the blob doesn't exist in its
        # self.vfs.  Raise the same error as a normal vfs when asked to read a
        # file that doesn't exist.  The only difference is the full file path
        # isn't available in the error.
        message = '%s: No such file or directory' % oid
        raise IOError(errno.ENOENT, message)

    def walk(self, path=None, onerror=None):
        return ('', [], [])

    def write(self, oid, data):
        # Discard the blob; the usercache is disabled.
        pass
82
82
class filewithprogress(object):
    """a file-like object that supports __len__ and read.

    Useful to provide progress information for how many bytes are read.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback  # invoked as callback(readsize)
        # Determine the total length up front by seeking to the end.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        if self._fp is None:
            # Underlying file already exhausted and closed.
            return b''
        data = self._fp.read(size)
        if not data:
            # EOF: release the wrapped file and report exhaustion.
            self._fp.close()
            self._fp = None
            return data
        if self._callback:
            self._callback(len(data))
        return data
110
110
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        # Blobs live under .hg/store/lfs/objects, sharded by the first two
        # hex characters of the oid (see lfsvfs.join()).
        fullpath = repo.svfs.join('lfs/objects')
        self.vfs = lfsvfs(fullpath)

        if repo.ui.configbool('experimental', 'lfs.disableusercache'):
            # nullvfs reports every blob missing and discards writes,
            # effectively disabling the shared usercache.
            self.cachevfs = nullvfs()
        else:
            usercache = lfutil._usercachedir(repo.ui, 'lfs')
            self.cachevfs = lfsvfs(usercache)
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file. Commit will
        # write to both it and the local store, as will anything that downloads
        # the blobs. However, things like clone without an update won't
        # populate the local store. For an init + push of a local clone,
        # the usercache is the only place it _could_ be. If not present, the
        # missing file msg here will indicate the local repo, not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, 'rb')

        return self.vfs(oid, 'rb')

    def download(self, oid, src):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore."""
        sha256 = hashlib.sha256()

        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                sha256.update(chunk)

        # The oid names the expected digest; mismatch means the remote sent
        # corrupt (or wrong) content.
        realoid = sha256.hexdigest()
        if realoid != oid:
            raise error.Abort(_('corrupt remote lfs object: %s') % oid)

        self._linktousercache(oid)

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or similar.
        As such, there is no need to verify the data. Imports from a remote
        store must use ``download()`` instead."""
        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            fp.write(data)

        self._linktousercache(oid)

    def _linktousercache(self, oid):
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if (not self.cachevfs.exists(oid)
            and not isinstance(self.cachevfs, nullvfs)):
            self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if not self.vfs.exists(oid):
            # Fall back to the usercache, then hardlink the blob into the
            # local store so the next read finds it directly.
            blob = self._read(self.cachevfs, oid, verify)

            # Even if revlog will verify the content, it needs to be verified
            # now before making the hardlink to avoid propagating corrupt blobs.
            # Don't abort if corruption is detected, because `hg verify` will
            # give more useful info about the corruption- simply don't add the
            # hardlink.
            if verify or hashlib.sha256(blob).hexdigest() == oid:
                self.ui.note(_('lfs: found %s in the usercache\n') % oid)
                lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        else:
            self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
            blob = self._read(self.vfs, oid, verify)
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def verify(self, oid):
        """Indicate whether or not the hash of the underlying file matches its
        name."""
        sha256 = hashlib.sha256()

        # Hash in 1MB chunks to keep memory bounded for large blobs.
        with self.open(oid) as fp:
            for chunk in util.filechunkiter(fp, size=1048576):
                sha256.update(chunk)

        return oid == sha256.hexdigest()

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
218
218
class _gitlfsremote(object):
    """Remote blobstore speaking the Git-LFS protocol over HTTP(S).

    Objects are located through the batch API, then transferred one at a
    time with the basic transfer protocol.
    """

    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip('/')
        useragent = repo.ui.config('experimental', 'lfs.user-agent')
        if not useragent:
            # Some servers gate features on a git-lfs client; advertise a
            # compatible agent string by default.
            useragent = 'git-lfs/2.3.4 (Mercurial %s)' % util.version()
        self.urlopener = urlmod.opener(ui, authinfo, useragent)
        self.retry = ui.configint('lfs', 'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(_deduplicate(pointers), fromstore, 'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blobstore."""
        self._batch(_deduplicate(pointers), tostore, 'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
        requestdata = json.dumps({
            'objects': objects,
            'operation': action,
        })
        batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
                                       data=requestdata)
        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
        try:
            rsp = self.urlopener.open(batchreq)
            rawjson = rsp.read()
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
                                 % (ex, action))
        try:
            response = json.loads(rawjson)
        except ValueError:
            raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
                                 % rawjson)

        if self.ui.debugflag:
            self.ui.debug('Status: %d\n' % rsp.status)
            # lfs-test-server and hg serve return headers in different order
            self.ui.debug('%s\n'
                          % '\n'.join(sorted(str(rsp.info()).splitlines())))

            if 'objects' in response:
                # Sort for stable debug output across server implementations.
                response['objects'] = sorted(response['objects'],
                                             key=lambda p: p['oid'])
            self.ui.debug('%s\n'
                          % json.dumps(response, indent=2,
                                       separators=('', ': '), sort_keys=True))

        return response

    def _checkforservererror(self, pointers, responses, action):
        """Scans errors from objects

        Raises LfsRemoteError if any objects have an error"""
        for response in responses:
            # The server should return 404 when objects cannot be found. Some
            # server implementation (ex. lfs-test-server) does not set "error"
            # but just removes "download" from "actions". Treat that case
            # as the same as 404 error.
            if 'error' not in response:
                if (action == 'download'
                    and action not in response.get('actions', [])):
                    code = 404
                else:
                    continue
            else:
                # An error dict without a code doesn't make much sense, so
                # treat as a server error.
                code = response.get('error').get('code', 500)

            ptrmap = {p.oid(): p for p in pointers}
            p = ptrmap.get(response['oid'], None)
            if p:
                filename = getattr(p, 'filename', 'unknown')
                errors = {
                    404: 'The object does not exist',
                    410: 'The object was removed by the owner',
                    422: 'Validation error',
                    500: 'Internal server error',
                }
                msg = errors.get(code, 'status code %d' % code)
                raise LfsRemoteError(_('LFS server error for "%s": %s')
                                     % (filename, msg))
            else:
                # Response for an oid we never asked about.
                raise LfsRemoteError(
                    _('LFS server error. Unsolicited response for oid %s')
                    % response['oid'])

    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get('objects', [])
        self._checkforservererror(pointers, objects, action)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist in the server.
        filteredobjects = [o for o in objects if action in o.get('actions', [])]

        return filteredobjects

    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
        """
        oid = pycompat.bytestr(obj['oid'])

        href = pycompat.bytestr(obj['actions'][action].get('href'))
        headers = obj['actions'][action].get('header', {}).items()

        request = util.urlreq.request(href)
        if action == 'upload':
            # If uploading blobs, read data from local blobstore.
            if not localstore.verify(oid):
                raise error.Abort(_('detected corrupt lfs object: %s') % oid,
                                  hint=_('run hg verify'))
            request.data = filewithprogress(localstore.open(oid), None)
            request.get_method = lambda: 'PUT'
            request.add_header('Content-Type', 'application/octet-stream')

        for k, v in headers:
            request.add_header(k, v)

        response = b''
        try:
            req = self.urlopener.open(request)

            if self.ui.debugflag:
                self.ui.debug('Status: %d\n' % req.status)
                # lfs-test-server and hg serve return headers in different order
                self.ui.debug('%s\n'
                              % '\n'.join(sorted(str(req.info()).splitlines())))

            if action == 'download':
                # If downloading blobs, store downloaded data to local blobstore
                localstore.download(oid, req)
            else:
                # Drain the upload response body for debug output.
                while True:
                    data = req.read(1048576)
                    if not data:
                        break
                    response += data
                if response:
                    self.ui.debug('lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            if self.ui.debugflag:
                self.ui.debug('%s: %s\n' % (oid, ex.read()))
            raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
                                 % (ex, oid, action))

    def _batch(self, pointers, localstore, action):
        """Transfer blobs for ``pointers`` between ``localstore`` and the
        remote, reporting progress.  ``action`` is 'upload' or 'download'."""
        if action not in ['upload', 'download']:
            raise error.ProgrammingError('invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get('size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get('oid')] = obj.get('size', 0)
        topic = {'upload': _('lfs uploading'),
                 'download': _('lfs downloading')}[action]
        if len(objects) > 1:
            self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
                         % (len(objects), util.bytecount(total)))
        self.ui.progress(topic, 0, total=total)
        def transfer(chunk):
            # Generator yielding (1, oid) per completed transfer, retrying
            # socket errors up to self.retry times per object.
            for obj in chunk:
                objsize = obj.get('size', 0)
                if self.ui.verbose:
                    if action == 'download':
                        msg = _('lfs: downloading %s (%s)\n')
                    elif action == 'upload':
                        msg = _('lfs: uploading %s (%s)\n')
                    self.ui.note(msg % (obj.get('oid'),
                                        util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get('oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _('lfs: failed: %r (remaining retry %d)\n')
                                % (ex, retry))
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool('experimental', 'lfs.worker-enable'):
            oids = worker.worker(self.ui, 0.1, transfer, (),
                                 sorted(objects, key=lambda o: o.get('oid')))
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get('oid')))

        processed = 0
        blobs = 0
        for _one, oid in oids:
            processed += sizes[oid]
            blobs += 1
            self.ui.progress(topic, processed, total=total)
            self.ui.note(_('lfs: processed: %s\n') % oid)
        self.ui.progress(topic, pos=None, total=total)

        if blobs > 0:
            if action == 'upload':
                self.ui.status(_('lfs: uploaded %d files (%s)\n')
                               % (blobs, util.bytecount(processed)))
            # TODO: coalesce the download requests, and comment this in
            #elif action == 'download':
            #    self.ui.status(_('lfs: downloaded %d files (%s)\n')
            #                   % (blobs, util.bytecount(processed)))

    def __del__(self):
        """Close any open connections held by the opener."""
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda : None)()
465
465
class _dummyremote(object):
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        fullpath = repo.vfs.join('lfs', url.path)
        self.vfs = lfsvfs(fullpath)

    def writebatch(self, pointers, fromstore):
        # Copy each unique blob out of the local store into this directory.
        for pointer in _deduplicate(pointers):
            oid = pointer.oid()
            blob = fromstore.read(oid, verify=True)
            with self.vfs(oid, 'wb', atomictemp=True) as out:
                out.write(blob)

    def readbatch(self, pointers, tostore):
        # Stream each unique blob into the local store, which verifies it.
        for pointer in _deduplicate(pointers):
            oid = pointer.oid()
            with self.vfs(oid, 'rb') as src:
                tostore.download(oid, src)
483
483
484 class _nullremote(object):
484 class _nullremote(object):
485 """Null store storing blobs to /dev/null."""
485 """Null store storing blobs to /dev/null."""
486
486
487 def __init__(self, repo, url):
487 def __init__(self, repo, url):
488 pass
488 pass
489
489
490 def writebatch(self, pointers, fromstore):
490 def writebatch(self, pointers, fromstore):
491 pass
491 pass
492
492
493 def readbatch(self, pointers, tostore):
493 def readbatch(self, pointers, tostore):
494 pass
494 pass
495
495
496 class _promptremote(object):
496 class _promptremote(object):
497 """Prompt user to set lfs.url when accessed."""
497 """Prompt user to set lfs.url when accessed."""
498
498
499 def __init__(self, repo, url):
499 def __init__(self, repo, url):
500 pass
500 pass
501
501
502 def writebatch(self, pointers, fromstore, ui=None):
502 def writebatch(self, pointers, fromstore, ui=None):
503 self._prompt()
503 self._prompt()
504
504
505 def readbatch(self, pointers, tostore, ui=None):
505 def readbatch(self, pointers, tostore, ui=None):
506 self._prompt()
506 self._prompt()
507
507
508 def _prompt(self):
508 def _prompt(self):
509 raise error.Abort(_('lfs.url needs to be configured'))
509 raise error.Abort(_('lfs.url needs to be configured'))
510
510
511 _storemap = {
511 _storemap = {
512 'https': _gitlfsremote,
512 'https': _gitlfsremote,
513 'http': _gitlfsremote,
513 'http': _gitlfsremote,
514 'file': _dummyremote,
514 'file': _dummyremote,
515 'null': _nullremote,
515 'null': _nullremote,
516 None: _promptremote,
516 None: _promptremote,
517 }
517 }
518
518
519 def _deduplicate(pointers):
519 def _deduplicate(pointers):
520 """Remove any duplicate oids that exist in the list"""
520 """Remove any duplicate oids that exist in the list"""
521 reduced = util.sortdict()
521 reduced = util.sortdict()
522 for p in pointers:
522 for p in pointers:
523 reduced[p.oid()] = p
523 reduced[p.oid()] = p
524 return reduced.values()
524 return reduced.values()
525
525
526 def _verify(oid, content):
526 def _verify(oid, content):
527 realoid = hashlib.sha256(content).hexdigest()
527 realoid = hashlib.sha256(content).hexdigest()
528 if realoid != oid:
528 if realoid != oid:
529 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
529 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
530 hint=_('run hg verify'))
530 hint=_('run hg verify'))
531
531
532 def remote(repo, remote=None):
532 def remote(repo, remote=None):
533 """remotestore factory. return a store in _storemap depending on config
533 """remotestore factory. return a store in _storemap depending on config
534
534
535 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
535 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
536 infer the endpoint, based on the remote repository using the same path
536 infer the endpoint, based on the remote repository using the same path
537 adjustments as git. As an extension, 'http' is supported as well so that
537 adjustments as git. As an extension, 'http' is supported as well so that
538 ``hg serve`` works out of the box.
538 ``hg serve`` works out of the box.
539
539
540 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
540 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
541 """
541 """
542 lfsurl = repo.ui.config('lfs', 'url')
542 lfsurl = repo.ui.config('lfs', 'url')
543 url = util.url(lfsurl or '')
543 url = util.url(lfsurl or '')
544 if lfsurl is None:
544 if lfsurl is None:
545 if remote:
545 if remote:
546 path = remote
546 path = remote
547 elif util.safehasattr(repo, '_subtoppath'):
547 elif util.safehasattr(repo, '_subtoppath'):
548 # The pull command sets this during the optional update phase, which
548 # The pull command sets this during the optional update phase, which
549 # tells exactly where the pull originated, whether 'paths.default'
549 # tells exactly where the pull originated, whether 'paths.default'
550 # or explicit.
550 # or explicit.
551 path = repo._subtoppath
551 path = repo._subtoppath
552 else:
552 else:
553 # TODO: investigate 'paths.remote:lfsurl' style path customization,
553 # TODO: investigate 'paths.remote:lfsurl' style path customization,
554 # and fall back to inferring from 'paths.remote' if unspecified.
554 # and fall back to inferring from 'paths.remote' if unspecified.
555 path = repo.ui.config('paths', 'default') or ''
555 path = repo.ui.config('paths', 'default') or ''
556
556
557 defaulturl = util.url(path)
557 defaulturl = util.url(path)
558
558
559 # TODO: support local paths as well.
559 # TODO: support local paths as well.
560 # TODO: consider the ssh -> https transformation that git applies
560 # TODO: consider the ssh -> https transformation that git applies
561 if defaulturl.scheme in (b'http', b'https'):
561 if defaulturl.scheme in (b'http', b'https'):
562 if defaulturl.path and defaulturl.path[:-1] != b'/':
562 if defaulturl.path and defaulturl.path[:-1] != b'/':
563 defaulturl.path += b'/'
563 defaulturl.path += b'/'
564 defaulturl.path = defaulturl.path or b'' + b'.git/info/lfs'
564 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
565
565
566 url = util.url(bytes(defaulturl))
566 url = util.url(bytes(defaulturl))
567 repo.ui.note(_('lfs: assuming remote store: %s\n') % url)
567 repo.ui.note(_('lfs: assuming remote store: %s\n') % url)
568
568
569 scheme = url.scheme
569 scheme = url.scheme
570 if scheme not in _storemap:
570 if scheme not in _storemap:
571 raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
571 raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
572 return _storemap[scheme](repo, url)
572 return _storemap[scheme](repo, url)
573
573
574 class LfsRemoteError(error.RevlogError):
574 class LfsRemoteError(error.RevlogError):
575 pass
575 pass
@@ -1,346 +1,346 b''
1 #require serve no-reposimplestore
1 #require serve no-reposimplestore
2
2
3 $ cat >> $HGRCPATH <<EOF
3 $ cat >> $HGRCPATH <<EOF
4 > [extensions]
4 > [extensions]
5 > lfs=
5 > lfs=
6 > [lfs]
6 > [lfs]
7 > track=all()
7 > track=all()
8 > [web]
8 > [web]
9 > push_ssl = False
9 > push_ssl = False
10 > allow-push = *
10 > allow-push = *
11 > EOF
11 > EOF
12
12
13 Serving LFS files can experimentally be turned off. The long term solution is
13 Serving LFS files can experimentally be turned off. The long term solution is
14 to support the 'verify' action in both client and server, so that the server can
14 to support the 'verify' action in both client and server, so that the server can
15 tell the client to store files elsewhere.
15 tell the client to store files elsewhere.
16
16
17 $ hg init server
17 $ hg init server
18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
19 > --config experimental.lfs.serve=False -R server serve -d \
19 > --config experimental.lfs.serve=False -R server serve -d \
20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
21 $ cat hg.pid >> $DAEMON_PIDS
21 $ cat hg.pid >> $DAEMON_PIDS
22
22
23 Uploads fail...
23 Uploads fail...
24
24
25 $ hg init client
25 $ hg init client
26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
27 $ hg -R client ci -Am 'initial commit'
27 $ hg -R client ci -Am 'initial commit'
28 adding lfs.bin
28 adding lfs.bin
29 $ hg -R client push http://localhost:$HGPORT
29 $ hg -R client push http://localhost:$HGPORT
30 pushing to http://localhost:$HGPORT/
30 pushing to http://localhost:$HGPORT/
31 searching for changes
31 searching for changes
32 abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=upload)!
32 abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=upload)!
33 [255]
33 [255]
34
34
35 ... so do a local push to make the data available. Remove the blob from the
35 ... so do a local push to make the data available. Remove the blob from the
36 default cache, so it attempts to download.
36 default cache, so it attempts to download.
37 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
37 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
38 > --config "lfs.url=null://" \
38 > --config "lfs.url=null://" \
39 > -R client push -q server
39 > -R client push -q server
40 $ mv `hg config lfs.usercache` $TESTTMP/servercache
40 $ mv `hg config lfs.usercache` $TESTTMP/servercache
41
41
42 Downloads fail...
42 Downloads fail...
43
43
44 $ hg clone http://localhost:$HGPORT httpclone
44 $ hg clone http://localhost:$HGPORT httpclone
45 requesting all changes
45 requesting all changes
46 adding changesets
46 adding changesets
47 adding manifests
47 adding manifests
48 adding file changes
48 adding file changes
49 added 1 changesets with 1 changes to 1 files
49 added 1 changesets with 1 changes to 1 files
50 new changesets 525251863cad
50 new changesets 525251863cad
51 updating to branch default
51 updating to branch default
52 abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=download)!
52 abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=download)!
53 [255]
53 [255]
54
54
55 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
55 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
56
56
57 $ cat $TESTTMP/access.log $TESTTMP/errors.log
57 $ cat $TESTTMP/access.log $TESTTMP/errors.log
58 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
58 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
59 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
59 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
60 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
60 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
62 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
62 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
63 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
63 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
65 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
65 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
66 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
66 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
67
67
68 Blob URIs are correct when --prefix is used
68 Blob URIs are correct when --prefix is used
69
69
70 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
70 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
71 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
71 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
72 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
72 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
73 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
73 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
74 $ cat hg.pid >> $DAEMON_PIDS
74 $ cat hg.pid >> $DAEMON_PIDS
75
75
76 $ hg --config lfs.url=http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs \
76 $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
77 > clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
78 using http://localhost:$HGPORT/subdir/mount/point
77 using http://localhost:$HGPORT/subdir/mount/point
79 sending capabilities command
78 sending capabilities command
80 query 1; heads
79 query 1; heads
81 sending batch command
80 sending batch command
82 requesting all changes
81 requesting all changes
83 sending getbundle command
82 sending getbundle command
84 bundle2-input-bundle: with-transaction
83 bundle2-input-bundle: with-transaction
85 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
84 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
86 adding changesets
85 adding changesets
87 add changeset 525251863cad
86 add changeset 525251863cad
88 adding manifests
87 adding manifests
89 adding file changes
88 adding file changes
90 adding lfs.bin revisions
89 adding lfs.bin revisions
91 added 1 changesets with 1 changes to 1 files
90 added 1 changesets with 1 changes to 1 files
92 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
91 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
93 bundle2-input-part: total payload size 648
92 bundle2-input-part: total payload size 648
94 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
93 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
95 bundle2-input-part: "phase-heads" supported
94 bundle2-input-part: "phase-heads" supported
96 bundle2-input-part: total payload size 24
95 bundle2-input-part: total payload size 24
97 bundle2-input-part: "cache:rev-branch-cache" supported
96 bundle2-input-part: "cache:rev-branch-cache" supported
98 bundle2-input-part: total payload size 39
97 bundle2-input-part: total payload size 39
99 bundle2-input-bundle: 3 parts total
98 bundle2-input-bundle: 3 parts total
100 checking for updated bookmarks
99 checking for updated bookmarks
101 updating the branch cache
100 updating the branch cache
102 new changesets 525251863cad
101 new changesets 525251863cad
103 updating to branch default
102 updating to branch default
104 resolving manifests
103 resolving manifests
105 branchmerge: False, force: False, partial: False
104 branchmerge: False, force: False, partial: False
106 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
105 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
106 lfs: assuming remote store: http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs
107 Status: 200
107 Status: 200
108 Content-Length: 371
108 Content-Length: 371
109 Content-Type: application/vnd.git-lfs+json
109 Content-Type: application/vnd.git-lfs+json
110 Date: $HTTP_DATE$
110 Date: $HTTP_DATE$
111 Server: testing stub value
111 Server: testing stub value
112 {
112 {
113 "objects": [
113 "objects": [
114 {
114 {
115 "actions": {
115 "actions": {
116 "download": {
116 "download": {
117 "expires_at": "$ISO_8601_DATE_TIME$"
117 "expires_at": "$ISO_8601_DATE_TIME$"
118 "header": {
118 "header": {
119 "Accept": "application/vnd.git-lfs"
119 "Accept": "application/vnd.git-lfs"
120 }
120 }
121 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
121 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
122 }
122 }
123 }
123 }
124 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
124 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
125 "size": 20
125 "size": 20
126 }
126 }
127 ]
127 ]
128 "transfer": "basic"
128 "transfer": "basic"
129 }
129 }
130 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
130 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
131 Status: 200
131 Status: 200
132 Content-Length: 20
132 Content-Length: 20
133 Content-Type: application/octet-stream
133 Content-Type: application/octet-stream
134 Date: $HTTP_DATE$
134 Date: $HTTP_DATE$
135 Server: testing stub value
135 Server: testing stub value
136 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
136 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
137 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
137 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
138 lfs.bin: remote created -> g
138 lfs.bin: remote created -> g
139 getting lfs.bin
139 getting lfs.bin
140 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
140 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
141 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
141 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
142
142
143 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
143 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
144
144
145 $ cat $TESTTMP/access.log $TESTTMP/errors.log
145 $ cat $TESTTMP/access.log $TESTTMP/errors.log
146 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
146 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
147 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
147 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
148 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
148 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
149 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
149 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
150 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
150 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
151
151
152 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
152 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
153 > import errno
153 > import errno
154 > from hgext.lfs import blobstore
154 > from hgext.lfs import blobstore
155 >
155 >
156 > _numverifies = 0
156 > _numverifies = 0
157 > _readerr = True
157 > _readerr = True
158 >
158 >
159 > def reposetup(ui, repo):
159 > def reposetup(ui, repo):
160 > # Nothing to do with a remote repo
160 > # Nothing to do with a remote repo
161 > if not repo.local():
161 > if not repo.local():
162 > return
162 > return
163 >
163 >
164 > store = repo.svfs.lfslocalblobstore
164 > store = repo.svfs.lfslocalblobstore
165 > class badstore(store.__class__):
165 > class badstore(store.__class__):
166 > def download(self, oid, src):
166 > def download(self, oid, src):
167 > '''Called in the server to handle reading from the client in a
167 > '''Called in the server to handle reading from the client in a
168 > PUT request.'''
168 > PUT request.'''
169 > origread = src.read
169 > origread = src.read
170 > def _badread(nbytes):
170 > def _badread(nbytes):
171 > # Simulate bad data/checksum failure from the client
171 > # Simulate bad data/checksum failure from the client
172 > return b'0' * len(origread(nbytes))
172 > return b'0' * len(origread(nbytes))
173 > src.read = _badread
173 > src.read = _badread
174 > super(badstore, self).download(oid, src)
174 > super(badstore, self).download(oid, src)
175 >
175 >
176 > def _read(self, vfs, oid, verify):
176 > def _read(self, vfs, oid, verify):
177 > '''Called in the server to read data for a GET request, and then
177 > '''Called in the server to read data for a GET request, and then
178 > calls self._verify() on it before returning.'''
178 > calls self._verify() on it before returning.'''
179 > global _readerr
179 > global _readerr
180 > # One time simulation of a read error
180 > # One time simulation of a read error
181 > if _readerr:
181 > if _readerr:
182 > _readerr = False
182 > _readerr = False
183 > raise IOError(errno.EIO, '%s: I/O error' % oid)
183 > raise IOError(errno.EIO, '%s: I/O error' % oid)
184 > # Simulate corrupt content on client download
184 > # Simulate corrupt content on client download
185 > blobstore._verify(oid, 'dummy content')
185 > blobstore._verify(oid, 'dummy content')
186 >
186 >
187 > def verify(self, oid):
187 > def verify(self, oid):
188 > '''Called in the server to populate the Batch API response,
188 > '''Called in the server to populate the Batch API response,
189 > letting the client re-upload if the file is corrupt.'''
189 > letting the client re-upload if the file is corrupt.'''
190 > # Fail verify in Batch API for one clone command and one push
190 > # Fail verify in Batch API for one clone command and one push
191 > # command with an IOError. Then let it through to access other
191 > # command with an IOError. Then let it through to access other
192 > # functions. Checksum failure is tested elsewhere.
192 > # functions. Checksum failure is tested elsewhere.
193 > global _numverifies
193 > global _numverifies
194 > _numverifies += 1
194 > _numverifies += 1
195 > if _numverifies <= 2:
195 > if _numverifies <= 2:
196 > raise IOError(errno.EIO, '%s: I/O error' % oid)
196 > raise IOError(errno.EIO, '%s: I/O error' % oid)
197 > return super(badstore, self).verify(oid)
197 > return super(badstore, self).verify(oid)
198 >
198 >
199 > store.__class__ = badstore
199 > store.__class__ = badstore
200 > EOF
200 > EOF
201
201
202 $ rm -rf `hg config lfs.usercache`
202 $ rm -rf `hg config lfs.usercache`
203 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
203 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
204 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
204 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
205 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
205 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
206 > -R server serve -d \
206 > -R server serve -d \
207 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
207 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
208 $ cat hg.pid >> $DAEMON_PIDS
208 $ cat hg.pid >> $DAEMON_PIDS
209
209
210 Test an I/O error in localstore.verify() (Batch API) with GET
210 Test an I/O error in localstore.verify() (Batch API) with GET
211
211
212 $ hg clone http://localhost:$HGPORT1 httpclone2
212 $ hg clone http://localhost:$HGPORT1 httpclone2
213 requesting all changes
213 requesting all changes
214 adding changesets
214 adding changesets
215 adding manifests
215 adding manifests
216 adding file changes
216 adding file changes
217 added 1 changesets with 1 changes to 1 files
217 added 1 changesets with 1 changes to 1 files
218 new changesets 525251863cad
218 new changesets 525251863cad
219 updating to branch default
219 updating to branch default
220 abort: LFS server error for "lfs.bin": Internal server error!
220 abort: LFS server error for "lfs.bin": Internal server error!
221 [255]
221 [255]
222
222
223 Test an I/O error in localstore.verify() (Batch API) with PUT
223 Test an I/O error in localstore.verify() (Batch API) with PUT
224
224
225 $ echo foo > client/lfs.bin
225 $ echo foo > client/lfs.bin
226 $ hg -R client ci -m 'mod lfs'
226 $ hg -R client ci -m 'mod lfs'
227 $ hg -R client push http://localhost:$HGPORT1
227 $ hg -R client push http://localhost:$HGPORT1
228 pushing to http://localhost:$HGPORT1/
228 pushing to http://localhost:$HGPORT1/
229 searching for changes
229 searching for changes
230 abort: LFS server error for "unknown": Internal server error!
230 abort: LFS server error for "unknown": Internal server error!
231 [255]
231 [255]
232 TODO: figure out how to associate the file name in the error above
232 TODO: figure out how to associate the file name in the error above
233
233
234 Test a bad checksum sent by the client in the transfer API
234 Test a bad checksum sent by the client in the transfer API
235
235
236 $ hg -R client push http://localhost:$HGPORT1
236 $ hg -R client push http://localhost:$HGPORT1
237 pushing to http://localhost:$HGPORT1/
237 pushing to http://localhost:$HGPORT1/
238 searching for changes
238 searching for changes
239 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
239 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
240 [255]
240 [255]
241
241
242 $ echo 'test lfs file' > server/lfs3.bin
242 $ echo 'test lfs file' > server/lfs3.bin
243 $ hg --config experimental.lfs.disableusercache=True \
243 $ hg --config experimental.lfs.disableusercache=True \
244 > -R server ci -Aqm 'another lfs file'
244 > -R server ci -Aqm 'another lfs file'
245 $ hg -R client pull -q http://localhost:$HGPORT1
245 $ hg -R client pull -q http://localhost:$HGPORT1
246
246
247 Test an I/O error during the processing of the GET request
247 Test an I/O error during the processing of the GET request
248
248
249 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
249 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
250 > -R client update -r tip
250 > -R client update -r tip
251 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
251 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
252 [255]
252 [255]
253
253
254 Test a checksum failure during the processing of the GET request
254 Test a checksum failure during the processing of the GET request
255
255
256 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
256 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
257 > -R client update -r tip
257 > -R client update -r tip
258 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
258 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
259 [255]
259 [255]
260
260
261 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
261 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
262
262
263 $ cat $TESTTMP/access.log
263 $ cat $TESTTMP/access.log
264 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
264 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
265 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
265 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
266 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
266 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
267 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
267 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
268 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
268 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
269 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
269 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
270 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
270 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
271 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
271 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
272 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
272 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
273 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
273 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
274 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
274 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
275 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
275 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
276 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
276 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
277 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
277 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
278 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
278 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
279 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
279 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
280 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
280 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
281 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
281 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
282 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 500 - (glob)
282 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 500 - (glob)
283 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
283 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
284 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
284 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
285 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
285 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
286 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
286 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
287 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
287 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
288 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
288 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
289 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
289 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
290
290
291 $ grep -v ' File "' $TESTTMP/errors.log
291 $ grep -v ' File "' $TESTTMP/errors.log
292 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
292 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
293 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
293 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
294 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
294 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
295 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
295 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
296 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
296 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
297 $LOCALIP - - [$ERRDATE$] HG error: (glob)
297 $LOCALIP - - [$ERRDATE$] HG error: (glob)
298 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
298 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
299 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
299 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
300 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
300 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
301 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
301 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, '%s: I/O error' % oid) (glob)
302 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
302 $LOCALIP - - [$ERRDATE$] HG error: IOError: [Errno 5] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
303 $LOCALIP - - [$ERRDATE$] HG error: (glob)
303 $LOCALIP - - [$ERRDATE$] HG error: (glob)
304 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
304 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
305 Traceback (most recent call last):
305 Traceback (most recent call last):
306 self.do_write()
306 self.do_write()
307 self.do_hgweb()
307 self.do_hgweb()
308 for chunk in self.server.application(env, self._start_response):
308 for chunk in self.server.application(env, self._start_response):
309 for r in self._runwsgi(req, res, repo):
309 for r in self._runwsgi(req, res, repo):
310 rctx, req, res, self.check_perm)
310 rctx, req, res, self.check_perm)
311 return func(*(args + a), **kw)
311 return func(*(args + a), **kw)
312 lambda perm:
312 lambda perm:
313 localstore.download(oid, req.bodyfh)
313 localstore.download(oid, req.bodyfh)
314 super(badstore, self).download(oid, src)
314 super(badstore, self).download(oid, src)
315 raise error.Abort(_('corrupt remote lfs object: %s') % oid)
315 raise error.Abort(_('corrupt remote lfs object: %s') % oid)
316 Abort: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
316 Abort: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
317
317
318 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
318 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
319 Traceback (most recent call last):
319 Traceback (most recent call last):
320 self.do_write()
320 self.do_write()
321 self.do_hgweb()
321 self.do_hgweb()
322 for chunk in self.server.application(env, self._start_response):
322 for chunk in self.server.application(env, self._start_response):
323 for r in self._runwsgi(req, res, repo):
323 for r in self._runwsgi(req, res, repo):
324 rctx, req, res, self.check_perm)
324 rctx, req, res, self.check_perm)
325 return func(*(args + a), **kw)
325 return func(*(args + a), **kw)
326 lambda perm:
326 lambda perm:
327 res.setbodybytes(localstore.read(oid))
327 res.setbodybytes(localstore.read(oid))
328 blob = self._read(self.vfs, oid, verify)
328 blob = self._read(self.vfs, oid, verify)
329 raise IOError(errno.EIO, '%s: I/O error' % oid)
329 raise IOError(errno.EIO, '%s: I/O error' % oid)
330 IOError: [Errno 5] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error
330 IOError: [Errno 5] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error
331
331
332 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
332 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
333 Traceback (most recent call last):
333 Traceback (most recent call last):
334 self.do_write()
334 self.do_write()
335 self.do_hgweb()
335 self.do_hgweb()
336 for chunk in self.server.application(env, self._start_response):
336 for chunk in self.server.application(env, self._start_response):
337 for r in self._runwsgi(req, res, repo):
337 for r in self._runwsgi(req, res, repo):
338 rctx, req, res, self.check_perm)
338 rctx, req, res, self.check_perm)
339 return func(*(args + a), **kw)
339 return func(*(args + a), **kw)
340 lambda perm:
340 lambda perm:
341 res.setbodybytes(localstore.read(oid))
341 res.setbodybytes(localstore.read(oid))
342 blob = self._read(self.vfs, oid, verify)
342 blob = self._read(self.vfs, oid, verify)
343 blobstore._verify(oid, 'dummy content')
343 blobstore._verify(oid, 'dummy content')
344 hint=_('run hg verify'))
344 hint=_('run hg verify'))
345 Abort: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d
345 Abort: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d
346
346
General Comments 0
You need to be logged in to leave comments. Login now