##// END OF EJS Templates
lfs: deduplicate oids in the transfer...
Matt Harbison -
r35945:9b413478 default
parent child Browse files
Show More
@@ -1,474 +1,481 b''
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import json
11 import json
12 import os
12 import os
13 import re
13 import re
14 import socket
14 import socket
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 from mercurial import (
18 from mercurial import (
19 error,
19 error,
20 pathutil,
20 pathutil,
21 url as urlmod,
21 url as urlmod,
22 util,
22 util,
23 vfs as vfsmod,
23 vfs as vfsmod,
24 worker,
24 worker,
25 )
25 )
26
26
27 from ..largefiles import lfutil
27 from ..largefiles import lfutil
28
28
29 # 64 bytes for SHA256
29 # 64 bytes for SHA256
30 _lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
30 _lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
31
31
32 class lfsvfs(vfsmod.vfs):
32 class lfsvfs(vfsmod.vfs):
33 def join(self, path):
33 def join(self, path):
34 """split the path at first two characters, like: XX/XXXXX..."""
34 """split the path at first two characters, like: XX/XXXXX..."""
35 if not _lfsre.match(path):
35 if not _lfsre.match(path):
36 raise error.ProgrammingError('unexpected lfs path: %s' % path)
36 raise error.ProgrammingError('unexpected lfs path: %s' % path)
37 return super(lfsvfs, self).join(path[0:2], path[2:])
37 return super(lfsvfs, self).join(path[0:2], path[2:])
38
38
39 def walk(self, path=None, onerror=None):
39 def walk(self, path=None, onerror=None):
40 """Yield (dirpath, [], oids) tuple for blobs under path
40 """Yield (dirpath, [], oids) tuple for blobs under path
41
41
42 Oids only exist in the root of this vfs, so dirpath is always ''.
42 Oids only exist in the root of this vfs, so dirpath is always ''.
43 """
43 """
44 root = os.path.normpath(self.base)
44 root = os.path.normpath(self.base)
45 # when dirpath == root, dirpath[prefixlen:] becomes empty
45 # when dirpath == root, dirpath[prefixlen:] becomes empty
46 # because len(dirpath) < prefixlen.
46 # because len(dirpath) < prefixlen.
47 prefixlen = len(pathutil.normasprefix(root))
47 prefixlen = len(pathutil.normasprefix(root))
48 oids = []
48 oids = []
49
49
50 for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''),
50 for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''),
51 onerror=onerror):
51 onerror=onerror):
52 dirpath = dirpath[prefixlen:]
52 dirpath = dirpath[prefixlen:]
53
53
54 # Silently skip unexpected files and directories
54 # Silently skip unexpected files and directories
55 if len(dirpath) == 2:
55 if len(dirpath) == 2:
56 oids.extend([dirpath + f for f in files
56 oids.extend([dirpath + f for f in files
57 if _lfsre.match(dirpath + f)])
57 if _lfsre.match(dirpath + f)])
58
58
59 yield ('', [], oids)
59 yield ('', [], oids)
60
60
61 class filewithprogress(object):
61 class filewithprogress(object):
62 """a file-like object that supports __len__ and read.
62 """a file-like object that supports __len__ and read.
63
63
64 Useful to provide progress information for how many bytes are read.
64 Useful to provide progress information for how many bytes are read.
65 """
65 """
66
66
67 def __init__(self, fp, callback):
67 def __init__(self, fp, callback):
68 self._fp = fp
68 self._fp = fp
69 self._callback = callback # func(readsize)
69 self._callback = callback # func(readsize)
70 fp.seek(0, os.SEEK_END)
70 fp.seek(0, os.SEEK_END)
71 self._len = fp.tell()
71 self._len = fp.tell()
72 fp.seek(0)
72 fp.seek(0)
73
73
74 def __len__(self):
74 def __len__(self):
75 return self._len
75 return self._len
76
76
77 def read(self, size):
77 def read(self, size):
78 if self._fp is None:
78 if self._fp is None:
79 return b''
79 return b''
80 data = self._fp.read(size)
80 data = self._fp.read(size)
81 if data:
81 if data:
82 if self._callback:
82 if self._callback:
83 self._callback(len(data))
83 self._callback(len(data))
84 else:
84 else:
85 self._fp.close()
85 self._fp.close()
86 self._fp = None
86 self._fp = None
87 return data
87 return data
88
88
89 class local(object):
89 class local(object):
90 """Local blobstore for large file contents.
90 """Local blobstore for large file contents.
91
91
92 This blobstore is used both as a cache and as a staging area for large blobs
92 This blobstore is used both as a cache and as a staging area for large blobs
93 to be uploaded to the remote blobstore.
93 to be uploaded to the remote blobstore.
94 """
94 """
95
95
96 def __init__(self, repo):
96 def __init__(self, repo):
97 fullpath = repo.svfs.join('lfs/objects')
97 fullpath = repo.svfs.join('lfs/objects')
98 self.vfs = lfsvfs(fullpath)
98 self.vfs = lfsvfs(fullpath)
99 usercache = lfutil._usercachedir(repo.ui, 'lfs')
99 usercache = lfutil._usercachedir(repo.ui, 'lfs')
100 self.cachevfs = lfsvfs(usercache)
100 self.cachevfs = lfsvfs(usercache)
101 self.ui = repo.ui
101 self.ui = repo.ui
102
102
103 def open(self, oid):
103 def open(self, oid):
104 """Open a read-only file descriptor to the named blob, in either the
104 """Open a read-only file descriptor to the named blob, in either the
105 usercache or the local store."""
105 usercache or the local store."""
106 # The usercache is the most likely place to hold the file. Commit will
106 # The usercache is the most likely place to hold the file. Commit will
107 # write to both it and the local store, as will anything that downloads
107 # write to both it and the local store, as will anything that downloads
108 # the blobs. However, things like clone without an update won't
108 # the blobs. However, things like clone without an update won't
109 # populate the local store. For an init + push of a local clone,
109 # populate the local store. For an init + push of a local clone,
110 # the usercache is the only place it _could_ be. If not present, the
110 # the usercache is the only place it _could_ be. If not present, the
111 # missing file msg here will indicate the local repo, not the usercache.
111 # missing file msg here will indicate the local repo, not the usercache.
112 if self.cachevfs.exists(oid):
112 if self.cachevfs.exists(oid):
113 return self.cachevfs(oid, 'rb')
113 return self.cachevfs(oid, 'rb')
114
114
115 return self.vfs(oid, 'rb')
115 return self.vfs(oid, 'rb')
116
116
117 def download(self, oid, src):
117 def download(self, oid, src):
118 """Read the blob from the remote source in chunks, verify the content,
118 """Read the blob from the remote source in chunks, verify the content,
119 and write to this local blobstore."""
119 and write to this local blobstore."""
120 sha256 = hashlib.sha256()
120 sha256 = hashlib.sha256()
121
121
122 with self.vfs(oid, 'wb', atomictemp=True) as fp:
122 with self.vfs(oid, 'wb', atomictemp=True) as fp:
123 for chunk in util.filechunkiter(src, size=1048576):
123 for chunk in util.filechunkiter(src, size=1048576):
124 fp.write(chunk)
124 fp.write(chunk)
125 sha256.update(chunk)
125 sha256.update(chunk)
126
126
127 realoid = sha256.hexdigest()
127 realoid = sha256.hexdigest()
128 if realoid != oid:
128 if realoid != oid:
129 raise error.Abort(_('corrupt remote lfs object: %s') % oid)
129 raise error.Abort(_('corrupt remote lfs object: %s') % oid)
130
130
131 # XXX: should we verify the content of the cache, and hardlink back to
131 # XXX: should we verify the content of the cache, and hardlink back to
132 # the local store on success, but truncate, write and link on failure?
132 # the local store on success, but truncate, write and link on failure?
133 if not self.cachevfs.exists(oid):
133 if not self.cachevfs.exists(oid):
134 self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
134 self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
135 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
135 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
136
136
137 def write(self, oid, data):
137 def write(self, oid, data):
138 """Write blob to local blobstore.
138 """Write blob to local blobstore.
139
139
140 This should only be called from the filelog during a commit or similar.
140 This should only be called from the filelog during a commit or similar.
141 As such, there is no need to verify the data. Imports from a remote
141 As such, there is no need to verify the data. Imports from a remote
142 store must use ``download()`` instead."""
142 store must use ``download()`` instead."""
143 with self.vfs(oid, 'wb', atomictemp=True) as fp:
143 with self.vfs(oid, 'wb', atomictemp=True) as fp:
144 fp.write(data)
144 fp.write(data)
145
145
146 # XXX: should we verify the content of the cache, and hardlink back to
146 # XXX: should we verify the content of the cache, and hardlink back to
147 # the local store on success, but truncate, write and link on failure?
147 # the local store on success, but truncate, write and link on failure?
148 if not self.cachevfs.exists(oid):
148 if not self.cachevfs.exists(oid):
149 self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
149 self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
150 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
150 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
151
151
152 def read(self, oid, verify=True):
152 def read(self, oid, verify=True):
153 """Read blob from local blobstore."""
153 """Read blob from local blobstore."""
154 if not self.vfs.exists(oid):
154 if not self.vfs.exists(oid):
155 blob = self._read(self.cachevfs, oid, verify)
155 blob = self._read(self.cachevfs, oid, verify)
156
156
157 # Even if revlog will verify the content, it needs to be verified
157 # Even if revlog will verify the content, it needs to be verified
158 # now before making the hardlink to avoid propagating corrupt blobs.
158 # now before making the hardlink to avoid propagating corrupt blobs.
159 # Don't abort if corruption is detected, because `hg verify` will
159 # Don't abort if corruption is detected, because `hg verify` will
 160 # give more useful info about the corruption; simply don't add the
 160 # give more useful info about the corruption; simply don't add the
161 # hardlink.
161 # hardlink.
162 if verify or hashlib.sha256(blob).hexdigest() == oid:
162 if verify or hashlib.sha256(blob).hexdigest() == oid:
163 self.ui.note(_('lfs: found %s in the usercache\n') % oid)
163 self.ui.note(_('lfs: found %s in the usercache\n') % oid)
164 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
164 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
165 else:
165 else:
166 self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
166 self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
167 blob = self._read(self.vfs, oid, verify)
167 blob = self._read(self.vfs, oid, verify)
168 return blob
168 return blob
169
169
170 def _read(self, vfs, oid, verify):
170 def _read(self, vfs, oid, verify):
171 """Read blob (after verifying) from the given store"""
171 """Read blob (after verifying) from the given store"""
172 blob = vfs.read(oid)
172 blob = vfs.read(oid)
173 if verify:
173 if verify:
174 _verify(oid, blob)
174 _verify(oid, blob)
175 return blob
175 return blob
176
176
177 def has(self, oid):
177 def has(self, oid):
178 """Returns True if the local blobstore contains the requested blob,
178 """Returns True if the local blobstore contains the requested blob,
179 False otherwise."""
179 False otherwise."""
180 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
180 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
181
181
182 class _gitlfsremote(object):
182 class _gitlfsremote(object):
183
183
184 def __init__(self, repo, url):
184 def __init__(self, repo, url):
185 ui = repo.ui
185 ui = repo.ui
186 self.ui = ui
186 self.ui = ui
187 baseurl, authinfo = url.authinfo()
187 baseurl, authinfo = url.authinfo()
188 self.baseurl = baseurl.rstrip('/')
188 self.baseurl = baseurl.rstrip('/')
189 useragent = repo.ui.config('experimental', 'lfs.user-agent')
189 useragent = repo.ui.config('experimental', 'lfs.user-agent')
190 if not useragent:
190 if not useragent:
191 useragent = 'git-lfs/2.3.4 (Mercurial %s)' % util.version()
191 useragent = 'git-lfs/2.3.4 (Mercurial %s)' % util.version()
192 self.urlopener = urlmod.opener(ui, authinfo, useragent)
192 self.urlopener = urlmod.opener(ui, authinfo, useragent)
193 self.retry = ui.configint('lfs', 'retry')
193 self.retry = ui.configint('lfs', 'retry')
194
194
195 def writebatch(self, pointers, fromstore):
195 def writebatch(self, pointers, fromstore):
196 """Batch upload from local to remote blobstore."""
196 """Batch upload from local to remote blobstore."""
197 self._batch(pointers, fromstore, 'upload')
197 self._batch(_deduplicate(pointers), fromstore, 'upload')
198
198
199 def readbatch(self, pointers, tostore):
199 def readbatch(self, pointers, tostore):
 200 """Batch download from remote to local blobstore."""
 200 """Batch download from remote to local blobstore."""
201 self._batch(pointers, tostore, 'download')
201 self._batch(_deduplicate(pointers), tostore, 'download')
202
202
203 def _batchrequest(self, pointers, action):
203 def _batchrequest(self, pointers, action):
204 """Get metadata about objects pointed by pointers for given action
204 """Get metadata about objects pointed by pointers for given action
205
205
206 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
206 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
207 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
207 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
208 """
208 """
209 objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
209 objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
210 requestdata = json.dumps({
210 requestdata = json.dumps({
211 'objects': objects,
211 'objects': objects,
212 'operation': action,
212 'operation': action,
213 })
213 })
214 batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
214 batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
215 data=requestdata)
215 data=requestdata)
216 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
216 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
217 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
217 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
218 try:
218 try:
219 rawjson = self.urlopener.open(batchreq).read()
219 rawjson = self.urlopener.open(batchreq).read()
220 except util.urlerr.httperror as ex:
220 except util.urlerr.httperror as ex:
221 raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
221 raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
222 % (ex, action))
222 % (ex, action))
223 try:
223 try:
224 response = json.loads(rawjson)
224 response = json.loads(rawjson)
225 except ValueError:
225 except ValueError:
226 raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
226 raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
227 % rawjson)
227 % rawjson)
228 return response
228 return response
229
229
230 def _checkforservererror(self, pointers, responses, action):
230 def _checkforservererror(self, pointers, responses, action):
231 """Scans errors from objects
231 """Scans errors from objects
232
232
233 Raises LfsRemoteError if any objects have an error"""
233 Raises LfsRemoteError if any objects have an error"""
234 for response in responses:
234 for response in responses:
235 # The server should return 404 when objects cannot be found. Some
235 # The server should return 404 when objects cannot be found. Some
236 # server implementation (ex. lfs-test-server) does not set "error"
236 # server implementation (ex. lfs-test-server) does not set "error"
237 # but just removes "download" from "actions". Treat that case
237 # but just removes "download" from "actions". Treat that case
238 # as the same as 404 error.
238 # as the same as 404 error.
239 notfound = (response.get('error', {}).get('code') == 404
239 notfound = (response.get('error', {}).get('code') == 404
240 or (action == 'download'
240 or (action == 'download'
241 and action not in response.get('actions', [])))
241 and action not in response.get('actions', [])))
242 if notfound:
242 if notfound:
243 ptrmap = {p.oid(): p for p in pointers}
243 ptrmap = {p.oid(): p for p in pointers}
244 p = ptrmap.get(response['oid'], None)
244 p = ptrmap.get(response['oid'], None)
245 if p:
245 if p:
246 filename = getattr(p, 'filename', 'unknown')
246 filename = getattr(p, 'filename', 'unknown')
247 raise LfsRemoteError(
247 raise LfsRemoteError(
248 _(('LFS server error. Remote object '
248 _(('LFS server error. Remote object '
249 'for "%s" not found: %r')) % (filename, response))
249 'for "%s" not found: %r')) % (filename, response))
250 else:
250 else:
251 raise LfsRemoteError(
251 raise LfsRemoteError(
252 _('LFS server error. Unsolicited response for oid %s')
252 _('LFS server error. Unsolicited response for oid %s')
253 % response['oid'])
253 % response['oid'])
254 if 'error' in response:
254 if 'error' in response:
255 raise LfsRemoteError(_('LFS server error: %r') % response)
255 raise LfsRemoteError(_('LFS server error: %r') % response)
256
256
257 def _extractobjects(self, response, pointers, action):
257 def _extractobjects(self, response, pointers, action):
258 """extract objects from response of the batch API
258 """extract objects from response of the batch API
259
259
260 response: parsed JSON object returned by batch API
260 response: parsed JSON object returned by batch API
261 return response['objects'] filtered by action
261 return response['objects'] filtered by action
262 raise if any object has an error
262 raise if any object has an error
263 """
263 """
264 # Scan errors from objects - fail early
264 # Scan errors from objects - fail early
265 objects = response.get('objects', [])
265 objects = response.get('objects', [])
266 self._checkforservererror(pointers, objects, action)
266 self._checkforservererror(pointers, objects, action)
267
267
268 # Filter objects with given action. Practically, this skips uploading
268 # Filter objects with given action. Practically, this skips uploading
269 # objects which exist in the server.
269 # objects which exist in the server.
270 filteredobjects = [o for o in objects if action in o.get('actions', [])]
270 filteredobjects = [o for o in objects if action in o.get('actions', [])]
271
271
272 return filteredobjects
272 return filteredobjects
273
273
274 def _basictransfer(self, obj, action, localstore):
274 def _basictransfer(self, obj, action, localstore):
275 """Download or upload a single object using basic transfer protocol
275 """Download or upload a single object using basic transfer protocol
276
276
277 obj: dict, an object description returned by batch API
277 obj: dict, an object description returned by batch API
278 action: string, one of ['upload', 'download']
278 action: string, one of ['upload', 'download']
279 localstore: blobstore.local
279 localstore: blobstore.local
280
280
281 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
281 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
282 basic-transfers.md
282 basic-transfers.md
283 """
283 """
284 oid = str(obj['oid'])
284 oid = str(obj['oid'])
285
285
286 href = str(obj['actions'][action].get('href'))
286 href = str(obj['actions'][action].get('href'))
287 headers = obj['actions'][action].get('header', {}).items()
287 headers = obj['actions'][action].get('header', {}).items()
288
288
289 request = util.urlreq.request(href)
289 request = util.urlreq.request(href)
290 if action == 'upload':
290 if action == 'upload':
291 # If uploading blobs, read data from local blobstore.
291 # If uploading blobs, read data from local blobstore.
292 with localstore.open(oid) as fp:
292 with localstore.open(oid) as fp:
293 _verifyfile(oid, fp)
293 _verifyfile(oid, fp)
294 request.data = filewithprogress(localstore.open(oid), None)
294 request.data = filewithprogress(localstore.open(oid), None)
295 request.get_method = lambda: 'PUT'
295 request.get_method = lambda: 'PUT'
296
296
297 for k, v in headers:
297 for k, v in headers:
298 request.add_header(k, v)
298 request.add_header(k, v)
299
299
300 response = b''
300 response = b''
301 try:
301 try:
302 req = self.urlopener.open(request)
302 req = self.urlopener.open(request)
303 if action == 'download':
303 if action == 'download':
304 # If downloading blobs, store downloaded data to local blobstore
304 # If downloading blobs, store downloaded data to local blobstore
305 localstore.download(oid, req)
305 localstore.download(oid, req)
306 else:
306 else:
307 while True:
307 while True:
308 data = req.read(1048576)
308 data = req.read(1048576)
309 if not data:
309 if not data:
310 break
310 break
311 response += data
311 response += data
312 if response:
312 if response:
313 self.ui.debug('lfs %s response: %s' % (action, response))
313 self.ui.debug('lfs %s response: %s' % (action, response))
314 except util.urlerr.httperror as ex:
314 except util.urlerr.httperror as ex:
315 if self.ui.debugflag:
315 if self.ui.debugflag:
316 self.ui.debug('%s: %s\n' % (oid, ex.read()))
316 self.ui.debug('%s: %s\n' % (oid, ex.read()))
317 raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
317 raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
318 % (ex, oid, action))
318 % (ex, oid, action))
319
319
320 def _batch(self, pointers, localstore, action):
320 def _batch(self, pointers, localstore, action):
321 if action not in ['upload', 'download']:
321 if action not in ['upload', 'download']:
322 raise error.ProgrammingError('invalid Git-LFS action: %s' % action)
322 raise error.ProgrammingError('invalid Git-LFS action: %s' % action)
323
323
324 response = self._batchrequest(pointers, action)
324 response = self._batchrequest(pointers, action)
325 objects = self._extractobjects(response, pointers, action)
325 objects = self._extractobjects(response, pointers, action)
326 total = sum(x.get('size', 0) for x in objects)
326 total = sum(x.get('size', 0) for x in objects)
327 sizes = {}
327 sizes = {}
328 for obj in objects:
328 for obj in objects:
329 sizes[obj.get('oid')] = obj.get('size', 0)
329 sizes[obj.get('oid')] = obj.get('size', 0)
330 topic = {'upload': _('lfs uploading'),
330 topic = {'upload': _('lfs uploading'),
331 'download': _('lfs downloading')}[action]
331 'download': _('lfs downloading')}[action]
332 if len(objects) > 1:
332 if len(objects) > 1:
333 self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
333 self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
334 % (len(objects), util.bytecount(total)))
334 % (len(objects), util.bytecount(total)))
335 self.ui.progress(topic, 0, total=total)
335 self.ui.progress(topic, 0, total=total)
336 def transfer(chunk):
336 def transfer(chunk):
337 for obj in chunk:
337 for obj in chunk:
338 objsize = obj.get('size', 0)
338 objsize = obj.get('size', 0)
339 if self.ui.verbose:
339 if self.ui.verbose:
340 if action == 'download':
340 if action == 'download':
341 msg = _('lfs: downloading %s (%s)\n')
341 msg = _('lfs: downloading %s (%s)\n')
342 elif action == 'upload':
342 elif action == 'upload':
343 msg = _('lfs: uploading %s (%s)\n')
343 msg = _('lfs: uploading %s (%s)\n')
344 self.ui.note(msg % (obj.get('oid'),
344 self.ui.note(msg % (obj.get('oid'),
345 util.bytecount(objsize)))
345 util.bytecount(objsize)))
346 retry = self.retry
346 retry = self.retry
347 while True:
347 while True:
348 try:
348 try:
349 self._basictransfer(obj, action, localstore)
349 self._basictransfer(obj, action, localstore)
350 yield 1, obj.get('oid')
350 yield 1, obj.get('oid')
351 break
351 break
352 except socket.error as ex:
352 except socket.error as ex:
353 if retry > 0:
353 if retry > 0:
354 self.ui.note(
354 self.ui.note(
355 _('lfs: failed: %r (remaining retry %d)\n')
355 _('lfs: failed: %r (remaining retry %d)\n')
356 % (ex, retry))
356 % (ex, retry))
357 retry -= 1
357 retry -= 1
358 continue
358 continue
359 raise
359 raise
360
360
361 # Until https multiplexing gets sorted out
361 # Until https multiplexing gets sorted out
362 if self.ui.configbool('experimental', 'lfs.worker-enable'):
362 if self.ui.configbool('experimental', 'lfs.worker-enable'):
363 oids = worker.worker(self.ui, 0.1, transfer, (),
363 oids = worker.worker(self.ui, 0.1, transfer, (),
364 sorted(objects, key=lambda o: o.get('oid')))
364 sorted(objects, key=lambda o: o.get('oid')))
365 else:
365 else:
366 oids = transfer(sorted(objects, key=lambda o: o.get('oid')))
366 oids = transfer(sorted(objects, key=lambda o: o.get('oid')))
367
367
368 processed = 0
368 processed = 0
369 blobs = 0
369 blobs = 0
370 for _one, oid in oids:
370 for _one, oid in oids:
371 processed += sizes[oid]
371 processed += sizes[oid]
372 blobs += 1
372 blobs += 1
373 self.ui.progress(topic, processed, total=total)
373 self.ui.progress(topic, processed, total=total)
374 self.ui.note(_('lfs: processed: %s\n') % oid)
374 self.ui.note(_('lfs: processed: %s\n') % oid)
375 self.ui.progress(topic, pos=None, total=total)
375 self.ui.progress(topic, pos=None, total=total)
376
376
377 if blobs > 0:
377 if blobs > 0:
378 if action == 'upload':
378 if action == 'upload':
379 self.ui.status(_('lfs: uploaded %d files (%s)\n')
379 self.ui.status(_('lfs: uploaded %d files (%s)\n')
380 % (blobs, util.bytecount(processed)))
380 % (blobs, util.bytecount(processed)))
381 # TODO: coalesce the download requests, and comment this in
381 # TODO: coalesce the download requests, and comment this in
382 #elif action == 'download':
382 #elif action == 'download':
383 # self.ui.status(_('lfs: downloaded %d files (%s)\n')
383 # self.ui.status(_('lfs: downloaded %d files (%s)\n')
384 # % (blobs, util.bytecount(processed)))
384 # % (blobs, util.bytecount(processed)))
385
385
386 def __del__(self):
386 def __del__(self):
387 # copied from mercurial/httppeer.py
387 # copied from mercurial/httppeer.py
388 urlopener = getattr(self, 'urlopener', None)
388 urlopener = getattr(self, 'urlopener', None)
389 if urlopener:
389 if urlopener:
390 for h in urlopener.handlers:
390 for h in urlopener.handlers:
391 h.close()
391 h.close()
392 getattr(h, "close_all", lambda : None)()
392 getattr(h, "close_all", lambda : None)()
393
393
394 class _dummyremote(object):
394 class _dummyremote(object):
395 """Dummy store storing blobs to temp directory."""
395 """Dummy store storing blobs to temp directory."""
396
396
397 def __init__(self, repo, url):
397 def __init__(self, repo, url):
398 fullpath = repo.vfs.join('lfs', url.path)
398 fullpath = repo.vfs.join('lfs', url.path)
399 self.vfs = lfsvfs(fullpath)
399 self.vfs = lfsvfs(fullpath)
400
400
401 def writebatch(self, pointers, fromstore):
401 def writebatch(self, pointers, fromstore):
402 for p in pointers:
402 for p in _deduplicate(pointers):
403 content = fromstore.read(p.oid(), verify=True)
403 content = fromstore.read(p.oid(), verify=True)
404 with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
404 with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
405 fp.write(content)
405 fp.write(content)
406
406
407 def readbatch(self, pointers, tostore):
407 def readbatch(self, pointers, tostore):
408 for p in pointers:
408 for p in _deduplicate(pointers):
409 with self.vfs(p.oid(), 'rb') as fp:
409 with self.vfs(p.oid(), 'rb') as fp:
410 tostore.download(p.oid(), fp)
410 tostore.download(p.oid(), fp)
411
411
412 class _nullremote(object):
412 class _nullremote(object):
413 """Null store storing blobs to /dev/null."""
413 """Null store storing blobs to /dev/null."""
414
414
415 def __init__(self, repo, url):
415 def __init__(self, repo, url):
416 pass
416 pass
417
417
418 def writebatch(self, pointers, fromstore):
418 def writebatch(self, pointers, fromstore):
419 pass
419 pass
420
420
421 def readbatch(self, pointers, tostore):
421 def readbatch(self, pointers, tostore):
422 pass
422 pass
423
423
424 class _promptremote(object):
424 class _promptremote(object):
425 """Prompt user to set lfs.url when accessed."""
425 """Prompt user to set lfs.url when accessed."""
426
426
427 def __init__(self, repo, url):
427 def __init__(self, repo, url):
428 pass
428 pass
429
429
430 def writebatch(self, pointers, fromstore, ui=None):
430 def writebatch(self, pointers, fromstore, ui=None):
431 self._prompt()
431 self._prompt()
432
432
433 def readbatch(self, pointers, tostore, ui=None):
433 def readbatch(self, pointers, tostore, ui=None):
434 self._prompt()
434 self._prompt()
435
435
436 def _prompt(self):
436 def _prompt(self):
437 raise error.Abort(_('lfs.url needs to be configured'))
437 raise error.Abort(_('lfs.url needs to be configured'))
438
438
439 _storemap = {
439 _storemap = {
440 'https': _gitlfsremote,
440 'https': _gitlfsremote,
441 'http': _gitlfsremote,
441 'http': _gitlfsremote,
442 'file': _dummyremote,
442 'file': _dummyremote,
443 'null': _nullremote,
443 'null': _nullremote,
444 None: _promptremote,
444 None: _promptremote,
445 }
445 }
446
446
447 def _deduplicate(pointers):
448 """Remove any duplicate oids that exist in the list"""
449 reduced = util.sortdict()
450 for p in pointers:
451 reduced[p.oid()] = p
452 return reduced.values()
453
447 def _verify(oid, content):
454 def _verify(oid, content):
448 realoid = hashlib.sha256(content).hexdigest()
455 realoid = hashlib.sha256(content).hexdigest()
449 if realoid != oid:
456 if realoid != oid:
450 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
457 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
451 hint=_('run hg verify'))
458 hint=_('run hg verify'))
452
459
453 def _verifyfile(oid, fp):
460 def _verifyfile(oid, fp):
454 sha256 = hashlib.sha256()
461 sha256 = hashlib.sha256()
455 while True:
462 while True:
456 data = fp.read(1024 * 1024)
463 data = fp.read(1024 * 1024)
457 if not data:
464 if not data:
458 break
465 break
459 sha256.update(data)
466 sha256.update(data)
460 realoid = sha256.hexdigest()
467 realoid = sha256.hexdigest()
461 if realoid != oid:
468 if realoid != oid:
462 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
469 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
463 hint=_('run hg verify'))
470 hint=_('run hg verify'))
464
471
465 def remote(repo):
472 def remote(repo):
466 """remotestore factory. return a store in _storemap depending on config"""
473 """remotestore factory. return a store in _storemap depending on config"""
467 url = util.url(repo.ui.config('lfs', 'url') or '')
474 url = util.url(repo.ui.config('lfs', 'url') or '')
468 scheme = url.scheme
475 scheme = url.scheme
469 if scheme not in _storemap:
476 if scheme not in _storemap:
470 raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
477 raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
471 return _storemap[scheme](repo, url)
478 return _storemap[scheme](repo, url)
472
479
473 class LfsRemoteError(error.RevlogError):
480 class LfsRemoteError(error.RevlogError):
474 pass
481 pass
@@ -1,279 +1,273 b''
1 #require lfs-test-server
1 #require lfs-test-server
2
2
3 $ LFS_LISTEN="tcp://:$HGPORT"
3 $ LFS_LISTEN="tcp://:$HGPORT"
4 $ LFS_HOST="localhost:$HGPORT"
4 $ LFS_HOST="localhost:$HGPORT"
5 $ LFS_PUBLIC=1
5 $ LFS_PUBLIC=1
6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
7 #if no-windows
7 #if no-windows
8 $ lfs-test-server &> lfs-server.log &
8 $ lfs-test-server &> lfs-server.log &
9 $ echo $! >> $DAEMON_PIDS
9 $ echo $! >> $DAEMON_PIDS
10 #else
10 #else
11 $ cat >> $TESTTMP/spawn.py <<EOF
11 $ cat >> $TESTTMP/spawn.py <<EOF
12 > import os
12 > import os
13 > import subprocess
13 > import subprocess
14 > import sys
14 > import sys
15 >
15 >
16 > for path in os.environ["PATH"].split(os.pathsep):
16 > for path in os.environ["PATH"].split(os.pathsep):
17 > exe = os.path.join(path, 'lfs-test-server.exe')
17 > exe = os.path.join(path, 'lfs-test-server.exe')
18 > if os.path.exists(exe):
18 > if os.path.exists(exe):
19 > with open('lfs-server.log', 'wb') as out:
19 > with open('lfs-server.log', 'wb') as out:
20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
21 > sys.stdout.write('%s\n' % p.pid)
21 > sys.stdout.write('%s\n' % p.pid)
22 > sys.exit(0)
22 > sys.exit(0)
23 > sys.exit(1)
23 > sys.exit(1)
24 > EOF
24 > EOF
25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
26 #endif
26 #endif
27
27
28 $ cat >> $HGRCPATH <<EOF
28 $ cat >> $HGRCPATH <<EOF
29 > [extensions]
29 > [extensions]
30 > lfs=
30 > lfs=
31 > [lfs]
31 > [lfs]
32 > url=http://foo:bar@$LFS_HOST/
32 > url=http://foo:bar@$LFS_HOST/
33 > track=all()
33 > track=all()
34 > EOF
34 > EOF
35
35
36 $ hg init repo1
36 $ hg init repo1
37 $ cd repo1
37 $ cd repo1
38 $ echo THIS-IS-LFS > a
38 $ echo THIS-IS-LFS > a
39 $ hg commit -m a -A a
39 $ hg commit -m a -A a
40
40
41 A push can be serviced directly from the usercache if it isn't in the local
41 A push can be serviced directly from the usercache if it isn't in the local
42 store.
42 store.
43
43
44 $ hg init ../repo2
44 $ hg init ../repo2
45 $ mv .hg/store/lfs .hg/store/lfs_
45 $ mv .hg/store/lfs .hg/store/lfs_
46 $ hg push ../repo2 -v
46 $ hg push ../repo2 -v
47 pushing to ../repo2
47 pushing to ../repo2
48 searching for changes
48 searching for changes
49 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
49 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
50 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
50 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
51 lfs: uploaded 1 files (12 bytes)
51 lfs: uploaded 1 files (12 bytes)
52 1 changesets found
52 1 changesets found
53 uncompressed size of bundle content:
53 uncompressed size of bundle content:
54 * (changelog) (glob)
54 * (changelog) (glob)
55 * (manifests) (glob)
55 * (manifests) (glob)
56 * a (glob)
56 * a (glob)
57 adding changesets
57 adding changesets
58 adding manifests
58 adding manifests
59 adding file changes
59 adding file changes
60 added 1 changesets with 1 changes to 1 files
60 added 1 changesets with 1 changes to 1 files
61 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
61 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
62 $ mv .hg/store/lfs_ .hg/store/lfs
62 $ mv .hg/store/lfs_ .hg/store/lfs
63
63
64 Clear the cache to force a download
64 Clear the cache to force a download
65 $ rm -rf `hg config lfs.usercache`
65 $ rm -rf `hg config lfs.usercache`
66 $ cd ../repo2
66 $ cd ../repo2
67 $ hg update tip -v
67 $ hg update tip -v
68 resolving manifests
68 resolving manifests
69 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
69 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
70 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
70 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
71 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
71 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
72 getting a
72 getting a
73 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
73 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
74 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
74 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
75
75
76 When the server has some blobs already
76 When the server has some blobs already
77
77
78 $ hg mv a b
78 $ hg mv a b
79 $ echo ANOTHER-LARGE-FILE > c
79 $ echo ANOTHER-LARGE-FILE > c
80 $ echo ANOTHER-LARGE-FILE2 > d
80 $ echo ANOTHER-LARGE-FILE2 > d
81 $ hg commit -m b-and-c -A b c d
81 $ hg commit -m b-and-c -A b c d
82 $ hg push ../repo1 -v | grep -v '^ '
82 $ hg push ../repo1 -v | grep -v '^ '
83 pushing to ../repo1
83 pushing to ../repo1
84 searching for changes
84 searching for changes
85 lfs: need to transfer 2 objects (39 bytes)
85 lfs: need to transfer 2 objects (39 bytes)
86 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
86 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
87 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
87 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
88 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
88 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
89 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
89 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
90 lfs: uploaded 2 files (39 bytes)
90 lfs: uploaded 2 files (39 bytes)
91 1 changesets found
91 1 changesets found
92 uncompressed size of bundle content:
92 uncompressed size of bundle content:
93 adding changesets
93 adding changesets
94 adding manifests
94 adding manifests
95 adding file changes
95 adding file changes
96 added 1 changesets with 3 changes to 3 files
96 added 1 changesets with 3 changes to 3 files
97
97
98 Clear the cache to force a download
98 Clear the cache to force a download
99 $ rm -rf `hg config lfs.usercache`
99 $ rm -rf `hg config lfs.usercache`
100 $ hg --repo ../repo1 update tip -v
100 $ hg --repo ../repo1 update tip -v
101 resolving manifests
101 resolving manifests
102 lfs: need to transfer 2 objects (39 bytes)
102 lfs: need to transfer 2 objects (39 bytes)
103 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
103 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
104 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
104 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
105 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
105 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
106 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
106 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
107 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
107 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
108 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
108 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
109 getting b
109 getting b
110 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
110 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
111 getting c
111 getting c
112 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
112 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
113 getting d
113 getting d
114 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
114 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
115 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
115 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
116
116
117 Test a corrupt file download, but clear the cache first to force a download.
117 Test a corrupt file download, but clear the cache first to force a download.
118
118
119 $ rm -rf `hg config lfs.usercache`
119 $ rm -rf `hg config lfs.usercache`
120 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
120 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
121 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
121 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
122 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
122 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
123 $ rm ../repo1/*
123 $ rm ../repo1/*
124
124
125 $ hg --repo ../repo1 update -C tip -v
125 $ hg --repo ../repo1 update -C tip -v
126 resolving manifests
126 resolving manifests
127 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
127 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
128 abort: corrupt remote lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
128 abort: corrupt remote lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
129 [255]
129 [255]
130
130
131 The corrupted blob is not added to the usercache or local store
131 The corrupted blob is not added to the usercache or local store
132
132
133 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
133 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
134 [1]
134 [1]
135 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
135 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
136 [1]
136 [1]
137 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
137 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
138
138
139 Test a corrupted file upload
139 Test a corrupted file upload
140
140
141 $ echo 'another lfs blob' > b
141 $ echo 'another lfs blob' > b
142 $ hg ci -m 'another blob'
142 $ hg ci -m 'another blob'
143 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
143 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
144 $ hg push -v ../repo1
144 $ hg push -v ../repo1
145 pushing to ../repo1
145 pushing to ../repo1
146 searching for changes
146 searching for changes
147 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
147 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
148 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
148 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
149 (run hg verify)
149 (run hg verify)
150 [255]
150 [255]
151
151
152 Archive will prefetch blobs in a group
152 Archive will prefetch blobs in a group
153
153
154 $ rm -rf .hg/store/lfs `hg config lfs.usercache`
154 $ rm -rf .hg/store/lfs `hg config lfs.usercache`
155 $ hg archive -vr 1 ../archive
155 $ hg archive -vr 1 ../archive
156 lfs: need to transfer 4 objects (63 bytes)
156 lfs: need to transfer 3 objects (51 bytes)
157 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
157 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
158 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
158 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
159 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
159 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
160 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
161 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
162 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
160 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
163 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
161 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
164 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
162 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
165 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
163 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
166 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
164 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
167 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
165 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
168 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
166 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
169 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
167 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
170 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
168 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
171 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
169 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
172 $ find ../archive | sort
170 $ find ../archive | sort
173 ../archive
171 ../archive
174 ../archive/.hg_archival.txt
172 ../archive/.hg_archival.txt
175 ../archive/a
173 ../archive/a
176 ../archive/b
174 ../archive/b
177 ../archive/c
175 ../archive/c
178 ../archive/d
176 ../archive/d
179
177
180 Cat will prefetch blobs in a group
178 Cat will prefetch blobs in a group
181
179
182 $ rm -rf .hg/store/lfs `hg config lfs.usercache`
180 $ rm -rf .hg/store/lfs `hg config lfs.usercache`
183 $ hg cat -vr 1 a b c
181 $ hg cat -vr 1 a b c
184 lfs: need to transfer 3 objects (43 bytes)
182 lfs: need to transfer 2 objects (31 bytes)
185 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
183 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
186 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
184 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
187 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
185 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
188 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
189 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
190 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
186 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
191 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
187 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
192 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
188 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
193 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
189 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
194 THIS-IS-LFS
190 THIS-IS-LFS
195 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
191 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
196 THIS-IS-LFS
192 THIS-IS-LFS
197 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
193 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
198 ANOTHER-LARGE-FILE
194 ANOTHER-LARGE-FILE
199
195
200 Revert will prefetch blobs in a group
196 Revert will prefetch blobs in a group
201
197
202 $ rm -rf .hg/store/lfs
198 $ rm -rf .hg/store/lfs
203 $ rm -rf `hg config lfs.usercache`
199 $ rm -rf `hg config lfs.usercache`
204 $ rm *
200 $ rm *
205 $ hg revert --all -r 1 -v
201 $ hg revert --all -r 1 -v
206 adding a
202 adding a
207 reverting b
203 reverting b
208 reverting c
204 reverting c
209 reverting d
205 reverting d
210 lfs: need to transfer 4 objects (63 bytes)
206 lfs: need to transfer 3 objects (51 bytes)
211 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
207 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
212 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
208 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
213 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
209 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
214 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
215 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
216 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
210 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
217 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
211 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
218 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
212 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
219 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
213 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
220 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
214 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
221 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
215 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
222 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
216 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
223 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
217 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
224 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
218 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
225 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
219 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
226
220
227 Check error message when the remote missed a blob:
221 Check error message when the remote missed a blob:
228
222
229 $ echo FFFFF > b
223 $ echo FFFFF > b
230 $ hg commit -m b -A b
224 $ hg commit -m b -A b
231 $ echo FFFFF >> b
225 $ echo FFFFF >> b
232 $ hg commit -m b b
226 $ hg commit -m b b
233 $ rm -rf .hg/store/lfs
227 $ rm -rf .hg/store/lfs
234 $ rm -rf `hg config lfs.usercache`
228 $ rm -rf `hg config lfs.usercache`
235 $ hg update -C '.^'
229 $ hg update -C '.^'
236 abort: LFS server error. Remote object for "b" not found:(.*)! (re)
230 abort: LFS server error. Remote object for "b" not found:(.*)! (re)
237 [255]
231 [255]
238
232
239 Check error message when object does not exist:
233 Check error message when object does not exist:
240
234
241 $ cd $TESTTMP
235 $ cd $TESTTMP
242 $ hg init test && cd test
236 $ hg init test && cd test
243 $ echo "[extensions]" >> .hg/hgrc
237 $ echo "[extensions]" >> .hg/hgrc
244 $ echo "lfs=" >> .hg/hgrc
238 $ echo "lfs=" >> .hg/hgrc
245 $ echo "[lfs]" >> .hg/hgrc
239 $ echo "[lfs]" >> .hg/hgrc
246 $ echo "threshold=1" >> .hg/hgrc
240 $ echo "threshold=1" >> .hg/hgrc
247 $ echo a > a
241 $ echo a > a
248 $ hg add a
242 $ hg add a
249 $ hg commit -m 'test'
243 $ hg commit -m 'test'
250 $ echo aaaaa > a
244 $ echo aaaaa > a
251 $ hg commit -m 'largefile'
245 $ hg commit -m 'largefile'
252 $ hg debugdata .hg/store/data/a.i 1 # verify this is no the file content but includes "oid", the LFS "pointer".
246 $ hg debugdata .hg/store/data/a.i 1 # verify this is no the file content but includes "oid", the LFS "pointer".
253 version https://git-lfs.github.com/spec/v1
247 version https://git-lfs.github.com/spec/v1
254 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
248 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
255 size 6
249 size 6
256 x-is-binary 0
250 x-is-binary 0
257 $ cd ..
251 $ cd ..
258 $ rm -rf `hg config lfs.usercache`
252 $ rm -rf `hg config lfs.usercache`
259
253
260 (Restart the server in a different location so it no longer has the content)
254 (Restart the server in a different location so it no longer has the content)
261
255
262 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
256 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
263 $ rm $DAEMON_PIDS
257 $ rm $DAEMON_PIDS
264 $ mkdir $TESTTMP/lfs-server2
258 $ mkdir $TESTTMP/lfs-server2
265 $ cd $TESTTMP/lfs-server2
259 $ cd $TESTTMP/lfs-server2
266 #if no-windows
260 #if no-windows
267 $ lfs-test-server &> lfs-server.log &
261 $ lfs-test-server &> lfs-server.log &
268 $ echo $! >> $DAEMON_PIDS
262 $ echo $! >> $DAEMON_PIDS
269 #else
263 #else
270 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
264 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
271 #endif
265 #endif
272
266
273 $ cd $TESTTMP
267 $ cd $TESTTMP
274 $ hg clone test test2
268 $ hg clone test test2
275 updating to branch default
269 updating to branch default
276 abort: LFS server error. Remote object for "a" not found:(.*)! (re)
270 abort: LFS server error. Remote object for "a" not found:(.*)! (re)
277 [255]
271 [255]
278
272
279 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
273 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
General Comments 0
You need to be logged in to leave comments. Login now