lfs: improve the error message for a missing remote blob...
Matt Harbison
r35584:ebf14075 default
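This commit changes two things (see the hunks below): the 404 handler in
blobstore.py stops hardcoding the word "file" and instead quotes whatever
name it was given, and readfromstore() in wrapper.py attaches the filelog's
filename to the pointer rather than the revlog's indexfile path. A minimal
sketch of how the reworded message renders, assuming a hypothetical 404
entry from the batch API (the oid is truncated here for brevity):

    # Illustration only - 'filename' now comes from self.filename via the
    # pointer; 'response' mimics one object entry carrying an error code.
    filename = 'a'
    response = {'oid': '31cf46...', 'error': {'code': 404}}
    print(('LFS server error. Remote object '
           'for "%s" not found: %r') % (filename, response))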
@@ -1,470 +1,470 @@
# blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib
import json
import os
import re
import socket

from mercurial.i18n import _

from mercurial import (
    error,
    pathutil,
    url as urlmod,
    util,
    vfs as vfsmod,
    worker,
)

from ..largefiles import lfutil

# 64 bytes for SHA256
_lfsre = re.compile(r'\A[a-f0-9]{64}\Z')

class lfsvfs(vfsmod.vfs):
    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError('unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[0:2], path[2:])

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        root = os.path.normpath(self.base)
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        oids = []

        for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''),
                                            onerror=onerror):
            dirpath = dirpath[prefixlen:]

            # Silently skip unexpected files and directories
            if len(dirpath) == 2:
                oids.extend([dirpath + f for f in files
                             if _lfsre.match(dirpath + f)])

        yield ('', [], oids)

class filewithprogress(object):
    """a file-like object that supports __len__ and read.

    Useful to provide progress information for how many bytes are read.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback # func(readsize)
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        if self._fp is None:
            return b''
        data = self._fp.read(size)
        if data:
            if self._callback:
                self._callback(len(data))
        else:
            self._fp.close()
            self._fp = None
        return data

class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        fullpath = repo.svfs.join('lfs/objects')
        self.vfs = lfsvfs(fullpath)
        usercache = lfutil._usercachedir(repo.ui, 'lfs')
        self.cachevfs = lfsvfs(usercache)
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file. Commit will
        # write to both it and the local store, as will anything that downloads
        # the blobs. However, things like clone without an update won't
        # populate the local store. For an init + push of a local clone,
        # the usercache is the only place it _could_ be. If not present, the
        # missing file msg here will indicate the local repo, not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, 'rb')

        return self.vfs(oid, 'rb')

    def download(self, oid, src):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore."""
        sha256 = hashlib.sha256()

        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                sha256.update(chunk)

            realoid = sha256.hexdigest()
            if realoid != oid:
                raise error.Abort(_('corrupt remote lfs object: %s') % oid)

        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid):
            self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or similar.
        As such, there is no need to verify the data. Imports from a remote
        store must use ``download()`` instead."""
        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            fp.write(data)

        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid):
            self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if not self.vfs.exists(oid):
            blob = self._read(self.cachevfs, oid, verify)

            # Even if revlog will verify the content, it needs to be verified
            # now before making the hardlink to avoid propagating corrupt blobs.
            # Don't abort if corruption is detected, because `hg verify` will
            # give more useful info about the corruption - simply don't add the
            # hardlink.
            if verify or hashlib.sha256(blob).hexdigest() == oid:
                self.ui.note(_('lfs: found %s in the usercache\n') % oid)
                lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        else:
            self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
            blob = self._read(self.vfs, oid, verify)
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)

class _gitlfsremote(object):

    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip('/')
        useragent = repo.ui.config('experimental', 'lfs.user-agent')
        if not useragent:
            useragent = 'mercurial/%s git/2.15.1' % util.version()
        self.urlopener = urlmod.opener(ui, authinfo, useragent)
        self.retry = ui.configint('lfs', 'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(pointers, fromstore, 'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blobstore."""
        self._batch(pointers, tostore, 'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
        requestdata = json.dumps({
            'objects': objects,
            'operation': action,
        })
        batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
                                       data=requestdata)
        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
        try:
            rawjson = self.urlopener.open(batchreq).read()
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
                                 % (ex, action))
        try:
            response = json.loads(rawjson)
        except ValueError:
            raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
                                 % rawjson)
        return response

    def _checkforservererror(self, pointers, responses):
        """Scans errors from objects

        Raises LfsRemoteError if any object has an error"""
        for response in responses:
            error = response.get('error')
            if error:
                ptrmap = {p.oid(): p for p in pointers}
                p = ptrmap.get(response['oid'], None)
                if error['code'] == 404 and p:
                    filename = getattr(p, 'filename', 'unknown')
                    raise LfsRemoteError(
                        _(('LFS server error. Remote object '
-                          'for file %s not found: %r')) % (filename, response))
+                          'for "%s" not found: %r')) % (filename, response))
                raise LfsRemoteError(_('LFS server error: %r') % response)

    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get('objects', [])
        self._checkforservererror(pointers, objects)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist on the server.
        filteredobjects = [o for o in objects if action in o.get('actions', [])]
        # But for downloading, we want all objects. Therefore missing objects
        # should be considered an error.
        if action == 'download':
            if len(filteredobjects) < len(objects):
                missing = [o.get('oid', '?')
                           for o in objects
                           if action not in o.get('actions', [])]
                raise LfsRemoteError(
                    _('LFS server claims required objects do not exist:\n%s')
                    % '\n'.join(missing))

        return filteredobjects

    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
        basic-transfers.md
        """
        oid = str(obj['oid'])

        href = str(obj['actions'][action].get('href'))
        headers = obj['actions'][action].get('header', {}).items()

        request = util.urlreq.request(href)
        if action == 'upload':
            # If uploading blobs, read data from local blobstore.
            with localstore.open(oid) as fp:
                _verifyfile(oid, fp)
            request.data = filewithprogress(localstore.open(oid), None)
            request.get_method = lambda: 'PUT'

        for k, v in headers:
            request.add_header(k, v)

        response = b''
        try:
            req = self.urlopener.open(request)
            if action == 'download':
                # If downloading blobs, store downloaded data to local blobstore
                localstore.download(oid, req)
            else:
                while True:
                    data = req.read(1048576)
                    if not data:
                        break
                    response += data
                if response:
                    self.ui.debug('lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
                                 % (ex, oid, action))

    def _batch(self, pointers, localstore, action):
        if action not in ['upload', 'download']:
            raise error.ProgrammingError('invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get('size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get('oid')] = obj.get('size', 0)
        topic = {'upload': _('lfs uploading'),
                 'download': _('lfs downloading')}[action]
        if len(objects) > 1:
            self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
                         % (len(objects), util.bytecount(total)))
        self.ui.progress(topic, 0, total=total)
        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get('size', 0)
                if self.ui.verbose:
                    if action == 'download':
                        msg = _('lfs: downloading %s (%s)\n')
                    elif action == 'upload':
                        msg = _('lfs: uploading %s (%s)\n')
                    self.ui.note(msg % (obj.get('oid'),
                                        util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get('oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _('lfs: failed: %r (remaining retry %d)\n')
                                % (ex, retry))
                            retry -= 1
                            continue
                        raise

        oids = worker.worker(self.ui, 0.1, transfer, (),
                             sorted(objects, key=lambda o: o.get('oid')))
        processed = 0
        for _one, oid in oids:
            processed += sizes[oid]
            self.ui.progress(topic, processed, total=total)
            self.ui.note(_('lfs: processed: %s\n') % oid)
        self.ui.progress(topic, pos=None, total=total)

    def __del__(self):
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda : None)()

class _dummyremote(object):
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        fullpath = repo.vfs.join('lfs', url.path)
        self.vfs = lfsvfs(fullpath)

    def writebatch(self, pointers, fromstore):
        for p in pointers:
            content = fromstore.read(p.oid(), verify=True)
            with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
                fp.write(content)

    def readbatch(self, pointers, tostore):
        for p in pointers:
            with self.vfs(p.oid(), 'rb') as fp:
                tostore.download(p.oid(), fp)

class _nullremote(object):
    """Null store storing blobs to /dev/null."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore):
        pass

    def readbatch(self, pointers, tostore):
        pass

class _promptremote(object):
    """Prompt user to set lfs.url when accessed."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()

    def _prompt(self):
        raise error.Abort(_('lfs.url needs to be configured'))

_storemap = {
    'https': _gitlfsremote,
    'http': _gitlfsremote,
    'file': _dummyremote,
    'null': _nullremote,
    None: _promptremote,
}

def _verify(oid, content):
    realoid = hashlib.sha256(content).hexdigest()
    if realoid != oid:
        raise error.Abort(_('detected corrupt lfs object: %s') % oid,
                          hint=_('run hg verify'))

def _verifyfile(oid, fp):
    sha256 = hashlib.sha256()
    while True:
        data = fp.read(1024 * 1024)
        if not data:
            break
        sha256.update(data)
    realoid = sha256.hexdigest()
    if realoid != oid:
        raise error.Abort(_('detected corrupt lfs object: %s') % oid,
                          hint=_('run hg verify'))

def remote(repo):
    """remotestore factory. return a store in _storemap depending on config"""
    defaulturl = ''

    # convert deprecated configs to the new url. TODO: remove this if other
    # places are migrated to the new url config.
    # deprecated config: lfs.remotestore
    deprecatedstore = repo.ui.config('lfs', 'remotestore')
    if deprecatedstore == 'dummy':
        # deprecated config: lfs.remotepath
        defaulturl = 'file://' + repo.ui.config('lfs', 'remotepath')
    elif deprecatedstore == 'git-lfs':
        # deprecated config: lfs.remoteurl
        defaulturl = repo.ui.config('lfs', 'remoteurl')
    elif deprecatedstore == 'null':
        defaulturl = 'null://'

    url = util.url(repo.ui.config('lfs', 'url', defaulturl))
    scheme = url.scheme
    if scheme not in _storemap:
        raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
    return _storemap[scheme](repo, url)

class LfsRemoteError(error.RevlogError):
    pass
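As a reader's aid (not part of the change): _batchrequest() above implements
the client side of the Git-LFS batch API. A rough sketch of the exchange,
with a placeholder endpoint and oid:

    import json
    # Body POSTed to <lfs.url>/objects/batch with the
    # 'application/vnd.git-lfs+json' Accept/Content-Type headers:
    requestdata = json.dumps({
        'objects': [{'oid': '31cf46...', 'size': 12}],
        'operation': 'download',
    })
    # The decoded response echoes each object; entries carrying an 'actions'
    # dict (href + headers) are handed to _basictransfer(), while entries
    # carrying an 'error' dict (e.g. {'code': 404}) are reported by
    # _checkforservererror() with the message shown in the hunk above.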
@@ -1,345 +1,345 @@
# wrapper.py - methods wrapping core mercurial logic
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib

from mercurial.i18n import _
from mercurial.node import bin, nullid, short

from mercurial import (
    error,
    filelog,
    revlog,
    util,
)

from ..largefiles import lfutil

from . import (
    blobstore,
    pointer,
)

def supportedoutgoingversions(orig, repo):
    versions = orig(repo)
    if 'lfs' in repo.requirements:
        versions.discard('01')
        versions.discard('02')
        versions.add('03')
    return versions

def allsupportedversions(orig, ui):
    versions = orig(ui)
    versions.add('03')
    return versions

def _capabilities(orig, repo, proto):
    '''Wrap server command to announce lfs server capability'''
    caps = orig(repo, proto)
    # XXX: change to 'lfs=serve' when separate git server isn't required?
    caps.append('lfs')
    return caps

def bypasscheckhash(self, text):
    return False

def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 2-tuple (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
-        p.filename = getattr(self, 'indexfile', None)
+        p.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith('x-hg-'):
            name = k[len('x-hg-'):]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith('\1\n'):
        text = filelog.packmeta(hgmeta, text)

    return (text, True)

def writetostore(self, text):
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = filelog.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hashlib.sha256(text).hexdigest()
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = 'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not util.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)

def _islfs(rlog, node=None, rev=None):
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog.rev(node)
    else:
        node = rlog.node(rev)
    if node == nullid:
        return False
    flags = rlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)

def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
    threshold = self.opener.options['lfsthreshold']
    textlen = len(text)
    # exclude hg rename meta from file size
    meta, offset = filelog.parsemeta(text)
    if offset:
        textlen -= offset

    if threshold and textlen > threshold:
        flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)

def filelogrenamed(orig, self, node):
    if _islfs(self, node):
        rawtext = self.revision(node, raw=True)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
            return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
        else:
            return False
    return orig(self, node)

def filelogsize(orig, self, rev):
    if _islfs(self, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self.revision(rev, raw=True)
        metadata = pointer.deserialize(rawtext)
        return int(metadata['size'])
    return orig(self, rev)

def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (ex. hg-git) is not based on basefilectx and does not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: check LFS oid
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)

def filectxisbinary(orig, self):
    if self.islfs():
        # fast path: use lfs metadata to answer isbinary
        metadata = pointer.deserialize(self.rawdata())
        # if lfs metadata says nothing, assume it's binary by default
        return bool(int(metadata.get('x-is-binary', 1)))
    return orig(self)

def filectxislfs(self):
    return _islfs(self.filelog(), self.filenode())

def convertsink(orig, sink):
    sink = orig(sink)
    if sink.repotype == 'hg':
        class lfssink(sink.__class__):
            def putcommit(self, files, copies, parents, commit, source, revmap,
                          full, cleanp2):
                pc = super(lfssink, self).putcommit
                node = pc(files, copies, parents, commit, source, revmap, full,
                          cleanp2)

                if 'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add('lfs')
                        self.repo._writerequirements()

                        # Permanently enable lfs locally
                        with self.repo.vfs('hgrc', 'a', text=True) as fp:
                            fp.write('\n[extensions]\nlfs=\n')

                return node

        sink.__class__ = lfssink

    return sink

def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith('lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))

def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the hgrc can't be updated.
        if not repo:
            return result

        # If lfs is required for this repo, permanently enable it locally
        if 'lfs' in repo.requirements:
            with repo.vfs('hgrc', 'a', text=True) as fp:
                fp.write('\n[extensions]\nlfs=\n')

    return result

def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    orig(sourcerepo, destrepo, bookmarks, defaultpath)

    # If lfs is required for this repo, permanently enable it locally
    if 'lfs' in destrepo.requirements:
        with destrepo.vfs('hgrc', 'a', text=True) as fp:
            fp.write('\n[extensions]\nlfs=\n')

def _canskipupload(repo):
    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def candownload(repo):
    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions e. g. infinitepush. avoid renaming.
    '''
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)

def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)

def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed"""
    if 'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable('lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _("required features are not supported in the destination: %s")
            raise error.Abort(m % 'lfs',
                              hint=_('enable the lfs extension on the server'))
    return orig(repo, remote, *args, **kwargs)

def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(ui, repo, source, filename, bundletype, outgoing, *args,
                **kwargs)

def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug('lfs: computing set of blobs to upload\n')
    pointers = {}
    for r in revs:
        ctx = repo[r]
        for p in pointersfromctx(ctx).values():
            pointers[p.oid()] = p
    return sorted(pointers.values())

def pointersfromctx(ctx):
    """return a dict {path: pointer} for given single changectx"""
    result = {}
    for f in ctx.files():
        if f not in ctx:
            continue
        fctx = ctx[f]
        if not _islfs(fctx.filelog(), fctx.filenode()):
            continue
        try:
            result[f] = pointer.deserialize(fctx.rawdata())
        except pointer.InvalidPointer as ex:
            raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
                              % (f, short(ctx.node()), ex))
    return result

def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)

def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)

    srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
    dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

    for dirpath, dirs, files in srclfsvfs.walk():
        for oid in files:
            ui.write(_('copying lfs blob %s\n') % oid)
            lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))

def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if 'lfs' in repo.requirements:
        reqs.add('lfs')
    return reqs
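For context (not part of the change): writetostore() above replaces the file
content stored in the filelog with a serialized Git-LFS pointer, which
readfromstore() later deserializes to recover the oid. A sketch of such a
pointer as it would appear in the filelog, with illustrative oid/size values:

    version https://git-lfs.github.com/spec/v1
    oid sha256:31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
    size 12
    x-is-binary 0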
@@ -1,191 +1,191 @@
1 #require lfs-test-server
1 #require lfs-test-server
2
2
3 $ LFS_LISTEN="tcp://:$HGPORT"
3 $ LFS_LISTEN="tcp://:$HGPORT"
4 $ LFS_HOST="localhost:$HGPORT"
4 $ LFS_HOST="localhost:$HGPORT"
5 $ LFS_PUBLIC=1
5 $ LFS_PUBLIC=1
6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
7 #if no-windows
7 #if no-windows
8 $ lfs-test-server &> lfs-server.log &
8 $ lfs-test-server &> lfs-server.log &
9 $ echo $! >> $DAEMON_PIDS
9 $ echo $! >> $DAEMON_PIDS
10 #else
10 #else
11 $ cat >> $TESTTMP/spawn.py <<EOF
11 $ cat >> $TESTTMP/spawn.py <<EOF
12 > import os
12 > import os
13 > import subprocess
13 > import subprocess
14 > import sys
14 > import sys
15 >
15 >
16 > for path in os.environ["PATH"].split(os.pathsep):
16 > for path in os.environ["PATH"].split(os.pathsep):
17 > exe = os.path.join(path, 'lfs-test-server.exe')
17 > exe = os.path.join(path, 'lfs-test-server.exe')
18 > if os.path.exists(exe):
18 > if os.path.exists(exe):
19 > with open('lfs-server.log', 'wb') as out:
19 > with open('lfs-server.log', 'wb') as out:
20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
21 > sys.stdout.write('%s\n' % p.pid)
21 > sys.stdout.write('%s\n' % p.pid)
22 > sys.exit(0)
22 > sys.exit(0)
23 > sys.exit(1)
23 > sys.exit(1)
24 > EOF
24 > EOF
25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
26 #endif
26 #endif
27
27
28 $ cat >> $HGRCPATH <<EOF
28 $ cat >> $HGRCPATH <<EOF
29 > [extensions]
29 > [extensions]
30 > lfs=
30 > lfs=
31 > [lfs]
31 > [lfs]
32 > url=http://foo:bar@$LFS_HOST/
32 > url=http://foo:bar@$LFS_HOST/
33 > threshold=1
33 > threshold=1
34 > EOF
34 > EOF
35
35
36 $ hg init repo1
37 $ cd repo1
38 $ echo THIS-IS-LFS > a
39 $ hg commit -m a -A a
40
41 A push can be serviced directly from the usercache if it isn't in the local
42 store.
43
44 $ hg init ../repo2
45 $ mv .hg/store/lfs .hg/store/lfs_
46 $ hg push ../repo2 -v
47 pushing to ../repo2
48 searching for changes
49 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
50 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
51 1 changesets found
52 uncompressed size of bundle content:
53 * (changelog) (glob)
54 * (manifests) (glob)
55 * a (glob)
56 adding changesets
57 adding manifests
58 adding file changes
59 added 1 changesets with 1 changes to 1 files
60 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
61 $ mv .hg/store/lfs_ .hg/store/lfs
62
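The mv dance above proves the point: with .hg/store/lfs out of the way, the push still finds the blob. A rough sketch of the lookup order the test relies on (helper names are illustrative, not the extension's API):

    def openblob(localstore, usercache, oid):
        # serve from the repo-local store when present, otherwise fall back
        # to the shared usercache; the fallback is what this test exercises
        if localstore.exists(oid):
            return localstore.read(oid)
        return usercache.read(oid)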
63 Clear the cache to force a download
64 $ rm -rf `hg config lfs.usercache`
65 $ cd ../repo2
66 $ hg update tip -v
67 resolving manifests
68 getting a
69 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
70 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
71 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
72 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
73 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
74
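The log lines show the other half of the cache discipline: a downloaded blob is written to the local store and also added to the usercache, so the next repository on this machine can skip the network. Sketched with hypothetical helpers:

    import os

    def storeblob(localstore, usercache, oid, content):
        # write the repo-local object, then hardlink it into the per-user
        # cache (mirroring the "adding ... to the usercache" log line);
        # the store objects and their methods are illustrative only
        localstore.write(oid, content)
        os.link(localstore.join(oid), usercache.join(oid))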
75 When the server has some blobs already
76
77 $ hg mv a b
78 $ echo ANOTHER-LARGE-FILE > c
79 $ echo ANOTHER-LARGE-FILE2 > d
80 $ hg commit -m b-and-c -A b c d
81 $ hg push ../repo1 -v | grep -v '^ '
82 pushing to ../repo1
83 searching for changes
84 lfs: need to transfer 2 objects (39 bytes)
85 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
86 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
87 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
88 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
89 1 changesets found
90 uncompressed size of bundle content:
91 adding changesets
92 adding manifests
93 adding file changes
94 added 1 changesets with 3 changes to 3 files
95
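"need to transfer 2 objects" reflects the Git-LFS batch API: the client first posts all candidate oids and the server replies with per-object actions, so only blobs the server lacks are actually moved. A minimal sketch of such a request body, following the published batch spec rather than this extension's exact code:

    import json

    def batchpayload(operation, objects):
        # objects is a list of (oid, size) pairs; the server's reply tells
        # the client which of them actually need a transfer
        return json.dumps({
            'operation': operation,   # 'upload' or 'download'
            'objects': [{'oid': o, 'size': s} for o, s in objects],
        })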
96 Clear the cache to force a download
97 $ rm -rf `hg config lfs.usercache`
98 $ hg --repo ../repo1 update tip -v
99 resolving manifests
100 getting b
101 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
102 getting c
103 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
104 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
105 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
106 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
107 getting d
108 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
109 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
110 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
111 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
112 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
113
114 Test a corrupt file download, but clear the cache first to force a download.
115
116 $ rm -rf `hg config lfs.usercache`
117 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
118 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
119 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
120 $ rm ../repo1/*
121
122 $ hg --repo ../repo1 update -C tip -v
123 resolving manifests
124 getting a
125 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
126 getting b
127 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
128 getting c
129 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
130 abort: corrupt remote lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
131 [255]
132
133 The corrupted blob is not added to the usercache or local store
134
135 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
136 [1]
137 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
138 [1]
139 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
140
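Corruption detection on both sides is cheap because the oid is the SHA-256 of the content; re-hashing the payload is enough to reject a bad download here, or a bad local blob before upload in the next block. A sketch:

    import hashlib

    def checkblob(oid, content):
        # the LFS oid is sha256(content), so any mismatch means the bytes
        # were damaged in storage or in transit
        if hashlib.sha256(content).hexdigest() != oid:
            raise ValueError('corrupt lfs object: %s' % oid)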
141 Test a corrupted file upload
142
143 $ echo 'another lfs blob' > b
144 $ hg ci -m 'another blob'
145 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
146 $ hg push -v ../repo1
147 pushing to ../repo1
148 searching for changes
149 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
150 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
151 (run hg verify)
152 [255]
153
154 Check the error message when the remote is missing a blob:
155
156 $ echo FFFFF > b
157 $ hg commit -m b -A b
158 $ echo FFFFF >> b
159 $ hg commit -m b b
160 $ rm -rf .hg/store/lfs
161 $ rm -rf `hg config lfs.usercache`
162 $ hg update -C '.^'
163 abort: LFS server claims required objects do not exist:
164 8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13!
165 [255]
166
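The "claims required objects do not exist" abort corresponds to per-object errors in the batch response. Roughly how such a reply can be reduced to the list of missing oids (a sketch; field names follow the Git-LFS batch spec, not necessarily this extension's parser):

    def missingoids(batchresponse):
        # each object entry may carry an 'error' dict instead of 'actions';
        # code 404 means the server does not have that blob at all
        return [obj['oid'] for obj in batchresponse.get('objects', [])
                if obj.get('error', {}).get('code') == 404]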
167 Check the error message when the object does not exist:
168
169 $ hg init test && cd test
170 $ echo "[extensions]" >> .hg/hgrc
171 $ echo "lfs=" >> .hg/hgrc
172 $ echo "[lfs]" >> .hg/hgrc
173 $ echo "threshold=1" >> .hg/hgrc
174 $ echo a > a
175 $ hg add a
176 $ hg commit -m 'test'
177 $ echo aaaaa > a
178 $ hg commit -m 'largefile'
179 $ hg debugdata .hg/store/data/a.i 1 # verify this is not the file content, but the LFS "pointer" that carries the "oid"
180 version https://git-lfs.github.com/spec/v1
181 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
182 size 6
183 x-is-binary 0
184 $ cd ..
185 $ rm -rf `hg config lfs.usercache`
186 $ hg --config 'lfs.url=https://dewey-lfs.vip.facebook.com/lfs' clone test test2
187 updating to branch default
188 abort: LFS server error. Remote object for file data/a.i not found:(.*)! (re)
188 abort: LFS server error. Remote object for "a" not found:(.*)! (re)
189 [255]
190
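The debugdata output above is the on-disk pointer: flat "key value" lines. A minimal parser for that payload (illustrative only, not the extension's pointer class):

    def parsepointer(text):
        # the pointer is a series of 'key value' lines, e.g.
        # 'oid sha256:bdc2...' and 'size 6' as dumped by debugdata above
        fields = dict(line.split(' ', 1) for line in text.splitlines() if line)
        assert fields['version'] == 'https://git-lfs.github.com/spec/v1'
        oid = fields['oid'].split(':', 1)[1]   # drop the 'sha256:' prefix
        return oid, int(fields['size'])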
191 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS