##// END OF EJS Templates
lfs: use the local store method for opening a blob...
Matt Harbison -
r35544:e8f80529 default
parent child Browse files
Show More
@@ -1,442 +1,442 b''
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import json
11 import json
12 import os
12 import os
13 import re
13 import re
14 import socket
14 import socket
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 from mercurial import (
18 from mercurial import (
19 error,
19 error,
20 pathutil,
20 pathutil,
21 url as urlmod,
21 url as urlmod,
22 util,
22 util,
23 vfs as vfsmod,
23 vfs as vfsmod,
24 worker,
24 worker,
25 )
25 )
26
26
27 from ..largefiles import lfutil
27 from ..largefiles import lfutil
28
28
29 # 64 bytes for SHA256
29 # 64 bytes for SHA256
30 _lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
30 _lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
31
31
class lfsvfs(vfsmod.vfs):
    """vfs storing blobs in a two-level fanout keyed by sha256 hex oid."""

    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError('unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[0:2], path[2:])

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        top = os.path.normpath(self.base)
        # When the walked directory equals the root, stripping the prefix
        # leaves '' because the directory path is shorter than the prefix
        # (which carries a trailing separator).
        striplen = len(pathutil.normasprefix(top))
        found = []

        start = self.reljoin(self.base, path or '')
        for dirpath, subdirs, files in os.walk(start, onerror=onerror):
            fanout = dirpath[striplen:]

            # Silently skip unexpected files and directories: only the
            # two-character fanout directories can hold valid oids.
            if len(fanout) == 2:
                found.extend(fanout + f for f in files
                             if _lfsre.match(fanout + f))

        yield ('', [], found)
60
60
class filewithprogress(object):
    """A file-like object that supports __len__ and read.

    Useful to provide progress information for how many bytes are read.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback  # func(readsize)
        # Determine the total size once up front, then rewind.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        if self._fp is None:
            return b''
        data = self._fp.read(size)
        if not data:
            # Stream exhausted: close and drop the underlying file.
            self._fp.close()
            self._fp = None
        elif self._callback:
            self._callback(len(data))
        return data
88
88
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        storepath = repo.svfs.join('lfs/objects')
        self.vfs = lfsvfs(storepath)
        self.cachevfs = lfsvfs(lfutil._usercachedir(repo.ui, 'lfs'))
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, 'rb')
        return self.vfs(oid, 'rb')

    def write(self, oid, data, verify=True):
        """Write blob to local blobstore."""
        if verify:
            _verify(oid, data)

        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            fp.write(data)

        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid):
            # Only populate the usercache with data known to match its oid.
            if verify or hashlib.sha256(data).hexdigest() == oid:
                self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
                lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if self.vfs.exists(oid):
            self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
            return self._read(self.vfs, oid, verify)

        blob = self._read(self.cachevfs, oid, verify)

        # Even if revlog will verify the content, it needs to be verified
        # now before making the hardlink to avoid propagating corrupt blobs.
        # Don't abort if corruption is detected, because `hg verify` will
        # give more useful info about the corruption- simply don't add the
        # hardlink.
        if verify or hashlib.sha256(blob).hexdigest() == oid:
            self.ui.note(_('lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
155
155
class _gitlfsremote(object):
    """Remote blobstore speaking the Git-LFS batch and basic transfer APIs."""

    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip('/')
        useragent = repo.ui.config('experimental', 'lfs.user-agent')
        if not useragent:
            useragent = 'mercurial/%s git/2.15.1' % util.version()
        self.urlopener = urlmod.opener(ui, authinfo, useragent)
        # Number of times a transfer is retried after a socket error.
        self.retry = ui.configint('lfs', 'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(pointers, fromstore, 'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blobstore."""
        self._batch(pointers, tostore, 'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        requestdata = json.dumps({
            'objects': [{'oid': p.oid(), 'size': p.size()} for p in pointers],
            'operation': action,
        })
        batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
                                       data=requestdata)
        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
        try:
            rawjson = self.urlopener.open(batchreq).read()
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
                                 % (ex, action))
        try:
            return json.loads(rawjson)
        except ValueError:
            raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
                                 % rawjson)

    def _checkforservererror(self, pointers, responses):
        """Scans errors from objects

        Raises LfsRemoteError if any object has an error"""
        for response in responses:
            error = response.get('error')
            if not error:
                continue
            ptrmap = {p.oid(): p for p in pointers}
            p = ptrmap.get(response['oid'], None)
            if error['code'] == 404 and p:
                filename = getattr(p, 'filename', 'unknown')
                raise LfsRemoteError(
                    _(('LFS server error. Remote object '
                      'for file %s not found: %r')) % (filename, response))
            raise LfsRemoteError(_('LFS server error: %r') % response)

    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get('objects', [])
        self._checkforservererror(pointers, objects)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist in the server.
        filteredobjects = [o for o in objects if action in o.get('actions', [])]

        # But for downloading, we want all objects. Therefore missing objects
        # should be considered an error.
        if action == 'download' and len(filteredobjects) < len(objects):
            missing = [o.get('oid', '?') for o in objects
                       if action not in o.get('actions', [])]
            raise LfsRemoteError(
                _('LFS server claims required objects do not exist:\n%s')
                % '\n'.join(missing))

        return filteredobjects

    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
        basic-transfers.md
        """
        oid = str(obj['oid'])
        transferspec = obj['actions'][action]
        href = str(transferspec.get('href'))
        headers = transferspec.get('header', {}).items()

        request = util.urlreq.request(href)
        if action == 'upload':
            # If uploading blobs, read data from local blobstore.
            with localstore.open(oid) as fp:
                _verifyfile(oid, fp)
            request.data = filewithprogress(localstore.open(oid), None)
            request.get_method = lambda: 'PUT'

        for k, v in headers:
            request.add_header(k, v)

        response = b''
        try:
            req = self.urlopener.open(request)
            while True:
                data = req.read(1048576)
                if not data:
                    break
                response += data
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
                                 % (ex, oid, action))

        if action == 'download':
            # If downloading blobs, store downloaded data to local blobstore
            localstore.write(oid, response, verify=True)

    def _batch(self, pointers, localstore, action):
        if action not in ['upload', 'download']:
            raise error.ProgrammingError('invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get('size', 0) for x in objects)
        sizes = {o.get('oid'): o.get('size', 0) for o in objects}
        topic = {'upload': _('lfs uploading'),
                 'download': _('lfs downloading')}[action]
        if len(objects) > 1:
            self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
                         % (len(objects), util.bytecount(total)))
        self.ui.progress(topic, 0, total=total)

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get('size', 0)
                if self.ui.verbose:
                    msg = {'download': _('lfs: downloading %s (%s)\n'),
                           'upload': _('lfs: uploading %s (%s)\n')}[action]
                    self.ui.note(msg % (obj.get('oid'),
                                        util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get('oid')
                        break
                    except socket.error as ex:
                        if retry <= 0:
                            raise
                        self.ui.note(
                            _('lfs: failed: %r (remaining retry %d)\n')
                            % (ex, retry))
                        retry -= 1

        oids = worker.worker(self.ui, 0.1, transfer, (),
                             sorted(objects, key=lambda o: o.get('oid')))
        processed = 0
        for _one, oid in oids:
            processed += sizes[oid]
            self.ui.progress(topic, processed, total=total)
            self.ui.note(_('lfs: processed: %s\n') % oid)
        self.ui.progress(topic, pos=None, total=total)

    def __del__(self):
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda: None)()
346
346
class _dummyremote(object):
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        self.vfs = lfsvfs(repo.vfs.join('lfs', url.path))

    def writebatch(self, pointers, fromstore):
        # Copy each pointed-to blob from the local store into this one.
        for p in pointers:
            blob = fromstore.read(p.oid(), verify=True)
            with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
                fp.write(blob)

    def readbatch(self, pointers, tostore):
        # Copy each pointed-to blob from this store into the local one.
        for p in pointers:
            tostore.write(p.oid(), self.vfs.read(p.oid()), verify=True)
364
364
365 class _nullremote(object):
365 class _nullremote(object):
366 """Null store storing blobs to /dev/null."""
366 """Null store storing blobs to /dev/null."""
367
367
368 def __init__(self, repo, url):
368 def __init__(self, repo, url):
369 pass
369 pass
370
370
371 def writebatch(self, pointers, fromstore):
371 def writebatch(self, pointers, fromstore):
372 pass
372 pass
373
373
374 def readbatch(self, pointers, tostore):
374 def readbatch(self, pointers, tostore):
375 pass
375 pass
376
376
377 class _promptremote(object):
377 class _promptremote(object):
378 """Prompt user to set lfs.url when accessed."""
378 """Prompt user to set lfs.url when accessed."""
379
379
380 def __init__(self, repo, url):
380 def __init__(self, repo, url):
381 pass
381 pass
382
382
383 def writebatch(self, pointers, fromstore, ui=None):
383 def writebatch(self, pointers, fromstore, ui=None):
384 self._prompt()
384 self._prompt()
385
385
386 def readbatch(self, pointers, tostore, ui=None):
386 def readbatch(self, pointers, tostore, ui=None):
387 self._prompt()
387 self._prompt()
388
388
389 def _prompt(self):
389 def _prompt(self):
390 raise error.Abort(_('lfs.url needs to be configured'))
390 raise error.Abort(_('lfs.url needs to be configured'))
391
391
# Map an lfs.url scheme (None when no url is configured) to the store class
# handling that scheme.
_storemap = {
    'https': _gitlfsremote,
    'http': _gitlfsremote,
    'file': _dummyremote,
    'null': _nullremote,
    None: _promptremote,
}
399
399
400 def _verify(oid, content):
400 def _verify(oid, content):
401 realoid = hashlib.sha256(content).hexdigest()
401 realoid = hashlib.sha256(content).hexdigest()
402 if realoid != oid:
402 if realoid != oid:
403 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
403 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
404 hint=_('run hg verify'))
404 hint=_('run hg verify'))
405
405
406 def _verifyfile(oid, fp):
406 def _verifyfile(oid, fp):
407 sha256 = hashlib.sha256()
407 sha256 = hashlib.sha256()
408 while True:
408 while True:
409 data = fp.read(1024 * 1024)
409 data = fp.read(1024 * 1024)
410 if not data:
410 if not data:
411 break
411 break
412 sha256.update(data)
412 sha256.update(data)
413 realoid = sha256.hexdigest()
413 realoid = sha256.hexdigest()
414 if realoid != oid:
414 if realoid != oid:
415 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
415 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
416 hint=_('run hg verify'))
416 hint=_('run hg verify'))
417
417
def remote(repo):
    """remotestore factory. return a store in _storemap depending on config"""
    defaulturl = ''

    # convert deprecated configs to the new url. TODO: remove this if other
    # places are migrated to the new url config.
    # deprecated config: lfs.remotestore
    deprecatedstore = repo.ui.config('lfs', 'remotestore')
    if deprecatedstore == 'dummy':
        # deprecated config: lfs.remotepath
        defaulturl = 'file://' + repo.ui.config('lfs', 'remotepath')
    elif deprecatedstore == 'git-lfs':
        # deprecated config: lfs.remoteurl
        defaulturl = repo.ui.config('lfs', 'remoteurl')
    elif deprecatedstore == 'null':
        defaulturl = 'null://'

    lfsurl = util.url(repo.ui.config('lfs', 'url', defaulturl))
    try:
        storecls = _storemap[lfsurl.scheme]
    except KeyError:
        raise error.Abort(_('lfs: unknown url scheme: %s') % lfsurl.scheme)
    return storecls(repo, lfsurl)
440
440
class LfsRemoteError(error.RevlogError):
    """Raised when talking to a remote (Git-LFS) blobstore fails."""
@@ -1,188 +1,193 b''
1 #require lfs-test-server
1 #require lfs-test-server
2
2
3 $ LFS_LISTEN="tcp://:$HGPORT"
3 $ LFS_LISTEN="tcp://:$HGPORT"
4 $ LFS_HOST="localhost:$HGPORT"
4 $ LFS_HOST="localhost:$HGPORT"
5 $ LFS_PUBLIC=1
5 $ LFS_PUBLIC=1
6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
7 #if no-windows
7 #if no-windows
8 $ lfs-test-server &> lfs-server.log &
8 $ lfs-test-server &> lfs-server.log &
9 $ echo $! >> $DAEMON_PIDS
9 $ echo $! >> $DAEMON_PIDS
10 #else
10 #else
11 $ cat >> $TESTTMP/spawn.py <<EOF
11 $ cat >> $TESTTMP/spawn.py <<EOF
12 > import os
12 > import os
13 > import subprocess
13 > import subprocess
14 > import sys
14 > import sys
15 >
15 >
16 > for path in os.environ["PATH"].split(os.pathsep):
16 > for path in os.environ["PATH"].split(os.pathsep):
17 > exe = os.path.join(path, 'lfs-test-server.exe')
17 > exe = os.path.join(path, 'lfs-test-server.exe')
18 > if os.path.exists(exe):
18 > if os.path.exists(exe):
19 > with open('lfs-server.log', 'wb') as out:
19 > with open('lfs-server.log', 'wb') as out:
20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
21 > sys.stdout.write('%s\n' % p.pid)
21 > sys.stdout.write('%s\n' % p.pid)
22 > sys.exit(0)
22 > sys.exit(0)
23 > sys.exit(1)
23 > sys.exit(1)
24 > EOF
24 > EOF
25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
26 #endif
26 #endif
27
27
28 $ cat >> $HGRCPATH <<EOF
28 $ cat >> $HGRCPATH <<EOF
29 > [extensions]
29 > [extensions]
30 > lfs=
30 > lfs=
31 > [lfs]
31 > [lfs]
32 > url=http://foo:bar@$LFS_HOST/
32 > url=http://foo:bar@$LFS_HOST/
33 > threshold=1
33 > threshold=1
34 > EOF
34 > EOF
35
35
36 $ hg init repo1
36 $ hg init repo1
37 $ cd repo1
37 $ cd repo1
38 $ echo THIS-IS-LFS > a
38 $ echo THIS-IS-LFS > a
39 $ hg commit -m a -A a
39 $ hg commit -m a -A a
40
40
41 A push can be serviced directly from the usercache if it isn't in the local
42 store.
43
41 $ hg init ../repo2
44 $ hg init ../repo2
45 $ mv .hg/store/lfs .hg/store/lfs_
42 $ hg push ../repo2 -v
46 $ hg push ../repo2 -v
43 pushing to ../repo2
47 pushing to ../repo2
44 searching for changes
48 searching for changes
45 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
49 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
46 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
50 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
47 1 changesets found
51 1 changesets found
48 uncompressed size of bundle content:
52 uncompressed size of bundle content:
49 * (changelog) (glob)
53 * (changelog) (glob)
50 * (manifests) (glob)
54 * (manifests) (glob)
51 * a (glob)
55 * a (glob)
52 adding changesets
56 adding changesets
53 adding manifests
57 adding manifests
54 adding file changes
58 adding file changes
55 added 1 changesets with 1 changes to 1 files
59 added 1 changesets with 1 changes to 1 files
56 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
60 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
61 $ mv .hg/store/lfs_ .hg/store/lfs
57
62
58 Clear the cache to force a download
63 Clear the cache to force a download
59 $ rm -rf `hg config lfs.usercache`
64 $ rm -rf `hg config lfs.usercache`
60 $ cd ../repo2
65 $ cd ../repo2
61 $ hg update tip -v
66 $ hg update tip -v
62 resolving manifests
67 resolving manifests
63 getting a
68 getting a
64 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
69 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
65 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
70 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
66 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
71 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
67 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
72 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
68 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
73 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
69
74
70 When the server has some blobs already
75 When the server has some blobs already
71
76
72 $ hg mv a b
77 $ hg mv a b
73 $ echo ANOTHER-LARGE-FILE > c
78 $ echo ANOTHER-LARGE-FILE > c
74 $ echo ANOTHER-LARGE-FILE2 > d
79 $ echo ANOTHER-LARGE-FILE2 > d
75 $ hg commit -m b-and-c -A b c d
80 $ hg commit -m b-and-c -A b c d
76 $ hg push ../repo1 -v | grep -v '^ '
81 $ hg push ../repo1 -v | grep -v '^ '
77 pushing to ../repo1
82 pushing to ../repo1
78 searching for changes
83 searching for changes
79 lfs: need to transfer 2 objects (39 bytes)
84 lfs: need to transfer 2 objects (39 bytes)
80 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
85 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
81 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
86 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
82 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
87 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
83 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
88 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
84 1 changesets found
89 1 changesets found
85 uncompressed size of bundle content:
90 uncompressed size of bundle content:
86 adding changesets
91 adding changesets
87 adding manifests
92 adding manifests
88 adding file changes
93 adding file changes
89 added 1 changesets with 3 changes to 3 files
94 added 1 changesets with 3 changes to 3 files
90
95
91 Clear the cache to force a download
96 Clear the cache to force a download
92 $ rm -rf `hg config lfs.usercache`
97 $ rm -rf `hg config lfs.usercache`
93 $ hg --repo ../repo1 update tip -v
98 $ hg --repo ../repo1 update tip -v
94 resolving manifests
99 resolving manifests
95 getting b
100 getting b
96 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
101 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
97 getting c
102 getting c
98 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
103 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
99 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
104 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
100 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
105 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
101 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
106 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
102 getting d
107 getting d
103 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
108 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
104 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
109 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
105 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
110 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
106 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
111 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
107 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
108
113
109 Test a corrupt file download, but clear the cache first to force a download.
114 Test a corrupt file download, but clear the cache first to force a download.
110
115
111 $ rm -rf `hg config lfs.usercache`
116 $ rm -rf `hg config lfs.usercache`
112 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
117 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
113 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
118 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
114 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
119 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
115 $ rm ../repo1/*
120 $ rm ../repo1/*
116
121
117 XXX: suggesting `hg verify` won't help with a corrupt file on the lfs server.
122 XXX: suggesting `hg verify` won't help with a corrupt file on the lfs server.
118 $ hg --repo ../repo1 update -C tip -v
123 $ hg --repo ../repo1 update -C tip -v
119 resolving manifests
124 resolving manifests
120 getting a
125 getting a
121 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
126 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
122 getting b
127 getting b
123 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
128 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
124 getting c
129 getting c
125 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
130 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
126 abort: detected corrupt lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
131 abort: detected corrupt lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
127 (run hg verify)
132 (run hg verify)
128 [255]
133 [255]
129
134
130 The corrupted blob is not added to the usercache or local store
135 The corrupted blob is not added to the usercache or local store
131
136
132 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
137 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
133 [1]
138 [1]
134 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
139 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
135 [1]
140 [1]
136 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
141 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
137
142
138 Test a corrupted file upload
143 Test a corrupted file upload
139
144
140 $ echo 'another lfs blob' > b
145 $ echo 'another lfs blob' > b
141 $ hg ci -m 'another blob'
146 $ hg ci -m 'another blob'
142 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
147 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
143 $ hg push -v ../repo1
148 $ hg push -v ../repo1
144 pushing to ../repo1
149 pushing to ../repo1
145 searching for changes
150 searching for changes
146 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
151 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
147 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
152 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
148 (run hg verify)
153 (run hg verify)
149 [255]
154 [255]
150
155
151 Check error message when the remote missed a blob:
156 Check error message when the remote missed a blob:
152
157
153 $ echo FFFFF > b
158 $ echo FFFFF > b
154 $ hg commit -m b -A b
159 $ hg commit -m b -A b
155 $ echo FFFFF >> b
160 $ echo FFFFF >> b
156 $ hg commit -m b b
161 $ hg commit -m b b
157 $ rm -rf .hg/store/lfs
162 $ rm -rf .hg/store/lfs
158 $ rm -rf `hg config lfs.usercache`
163 $ rm -rf `hg config lfs.usercache`
159 $ hg update -C '.^'
164 $ hg update -C '.^'
160 abort: LFS server claims required objects do not exist:
165 abort: LFS server claims required objects do not exist:
161 8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13!
166 8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13!
162 [255]
167 [255]
163
168
164 Check error message when object does not exist:
169 Check error message when object does not exist:
165
170
166 $ hg init test && cd test
171 $ hg init test && cd test
167 $ echo "[extensions]" >> .hg/hgrc
172 $ echo "[extensions]" >> .hg/hgrc
168 $ echo "lfs=" >> .hg/hgrc
173 $ echo "lfs=" >> .hg/hgrc
169 $ echo "[lfs]" >> .hg/hgrc
174 $ echo "[lfs]" >> .hg/hgrc
170 $ echo "threshold=1" >> .hg/hgrc
175 $ echo "threshold=1" >> .hg/hgrc
171 $ echo a > a
176 $ echo a > a
172 $ hg add a
177 $ hg add a
173 $ hg commit -m 'test'
178 $ hg commit -m 'test'
174 $ echo aaaaa > a
179 $ echo aaaaa > a
175 $ hg commit -m 'largefile'
180 $ hg commit -m 'largefile'
176 $ hg debugdata .hg/store/data/a.i 1 # verify this is no the file content but includes "oid", the LFS "pointer".
181 $ hg debugdata .hg/store/data/a.i 1 # verify this is no the file content but includes "oid", the LFS "pointer".
177 version https://git-lfs.github.com/spec/v1
182 version https://git-lfs.github.com/spec/v1
178 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
183 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
179 size 6
184 size 6
180 x-is-binary 0
185 x-is-binary 0
181 $ cd ..
186 $ cd ..
182 $ rm -rf `hg config lfs.usercache`
187 $ rm -rf `hg config lfs.usercache`
183 $ hg --config 'lfs.url=https://dewey-lfs.vip.facebook.com/lfs' clone test test2
188 $ hg --config 'lfs.url=https://dewey-lfs.vip.facebook.com/lfs' clone test test2
184 updating to branch default
189 updating to branch default
185 abort: LFS server error. Remote object for file data/a.i not found:(.*)! (re)
190 abort: LFS server error. Remote object for file data/a.i not found:(.*)! (re)
186 [255]
191 [255]
187
192
188 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
193 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
General Comments 0
You need to be logged in to leave comments. Login now