##// END OF EJS Templates
lfs: verify lfs object content when transferring to and from the remote store...
Matt Harbison -
r35492:417e8e04 default
parent child Browse files
Show More
@@ -1,395 +1,428
1 1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 import hashlib
10 11 import json
11 12 import os
12 13 import re
13 14 import socket
14 15
15 16 from mercurial.i18n import _
16 17
17 18 from mercurial import (
18 19 error,
19 20 pathutil,
20 21 url as urlmod,
21 22 util,
22 23 vfs as vfsmod,
23 24 worker,
24 25 )
25 26
26 27 from ..largefiles import lfutil
27 28
28 29 # 64 bytes for SHA256
29 30 _lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
30 31
31 32 class lfsvfs(vfsmod.vfs):
32 33 def join(self, path):
33 34 """split the path at first two characters, like: XX/XXXXX..."""
34 35 if not _lfsre.match(path):
35 36 raise error.ProgrammingError('unexpected lfs path: %s' % path)
36 37 return super(lfsvfs, self).join(path[0:2], path[2:])
37 38
38 39 def walk(self, path=None, onerror=None):
39 40 """Yield (dirpath, [], oids) tuple for blobs under path
40 41
41 42 Oids only exist in the root of this vfs, so dirpath is always ''.
42 43 """
43 44 root = os.path.normpath(self.base)
44 45 # when dirpath == root, dirpath[prefixlen:] becomes empty
45 46 # because len(dirpath) < prefixlen.
46 47 prefixlen = len(pathutil.normasprefix(root))
47 48 oids = []
48 49
49 50 for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''),
50 51 onerror=onerror):
51 52 dirpath = dirpath[prefixlen:]
52 53
53 54 # Silently skip unexpected files and directories
54 55 if len(dirpath) == 2:
55 56 oids.extend([dirpath + f for f in files
56 57 if _lfsre.match(dirpath + f)])
57 58
58 59 yield ('', [], oids)
59 60
60 61 class filewithprogress(object):
61 62 """a file-like object that supports __len__ and read.
62 63
63 64 Useful to provide progress information for how many bytes are read.
64 65 """
65 66
66 67 def __init__(self, fp, callback):
67 68 self._fp = fp
68 69 self._callback = callback # func(readsize)
69 70 fp.seek(0, os.SEEK_END)
70 71 self._len = fp.tell()
71 72 fp.seek(0)
72 73
73 74 def __len__(self):
74 75 return self._len
75 76
76 77 def read(self, size):
77 78 if self._fp is None:
78 79 return b''
79 80 data = self._fp.read(size)
80 81 if data:
81 82 if self._callback:
82 83 self._callback(len(data))
83 84 else:
84 85 self._fp.close()
85 86 self._fp = None
86 87 return data
87 88
88 89 class local(object):
89 90 """Local blobstore for large file contents.
90 91
91 92 This blobstore is used both as a cache and as a staging area for large blobs
92 93 to be uploaded to the remote blobstore.
93 94 """
94 95
95 96 def __init__(self, repo):
96 97 fullpath = repo.svfs.join('lfs/objects')
97 98 self.vfs = lfsvfs(fullpath)
98 99 usercache = lfutil._usercachedir(repo.ui, 'lfs')
99 100 self.cachevfs = lfsvfs(usercache)
100 101 self.ui = repo.ui
101 102
102 def write(self, oid, data):
103 def write(self, oid, data, verify=True):
103 104 """Write blob to local blobstore."""
105 if verify:
106 _verify(oid, data)
107
104 108 with self.vfs(oid, 'wb', atomictemp=True) as fp:
105 109 fp.write(data)
106 110
107 111 # XXX: should we verify the content of the cache, and hardlink back to
108 112 # the local store on success, but truncate, write and link on failure?
109 113 if not self.cachevfs.exists(oid):
110 114 self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
111 115 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
112 116
113 def read(self, oid):
117 def read(self, oid, verify=True):
114 118 """Read blob from local blobstore."""
115 119 if not self.vfs.exists(oid):
120 blob = self._read(self.cachevfs, oid, verify)
121 self.ui.note(_('lfs: found %s in the usercache\n') % oid)
116 122 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
117 self.ui.note(_('lfs: found %s in the usercache\n') % oid)
118 123 else:
119 124 self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
120 return self.vfs.read(oid)
125 blob = self._read(self.vfs, oid, verify)
126 return blob
127
128 def _read(self, vfs, oid, verify):
129 """Read blob (after verifying) from the given store"""
130 blob = vfs.read(oid)
131 if verify:
132 _verify(oid, blob)
133 return blob
121 134
122 135 def has(self, oid):
123 136 """Returns True if the local blobstore contains the requested blob,
124 137 False otherwise."""
125 138 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
126 139
127 140 class _gitlfsremote(object):
128 141
129 142 def __init__(self, repo, url):
130 143 ui = repo.ui
131 144 self.ui = ui
132 145 baseurl, authinfo = url.authinfo()
133 146 self.baseurl = baseurl.rstrip('/')
134 147 useragent = repo.ui.config('experimental', 'lfs.user-agent')
135 148 if not useragent:
136 149 useragent = 'mercurial/%s git/2.15.1' % util.version()
137 150 self.urlopener = urlmod.opener(ui, authinfo, useragent)
138 151 self.retry = ui.configint('lfs', 'retry')
139 152
140 153 def writebatch(self, pointers, fromstore):
141 154 """Batch upload from local to remote blobstore."""
142 155 self._batch(pointers, fromstore, 'upload')
143 156
144 157 def readbatch(self, pointers, tostore):
145 158 """Batch download from remote to local blostore."""
146 159 self._batch(pointers, tostore, 'download')
147 160
148 161 def _batchrequest(self, pointers, action):
149 162 """Get metadata about objects pointed by pointers for given action
150 163
151 164 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
152 165 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
153 166 """
154 167 objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
155 168 requestdata = json.dumps({
156 169 'objects': objects,
157 170 'operation': action,
158 171 })
159 172 batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
160 173 data=requestdata)
161 174 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
162 175 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
163 176 try:
164 177 rawjson = self.urlopener.open(batchreq).read()
165 178 except util.urlerr.httperror as ex:
166 179 raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
167 180 % (ex, action))
168 181 try:
169 182 response = json.loads(rawjson)
170 183 except ValueError:
171 184 raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
172 185 % rawjson)
173 186 return response
174 187
175 188 def _checkforservererror(self, pointers, responses):
176 189 """Scans errors from objects
177 190
178 191 Returns LfsRemoteError if any objects has an error"""
179 192 for response in responses:
180 193 error = response.get('error')
181 194 if error:
182 195 ptrmap = {p.oid(): p for p in pointers}
183 196 p = ptrmap.get(response['oid'], None)
184 197 if error['code'] == 404 and p:
185 198 filename = getattr(p, 'filename', 'unknown')
186 199 raise LfsRemoteError(
187 200 _(('LFS server error. Remote object '
188 201 'for file %s not found: %r')) % (filename, response))
189 202 raise LfsRemoteError(_('LFS server error: %r') % response)
190 203
191 204 def _extractobjects(self, response, pointers, action):
192 205 """extract objects from response of the batch API
193 206
194 207 response: parsed JSON object returned by batch API
195 208 return response['objects'] filtered by action
196 209 raise if any object has an error
197 210 """
198 211 # Scan errors from objects - fail early
199 212 objects = response.get('objects', [])
200 213 self._checkforservererror(pointers, objects)
201 214
202 215 # Filter objects with given action. Practically, this skips uploading
203 216 # objects which exist in the server.
204 217 filteredobjects = [o for o in objects if action in o.get('actions', [])]
205 218 # But for downloading, we want all objects. Therefore missing objects
206 219 # should be considered an error.
207 220 if action == 'download':
208 221 if len(filteredobjects) < len(objects):
209 222 missing = [o.get('oid', '?')
210 223 for o in objects
211 224 if action not in o.get('actions', [])]
212 225 raise LfsRemoteError(
213 226 _('LFS server claims required objects do not exist:\n%s')
214 227 % '\n'.join(missing))
215 228
216 229 return filteredobjects
217 230
218 231 def _basictransfer(self, obj, action, localstore):
219 232 """Download or upload a single object using basic transfer protocol
220 233
221 234 obj: dict, an object description returned by batch API
222 235 action: string, one of ['upload', 'download']
223 236 localstore: blobstore.local
224 237
225 238 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
226 239 basic-transfers.md
227 240 """
228 241 oid = str(obj['oid'])
229 242
230 243 href = str(obj['actions'][action].get('href'))
231 244 headers = obj['actions'][action].get('header', {}).items()
232 245
233 246 request = util.urlreq.request(href)
234 247 if action == 'upload':
235 248 # If uploading blobs, read data from local blobstore.
249 with localstore.vfs(oid) as fp:
250 _verifyfile(oid, fp)
236 251 request.data = filewithprogress(localstore.vfs(oid), None)
237 252 request.get_method = lambda: 'PUT'
238 253
239 254 for k, v in headers:
240 255 request.add_header(k, v)
241 256
242 257 response = b''
243 258 try:
244 259 req = self.urlopener.open(request)
245 260 while True:
246 261 data = req.read(1048576)
247 262 if not data:
248 263 break
249 264 response += data
250 265 except util.urlerr.httperror as ex:
251 266 raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
252 267 % (ex, oid, action))
253 268
254 269 if action == 'download':
255 270 # If downloading blobs, store downloaded data to local blobstore
256 localstore.write(oid, response)
271 localstore.write(oid, response, verify=True)
257 272
258 273 def _batch(self, pointers, localstore, action):
259 274 if action not in ['upload', 'download']:
260 275 raise error.ProgrammingError('invalid Git-LFS action: %s' % action)
261 276
262 277 response = self._batchrequest(pointers, action)
263 278 objects = self._extractobjects(response, pointers, action)
264 279 total = sum(x.get('size', 0) for x in objects)
265 280 sizes = {}
266 281 for obj in objects:
267 282 sizes[obj.get('oid')] = obj.get('size', 0)
268 283 topic = {'upload': _('lfs uploading'),
269 284 'download': _('lfs downloading')}[action]
270 285 if self.ui.verbose and len(objects) > 1:
271 286 self.ui.write(_('lfs: need to transfer %d objects (%s)\n')
272 287 % (len(objects), util.bytecount(total)))
273 288 self.ui.progress(topic, 0, total=total)
274 289 def transfer(chunk):
275 290 for obj in chunk:
276 291 objsize = obj.get('size', 0)
277 292 if self.ui.verbose:
278 293 if action == 'download':
279 294 msg = _('lfs: downloading %s (%s)\n')
280 295 elif action == 'upload':
281 296 msg = _('lfs: uploading %s (%s)\n')
282 297 self.ui.write(msg % (obj.get('oid'),
283 298 util.bytecount(objsize)))
284 299 retry = self.retry
285 300 while True:
286 301 try:
287 302 self._basictransfer(obj, action, localstore)
288 303 yield 1, obj.get('oid')
289 304 break
290 305 except socket.error as ex:
291 306 if retry > 0:
292 307 if self.ui.verbose:
293 308 self.ui.write(
294 309 _('lfs: failed: %r (remaining retry %d)\n')
295 310 % (ex, retry))
296 311 retry -= 1
297 312 continue
298 313 raise
299 314
300 315 oids = worker.worker(self.ui, 0.1, transfer, (),
301 316 sorted(objects, key=lambda o: o.get('oid')))
302 317 processed = 0
303 318 for _one, oid in oids:
304 319 processed += sizes[oid]
305 320 self.ui.progress(topic, processed, total=total)
306 321 if self.ui.verbose:
307 322 self.ui.write(_('lfs: processed: %s\n') % oid)
308 323 self.ui.progress(topic, pos=None, total=total)
309 324
310 325 def __del__(self):
311 326 # copied from mercurial/httppeer.py
312 327 urlopener = getattr(self, 'urlopener', None)
313 328 if urlopener:
314 329 for h in urlopener.handlers:
315 330 h.close()
316 331 getattr(h, "close_all", lambda : None)()
317 332
318 333 class _dummyremote(object):
319 334 """Dummy store storing blobs to temp directory."""
320 335
321 336 def __init__(self, repo, url):
322 337 fullpath = repo.vfs.join('lfs', url.path)
323 338 self.vfs = lfsvfs(fullpath)
324 339
325 340 def writebatch(self, pointers, fromstore):
326 341 for p in pointers:
327 content = fromstore.read(p.oid())
342 content = fromstore.read(p.oid(), verify=True)
328 343 with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
329 344 fp.write(content)
330 345
331 346 def readbatch(self, pointers, tostore):
332 347 for p in pointers:
333 348 content = self.vfs.read(p.oid())
334 tostore.write(p.oid(), content)
349 tostore.write(p.oid(), content, verify=True)
335 350
336 351 class _nullremote(object):
337 352 """Null store storing blobs to /dev/null."""
338 353
339 354 def __init__(self, repo, url):
340 355 pass
341 356
342 357 def writebatch(self, pointers, fromstore):
343 358 pass
344 359
345 360 def readbatch(self, pointers, tostore):
346 361 pass
347 362
348 363 class _promptremote(object):
349 364 """Prompt user to set lfs.url when accessed."""
350 365
351 366 def __init__(self, repo, url):
352 367 pass
353 368
354 369 def writebatch(self, pointers, fromstore, ui=None):
355 370 self._prompt()
356 371
357 372 def readbatch(self, pointers, tostore, ui=None):
358 373 self._prompt()
359 374
360 375 def _prompt(self):
361 376 raise error.Abort(_('lfs.url needs to be configured'))
362 377
363 378 _storemap = {
364 379 'https': _gitlfsremote,
365 380 'http': _gitlfsremote,
366 381 'file': _dummyremote,
367 382 'null': _nullremote,
368 383 None: _promptremote,
369 384 }
370 385
386 def _verify(oid, content):
387 realoid = hashlib.sha256(content).hexdigest()
388 if realoid != oid:
389 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
390 hint=_('run hg verify'))
391
392 def _verifyfile(oid, fp):
393 sha256 = hashlib.sha256()
394 while True:
395 data = fp.read(1024 * 1024)
396 if not data:
397 break
398 sha256.update(data)
399 realoid = sha256.hexdigest()
400 if realoid != oid:
401 raise error.Abort(_('detected corrupt lfs object: %s') % oid,
402 hint=_('run hg verify'))
403
371 404 def remote(repo):
372 405 """remotestore factory. return a store in _storemap depending on config"""
373 406 defaulturl = ''
374 407
375 408 # convert deprecated configs to the new url. TODO: remove this if other
376 409 # places are migrated to the new url config.
377 410 # deprecated config: lfs.remotestore
378 411 deprecatedstore = repo.ui.config('lfs', 'remotestore')
379 412 if deprecatedstore == 'dummy':
380 413 # deprecated config: lfs.remotepath
381 414 defaulturl = 'file://' + repo.ui.config('lfs', 'remotepath')
382 415 elif deprecatedstore == 'git-lfs':
383 416 # deprecated config: lfs.remoteurl
384 417 defaulturl = repo.ui.config('lfs', 'remoteurl')
385 418 elif deprecatedstore == 'null':
386 419 defaulturl = 'null://'
387 420
388 421 url = util.url(repo.ui.config('lfs', 'url', defaulturl))
389 422 scheme = url.scheme
390 423 if scheme not in _storemap:
391 424 raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
392 425 return _storemap[scheme](repo, url)
393 426
394 427 class LfsRemoteError(error.RevlogError):
395 428 pass
@@ -1,323 +1,325
1 1 # wrapper.py - methods wrapping core mercurial logic
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial.node import bin, nullid, short
14 14
15 15 from mercurial import (
16 16 error,
17 17 filelog,
18 18 revlog,
19 19 util,
20 20 )
21 21
22 22 from ..largefiles import lfutil
23 23
24 24 from . import (
25 25 blobstore,
26 26 pointer,
27 27 )
28 28
29 29 def supportedoutgoingversions(orig, repo):
30 30 versions = orig(repo)
31 31 versions.discard('01')
32 32 versions.discard('02')
33 33 versions.add('03')
34 34 return versions
35 35
36 36 def allsupportedversions(orig, ui):
37 37 versions = orig(ui)
38 38 versions.add('03')
39 39 return versions
40 40
41 41 def bypasscheckhash(self, text):
42 42 return False
43 43
44 44 def readfromstore(self, text):
45 45 """Read filelog content from local blobstore transform for flagprocessor.
46 46
47 47 Default tranform for flagprocessor, returning contents from blobstore.
48 48 Returns a 2-typle (text, validatehash) where validatehash is True as the
49 49 contents of the blobstore should be checked using checkhash.
50 50 """
51 51 p = pointer.deserialize(text)
52 52 oid = p.oid()
53 53 store = self.opener.lfslocalblobstore
54 54 if not store.has(oid):
55 55 p.filename = getattr(self, 'indexfile', None)
56 56 self.opener.lfsremoteblobstore.readbatch([p], store)
57 text = store.read(oid)
57
58 # The caller will validate the content
59 text = store.read(oid, verify=False)
58 60
59 61 # pack hg filelog metadata
60 62 hgmeta = {}
61 63 for k in p.keys():
62 64 if k.startswith('x-hg-'):
63 65 name = k[len('x-hg-'):]
64 66 hgmeta[name] = p[k]
65 67 if hgmeta or text.startswith('\1\n'):
66 68 text = filelog.packmeta(hgmeta, text)
67 69
68 70 return (text, True)
69 71
70 72 def writetostore(self, text):
71 73 # hg filelog metadata (includes rename, etc)
72 74 hgmeta, offset = filelog.parsemeta(text)
73 75 if offset and offset > 0:
74 76 # lfs blob does not contain hg filelog metadata
75 77 text = text[offset:]
76 78
77 79 # git-lfs only supports sha256
78 80 oid = hashlib.sha256(text).hexdigest()
79 self.opener.lfslocalblobstore.write(oid, text)
81 self.opener.lfslocalblobstore.write(oid, text, verify=False)
80 82
81 83 # replace contents with metadata
82 84 longoid = 'sha256:%s' % oid
83 85 metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))
84 86
85 87 # by default, we expect the content to be binary. however, LFS could also
86 88 # be used for non-binary content. add a special entry for non-binary data.
87 89 # this will be used by filectx.isbinary().
88 90 if not util.binary(text):
89 91 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
90 92 metadata['x-is-binary'] = '0'
91 93
92 94 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
93 95 if hgmeta is not None:
94 96 for k, v in hgmeta.iteritems():
95 97 metadata['x-hg-%s' % k] = v
96 98
97 99 rawtext = metadata.serialize()
98 100 return (rawtext, False)
99 101
100 102 def _islfs(rlog, node=None, rev=None):
101 103 if rev is None:
102 104 if node is None:
103 105 # both None - likely working copy content where node is not ready
104 106 return False
105 107 rev = rlog.rev(node)
106 108 else:
107 109 node = rlog.node(rev)
108 110 if node == nullid:
109 111 return False
110 112 flags = rlog.flags(rev)
111 113 return bool(flags & revlog.REVIDX_EXTSTORED)
112 114
113 115 def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
114 116 cachedelta=None, node=None,
115 117 flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
116 118 threshold = self.opener.options['lfsthreshold']
117 119 textlen = len(text)
118 120 # exclude hg rename meta from file size
119 121 meta, offset = filelog.parsemeta(text)
120 122 if offset:
121 123 textlen -= offset
122 124
123 125 if threshold and textlen > threshold:
124 126 flags |= revlog.REVIDX_EXTSTORED
125 127
126 128 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
127 129 node=node, flags=flags, **kwds)
128 130
129 131 def filelogrenamed(orig, self, node):
130 132 if _islfs(self, node):
131 133 rawtext = self.revision(node, raw=True)
132 134 if not rawtext:
133 135 return False
134 136 metadata = pointer.deserialize(rawtext)
135 137 if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
136 138 return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
137 139 else:
138 140 return False
139 141 return orig(self, node)
140 142
141 143 def filelogsize(orig, self, rev):
142 144 if _islfs(self, rev=rev):
143 145 # fast path: use lfs metadata to answer size
144 146 rawtext = self.revision(rev, raw=True)
145 147 metadata = pointer.deserialize(rawtext)
146 148 return int(metadata['size'])
147 149 return orig(self, rev)
148 150
149 151 def filectxcmp(orig, self, fctx):
150 152 """returns True if text is different than fctx"""
151 153 # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
152 154 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
153 155 # fast path: check LFS oid
154 156 p1 = pointer.deserialize(self.rawdata())
155 157 p2 = pointer.deserialize(fctx.rawdata())
156 158 return p1.oid() != p2.oid()
157 159 return orig(self, fctx)
158 160
159 161 def filectxisbinary(orig, self):
160 162 if self.islfs():
161 163 # fast path: use lfs metadata to answer isbinary
162 164 metadata = pointer.deserialize(self.rawdata())
163 165 # if lfs metadata says nothing, assume it's binary by default
164 166 return bool(int(metadata.get('x-is-binary', 1)))
165 167 return orig(self)
166 168
167 169 def filectxislfs(self):
168 170 return _islfs(self.filelog(), self.filenode())
169 171
170 172 def convertsink(orig, sink):
171 173 sink = orig(sink)
172 174 if sink.repotype == 'hg':
173 175 class lfssink(sink.__class__):
174 176 def putcommit(self, files, copies, parents, commit, source, revmap,
175 177 full, cleanp2):
176 178 pc = super(lfssink, self).putcommit
177 179 node = pc(files, copies, parents, commit, source, revmap, full,
178 180 cleanp2)
179 181
180 182 if 'lfs' not in self.repo.requirements:
181 183 ctx = self.repo[node]
182 184
183 185 # The file list may contain removed files, so check for
184 186 # membership before assuming it is in the context.
185 187 if any(f in ctx and ctx[f].islfs() for f, n in files):
186 188 self.repo.requirements.add('lfs')
187 189 self.repo._writerequirements()
188 190
189 191 # Permanently enable lfs locally
190 192 with self.repo.vfs('hgrc', 'a', text=True) as fp:
191 193 fp.write('\n[extensions]\nlfs=\n')
192 194
193 195 return node
194 196
195 197 sink.__class__ = lfssink
196 198
197 199 return sink
198 200
199 201 def vfsinit(orig, self, othervfs):
200 202 orig(self, othervfs)
201 203 # copy lfs related options
202 204 for k, v in othervfs.options.items():
203 205 if k.startswith('lfs'):
204 206 self.options[k] = v
205 207 # also copy lfs blobstores. note: this can run before reposetup, so lfs
206 208 # blobstore attributes are not always ready at this time.
207 209 for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
208 210 if util.safehasattr(othervfs, name):
209 211 setattr(self, name, getattr(othervfs, name))
210 212
211 213 def hgclone(orig, ui, opts, *args, **kwargs):
212 214 result = orig(ui, opts, *args, **kwargs)
213 215
214 216 if result is not None:
215 217 sourcerepo, destrepo = result
216 218 repo = destrepo.local()
217 219
218 220 # When cloning to a remote repo (like through SSH), no repo is available
219 221 # from the peer. Therefore the hgrc can't be updated.
220 222 if not repo:
221 223 return result
222 224
223 225 # If lfs is required for this repo, permanently enable it locally
224 226 if 'lfs' in repo.requirements:
225 227 with repo.vfs('hgrc', 'a', text=True) as fp:
226 228 fp.write('\n[extensions]\nlfs=\n')
227 229
228 230 return result
229 231
230 232 def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
231 233 orig(sourcerepo, destrepo, bookmarks, defaultpath)
232 234
233 235 # If lfs is required for this repo, permanently enable it locally
234 236 if 'lfs' in destrepo.requirements:
235 237 with destrepo.vfs('hgrc', 'a', text=True) as fp:
236 238 fp.write('\n[extensions]\nlfs=\n')
237 239
238 240 def _canskipupload(repo):
239 241 # if remotestore is a null store, upload is a no-op and can be skipped
240 242 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
241 243
242 244 def candownload(repo):
243 245 # if remotestore is a null store, downloads will lead to nothing
244 246 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
245 247
246 248 def uploadblobsfromrevs(repo, revs):
247 249 '''upload lfs blobs introduced by revs
248 250
249 251 Note: also used by other extensions e. g. infinitepush. avoid renaming.
250 252 '''
251 253 if _canskipupload(repo):
252 254 return
253 255 pointers = extractpointers(repo, revs)
254 256 uploadblobs(repo, pointers)
255 257
256 258 def prepush(pushop):
257 259 """Prepush hook.
258 260
259 261 Read through the revisions to push, looking for filelog entries that can be
260 262 deserialized into metadata so that we can block the push on their upload to
261 263 the remote blobstore.
262 264 """
263 265 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
264 266
265 267 def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
266 268 *args, **kwargs):
267 269 """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
268 270 uploadblobsfromrevs(repo, outgoing.missing)
269 271 return orig(ui, repo, source, filename, bundletype, outgoing, *args,
270 272 **kwargs)
271 273
272 274 def extractpointers(repo, revs):
273 275 """return a list of lfs pointers added by given revs"""
274 276 ui = repo.ui
275 277 if ui.debugflag:
276 278 ui.write(_('lfs: computing set of blobs to upload\n'))
277 279 pointers = {}
278 280 for r in revs:
279 281 ctx = repo[r]
280 282 for p in pointersfromctx(ctx).values():
281 283 pointers[p.oid()] = p
282 284 return sorted(pointers.values())
283 285
284 286 def pointersfromctx(ctx):
285 287 """return a dict {path: pointer} for given single changectx"""
286 288 result = {}
287 289 for f in ctx.files():
288 290 if f not in ctx:
289 291 continue
290 292 fctx = ctx[f]
291 293 if not _islfs(fctx.filelog(), fctx.filenode()):
292 294 continue
293 295 try:
294 296 result[f] = pointer.deserialize(fctx.rawdata())
295 297 except pointer.InvalidPointer as ex:
296 298 raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
297 299 % (f, short(ctx.node()), ex))
298 300 return result
299 301
300 302 def uploadblobs(repo, pointers):
301 303 """upload given pointers from local blobstore"""
302 304 if not pointers:
303 305 return
304 306
305 307 remoteblob = repo.svfs.lfsremoteblobstore
306 308 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
307 309
308 310 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
309 311 orig(ui, srcrepo, dstrepo, requirements)
310 312
311 313 srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
312 314 dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
313 315
314 316 for dirpath, dirs, files in srclfsvfs.walk():
315 317 for oid in files:
316 318 ui.write(_('copying lfs blob %s\n') % oid)
317 319 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
318 320
319 321 def upgraderequirements(orig, repo):
320 322 reqs = orig(repo)
321 323 if 'lfs' in repo.requirements:
322 324 reqs.add('lfs')
323 325 return reqs
@@ -1,189 +1,187
1 1 #require lfs-test-server
2 2
3 3 $ LFS_LISTEN="tcp://:$HGPORT"
4 4 $ LFS_HOST="localhost:$HGPORT"
5 5 $ LFS_PUBLIC=1
6 6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
7 7 #if no-windows
8 8 $ lfs-test-server &> lfs-server.log &
9 9 $ echo $! >> $DAEMON_PIDS
10 10 #else
11 11 $ cat >> $TESTTMP/spawn.py <<EOF
12 12 > import os
13 13 > import subprocess
14 14 > import sys
15 15 >
16 16 > for path in os.environ["PATH"].split(os.pathsep):
17 17 > exe = os.path.join(path, 'lfs-test-server.exe')
18 18 > if os.path.exists(exe):
19 19 > with open('lfs-server.log', 'wb') as out:
20 20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
21 21 > sys.stdout.write('%s\n' % p.pid)
22 22 > sys.exit(0)
23 23 > sys.exit(1)
24 24 > EOF
25 25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
26 26 #endif
27 27
28 28 $ cat >> $HGRCPATH <<EOF
29 29 > [extensions]
30 30 > lfs=
31 31 > [lfs]
32 32 > url=http://foo:bar@$LFS_HOST/
33 33 > threshold=1
34 34 > EOF
35 35
36 36 $ hg init repo1
37 37 $ cd repo1
38 38 $ echo THIS-IS-LFS > a
39 39 $ hg commit -m a -A a
40 40
41 41 $ hg init ../repo2
42 42 $ hg push ../repo2 -v
43 43 pushing to ../repo2
44 44 searching for changes
45 45 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
46 46 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
47 47 1 changesets found
48 48 uncompressed size of bundle content:
49 49 * (changelog) (glob)
50 50 * (manifests) (glob)
51 51 * a (glob)
52 52 adding changesets
53 53 adding manifests
54 54 adding file changes
55 55 added 1 changesets with 1 changes to 1 files
56 56
57 57 Clear the cache to force a download
58 58 $ rm -rf `hg config lfs.usercache`
59 59 $ cd ../repo2
60 60 $ hg update tip -v
61 61 resolving manifests
62 62 getting a
63 63 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
64 64 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
65 65 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
66 66 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
67 67 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
68 68
69 69 When the server has some blobs already
70 70
71 71 $ hg mv a b
72 72 $ echo ANOTHER-LARGE-FILE > c
73 73 $ echo ANOTHER-LARGE-FILE2 > d
74 74 $ hg commit -m b-and-c -A b c d
75 75 $ hg push ../repo1 -v | grep -v '^ '
76 76 pushing to ../repo1
77 77 searching for changes
78 78 lfs: need to transfer 2 objects (39 bytes)
79 79 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
80 80 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
81 81 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
82 82 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
83 83 1 changesets found
84 84 uncompressed size of bundle content:
85 85 adding changesets
86 86 adding manifests
87 87 adding file changes
88 88 added 1 changesets with 3 changes to 3 files
89 89
90 90 Clear the cache to force a download
91 91 $ rm -rf `hg config lfs.usercache`
92 92 $ hg --repo ../repo1 update tip -v
93 93 resolving manifests
94 94 getting b
95 95 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
96 96 getting c
97 97 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
98 98 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
99 99 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
100 100 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
101 101 getting d
102 102 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
103 103 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
104 104 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
105 105 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
106 106 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
107 107
108 Test a corrupt file download, but clear the cache first to force a download
109
110 XXX: ideally, the validation would occur before polluting the usercache and
111 local store, with a clearer error message.
108 Test a corrupt file download, but clear the cache first to force a download.
112 109
113 110 $ rm -rf `hg config lfs.usercache`
114 111 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
115 112 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
116 113 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
117 114 $ rm ../repo1/*
115
116 XXX: suggesting `hg verify` won't help with a corrupt file on the lfs server.
118 117 $ hg --repo ../repo1 update -C tip -v
119 118 resolving manifests
120 119 getting a
121 120 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
122 121 getting b
123 122 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
124 123 getting c
125 124 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
126 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
127 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
128 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
129 abort: integrity check failed on data/c.i:0!
125 abort: detected corrupt lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
126 (run hg verify)
130 127 [255]
131 128
132 BUG: the corrupted blob was added to the usercache and local store
129 The corrupted blob is not added to the usercache or local store
133 130
134 $ cat ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 | $TESTDIR/f --sha256
135 sha256=fa82ca222fc9813afad3559637960bf311170cdd80ed35287f4623eb2320a660
136 $ cat `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 | $TESTDIR/f --sha256
137 sha256=fa82ca222fc9813afad3559637960bf311170cdd80ed35287f4623eb2320a660
131 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
132 [1]
133 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
134 [1]
138 135 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
139 136
140 137 Test a corrupted file upload
141 138
142 139 $ echo 'another lfs blob' > b
143 140 $ hg ci -m 'another blob'
144 141 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
145 142 $ hg push -v ../repo1
146 143 pushing to ../repo1
147 144 searching for changes
148 145 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
149 abort: HTTP error: HTTP Error 500: Internal Server Error (oid=e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0, action=upload)!
146 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
147 (run hg verify)
150 148 [255]
151 149
152 150 Check error message when the remote missed a blob:
153 151
154 152 $ echo FFFFF > b
155 153 $ hg commit -m b -A b
156 154 $ echo FFFFF >> b
157 155 $ hg commit -m b b
158 156 $ rm -rf .hg/store/lfs
159 157 $ rm -rf `hg config lfs.usercache`
160 158 $ hg update -C '.^'
161 159 abort: LFS server claims required objects do not exist:
162 160 8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13!
163 161 [255]
164 162
165 163 Check error message when object does not exist:
166 164
167 165 $ hg init test && cd test
168 166 $ echo "[extensions]" >> .hg/hgrc
169 167 $ echo "lfs=" >> .hg/hgrc
170 168 $ echo "[lfs]" >> .hg/hgrc
171 169 $ echo "threshold=1" >> .hg/hgrc
172 170 $ echo a > a
173 171 $ hg add a
174 172 $ hg commit -m 'test'
175 173 $ echo aaaaa > a
176 174 $ hg commit -m 'largefile'
177 175 $ hg debugdata .hg/store/data/a.i 1 # verify this is not the file content but includes "oid", the LFS "pointer".
178 176 version https://git-lfs.github.com/spec/v1
179 177 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
180 178 size 6
181 179 x-is-binary 0
182 180 $ cd ..
183 181 $ rm -rf `hg config lfs.usercache`
184 182 $ hg --config 'lfs.url=https://dewey-lfs.vip.facebook.com/lfs' clone test test2
185 183 updating to branch default
186 184 abort: LFS server error. Remote object for file data/a.i not found:(.*)! (re)
187 185 [255]
188 186
189 187 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
@@ -1,884 +1,867
1 1 # Initial setup
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > lfs=
6 6 > [lfs]
7 7 > threshold=1000B
8 8 > EOF
9 9
10 10 $ LONG=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
11 11
12 12 # Prepare server and enable extension
13 13 $ hg init server
14 14 $ hg clone -q server client
15 15 $ cd client
16 16
17 17 # Commit small file
18 18 $ echo s > smallfile
19 19 $ hg commit -Aqm "add small file"
20 20
21 21 # Commit large file
22 22 $ echo $LONG > largefile
23 23 $ grep lfs .hg/requires
24 24 [1]
25 25 $ hg commit --traceback -Aqm "add large file"
26 26 $ grep lfs .hg/requires
27 27 lfs
28 28
29 29 # Ensure metadata is stored
30 30 $ hg debugdata largefile 0
31 31 version https://git-lfs.github.com/spec/v1
32 32 oid sha256:f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
33 33 size 1501
34 34 x-is-binary 0
35 35
36 36 # Check the blobstore is populated
37 37 $ find .hg/store/lfs/objects | sort
38 38 .hg/store/lfs/objects
39 39 .hg/store/lfs/objects/f1
40 40 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
41 41
42 42 # Check the blob stored contains the actual contents of the file
43 43 $ cat .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
44 44 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
45 45
46 46 # Push changes to the server
47 47
48 48 $ hg push
49 49 pushing to $TESTTMP/server
50 50 searching for changes
51 51 abort: lfs.url needs to be configured
52 52 [255]
53 53
54 54 $ cat >> $HGRCPATH << EOF
55 55 > [lfs]
56 56 > url=file:$TESTTMP/dummy-remote/
57 57 > EOF
58 58
59 59 $ hg push -v | egrep -v '^(uncompressed| )'
60 60 pushing to $TESTTMP/server
61 61 searching for changes
62 62 lfs: found f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b in the local lfs store
63 63 2 changesets found
64 64 adding changesets
65 65 adding manifests
66 66 adding file changes
67 67 added 2 changesets with 2 changes to 2 files
68 68
69 69 # Unknown URL scheme
70 70
71 71 $ hg push --config lfs.url=ftp://foobar
72 72 abort: lfs: unknown url scheme: ftp
73 73 [255]
74 74
75 75 $ cd ../
76 76
77 77 # Initialize new client (not cloning) and setup extension
78 78 $ hg init client2
79 79 $ cd client2
80 80 $ cat >> .hg/hgrc <<EOF
81 81 > [paths]
82 82 > default = $TESTTMP/server
83 83 > EOF
84 84
85 85 # Pull from server
86 86 $ hg pull default
87 87 pulling from $TESTTMP/server
88 88 requesting all changes
89 89 adding changesets
90 90 adding manifests
91 91 adding file changes
92 92 added 2 changesets with 2 changes to 2 files
93 93 new changesets b29ba743f89d:00c137947d30
94 94 (run 'hg update' to get a working copy)
95 95
96 96 # Check the blobstore is not yet populated
97 97 $ [ -d .hg/store/lfs/objects ]
98 98 [1]
99 99
100 100 # Update to the last revision containing the large file
101 101 $ hg update
102 102 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
103 103
104 104 # Check the blobstore has been populated on update
105 105 $ find .hg/store/lfs/objects | sort
106 106 .hg/store/lfs/objects
107 107 .hg/store/lfs/objects/f1
108 108 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
109 109
110 110 # Check the contents of the file are fetched from blobstore when requested
111 111 $ hg cat -r . largefile
112 112 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
113 113
114 114 # Check the file has been copied in the working copy
115 115 $ cat largefile
116 116 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
117 117
118 118 $ cd ..
119 119
120 120 # Check rename, and switch between large and small files
121 121
122 122 $ hg init repo3
123 123 $ cd repo3
124 124 $ cat >> .hg/hgrc << EOF
125 125 > [lfs]
126 126 > threshold=10B
127 127 > EOF
128 128
129 129 $ echo LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS > large
130 130 $ echo SHORTER > small
131 131 $ hg add . -q
132 132 $ hg commit -m 'commit with lfs content'
133 133
134 134 $ hg mv large l
135 135 $ hg mv small s
136 136 $ hg commit -m 'renames'
137 137
138 138 $ echo SHORT > l
139 139 $ echo BECOME-LARGER-FROM-SHORTER > s
140 140 $ hg commit -m 'large to small, small to large'
141 141
142 142 $ echo 1 >> l
143 143 $ echo 2 >> s
144 144 $ hg commit -m 'random modifications'
145 145
146 146 $ echo RESTORE-TO-BE-LARGE > l
147 147 $ echo SHORTER > s
148 148 $ hg commit -m 'switch large and small again'
149 149
150 150 # Test lfs_files template
151 151
152 152 $ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n'
153 153 0 large
154 154 1 l
155 155 2 s
156 156 3 s
157 157 4 l
158 158
159 159 # Push and pull the above repo
160 160
161 161 $ hg --cwd .. init repo4
162 162 $ hg push ../repo4
163 163 pushing to ../repo4
164 164 searching for changes
165 165 adding changesets
166 166 adding manifests
167 167 adding file changes
168 168 added 5 changesets with 10 changes to 4 files
169 169
170 170 $ hg --cwd .. init repo5
171 171 $ hg --cwd ../repo5 pull ../repo3
172 172 pulling from ../repo3
173 173 requesting all changes
174 174 adding changesets
175 175 adding manifests
176 176 adding file changes
177 177 added 5 changesets with 10 changes to 4 files
178 178 new changesets fd47a419c4f7:5adf850972b9
179 179 (run 'hg update' to get a working copy)
180 180
181 181 $ cd ..
182 182
183 183 # Test clone
184 184
185 185 $ hg init repo6
186 186 $ cd repo6
187 187 $ cat >> .hg/hgrc << EOF
188 188 > [lfs]
189 189 > threshold=30B
190 190 > EOF
191 191
192 192 $ echo LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES > large
193 193 $ echo SMALL > small
194 194 $ hg commit -Aqm 'create a lfs file' large small
195 195 $ hg debuglfsupload -r 'all()' -v
196 196 lfs: found 8e92251415339ae9b148c8da89ed5ec665905166a1ab11b09dca8fad83344738 in the local lfs store
197 197
198 198 $ cd ..
199 199
200 200 $ hg clone repo6 repo7
201 201 updating to branch default
202 202 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
203 203 $ cd repo7
204 204 $ hg config extensions --debug | grep lfs
205 205 $TESTTMP/repo7/.hg/hgrc:*: extensions.lfs= (glob)
206 206 $ cat large
207 207 LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES
208 208 $ cat small
209 209 SMALL
210 210
211 211 $ cd ..
212 212
213 213 $ hg --config extensions.share= share repo7 sharedrepo
214 214 updating working directory
215 215 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
216 216 $ hg -R sharedrepo config extensions --debug | grep lfs
217 217 $TESTTMP/sharedrepo/.hg/hgrc:*: extensions.lfs= (glob)
218 218
219 219 # Test rename and status
220 220
221 221 $ hg init repo8
222 222 $ cd repo8
223 223 $ cat >> .hg/hgrc << EOF
224 224 > [lfs]
225 225 > threshold=10B
226 226 > EOF
227 227
228 228 $ echo THIS-IS-LFS-BECAUSE-10-BYTES > a1
229 229 $ echo SMALL > a2
230 230 $ hg commit -m a -A a1 a2
231 231 $ hg status
232 232 $ hg mv a1 b1
233 233 $ hg mv a2 a1
234 234 $ hg mv b1 a2
235 235 $ hg commit -m b
236 236 $ hg status
237 237 >>> with open('a2', 'wb') as f:
238 238 ... f.write(b'\1\nSTART-WITH-HG-FILELOG-METADATA')
239 239 >>> with open('a1', 'wb') as f:
240 240 ... f.write(b'\1\nMETA\n')
241 241 $ hg commit -m meta
242 242 $ hg status
243 243 $ hg log -T '{rev}: {file_copies} | {file_dels} | {file_adds}\n'
244 244 2: | |
245 245 1: a1 (a2)a2 (a1) | |
246 246 0: | | a1 a2
247 247
248 248 $ for n in a1 a2; do
249 249 > for r in 0 1 2; do
250 250 > printf '\n%s @ %s\n' $n $r
251 251 > hg debugdata $n $r
252 252 > done
253 253 > done
254 254
255 255 a1 @ 0
256 256 version https://git-lfs.github.com/spec/v1
257 257 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
258 258 size 29
259 259 x-is-binary 0
260 260
261 261 a1 @ 1
262 262 \x01 (esc)
263 263 copy: a2
264 264 copyrev: 50470ad23cf937b1f4b9f80bfe54df38e65b50d9
265 265 \x01 (esc)
266 266 SMALL
267 267
268 268 a1 @ 2
269 269 \x01 (esc)
270 270 \x01 (esc)
271 271 \x01 (esc)
272 272 META
273 273
274 274 a2 @ 0
275 275 SMALL
276 276
277 277 a2 @ 1
278 278 version https://git-lfs.github.com/spec/v1
279 279 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
280 280 size 29
281 281 x-hg-copy a1
282 282 x-hg-copyrev be23af27908a582af43e5cda209a5a9b319de8d4
283 283 x-is-binary 0
284 284
285 285 a2 @ 2
286 286 version https://git-lfs.github.com/spec/v1
287 287 oid sha256:876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943
288 288 size 32
289 289 x-is-binary 0
290 290
291 291 # Verify commit hashes include rename metadata
292 292
293 293 $ hg log -T '{rev}:{node|short} {desc}\n'
294 294 2:0fae949de7fa meta
295 295 1:9cd6bdffdac0 b
296 296 0:7f96794915f7 a
297 297
298 298 $ cd ..
299 299
300 300 # Test bundle
301 301
302 302 $ hg init repo9
303 303 $ cd repo9
304 304 $ cat >> .hg/hgrc << EOF
305 305 > [lfs]
306 306 > threshold=10B
307 307 > [diff]
308 308 > git=1
309 309 > EOF
310 310
311 311 $ for i in 0 single two three 4; do
312 312 > echo 'THIS-IS-LFS-'$i > a
313 313 > hg commit -m a-$i -A a
314 314 > done
315 315
316 316 $ hg update 2 -q
317 317 $ echo 'THIS-IS-LFS-2-CHILD' > a
318 318 $ hg commit -m branching -q
319 319
320 320 $ hg bundle --base 1 bundle.hg -v
321 321 lfs: found 5ab7a3739a5feec94a562d070a14f36dba7cad17e5484a4a89eea8e5f3166888 in the local lfs store
322 322 lfs: found a9c7d1cd6ce2b9bbdf46ed9a862845228717b921c089d0d42e3bcaed29eb612e in the local lfs store
323 323 lfs: found f693890c49c409ec33673b71e53f297681f76c1166daf33b2ad7ebf8b1d3237e in the local lfs store
324 324 lfs: found fda198fea753eb66a252e9856915e1f5cddbe41723bd4b695ece2604ad3c9f75 in the local lfs store
325 325 4 changesets found
326 326 uncompressed size of bundle content:
327 327 * (changelog) (glob)
328 328 * (manifests) (glob)
329 329 * a (glob)
330 330 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
331 331 $ hg -R bundle.hg log -p -T '{rev} {desc}\n' a
332 332 5 branching
333 333 diff --git a/a b/a
334 334 --- a/a
335 335 +++ b/a
336 336 @@ -1,1 +1,1 @@
337 337 -THIS-IS-LFS-two
338 338 +THIS-IS-LFS-2-CHILD
339 339
340 340 4 a-4
341 341 diff --git a/a b/a
342 342 --- a/a
343 343 +++ b/a
344 344 @@ -1,1 +1,1 @@
345 345 -THIS-IS-LFS-three
346 346 +THIS-IS-LFS-4
347 347
348 348 3 a-three
349 349 diff --git a/a b/a
350 350 --- a/a
351 351 +++ b/a
352 352 @@ -1,1 +1,1 @@
353 353 -THIS-IS-LFS-two
354 354 +THIS-IS-LFS-three
355 355
356 356 2 a-two
357 357 diff --git a/a b/a
358 358 --- a/a
359 359 +++ b/a
360 360 @@ -1,1 +1,1 @@
361 361 -THIS-IS-LFS-single
362 362 +THIS-IS-LFS-two
363 363
364 364 1 a-single
365 365 diff --git a/a b/a
366 366 --- a/a
367 367 +++ b/a
368 368 @@ -1,1 +1,1 @@
369 369 -THIS-IS-LFS-0
370 370 +THIS-IS-LFS-single
371 371
372 372 0 a-0
373 373 diff --git a/a b/a
374 374 new file mode 100644
375 375 --- /dev/null
376 376 +++ b/a
377 377 @@ -0,0 +1,1 @@
378 378 +THIS-IS-LFS-0
379 379
380 380 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
381 381 $ hg -R bundle-again.hg log -p -T '{rev} {desc}\n' a
382 382 5 branching
383 383 diff --git a/a b/a
384 384 --- a/a
385 385 +++ b/a
386 386 @@ -1,1 +1,1 @@
387 387 -THIS-IS-LFS-two
388 388 +THIS-IS-LFS-2-CHILD
389 389
390 390 4 a-4
391 391 diff --git a/a b/a
392 392 --- a/a
393 393 +++ b/a
394 394 @@ -1,1 +1,1 @@
395 395 -THIS-IS-LFS-three
396 396 +THIS-IS-LFS-4
397 397
398 398 3 a-three
399 399 diff --git a/a b/a
400 400 --- a/a
401 401 +++ b/a
402 402 @@ -1,1 +1,1 @@
403 403 -THIS-IS-LFS-two
404 404 +THIS-IS-LFS-three
405 405
406 406 2 a-two
407 407 diff --git a/a b/a
408 408 --- a/a
409 409 +++ b/a
410 410 @@ -1,1 +1,1 @@
411 411 -THIS-IS-LFS-single
412 412 +THIS-IS-LFS-two
413 413
414 414 1 a-single
415 415 diff --git a/a b/a
416 416 --- a/a
417 417 +++ b/a
418 418 @@ -1,1 +1,1 @@
419 419 -THIS-IS-LFS-0
420 420 +THIS-IS-LFS-single
421 421
422 422 0 a-0
423 423 diff --git a/a b/a
424 424 new file mode 100644
425 425 --- /dev/null
426 426 +++ b/a
427 427 @@ -0,0 +1,1 @@
428 428 +THIS-IS-LFS-0
429 429
430 430 $ cd ..
431 431
432 432 # Test isbinary
433 433
434 434 $ hg init repo10
435 435 $ cd repo10
436 436 $ cat >> .hg/hgrc << EOF
437 437 > [extensions]
438 438 > lfs=
439 439 > [lfs]
440 440 > threshold=1
441 441 > EOF
442 442 $ $PYTHON <<'EOF'
443 443 > def write(path, content):
444 444 > with open(path, 'wb') as f:
445 445 > f.write(content)
446 446 > write('a', b'\0\0')
447 447 > write('b', b'\1\n')
448 448 > write('c', b'\1\n\0')
449 449 > write('d', b'xx')
450 450 > EOF
451 451 $ hg add a b c d
452 452 $ hg diff --stat
453 453 a | Bin
454 454 b | 1 +
455 455 c | Bin
456 456 d | 1 +
457 457 4 files changed, 2 insertions(+), 0 deletions(-)
458 458 $ hg commit -m binarytest
459 459 $ cat > $TESTTMP/dumpbinary.py << EOF
460 460 > def reposetup(ui, repo):
461 461 > for n in 'abcd':
462 462 > ui.write(('%s: binary=%s\n') % (n, repo['.'][n].isbinary()))
463 463 > EOF
464 464 $ hg --config extensions.dumpbinary=$TESTTMP/dumpbinary.py id --trace
465 465 a: binary=True
466 466 b: binary=False
467 467 c: binary=True
468 468 d: binary=False
469 469 b55353847f02 tip
470 470
471 471 $ cd ..
472 472
473 473 # Test fctx.cmp fastpath - diff without LFS blobs
474 474
475 475 $ hg init repo11
476 476 $ cd repo11
477 477 $ cat >> .hg/hgrc <<EOF
478 478 > [lfs]
479 479 > threshold=1
480 480 > EOF
481 481 $ cat > ../patch.diff <<EOF
482 482 > # HG changeset patch
483 483 > 2
484 484 >
485 485 > diff --git a/a b/a
486 486 > old mode 100644
487 487 > new mode 100755
488 488 > EOF
489 489
490 490 $ for i in 1 2 3; do
491 491 > cp ../repo10/a a
492 492 > if [ $i = 3 ]; then
493 493 > # make a content-only change
494 494 > hg import -q --bypass ../patch.diff
495 495 > hg update -q
496 496 > rm ../patch.diff
497 497 > else
498 498 > echo $i >> a
499 499 > hg commit -m $i -A a
500 500 > fi
501 501 > done
502 502 $ [ -d .hg/store/lfs/objects ]
503 503
504 504 $ cd ..
505 505
506 506 $ hg clone repo11 repo12 --noupdate
507 507 $ cd repo12
508 508 $ hg log --removed -p a -T '{desc}\n' --config diff.nobinary=1 --git
509 509 2
510 510 diff --git a/a b/a
511 511 old mode 100644
512 512 new mode 100755
513 513
514 514 2
515 515 diff --git a/a b/a
516 516 Binary file a has changed
517 517
518 518 1
519 519 diff --git a/a b/a
520 520 new file mode 100644
521 521 Binary file a has changed
522 522
523 523 $ [ -d .hg/store/lfs/objects ]
524 524 [1]
525 525
526 526 $ cd ..
527 527
528 528 # Verify the repos
529 529
530 530 $ cat > $TESTTMP/dumpflog.py << EOF
531 531 > # print raw revision sizes, flags, and hashes for certain files
532 532 > import hashlib
533 533 > from mercurial import revlog
534 534 > from mercurial.node import short
535 535 > def hash(rawtext):
536 536 > h = hashlib.sha512()
537 537 > h.update(rawtext)
538 538 > return h.hexdigest()[:4]
539 539 > def reposetup(ui, repo):
540 540 > # these 2 files are interesting
541 541 > for name in ['l', 's']:
542 542 > fl = repo.file(name)
543 543 > if len(fl) == 0:
544 544 > continue
545 545 > sizes = [revlog.revlog.rawsize(fl, i) for i in fl]
546 546 > texts = [fl.revision(i, raw=True) for i in fl]
547 547 > flags = [int(fl.flags(i)) for i in fl]
548 548 > hashes = [hash(t) for t in texts]
549 549 > print(' %s: rawsizes=%r flags=%r hashes=%r'
550 550 > % (name, sizes, flags, hashes))
551 551 > EOF
552 552
553 553 $ for i in client client2 server repo3 repo4 repo5 repo6 repo7 repo8 repo9 \
554 554 > repo10; do
555 555 > echo 'repo:' $i
556 556 > hg --cwd $i verify --config extensions.dumpflog=$TESTTMP/dumpflog.py -q
557 557 > done
558 558 repo: client
559 559 repo: client2
560 560 repo: server
561 561 repo: repo3
562 562 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
563 563 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
564 564 repo: repo4
565 565 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
566 566 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
567 567 repo: repo5
568 568 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
569 569 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
570 570 repo: repo6
571 571 repo: repo7
572 572 repo: repo8
573 573 repo: repo9
574 574 repo: repo10
575 575
576 576 repo12 doesn't have any cached lfs files and its source never pushed its
577 577 files. Therefore, the files don't exist in the remote store. Use the files in
578 578 the user cache.
579 579
580 580 $ test -d $TESTTMP/repo12/.hg/store/lfs/objects
581 581 [1]
582 582
583 583 $ hg --config extensions.share= share repo12 repo13
584 584 updating working directory
585 585 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
586 586 $ hg -R repo13 -q verify
587 587
588 588 $ hg clone repo12 repo14
589 589 updating to branch default
590 590 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
591 591 $ hg -R repo14 -q verify
592 592
593 593 If the source repo doesn't have the blob (maybe it was pulled or cloned with
594 594 --noupdate), the blob is still accessible via the global cache to send to the
595 595 remote store.
596 596
597 597 $ rm -rf $TESTTMP/repo14/.hg/store/lfs
598 598 $ hg init repo15
599 599 $ hg -R repo14 push repo15
600 600 pushing to repo15
601 601 searching for changes
602 602 adding changesets
603 603 adding manifests
604 604 adding file changes
605 605 added 3 changesets with 2 changes to 1 files
606 606 $ hg -R repo14 -q verify
607 607
608 608 Test damaged file scenarios. (This also damages the usercache because of the
609 609 hardlinks.)
610 610
611 611 $ echo 'damage' >> repo5/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
612 612
613 613 Repo with damaged lfs objects in any revision will fail verification.
614 614
615 615 $ hg -R repo5 verify
616 616 checking changesets
617 617 checking manifests
618 618 crosschecking files in changesets and manifests
619 619 checking files
620 620 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
621 621 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
622 622 4 files, 5 changesets, 10 total revisions
623 623 2 integrity errors encountered!
624 624 (first damaged changeset appears to be 0)
625 625 [1]
626 626
627 627 Updates work after cloning a damaged repo, if the damaged lfs objects aren't in
628 628 the update destination. Those objects won't be added to the new repo's store
629 629 because they aren't accessed.
630 630
631 631 $ hg clone -v repo5 fromcorrupt
632 632 updating to branch default
633 633 resolving manifests
634 634 getting l
635 635 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the usercache
636 636 getting s
637 637 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
638 638 $ test -f fromcorrupt/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
639 639 [1]
640 640
641 641 Verify will copy/link all lfs objects into the local store that aren't already
642 642 present. Bypass the corrupted usercache to show that verify works when fed by
643 643 the (uncorrupted) remote store.
644 644
645 645 $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v
646 646 repository uses revlog format 1
647 647 checking changesets
648 648 checking manifests
649 649 crosschecking files in changesets and manifests
650 650 checking files
651 651 lfs: adding 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e to the usercache
652 652 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
653 653 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
654 654 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
655 655 lfs: adding 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 to the usercache
656 656 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
657 657 lfs: adding b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c to the usercache
658 658 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
659 659 4 files, 5 changesets, 10 total revisions
660 660
661 661 BUG: Verify will copy/link a corrupted file from the usercache into the local
662 662 store, and poison it. (The verify with a good remote now fails.)
663 663
664 664 $ rm -r fromcorrupt/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
665 665 $ hg -R fromcorrupt verify -v
666 666 repository uses revlog format 1
667 667 checking changesets
668 668 checking manifests
669 669 crosschecking files in changesets and manifests
670 670 checking files
671 671 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the usercache
672 672 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
673 673 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
674 674 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
675 675 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
676 676 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
677 677 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
678 678 4 files, 5 changesets, 10 total revisions
679 679 2 integrity errors encountered!
680 680 (first damaged changeset appears to be 0)
681 681 [1]
682 682 $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v
683 683 repository uses revlog format 1
684 684 checking changesets
685 685 checking manifests
686 686 crosschecking files in changesets and manifests
687 687 checking files
688 688 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
689 689 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
690 690 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
691 691 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
692 692 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
693 693 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
694 694 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
695 695 4 files, 5 changesets, 10 total revisions
696 696 2 integrity errors encountered!
697 697 (first damaged changeset appears to be 0)
698 698 [1]
699 699
700 700 Damaging a file required by the update destination fails the update.
701 701
702 702 $ echo 'damage' >> $TESTTMP/dummy-remote/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
703 703 $ hg --config lfs.usercache=emptycache clone -v repo5 fromcorrupt2
704 704 updating to branch default
705 705 resolving manifests
706 706 getting l
707 lfs: adding 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b to the usercache
708 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
709 abort: integrity check failed on data/l.i:3!
707 abort: detected corrupt lfs object: 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
708 (run hg verify)
710 709 [255]
711 710
712 BUG: A corrupted lfs blob either shouldn't be created after a transfer from a
713 file://remotestore, or it shouldn't be left behind.
711 A corrupted lfs blob is not transferred from a file://remotestore to the
712 usercache or local store.
714 713
715 $ cat emptycache/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
716 sha256=40f67c7e91d554db4bc500f8f62c2e40f9f61daa5b62388e577bbae26f5396ff
717 $ cat fromcorrupt2/.hg/store/lfs/objects/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
718 sha256=40f67c7e91d554db4bc500f8f62c2e40f9f61daa5b62388e577bbae26f5396ff
714 $ test -f emptycache/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
715 [1]
716 $ test -f fromcorrupt2/.hg/store/lfs/objects/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
717 [1]
719 718
720 719 $ hg -R fromcorrupt2 verify
721 720 checking changesets
722 721 checking manifests
723 722 crosschecking files in changesets and manifests
724 723 checking files
725 724 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
726 l@4: unpacking 6f1ff1f39c11: integrity check failed on data/l.i:3
727 725 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
728 726 4 files, 5 changesets, 10 total revisions
729 3 integrity errors encountered!
727 2 integrity errors encountered!
730 728 (first damaged changeset appears to be 0)
731 729 [1]
732 730
733 BUG: push will happily send corrupt files upstream. (The alternate dummy remote
731 Corrupt local files are not sent upstream. (The alternate dummy remote
734 732 avoids the corrupt lfs object in the original remote.)
735 733
736 734 $ mkdir $TESTTMP/dummy-remote2
737 735 $ hg init dest
738 736 $ hg -R fromcorrupt2 --config lfs.url=file:///$TESTTMP/dummy-remote2 push -v dest
739 737 pushing to dest
740 738 searching for changes
741 739 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
742 740 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
743 741 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
744 742 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
745 5 changesets found
746 uncompressed size of bundle content:
747 997 (changelog)
748 1032 (manifests)
749 841 l
750 272 large
751 788 s
752 139 small
753 adding changesets
754 adding manifests
755 adding file changes
756 added 5 changesets with 10 changes to 4 files
743 abort: detected corrupt lfs object: 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
744 (run hg verify)
745 [255]
757 746
758 747 $ hg -R fromcorrupt2 --config lfs.url=file:///$TESTTMP/dummy-remote2 verify -v
759 748 repository uses revlog format 1
760 749 checking changesets
761 750 checking manifests
762 751 crosschecking files in changesets and manifests
763 752 checking files
764 753 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
765 754 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
766 755 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
767 l@4: unpacking 6f1ff1f39c11: integrity check failed on data/l.i:3
768 756 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
769 757 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
770 758 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
771 759 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
772 760 4 files, 5 changesets, 10 total revisions
773 3 integrity errors encountered!
761 2 integrity errors encountered!
774 762 (first damaged changeset appears to be 0)
775 763 [1]
776 764
777 765 $ cat $TESTTMP/dummy-remote2/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
778 sha256=40f67c7e91d554db4bc500f8f62c2e40f9f61daa5b62388e577bbae26f5396ff
766 sha256=22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
779 767 $ cat fromcorrupt2/.hg/store/lfs/objects/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
780 sha256=40f67c7e91d554db4bc500f8f62c2e40f9f61daa5b62388e577bbae26f5396ff
781
782 $ cat $TESTTMP/dummy-remote2/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
783 LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS
784 damage
785 $ cat $TESTTMP/dummy-remote2/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
786 RESTORE-TO-BE-LARGE
787 damage
768 sha256=22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
769 $ test -f $TESTTMP/dummy-remote2/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
770 [1]
788 771
789 772 Accessing a corrupt file will complain
790 773
791 774 $ hg --cwd fromcorrupt2 cat -r 0 large
792 775 abort: integrity check failed on data/large.i:0!
793 776 [255]
794 777
795 778 lfs -> normal -> lfs round trip conversions are possible. The threshold for the
796 779 lfs destination is specified here because it was originally listed in the local
797 780 .hgrc, and the global one is too high to trigger lfs usage. For lfs -> normal,
798 781 there's no 'lfs' destination repo requirement. For normal -> lfs, there is.
799 782
800 783 XXX: There's not a great way to ensure that the conversion to normal files
801 784 actually converts _everything_ to normal. The extension needs to be loaded for
802 785 the source, but there's no way to disable it for the destination. The best that
803 786 can be done is to raise the threshold so that lfs isn't used on the destination.
804 787 It doesn't like using '!' to unset the value on the command line.
805 788
806 789 $ hg --config extensions.convert= --config lfs.threshold=1000M \
807 790 > convert repo8 convert_normal
808 791 initializing destination convert_normal repository
809 792 scanning source...
810 793 sorting...
811 794 converting...
812 795 2 a
813 796 1 b
814 797 0 meta
815 798 $ grep 'lfs' convert_normal/.hg/requires
816 799 [1]
817 800 $ hg --cwd convert_normal debugdata a1 0
818 801 THIS-IS-LFS-BECAUSE-10-BYTES
819 802
820 803 $ hg --config extensions.convert= --config lfs.threshold=10B \
821 804 > convert convert_normal convert_lfs
822 805 initializing destination convert_lfs repository
823 806 scanning source...
824 807 sorting...
825 808 converting...
826 809 2 a
827 810 1 b
828 811 0 meta
829 812 $ hg --cwd convert_lfs debugdata a1 0
830 813 version https://git-lfs.github.com/spec/v1
831 814 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
832 815 size 29
833 816 x-is-binary 0
834 817 $ grep 'lfs' convert_lfs/.hg/requires
835 818 lfs
836 819
837 820 This convert is trickier, because it contains deleted files (via `hg mv`)
838 821
839 822 $ hg --config extensions.convert= --config lfs.threshold=1000M \
840 823 > convert repo3 convert_normal2
841 824 initializing destination convert_normal2 repository
842 825 scanning source...
843 826 sorting...
844 827 converting...
845 828 4 commit with lfs content
846 829 3 renames
847 830 2 large to small, small to large
848 831 1 random modifications
849 832 0 switch large and small again
850 833 $ grep 'lfs' convert_normal2/.hg/requires
851 834 [1]
852 835 $ hg --cwd convert_normal2 debugdata large 0
853 836 LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS
854 837
855 838 $ hg --config extensions.convert= --config lfs.threshold=10B \
856 839 > convert convert_normal2 convert_lfs2
857 840 initializing destination convert_lfs2 repository
858 841 scanning source...
859 842 sorting...
860 843 converting...
861 844 4 commit with lfs content
862 845 3 renames
863 846 2 large to small, small to large
864 847 1 random modifications
865 848 0 switch large and small again
866 849 $ grep 'lfs' convert_lfs2/.hg/requires
867 850 lfs
868 851 $ hg --cwd convert_lfs2 debugdata large 0
869 852 version https://git-lfs.github.com/spec/v1
870 853 oid sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
871 854 size 39
872 855 x-is-binary 0
873 856
874 857 $ hg -R convert_lfs2 config --debug extensions | grep lfs
875 858 $TESTTMP/convert_lfs2/.hg/hgrc:*: extensions.lfs= (glob)
876 859
877 860 Committing deleted files works:
878 861
879 862 $ hg init $TESTTMP/repo-del
880 863 $ cd $TESTTMP/repo-del
881 864 $ echo 1 > A
882 865 $ hg commit -m 'add A' -A A
883 866 $ hg rm A
884 867 $ hg commit -m 'rm A'
General Comments 0
You need to be logged in to leave comments. Login now