lfs: migrate most file filtering from threshold to custom filter...
Matt Harbison
r35636:c780e064 default
@@ -1,206 +1,233 b''
1 1 # lfs - hash-preserving large file support using Git-LFS protocol
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """lfs - large file support (EXPERIMENTAL)
9 9
10 10 Configs::
11 11
12 12 [lfs]
13 13 # Remote endpoint. Multiple protocols are supported:
14 14 # - http(s)://user:pass@example.com/path
15 15 # git-lfs endpoint
16 16 # - file:///tmp/path
17 17 # local filesystem, usually for testing
18 18 # if unset, lfs will prompt for this setting when it must use this value.
19 19 # (default: unset)
20 20 url = https://example.com/lfs
21 21
22 # size of a file to make it use LFS
23 threshold = 10M
22 # Which files to track in LFS. Path tests are "**.extname" for file
23 # extensions, and "path:under/some/directory" for path prefix. Both
24 # are relative to the repository root, and the latter must be quoted.
25 # File size can be tested with the "size()" fileset, and tests can be
26 # joined with fileset operators. (See "hg help filesets.operators".)
27 #
28 # Some examples:
29 # - all() # everything
30 # - none() # nothing
31 # - size(">20MB") # larger than 20MB
32 # - !**.txt # anything not a *.txt file
33 # - **.zip | **.tar.gz | **.7z # some types of compressed files
34 # - "path:bin" # files under "bin" in the project root
35 # - (**.php & size(">2MB")) | (**.js & size(">5MB")) | **.tar.gz
36 # | ("path:bin" & !"path:/bin/README") | size(">1GB")
37 # (default: none())
38 track = size(">10M")
24 39
25 40 # how many times to retry before giving up on transferring an object
26 41 retry = 5
27 42
28 43 # the local directory to store lfs files for sharing across local clones.
29 44 # If not set, the cache is located in an OS specific cache location.
30 45 usercache = /path/to/global/cache
31 46 """
32 47
33 48 from __future__ import absolute_import
34 49
35 50 from mercurial.i18n import _
36 51
37 52 from mercurial import (
38 53 bundle2,
39 54 changegroup,
40 55 context,
41 56 exchange,
42 57 extensions,
43 58 filelog,
59 fileset,
44 60 hg,
45 61 localrepo,
62 minifileset,
46 63 node,
47 64 registrar,
48 65 revlog,
49 66 scmutil,
50 67 upgrade,
51 68 vfs as vfsmod,
52 69 wireproto,
53 70 )
54 71
55 72 from . import (
56 73 blobstore,
57 74 wrapper,
58 75 )
59 76
60 77 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 78 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 79 # be specifying the version(s) of Mercurial they are tested with, or
63 80 # leave the attribute unspecified.
64 81 testedwith = 'ships-with-hg-core'
65 82
66 83 configtable = {}
67 84 configitem = registrar.configitem(configtable)
68 85
69 86 configitem('experimental', 'lfs.user-agent',
70 87 default=None,
71 88 )
72 89
73 90 configitem('lfs', 'url',
74 91 default=None,
75 92 )
76 93 configitem('lfs', 'usercache',
77 94 default=None,
78 95 )
96 # Deprecated
79 97 configitem('lfs', 'threshold',
80 98 default=None,
81 99 )
100 configitem('lfs', 'track',
101 default='none()',
102 )
82 103 configitem('lfs', 'retry',
83 104 default=5,
84 105 )
85 106
86 107 cmdtable = {}
87 108 command = registrar.command(cmdtable)
88 109
89 110 templatekeyword = registrar.templatekeyword()
90 111
91 112 def featuresetup(ui, supported):
92 113 # don't die on seeing a repo with the lfs requirement
93 114 supported |= {'lfs'}
94 115
95 116 def uisetup(ui):
96 117 localrepo.localrepository.featuresetupfuncs.add(featuresetup)
97 118
98 119 def reposetup(ui, repo):
99 120 # Nothing to do with a remote repo
100 121 if not repo.local():
101 122 return
102 123
103 threshold = repo.ui.configbytes('lfs', 'threshold')
124 trackspec = repo.ui.config('lfs', 'track')
104 125
105 repo.svfs.options['lfsthreshold'] = threshold
126 # deprecated config: lfs.threshold
127 threshold = repo.ui.configbytes('lfs', 'threshold')
128 if threshold:
129 fileset.parse(trackspec) # make sure syntax errors are confined
130 trackspec = "(%s) | size('>%d')" % (trackspec, threshold)
131
132 repo.svfs.options['lfstrack'] = minifileset.compile(trackspec)
106 133 repo.svfs.lfslocalblobstore = blobstore.local(repo)
107 134 repo.svfs.lfsremoteblobstore = blobstore.remote(repo)
108 135
109 136 # Push hook
110 137 repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
111 138
112 139 if 'lfs' not in repo.requirements:
113 140 def checkrequireslfs(ui, repo, **kwargs):
114 141 if 'lfs' not in repo.requirements:
115 142 last = kwargs.get('node_last')
116 143 _bin = node.bin
117 144 if last:
118 145 s = repo.set('%n:%n', _bin(kwargs['node']), _bin(last))
119 146 else:
120 147 s = repo.set('%n', _bin(kwargs['node']))
121 148 for ctx in s:
122 149 # TODO: is there a way to just walk the files in the commit?
123 150 if any(ctx[f].islfs() for f in ctx.files() if f in ctx):
124 151 repo.requirements.add('lfs')
125 152 repo._writerequirements()
126 153 break
127 154
128 155 ui.setconfig('hooks', 'commit.lfs', checkrequireslfs, 'lfs')
129 156 ui.setconfig('hooks', 'pretxnchangegroup.lfs', checkrequireslfs, 'lfs')
130 157
131 158 def wrapfilelog(filelog):
132 159 wrapfunction = extensions.wrapfunction
133 160
134 161 wrapfunction(filelog, 'addrevision', wrapper.filelogaddrevision)
135 162 wrapfunction(filelog, 'renamed', wrapper.filelogrenamed)
136 163 wrapfunction(filelog, 'size', wrapper.filelogsize)
137 164
138 165 def extsetup(ui):
139 166 wrapfilelog(filelog.filelog)
140 167
141 168 wrapfunction = extensions.wrapfunction
142 169
143 170 wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink)
144 171
145 172 wrapfunction(upgrade, '_finishdatamigration',
146 173 wrapper.upgradefinishdatamigration)
147 174
148 175 wrapfunction(upgrade, 'preservedrequirements',
149 176 wrapper.upgraderequirements)
150 177
151 178 wrapfunction(upgrade, 'supporteddestrequirements',
152 179 wrapper.upgraderequirements)
153 180
154 181 wrapfunction(changegroup,
155 182 'supportedoutgoingversions',
156 183 wrapper.supportedoutgoingversions)
157 184 wrapfunction(changegroup,
158 185 'allsupportedversions',
159 186 wrapper.allsupportedversions)
160 187
161 188 wrapfunction(exchange, 'push', wrapper.push)
162 189 wrapfunction(wireproto, '_capabilities', wrapper._capabilities)
163 190
164 191 wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp)
165 192 wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary)
166 193 context.basefilectx.islfs = wrapper.filectxislfs
167 194
168 195 revlog.addflagprocessor(
169 196 revlog.REVIDX_EXTSTORED,
170 197 (
171 198 wrapper.readfromstore,
172 199 wrapper.writetostore,
173 200 wrapper.bypasscheckhash,
174 201 ),
175 202 )
176 203
177 204 wrapfunction(hg, 'clone', wrapper.hgclone)
178 205 wrapfunction(hg, 'postshare', wrapper.hgpostshare)
179 206
180 207 # Make bundle choose changegroup3 instead of changegroup2. This affects
181 208 # "hg bundle" command. Note: it does not cover all bundle formats like
182 209 # "packed1". Using "packed1" with lfs will likely cause trouble.
183 210 names = [k for k, v in exchange._bundlespeccgversions.items() if v == '02']
184 211 for k in names:
185 212 exchange._bundlespeccgversions[k] = '03'
186 213
187 214 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
188 215 # options and blob stores are passed from othervfs to the new readonlyvfs.
189 216 wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit)
190 217
191 218 # when writing a bundle via "hg bundle" command, upload related LFS blobs
192 219 wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
193 220
194 221 @templatekeyword('lfs_files')
195 222 def lfsfiles(repo, ctx, **args):
196 223 """List of strings. LFS files added or modified by the changeset."""
197 224 pointers = wrapper.pointersfromctx(ctx) # {path: pointer}
198 225 return sorted(pointers.keys())
199 226
200 227 @command('debuglfsupload',
201 228 [('r', 'rev', [], _('upload large files introduced by REV'))])
202 229 def debuglfsupload(ui, repo, **opts):
203 230 """upload lfs blobs added by the working copy parent or given revisions"""
204 231 revs = opts.get('rev', [])
205 232 pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
206 233 wrapper.uploadblobs(repo, pointers)
@@ -1,345 +1,345 b''
1 1 # wrapper.py - methods wrapping core mercurial logic
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial.node import bin, nullid, short
14 14
15 15 from mercurial import (
16 16 error,
17 17 filelog,
18 18 revlog,
19 19 util,
20 20 )
21 21
22 22 from ..largefiles import lfutil
23 23
24 24 from . import (
25 25 blobstore,
26 26 pointer,
27 27 )
28 28
29 29 def supportedoutgoingversions(orig, repo):
30 30 versions = orig(repo)
31 31 if 'lfs' in repo.requirements:
32 32 versions.discard('01')
33 33 versions.discard('02')
34 34 versions.add('03')
35 35 return versions
36 36
37 37 def allsupportedversions(orig, ui):
38 38 versions = orig(ui)
39 39 versions.add('03')
40 40 return versions
41 41
42 42 def _capabilities(orig, repo, proto):
43 43 '''Wrap server command to announce lfs server capability'''
44 44 caps = orig(repo, proto)
45 45 # XXX: change to 'lfs=serve' when separate git server isn't required?
46 46 caps.append('lfs')
47 47 return caps
48 48
49 49 def bypasscheckhash(self, text):
50 50 return False
51 51
52 52 def readfromstore(self, text):
53 53 """Read filelog content from local blobstore; transform for flagprocessor.
54 54
55 55 Default transform for flagprocessor, returning contents from blobstore.
56 56 Returns a 2-tuple (text, validatehash) where validatehash is True, as the
57 57 contents of the blobstore should be checked using checkhash.
58 58 """
59 59 p = pointer.deserialize(text)
60 60 oid = p.oid()
61 61 store = self.opener.lfslocalblobstore
62 62 if not store.has(oid):
63 63 p.filename = self.filename
64 64 self.opener.lfsremoteblobstore.readbatch([p], store)
65 65
66 66 # The caller will validate the content
67 67 text = store.read(oid, verify=False)
68 68
69 69 # pack hg filelog metadata
70 70 hgmeta = {}
71 71 for k in p.keys():
72 72 if k.startswith('x-hg-'):
73 73 name = k[len('x-hg-'):]
74 74 hgmeta[name] = p[k]
75 75 if hgmeta or text.startswith('\1\n'):
76 76 text = filelog.packmeta(hgmeta, text)
77 77
78 78 return (text, True)
79 79
80 80 def writetostore(self, text):
81 81 # hg filelog metadata (includes rename, etc)
82 82 hgmeta, offset = filelog.parsemeta(text)
83 83 if offset and offset > 0:
84 84 # lfs blob does not contain hg filelog metadata
85 85 text = text[offset:]
86 86
87 87 # git-lfs only supports sha256
88 88 oid = hashlib.sha256(text).hexdigest()
89 89 self.opener.lfslocalblobstore.write(oid, text)
90 90
91 91 # replace contents with metadata
92 92 longoid = 'sha256:%s' % oid
93 93 metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))
94 94
95 95 # by default, we expect the content to be binary. however, LFS could also
96 96 # be used for non-binary content. add a special entry for non-binary data.
97 97 # this will be used by filectx.isbinary().
98 98 if not util.binary(text):
99 99 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
100 100 metadata['x-is-binary'] = '0'
101 101
102 102 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
103 103 if hgmeta is not None:
104 104 for k, v in hgmeta.iteritems():
105 105 metadata['x-hg-%s' % k] = v
106 106
107 107 rawtext = metadata.serialize()
108 108 return (rawtext, False)
109 109
110 110 def _islfs(rlog, node=None, rev=None):
111 111 if rev is None:
112 112 if node is None:
113 113 # both None - likely working copy content where node is not ready
114 114 return False
115 115 rev = rlog.rev(node)
116 116 else:
117 117 node = rlog.node(rev)
118 118 if node == nullid:
119 119 return False
120 120 flags = rlog.flags(rev)
121 121 return bool(flags & revlog.REVIDX_EXTSTORED)
122 122
123 123 def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
124 124 cachedelta=None, node=None,
125 125 flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
126 threshold = self.opener.options['lfsthreshold']
127 126 textlen = len(text)
128 127 # exclude hg rename meta from file size
129 128 meta, offset = filelog.parsemeta(text)
130 129 if offset:
131 130 textlen -= offset
132 131
133 if threshold and textlen > threshold:
132 lfstrack = self.opener.options['lfstrack']
133 if lfstrack(self.filename, textlen):
134 134 flags |= revlog.REVIDX_EXTSTORED
135 135
136 136 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
137 137 node=node, flags=flags, **kwds)
138 138
139 139 def filelogrenamed(orig, self, node):
140 140 if _islfs(self, node):
141 141 rawtext = self.revision(node, raw=True)
142 142 if not rawtext:
143 143 return False
144 144 metadata = pointer.deserialize(rawtext)
145 145 if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
146 146 return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
147 147 else:
148 148 return False
149 149 return orig(self, node)
150 150
151 151 def filelogsize(orig, self, rev):
152 152 if _islfs(self, rev=rev):
153 153 # fast path: use lfs metadata to answer size
154 154 rawtext = self.revision(rev, raw=True)
155 155 metadata = pointer.deserialize(rawtext)
156 156 return int(metadata['size'])
157 157 return orig(self, rev)
158 158
159 159 def filectxcmp(orig, self, fctx):
160 160 """returns True if text is different than fctx"""
161 161 # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
162 162 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
163 163 # fast path: check LFS oid
164 164 p1 = pointer.deserialize(self.rawdata())
165 165 p2 = pointer.deserialize(fctx.rawdata())
166 166 return p1.oid() != p2.oid()
167 167 return orig(self, fctx)
168 168
169 169 def filectxisbinary(orig, self):
170 170 if self.islfs():
171 171 # fast path: use lfs metadata to answer isbinary
172 172 metadata = pointer.deserialize(self.rawdata())
173 173 # if lfs metadata says nothing, assume it's binary by default
174 174 return bool(int(metadata.get('x-is-binary', 1)))
175 175 return orig(self)
176 176
177 177 def filectxislfs(self):
178 178 return _islfs(self.filelog(), self.filenode())
179 179
180 180 def convertsink(orig, sink):
181 181 sink = orig(sink)
182 182 if sink.repotype == 'hg':
183 183 class lfssink(sink.__class__):
184 184 def putcommit(self, files, copies, parents, commit, source, revmap,
185 185 full, cleanp2):
186 186 pc = super(lfssink, self).putcommit
187 187 node = pc(files, copies, parents, commit, source, revmap, full,
188 188 cleanp2)
189 189
190 190 if 'lfs' not in self.repo.requirements:
191 191 ctx = self.repo[node]
192 192
193 193 # The file list may contain removed files, so check for
194 194 # membership before assuming it is in the context.
195 195 if any(f in ctx and ctx[f].islfs() for f, n in files):
196 196 self.repo.requirements.add('lfs')
197 197 self.repo._writerequirements()
198 198
199 199 # Permanently enable lfs locally
200 200 with self.repo.vfs('hgrc', 'a', text=True) as fp:
201 201 fp.write('\n[extensions]\nlfs=\n')
202 202
203 203 return node
204 204
205 205 sink.__class__ = lfssink
206 206
207 207 return sink
208 208
209 209 def vfsinit(orig, self, othervfs):
210 210 orig(self, othervfs)
211 211 # copy lfs related options
212 212 for k, v in othervfs.options.items():
213 213 if k.startswith('lfs'):
214 214 self.options[k] = v
215 215 # also copy lfs blobstores. note: this can run before reposetup, so lfs
216 216 # blobstore attributes are not always ready at this time.
217 217 for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
218 218 if util.safehasattr(othervfs, name):
219 219 setattr(self, name, getattr(othervfs, name))
220 220
221 221 def hgclone(orig, ui, opts, *args, **kwargs):
222 222 result = orig(ui, opts, *args, **kwargs)
223 223
224 224 if result is not None:
225 225 sourcerepo, destrepo = result
226 226 repo = destrepo.local()
227 227
228 228 # When cloning to a remote repo (like through SSH), no repo is available
229 229 # from the peer. Therefore the hgrc can't be updated.
230 230 if not repo:
231 231 return result
232 232
233 233 # If lfs is required for this repo, permanently enable it locally
234 234 if 'lfs' in repo.requirements:
235 235 with repo.vfs('hgrc', 'a', text=True) as fp:
236 236 fp.write('\n[extensions]\nlfs=\n')
237 237
238 238 return result
239 239
240 240 def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
241 241 orig(sourcerepo, destrepo, bookmarks, defaultpath)
242 242
243 243 # If lfs is required for this repo, permanently enable it locally
244 244 if 'lfs' in destrepo.requirements:
245 245 with destrepo.vfs('hgrc', 'a', text=True) as fp:
246 246 fp.write('\n[extensions]\nlfs=\n')
247 247
248 248 def _canskipupload(repo):
249 249 # if remotestore is a null store, upload is a no-op and can be skipped
250 250 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
251 251
252 252 def candownload(repo):
253 253 # if remotestore is a null store, downloads will lead to nothing
254 254 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
255 255
256 256 def uploadblobsfromrevs(repo, revs):
257 257 '''upload lfs blobs introduced by revs
258 258
259 259 Note: also used by other extensions, e.g. infinitepush. avoid renaming.
260 260 '''
261 261 if _canskipupload(repo):
262 262 return
263 263 pointers = extractpointers(repo, revs)
264 264 uploadblobs(repo, pointers)
265 265
266 266 def prepush(pushop):
267 267 """Prepush hook.
268 268
269 269 Read through the revisions to push, looking for filelog entries that can be
270 270 deserialized into metadata so that we can block the push on their upload to
271 271 the remote blobstore.
272 272 """
273 273 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
274 274
275 275 def push(orig, repo, remote, *args, **kwargs):
276 276 """bail on push if the extension isn't enabled on remote when needed"""
277 277 if 'lfs' in repo.requirements:
278 278 # If the remote peer is for a local repo, the requirement tests in the
279 279 # base class method enforce lfs support. Otherwise, some revisions in
280 280 # this repo use lfs, and the remote repo needs the extension loaded.
281 281 if not remote.local() and not remote.capable('lfs'):
282 282 # This is a copy of the message in exchange.push() when requirements
283 283 # are missing between local repos.
284 284 m = _("required features are not supported in the destination: %s")
285 285 raise error.Abort(m % 'lfs',
286 286 hint=_('enable the lfs extension on the server'))
287 287 return orig(repo, remote, *args, **kwargs)
288 288
289 289 def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
290 290 *args, **kwargs):
291 291 """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
292 292 uploadblobsfromrevs(repo, outgoing.missing)
293 293 return orig(ui, repo, source, filename, bundletype, outgoing, *args,
294 294 **kwargs)
295 295
296 296 def extractpointers(repo, revs):
297 297 """return a list of lfs pointers added by given revs"""
298 298 repo.ui.debug('lfs: computing set of blobs to upload\n')
299 299 pointers = {}
300 300 for r in revs:
301 301 ctx = repo[r]
302 302 for p in pointersfromctx(ctx).values():
303 303 pointers[p.oid()] = p
304 304 return sorted(pointers.values())
305 305
306 306 def pointersfromctx(ctx):
307 307 """return a dict {path: pointer} for given single changectx"""
308 308 result = {}
309 309 for f in ctx.files():
310 310 if f not in ctx:
311 311 continue
312 312 fctx = ctx[f]
313 313 if not _islfs(fctx.filelog(), fctx.filenode()):
314 314 continue
315 315 try:
316 316 result[f] = pointer.deserialize(fctx.rawdata())
317 317 except pointer.InvalidPointer as ex:
318 318 raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
319 319 % (f, short(ctx.node()), ex))
320 320 return result
321 321
322 322 def uploadblobs(repo, pointers):
323 323 """upload given pointers from local blobstore"""
324 324 if not pointers:
325 325 return
326 326
327 327 remoteblob = repo.svfs.lfsremoteblobstore
328 328 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
329 329
330 330 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
331 331 orig(ui, srcrepo, dstrepo, requirements)
332 332
333 333 srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
334 334 dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
335 335
336 336 for dirpath, dirs, files in srclfsvfs.walk():
337 337 for oid in files:
338 338 ui.write(_('copying lfs blob %s\n') % oid)
339 339 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
340 340
341 341 def upgraderequirements(orig, repo):
342 342 reqs = orig(repo)
343 343 if 'lfs' in repo.requirements:
344 344 reqs.add('lfs')
345 345 return reqs
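Editorial note: on the wrapper.py side the hard-coded size comparison is replaced by a call to the compiled tracker: filelogaddrevision reads it from self.opener.options['lfstrack'] and passes the file name plus the payload length with hg rename metadata excluded. A minimal sketch of that decision, outside the real filelog/revlog API (choose_flags is a hypothetical helper; the flag value 1 << 13 matches the 8192 flags visible in the dumpflog output of test-lfs.t below):

    REVIDX_EXTSTORED = 1 << 13   # 8192, as seen in the dumpflog flags below

    def choose_flags(lfstrack, filename, text, metaoffset=0, flags=0):
        # Mirror of the wrapper's logic: measure the payload without hg rename
        # metadata, then let the compiled track predicate decide.
        textlen = len(text) - metaoffset
        if lfstrack(filename, textlen):
            flags |= REVIDX_EXTSTORED
        return flags

    bigonly = lambda path, size: size > 1000   # trivial predicate for the demo
    print(choose_flags(bigonly, 'largefile', b'x' * 1501))   # 8192: stored externally
    print(choose_flags(bigonly, 'smallfile', b's\n'))        # 0: stays inline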
@@ -1,191 +1,191 b''
1 1 #require lfs-test-server
2 2
3 3 $ LFS_LISTEN="tcp://:$HGPORT"
4 4 $ LFS_HOST="localhost:$HGPORT"
5 5 $ LFS_PUBLIC=1
6 6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
7 7 #if no-windows
8 8 $ lfs-test-server &> lfs-server.log &
9 9 $ echo $! >> $DAEMON_PIDS
10 10 #else
11 11 $ cat >> $TESTTMP/spawn.py <<EOF
12 12 > import os
13 13 > import subprocess
14 14 > import sys
15 15 >
16 16 > for path in os.environ["PATH"].split(os.pathsep):
17 17 > exe = os.path.join(path, 'lfs-test-server.exe')
18 18 > if os.path.exists(exe):
19 19 > with open('lfs-server.log', 'wb') as out:
20 20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
21 21 > sys.stdout.write('%s\n' % p.pid)
22 22 > sys.exit(0)
23 23 > sys.exit(1)
24 24 > EOF
25 25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
26 26 #endif
27 27
28 28 $ cat >> $HGRCPATH <<EOF
29 29 > [extensions]
30 30 > lfs=
31 31 > [lfs]
32 32 > url=http://foo:bar@$LFS_HOST/
33 > threshold=1
33 > track=all()
34 34 > EOF
35 35
36 36 $ hg init repo1
37 37 $ cd repo1
38 38 $ echo THIS-IS-LFS > a
39 39 $ hg commit -m a -A a
40 40
41 41 A push can be serviced directly from the usercache if it isn't in the local
42 42 store.
43 43
44 44 $ hg init ../repo2
45 45 $ mv .hg/store/lfs .hg/store/lfs_
46 46 $ hg push ../repo2 -v
47 47 pushing to ../repo2
48 48 searching for changes
49 49 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
50 50 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
51 51 1 changesets found
52 52 uncompressed size of bundle content:
53 53 * (changelog) (glob)
54 54 * (manifests) (glob)
55 55 * a (glob)
56 56 adding changesets
57 57 adding manifests
58 58 adding file changes
59 59 added 1 changesets with 1 changes to 1 files
60 60 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
61 61 $ mv .hg/store/lfs_ .hg/store/lfs
62 62
63 63 Clear the cache to force a download
64 64 $ rm -rf `hg config lfs.usercache`
65 65 $ cd ../repo2
66 66 $ hg update tip -v
67 67 resolving manifests
68 68 getting a
69 69 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
70 70 lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
71 71 lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
72 72 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
73 73 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
74 74
75 75 When the server has some blobs already
76 76
77 77 $ hg mv a b
78 78 $ echo ANOTHER-LARGE-FILE > c
79 79 $ echo ANOTHER-LARGE-FILE2 > d
80 80 $ hg commit -m b-and-c -A b c d
81 81 $ hg push ../repo1 -v | grep -v '^ '
82 82 pushing to ../repo1
83 83 searching for changes
84 84 lfs: need to transfer 2 objects (39 bytes)
85 85 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
86 86 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
87 87 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
88 88 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
89 89 1 changesets found
90 90 uncompressed size of bundle content:
91 91 adding changesets
92 92 adding manifests
93 93 adding file changes
94 94 added 1 changesets with 3 changes to 3 files
95 95
96 96 Clear the cache to force a download
97 97 $ rm -rf `hg config lfs.usercache`
98 98 $ hg --repo ../repo1 update tip -v
99 99 resolving manifests
100 100 getting b
101 101 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
102 102 getting c
103 103 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
104 104 lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
105 105 lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
106 106 lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
107 107 getting d
108 108 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
109 109 lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
110 110 lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
111 111 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
112 112 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 113
114 114 Test a corrupt file download, but clear the cache first to force a download.
115 115
116 116 $ rm -rf `hg config lfs.usercache`
117 117 $ cp $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 blob
118 118 $ echo 'damage' > $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
119 119 $ rm ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
120 120 $ rm ../repo1/*
121 121
122 122 $ hg --repo ../repo1 update -C tip -v
123 123 resolving manifests
124 124 getting a
125 125 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
126 126 getting b
127 127 lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
128 128 getting c
129 129 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
130 130 abort: corrupt remote lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
131 131 [255]
132 132
133 133 The corrupted blob is not added to the usercache or local store
134 134
135 135 $ test -f ../repo1/.hg/store/lfs/objects/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
136 136 [1]
137 137 $ test -f `hg config lfs.usercache`/d1/1e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
138 138 [1]
139 139 $ cp blob $TESTTMP/lfs-content/d1/1e/1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
140 140
141 141 Test a corrupted file upload
142 142
143 143 $ echo 'another lfs blob' > b
144 144 $ hg ci -m 'another blob'
145 145 $ echo 'damage' > .hg/store/lfs/objects/e6/59058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
146 146 $ hg push -v ../repo1
147 147 pushing to ../repo1
148 148 searching for changes
149 149 lfs: uploading e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0 (17 bytes)
150 150 abort: detected corrupt lfs object: e659058e26b07b39d2a9c7145b3f99b41f797b6621c8076600e9cb7ee88291f0
151 151 (run hg verify)
152 152 [255]
153 153
154 154 Check error message when the remote missed a blob:
155 155
156 156 $ echo FFFFF > b
157 157 $ hg commit -m b -A b
158 158 $ echo FFFFF >> b
159 159 $ hg commit -m b b
160 160 $ rm -rf .hg/store/lfs
161 161 $ rm -rf `hg config lfs.usercache`
162 162 $ hg update -C '.^'
163 163 abort: LFS server claims required objects do not exist:
164 164 8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13!
165 165 [255]
166 166
167 167 Check error message when object does not exist:
168 168
169 169 $ hg init test && cd test
170 170 $ echo "[extensions]" >> .hg/hgrc
171 171 $ echo "lfs=" >> .hg/hgrc
172 172 $ echo "[lfs]" >> .hg/hgrc
173 173 $ echo "threshold=1" >> .hg/hgrc
174 174 $ echo a > a
175 175 $ hg add a
176 176 $ hg commit -m 'test'
177 177 $ echo aaaaa > a
178 178 $ hg commit -m 'largefile'
179 179 $ hg debugdata .hg/store/data/a.i 1 # verify this is not the file content but includes "oid", the LFS "pointer".
180 180 version https://git-lfs.github.com/spec/v1
181 181 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
182 182 size 6
183 183 x-is-binary 0
184 184 $ cd ..
185 185 $ rm -rf `hg config lfs.usercache`
186 186 $ hg --config 'lfs.url=https://dewey-lfs.vip.facebook.com/lfs' clone test test2
187 187 updating to branch default
188 188 abort: LFS server error. Remote object for "a" not found:(.*)! (re)
189 189 [255]
190 190
191 191 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
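Editorial note: in this test the switch from threshold=1 to track=all() is behavior-preserving. The old setting made any file whose payload exceeds one byte an LFS blob, and every file committed here is larger than that, so all of them go through LFS either way. Modeled as predicates (illustrative only; the sizes are the ones reported in the test output above):

    # old config: threshold=1  -> files whose payload exceeds one byte
    old_threshold_1 = lambda path, size: size > 1
    # new config: track=all()  -> every file, regardless of size
    new_track_all = lambda path, size: True

    for name, size in [('a', 12), ('c', 19), ('d', 20)]:
        assert old_threshold_1(name, size) == new_track_all(name, size)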
@@ -1,915 +1,957 b''
1 1 # Initial setup
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > lfs=
6 6 > [lfs]
7 > # Test deprecated config
7 8 > threshold=1000B
8 9 > EOF
9 10
10 11 $ LONG=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
11 12
12 13 # Prepare server and enable extension
13 14 $ hg init server
14 15 $ hg clone -q server client
15 16 $ cd client
16 17
17 18 # Commit small file
18 19 $ echo s > smallfile
19 20 $ hg commit -Aqm "add small file"
20 21
21 22 # Commit large file
22 23 $ echo $LONG > largefile
23 24 $ grep lfs .hg/requires
24 25 [1]
25 26 $ hg commit --traceback -Aqm "add large file"
26 27 $ grep lfs .hg/requires
27 28 lfs
28 29
29 30 # Ensure metadata is stored
30 31 $ hg debugdata largefile 0
31 32 version https://git-lfs.github.com/spec/v1
32 33 oid sha256:f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
33 34 size 1501
34 35 x-is-binary 0
35 36
36 37 # Check the blobstore is populated
37 38 $ find .hg/store/lfs/objects | sort
38 39 .hg/store/lfs/objects
39 40 .hg/store/lfs/objects/f1
40 41 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
41 42
42 43 # Check the blob stored contains the actual contents of the file
43 44 $ cat .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
44 45 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
45 46
46 47 # Push changes to the server
47 48
48 49 $ hg push
49 50 pushing to $TESTTMP/server
50 51 searching for changes
51 52 abort: lfs.url needs to be configured
52 53 [255]
53 54
54 55 $ cat >> $HGRCPATH << EOF
55 56 > [lfs]
56 57 > url=file:$TESTTMP/dummy-remote/
57 58 > EOF
58 59
59 60 Push to a local non-lfs repo with the extension enabled will add the
60 61 lfs requirement
61 62
62 63 $ grep lfs $TESTTMP/server/.hg/requires
63 64 [1]
64 65 $ hg push -v | egrep -v '^(uncompressed| )'
65 66 pushing to $TESTTMP/server
66 67 searching for changes
67 68 lfs: found f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b in the local lfs store
68 69 2 changesets found
69 70 adding changesets
70 71 adding manifests
71 72 adding file changes
72 73 added 2 changesets with 2 changes to 2 files
73 74 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
74 75 $ grep lfs $TESTTMP/server/.hg/requires
75 76 lfs
76 77
77 78 # Unknown URL scheme
78 79
79 80 $ hg push --config lfs.url=ftp://foobar
80 81 abort: lfs: unknown url scheme: ftp
81 82 [255]
82 83
83 84 $ cd ../
84 85
85 86 # Initialize new client (not cloning) and setup extension
86 87 $ hg init client2
87 88 $ cd client2
88 89 $ cat >> .hg/hgrc <<EOF
89 90 > [paths]
90 91 > default = $TESTTMP/server
91 92 > EOF
92 93
93 94 # Pull from server
94 95
95 96 Pulling a local lfs repo into a local non-lfs repo with the extension
96 97 enabled adds the lfs requirement
97 98
98 99 $ grep lfs .hg/requires $TESTTMP/server/.hg/requires
99 100 $TESTTMP/server/.hg/requires:lfs
100 101 $ hg pull default
101 102 pulling from $TESTTMP/server
102 103 requesting all changes
103 104 adding changesets
104 105 adding manifests
105 106 adding file changes
106 107 added 2 changesets with 2 changes to 2 files
107 108 new changesets b29ba743f89d:00c137947d30
108 109 (run 'hg update' to get a working copy)
109 110 $ grep lfs .hg/requires $TESTTMP/server/.hg/requires
110 111 .hg/requires:lfs
111 112 $TESTTMP/server/.hg/requires:lfs
112 113
113 114 # Check the blobstore is not yet populated
114 115 $ [ -d .hg/store/lfs/objects ]
115 116 [1]
116 117
117 118 # Update to the last revision containing the large file
118 119 $ hg update
119 120 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
120 121
121 122 # Check the blobstore has been populated on update
122 123 $ find .hg/store/lfs/objects | sort
123 124 .hg/store/lfs/objects
124 125 .hg/store/lfs/objects/f1
125 126 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
126 127
127 128 # Check the contents of the file are fetched from blobstore when requested
128 129 $ hg cat -r . largefile
129 130 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
130 131
131 132 # Check the file has been copied in the working copy
132 133 $ cat largefile
133 134 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
134 135
135 136 $ cd ..
136 137
137 138 # Check rename, and switch between large and small files
138 139
139 140 $ hg init repo3
140 141 $ cd repo3
141 142 $ cat >> .hg/hgrc << EOF
142 143 > [lfs]
143 > threshold=10B
144 > track=size(">10B")
144 145 > EOF
145 146
146 147 $ echo LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS > large
147 148 $ echo SHORTER > small
148 149 $ hg add . -q
149 150 $ hg commit -m 'commit with lfs content'
150 151
151 152 $ hg mv large l
152 153 $ hg mv small s
153 154 $ hg commit -m 'renames'
154 155
155 156 $ echo SHORT > l
156 157 $ echo BECOME-LARGER-FROM-SHORTER > s
157 158 $ hg commit -m 'large to small, small to large'
158 159
159 160 $ echo 1 >> l
160 161 $ echo 2 >> s
161 162 $ hg commit -m 'random modifications'
162 163
163 164 $ echo RESTORE-TO-BE-LARGE > l
164 165 $ echo SHORTER > s
165 166 $ hg commit -m 'switch large and small again'
166 167
167 168 # Test lfs_files template
168 169
169 170 $ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n'
170 171 0 large
171 172 1 l
172 173 2 s
173 174 3 s
174 175 4 l
175 176
176 177 # Push and pull the above repo
177 178
178 179 $ hg --cwd .. init repo4
179 180 $ hg push ../repo4
180 181 pushing to ../repo4
181 182 searching for changes
182 183 adding changesets
183 184 adding manifests
184 185 adding file changes
185 186 added 5 changesets with 10 changes to 4 files
186 187
187 188 $ hg --cwd .. init repo5
188 189 $ hg --cwd ../repo5 pull ../repo3
189 190 pulling from ../repo3
190 191 requesting all changes
191 192 adding changesets
192 193 adding manifests
193 194 adding file changes
194 195 added 5 changesets with 10 changes to 4 files
195 196 new changesets fd47a419c4f7:5adf850972b9
196 197 (run 'hg update' to get a working copy)
197 198
198 199 $ cd ..
199 200
200 201 # Test clone
201 202
202 203 $ hg init repo6
203 204 $ cd repo6
204 205 $ cat >> .hg/hgrc << EOF
205 206 > [lfs]
206 > threshold=30B
207 > track=size(">30B")
207 208 > EOF
208 209
209 210 $ echo LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES > large
210 211 $ echo SMALL > small
211 212 $ hg commit -Aqm 'create a lfs file' large small
212 213 $ hg debuglfsupload -r 'all()' -v
213 214 lfs: found 8e92251415339ae9b148c8da89ed5ec665905166a1ab11b09dca8fad83344738 in the local lfs store
214 215
215 216 $ cd ..
216 217
217 218 $ hg clone repo6 repo7
218 219 updating to branch default
219 220 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
220 221 $ cd repo7
221 222 $ hg config extensions --debug | grep lfs
222 223 $TESTTMP/repo7/.hg/hgrc:*: extensions.lfs= (glob)
223 224 $ cat large
224 225 LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES
225 226 $ cat small
226 227 SMALL
227 228
228 229 $ cd ..
229 230
230 231 $ hg --config extensions.share= share repo7 sharedrepo
231 232 updating working directory
232 233 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
233 234 $ hg -R sharedrepo config extensions --debug | grep lfs
234 235 $TESTTMP/sharedrepo/.hg/hgrc:*: extensions.lfs= (glob)
235 236
236 237 # Test rename and status
237 238
238 239 $ hg init repo8
239 240 $ cd repo8
240 241 $ cat >> .hg/hgrc << EOF
241 242 > [lfs]
242 > threshold=10B
243 > track=size(">10B")
243 244 > EOF
244 245
245 246 $ echo THIS-IS-LFS-BECAUSE-10-BYTES > a1
246 247 $ echo SMALL > a2
247 248 $ hg commit -m a -A a1 a2
248 249 $ hg status
249 250 $ hg mv a1 b1
250 251 $ hg mv a2 a1
251 252 $ hg mv b1 a2
252 253 $ hg commit -m b
253 254 $ hg status
254 255 >>> with open('a2', 'wb') as f:
255 256 ... f.write(b'\1\nSTART-WITH-HG-FILELOG-METADATA')
256 257 >>> with open('a1', 'wb') as f:
257 258 ... f.write(b'\1\nMETA\n')
258 259 $ hg commit -m meta
259 260 $ hg status
260 261 $ hg log -T '{rev}: {file_copies} | {file_dels} | {file_adds}\n'
261 262 2: | |
262 263 1: a1 (a2)a2 (a1) | |
263 264 0: | | a1 a2
264 265
265 266 $ for n in a1 a2; do
266 267 > for r in 0 1 2; do
267 268 > printf '\n%s @ %s\n' $n $r
268 269 > hg debugdata $n $r
269 270 > done
270 271 > done
271 272
272 273 a1 @ 0
273 274 version https://git-lfs.github.com/spec/v1
274 275 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
275 276 size 29
276 277 x-is-binary 0
277 278
278 279 a1 @ 1
279 280 \x01 (esc)
280 281 copy: a2
281 282 copyrev: 50470ad23cf937b1f4b9f80bfe54df38e65b50d9
282 283 \x01 (esc)
283 284 SMALL
284 285
285 286 a1 @ 2
286 287 \x01 (esc)
287 288 \x01 (esc)
288 289 \x01 (esc)
289 290 META
290 291
291 292 a2 @ 0
292 293 SMALL
293 294
294 295 a2 @ 1
295 296 version https://git-lfs.github.com/spec/v1
296 297 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
297 298 size 29
298 299 x-hg-copy a1
299 300 x-hg-copyrev be23af27908a582af43e5cda209a5a9b319de8d4
300 301 x-is-binary 0
301 302
302 303 a2 @ 2
303 304 version https://git-lfs.github.com/spec/v1
304 305 oid sha256:876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943
305 306 size 32
306 307 x-is-binary 0
307 308
308 309 # Verify commit hashes include rename metadata
309 310
310 311 $ hg log -T '{rev}:{node|short} {desc}\n'
311 312 2:0fae949de7fa meta
312 313 1:9cd6bdffdac0 b
313 314 0:7f96794915f7 a
314 315
315 316 $ cd ..
316 317
317 318 # Test bundle
318 319
319 320 $ hg init repo9
320 321 $ cd repo9
321 322 $ cat >> .hg/hgrc << EOF
322 323 > [lfs]
323 > threshold=10B
324 > track=size(">10B")
324 325 > [diff]
325 326 > git=1
326 327 > EOF
327 328
328 329 $ for i in 0 single two three 4; do
329 330 > echo 'THIS-IS-LFS-'$i > a
330 331 > hg commit -m a-$i -A a
331 332 > done
332 333
333 334 $ hg update 2 -q
334 335 $ echo 'THIS-IS-LFS-2-CHILD' > a
335 336 $ hg commit -m branching -q
336 337
337 338 $ hg bundle --base 1 bundle.hg -v
338 339 lfs: found 5ab7a3739a5feec94a562d070a14f36dba7cad17e5484a4a89eea8e5f3166888 in the local lfs store
339 340 lfs: found a9c7d1cd6ce2b9bbdf46ed9a862845228717b921c089d0d42e3bcaed29eb612e in the local lfs store
340 341 lfs: found f693890c49c409ec33673b71e53f297681f76c1166daf33b2ad7ebf8b1d3237e in the local lfs store
341 342 lfs: found fda198fea753eb66a252e9856915e1f5cddbe41723bd4b695ece2604ad3c9f75 in the local lfs store
342 343 4 changesets found
343 344 uncompressed size of bundle content:
344 345 * (changelog) (glob)
345 346 * (manifests) (glob)
346 347 * a (glob)
347 348 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
348 349 $ hg -R bundle.hg log -p -T '{rev} {desc}\n' a
349 350 5 branching
350 351 diff --git a/a b/a
351 352 --- a/a
352 353 +++ b/a
353 354 @@ -1,1 +1,1 @@
354 355 -THIS-IS-LFS-two
355 356 +THIS-IS-LFS-2-CHILD
356 357
357 358 4 a-4
358 359 diff --git a/a b/a
359 360 --- a/a
360 361 +++ b/a
361 362 @@ -1,1 +1,1 @@
362 363 -THIS-IS-LFS-three
363 364 +THIS-IS-LFS-4
364 365
365 366 3 a-three
366 367 diff --git a/a b/a
367 368 --- a/a
368 369 +++ b/a
369 370 @@ -1,1 +1,1 @@
370 371 -THIS-IS-LFS-two
371 372 +THIS-IS-LFS-three
372 373
373 374 2 a-two
374 375 diff --git a/a b/a
375 376 --- a/a
376 377 +++ b/a
377 378 @@ -1,1 +1,1 @@
378 379 -THIS-IS-LFS-single
379 380 +THIS-IS-LFS-two
380 381
381 382 1 a-single
382 383 diff --git a/a b/a
383 384 --- a/a
384 385 +++ b/a
385 386 @@ -1,1 +1,1 @@
386 387 -THIS-IS-LFS-0
387 388 +THIS-IS-LFS-single
388 389
389 390 0 a-0
390 391 diff --git a/a b/a
391 392 new file mode 100644
392 393 --- /dev/null
393 394 +++ b/a
394 395 @@ -0,0 +1,1 @@
395 396 +THIS-IS-LFS-0
396 397
397 398 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
398 399 $ hg -R bundle-again.hg log -p -T '{rev} {desc}\n' a
399 400 5 branching
400 401 diff --git a/a b/a
401 402 --- a/a
402 403 +++ b/a
403 404 @@ -1,1 +1,1 @@
404 405 -THIS-IS-LFS-two
405 406 +THIS-IS-LFS-2-CHILD
406 407
407 408 4 a-4
408 409 diff --git a/a b/a
409 410 --- a/a
410 411 +++ b/a
411 412 @@ -1,1 +1,1 @@
412 413 -THIS-IS-LFS-three
413 414 +THIS-IS-LFS-4
414 415
415 416 3 a-three
416 417 diff --git a/a b/a
417 418 --- a/a
418 419 +++ b/a
419 420 @@ -1,1 +1,1 @@
420 421 -THIS-IS-LFS-two
421 422 +THIS-IS-LFS-three
422 423
423 424 2 a-two
424 425 diff --git a/a b/a
425 426 --- a/a
426 427 +++ b/a
427 428 @@ -1,1 +1,1 @@
428 429 -THIS-IS-LFS-single
429 430 +THIS-IS-LFS-two
430 431
431 432 1 a-single
432 433 diff --git a/a b/a
433 434 --- a/a
434 435 +++ b/a
435 436 @@ -1,1 +1,1 @@
436 437 -THIS-IS-LFS-0
437 438 +THIS-IS-LFS-single
438 439
439 440 0 a-0
440 441 diff --git a/a b/a
441 442 new file mode 100644
442 443 --- /dev/null
443 444 +++ b/a
444 445 @@ -0,0 +1,1 @@
445 446 +THIS-IS-LFS-0
446 447
447 448 $ cd ..
448 449
449 450 # Test isbinary
450 451
451 452 $ hg init repo10
452 453 $ cd repo10
453 454 $ cat >> .hg/hgrc << EOF
454 455 > [extensions]
455 456 > lfs=
456 457 > [lfs]
457 > threshold=1
458 > track=all()
458 459 > EOF
459 460 $ $PYTHON <<'EOF'
460 461 > def write(path, content):
461 462 > with open(path, 'wb') as f:
462 463 > f.write(content)
463 464 > write('a', b'\0\0')
464 465 > write('b', b'\1\n')
465 466 > write('c', b'\1\n\0')
466 467 > write('d', b'xx')
467 468 > EOF
468 469 $ hg add a b c d
469 470 $ hg diff --stat
470 471 a | Bin
471 472 b | 1 +
472 473 c | Bin
473 474 d | 1 +
474 475 4 files changed, 2 insertions(+), 0 deletions(-)
475 476 $ hg commit -m binarytest
476 477 $ cat > $TESTTMP/dumpbinary.py << EOF
477 478 > def reposetup(ui, repo):
478 479 > for n in 'abcd':
479 480 > ui.write(('%s: binary=%s\n') % (n, repo['.'][n].isbinary()))
480 481 > EOF
481 482 $ hg --config extensions.dumpbinary=$TESTTMP/dumpbinary.py id --trace
482 483 a: binary=True
483 484 b: binary=False
484 485 c: binary=True
485 486 d: binary=False
486 487 b55353847f02 tip
487 488
488 489 $ cd ..
489 490
490 491 # Test fctx.cmp fastpath - diff without LFS blobs
491 492
492 493 $ hg init repo12
493 494 $ cd repo12
494 495 $ cat >> .hg/hgrc <<EOF
495 496 > [lfs]
496 497 > threshold=1
497 498 > EOF
498 499 $ cat > ../patch.diff <<EOF
499 500 > # HG changeset patch
500 501 > 2
501 502 >
502 503 > diff --git a/a b/a
503 504 > old mode 100644
504 505 > new mode 100755
505 506 > EOF
506 507
507 508 $ for i in 1 2 3; do
508 509 > cp ../repo10/a a
509 510 > if [ $i = 3 ]; then
510 511 > # make a content-only change
511 512 > hg import -q --bypass ../patch.diff
512 513 > hg update -q
513 514 > rm ../patch.diff
514 515 > else
515 516 > echo $i >> a
516 517 > hg commit -m $i -A a
517 518 > fi
518 519 > done
519 520 $ [ -d .hg/store/lfs/objects ]
520 521
521 522 $ cd ..
522 523
523 524 $ hg clone repo12 repo13 --noupdate
524 525 $ cd repo13
525 526 $ hg log --removed -p a -T '{desc}\n' --config diff.nobinary=1 --git
526 527 2
527 528 diff --git a/a b/a
528 529 old mode 100644
529 530 new mode 100755
530 531
531 532 2
532 533 diff --git a/a b/a
533 534 Binary file a has changed
534 535
535 536 1
536 537 diff --git a/a b/a
537 538 new file mode 100644
538 539 Binary file a has changed
539 540
540 541 $ [ -d .hg/store/lfs/objects ]
541 542 [1]
542 543
543 544 $ cd ..
544 545
546 # Test filter
547
548 $ hg init repo11
549 $ cd repo11
550 $ cat >> .hg/hgrc << EOF
551 > [lfs]
552 > track=(**.a & size(">5B")) | (**.b & !size(">5B"))
553 > | (**.c & "path:d" & !"path:d/c.c") | size(">10B")
554 > EOF
555
556 $ mkdir a
557 $ echo aaaaaa > a/1.a
558 $ echo a > a/2.a
559 $ echo aaaaaa > 1.b
560 $ echo a > 2.b
561 $ echo a > 1.c
562 $ mkdir d
563 $ echo a > d/c.c
564 $ echo a > d/d.c
565 $ echo aaaaaaaaaaaa > x
566 $ hg add . -q
567 $ hg commit -m files
568
569 $ for p in a/1.a a/2.a 1.b 2.b 1.c d/c.c d/d.c x; do
570 > if hg debugdata $p 0 2>&1 | grep git-lfs >/dev/null; then
571 > echo "${p}: is lfs"
572 > else
573 > echo "${p}: not lfs"
574 > fi
575 > done
576 a/1.a: is lfs
577 a/2.a: not lfs
578 1.b: not lfs
579 2.b: is lfs
580 1.c: not lfs
581 d/c.c: not lfs
582 d/d.c: is lfs
583 x: is lfs
584
585 $ cd ..
586
545 587 # Verify the repos
546 588
547 589 $ cat > $TESTTMP/dumpflog.py << EOF
548 590 > # print raw revision sizes, flags, and hashes for certain files
549 591 > import hashlib
550 592 > from mercurial import revlog
551 593 > from mercurial.node import short
552 594 > def hash(rawtext):
553 595 > h = hashlib.sha512()
554 596 > h.update(rawtext)
555 597 > return h.hexdigest()[:4]
556 598 > def reposetup(ui, repo):
557 599 > # these 2 files are interesting
558 600 > for name in ['l', 's']:
559 601 > fl = repo.file(name)
560 602 > if len(fl) == 0:
561 603 > continue
562 604 > sizes = [revlog.revlog.rawsize(fl, i) for i in fl]
563 605 > texts = [fl.revision(i, raw=True) for i in fl]
564 606 > flags = [int(fl.flags(i)) for i in fl]
565 607 > hashes = [hash(t) for t in texts]
566 608 > print(' %s: rawsizes=%r flags=%r hashes=%r'
567 609 > % (name, sizes, flags, hashes))
568 610 > EOF
569 611
570 612 $ for i in client client2 server repo3 repo4 repo5 repo6 repo7 repo8 repo9 \
571 613 > repo10; do
572 614 > echo 'repo:' $i
573 615 > hg --cwd $i verify --config extensions.dumpflog=$TESTTMP/dumpflog.py -q
574 616 > done
575 617 repo: client
576 618 repo: client2
577 619 repo: server
578 620 repo: repo3
579 621 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
580 622 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
581 623 repo: repo4
582 624 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
583 625 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
584 626 repo: repo5
585 627 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
586 628 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
587 629 repo: repo6
588 630 repo: repo7
589 631 repo: repo8
590 632 repo: repo9
591 633 repo: repo10
592 634
593 635 repo13 doesn't have any cached lfs files and its source never pushed its
594 636 files. Therefore, the files don't exist in the remote store. Use the files in
595 637 the user cache.
596 638
597 639 $ test -d $TESTTMP/repo13/.hg/store/lfs/objects
598 640 [1]
599 641
600 642 $ hg --config extensions.share= share repo13 repo14
601 643 updating working directory
602 644 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
603 645 $ hg -R repo14 -q verify
604 646
605 647 $ hg clone repo13 repo15
606 648 updating to branch default
607 649 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
608 650 $ hg -R repo15 -q verify
609 651
610 652 If the source repo doesn't have the blob (maybe it was pulled or cloned with
611 653 --noupdate), the blob is still accessible via the global cache to send to the
612 654 remote store.
613 655
614 656 $ rm -rf $TESTTMP/repo15/.hg/store/lfs
615 657 $ hg init repo16
616 658 $ hg -R repo15 push repo16
617 659 pushing to repo16
618 660 searching for changes
619 661 adding changesets
620 662 adding manifests
621 663 adding file changes
622 664 added 3 changesets with 2 changes to 1 files
623 665 $ hg -R repo15 -q verify
624 666
625 667 Test damaged file scenarios. (This also damages the usercache because of the
626 668 hardlinks.)
627 669
628 670 $ echo 'damage' >> repo5/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
629 671
630 672 Repo with damaged lfs objects in any revision will fail verification.
631 673
632 674 $ hg -R repo5 verify
633 675 checking changesets
634 676 checking manifests
635 677 crosschecking files in changesets and manifests
636 678 checking files
637 679 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
638 680 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
639 681 4 files, 5 changesets, 10 total revisions
640 682 2 integrity errors encountered!
641 683 (first damaged changeset appears to be 0)
642 684 [1]
643 685
644 686 Updates work after cloning a damaged repo, if the damaged lfs objects aren't in
645 687 the update destination. Those objects won't be added to the new repo's store
646 688 because they aren't accessed.
647 689
648 690 $ hg clone -v repo5 fromcorrupt
649 691 updating to branch default
650 692 resolving manifests
651 693 getting l
652 694 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the usercache
653 695 getting s
654 696 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
655 697 $ test -f fromcorrupt/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
656 698 [1]
657 699
658 700 Verify will copy/link all lfs objects into the local store that aren't already
659 701 present. Bypass the corrupted usercache to show that verify works when fed by
660 702 the (uncorrupted) remote store.
661 703
662 704 $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v
663 705 repository uses revlog format 1
664 706 checking changesets
665 707 checking manifests
666 708 crosschecking files in changesets and manifests
667 709 checking files
668 710 lfs: adding 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e to the usercache
669 711 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
670 712 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
671 713 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
672 714 lfs: adding 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 to the usercache
673 715 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
674 716 lfs: adding b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c to the usercache
675 717 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
676 718 4 files, 5 changesets, 10 total revisions
677 719
678 720 Verify will not copy/link a corrupted file from the usercache into the local
679 721 store and poison it. (The verify with a good remote now works.)
680 722
681 723 $ rm -r fromcorrupt/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
682 724 $ hg -R fromcorrupt verify -v
683 725 repository uses revlog format 1
684 726 checking changesets
685 727 checking manifests
686 728 crosschecking files in changesets and manifests
687 729 checking files
688 730 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
689 731 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
690 732 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
691 733 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
692 734 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
693 735 4 files, 5 changesets, 10 total revisions
694 736 2 integrity errors encountered!
695 737 (first damaged changeset appears to be 0)
696 738 [1]
697 739 $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v
698 740 repository uses revlog format 1
699 741 checking changesets
700 742 checking manifests
701 743 crosschecking files in changesets and manifests
702 744 checking files
703 745 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the usercache
704 746 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
705 747 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
706 748 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
707 749 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
708 750 4 files, 5 changesets, 10 total revisions
709 751
710 752 Damaging a file required by the update destination fails the update.
711 753
712 754 $ echo 'damage' >> $TESTTMP/dummy-remote/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
713 755 $ hg --config lfs.usercache=emptycache clone -v repo5 fromcorrupt2
714 756 updating to branch default
715 757 resolving manifests
716 758 getting l
717 759 abort: corrupt remote lfs object: 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
718 760 [255]
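
The abort is possible because lfs blobs are content addressed: the blob's file
name in the remote store is the sha256 of its (undamaged) contents, so the
appended garbage makes the recomputed digest stop matching the name. As a
sketch, rehashing the damaged blob with the `f` helper used later in this test
shows some other digest (globbed here, since the exact value depends on the
appended bytes):

  $ cat $TESTTMP/dummy-remote/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
  sha256=* (glob)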
719 761
720 762 A corrupted lfs blob is not transferred from a file:// remote store to the
721 763 usercache or local store.
722 764
723 765 $ test -f emptycache/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
724 766 [1]
725 767 $ test -f fromcorrupt2/.hg/store/lfs/objects/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
726 768 [1]
727 769
728 770 $ hg -R fromcorrupt2 verify
729 771 checking changesets
730 772 checking manifests
731 773 crosschecking files in changesets and manifests
732 774 checking files
733 775 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
734 776 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
735 777 4 files, 5 changesets, 10 total revisions
736 778 2 integrity errors encountered!
737 779 (first damaged changeset appears to be 0)
738 780 [1]
739 781
740 782 Corrupt local files are not sent upstream. (The alternate dummy remote
741 783 avoids the corrupt lfs object in the original remote.)
742 784
743 785 $ mkdir $TESTTMP/dummy-remote2
744 786 $ hg init dest
745 787 $ hg -R fromcorrupt2 --config lfs.url=file:///$TESTTMP/dummy-remote2 push -v dest
746 788 pushing to dest
747 789 searching for changes
748 790 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
749 791 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
750 792 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
751 793 abort: detected corrupt lfs object: 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
752 794 (run hg verify)
753 795 [255]
754 796
755 797 $ hg -R fromcorrupt2 --config lfs.url=file:///$TESTTMP/dummy-remote2 verify -v
756 798 repository uses revlog format 1
757 799 checking changesets
758 800 checking manifests
759 801 crosschecking files in changesets and manifests
760 802 checking files
761 803 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
762 804 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
763 805 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
764 806 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
765 807 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
766 808 4 files, 5 changesets, 10 total revisions
767 809 2 integrity errors encountered!
768 810 (first damaged changeset appears to be 0)
769 811 [1]
770 812
771 813 $ cat $TESTTMP/dummy-remote2/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
772 814 sha256=22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
773 815 $ cat fromcorrupt2/.hg/store/lfs/objects/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
774 816 sha256=22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
775 817 $ test -f $TESTTMP/dummy-remote2/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
776 818 [1]
777 819
778 820 Accessing a corrupt file will complain.
779 821
780 822 $ hg --cwd fromcorrupt2 cat -r 0 large
781 823 abort: integrity check failed on data/large.i:0!
782 824 [255]
783 825
784 826 lfs -> normal -> lfs round trip conversions are possible. The threshold for the
785 827 lfs destination is specified here because it was originally listed in the local
786 828 .hgrc, and the global one is too high to trigger lfs usage. For lfs -> normal,
787 829 there's no 'lfs' destination repo requirement. For normal -> lfs, there is.
788 830
789 831 XXX: There's not a great way to ensure that the conversion to normal files
790 832 actually converts _everything_ to normal. The extension needs to be loaded for
791 833 the source, but there's no way to disable it for the destination. The best that
792 834 can be done is to raise the threshold so that lfs isn't used on the destination.
793 835 Using '!' to unset the value on the command line isn't accepted.
794 836
795 837 $ hg --config extensions.convert= --config lfs.threshold=1000M \
796 838 > convert repo8 convert_normal
797 839 initializing destination convert_normal repository
798 840 scanning source...
799 841 sorting...
800 842 converting...
801 843 2 a
802 844 1 b
803 845 0 meta
804 846 $ grep 'lfs' convert_normal/.hg/requires
805 847 [1]
806 848 $ hg --cwd convert_normal debugdata a1 0
807 849 THIS-IS-LFS-BECAUSE-10-BYTES
808 850
809 851 $ hg --config extensions.convert= --config lfs.threshold=10B \
810 852 > convert convert_normal convert_lfs
811 853 initializing destination convert_lfs repository
812 854 scanning source...
813 855 sorting...
814 856 converting...
815 857 2 a
816 858 1 b
817 859 0 meta
818 860 $ hg --cwd convert_lfs debugdata a1 0
819 861 version https://git-lfs.github.com/spec/v1
820 862 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
821 863 size 29
822 864 x-is-binary 0
823 865 $ grep 'lfs' convert_lfs/.hg/requires
824 866 lfs
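
The oid recorded in the pointer doubles as the blob's path under the
destination's local store (a two character directory plus the remaining hex
digits, matching the object paths used elsewhere in this test). A quick
cross-check, as a sketch that assumes the conversion wrote the blob into
convert_lfs's own store the way a normal commit would:

  $ hg --cwd convert_lfs debugdata a1 0 | grep oid
  oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
  $ test -f convert_lfs/.hg/store/lfs/objects/5b/b8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024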
825 867
826 868 This convert is trickier because it contains deleted files (via `hg mv`).
827 869
828 870 $ hg --config extensions.convert= --config lfs.threshold=1000M \
829 871 > convert repo3 convert_normal2
830 872 initializing destination convert_normal2 repository
831 873 scanning source...
832 874 sorting...
833 875 converting...
834 876 4 commit with lfs content
835 877 3 renames
836 878 2 large to small, small to large
837 879 1 random modifications
838 880 0 switch large and small again
839 881 $ grep 'lfs' convert_normal2/.hg/requires
840 882 [1]
841 883 $ hg --cwd convert_normal2 debugdata large 0
842 884 LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS
843 885
844 886 $ hg --config extensions.convert= --config lfs.threshold=10B \
845 887 > convert convert_normal2 convert_lfs2
846 888 initializing destination convert_lfs2 repository
847 889 scanning source...
848 890 sorting...
849 891 converting...
850 892 4 commit with lfs content
851 893 3 renames
852 894 2 large to small, small to large
853 895 1 random modifications
854 896 0 switch large and small again
855 897 $ grep 'lfs' convert_lfs2/.hg/requires
856 898 lfs
857 899 $ hg --cwd convert_lfs2 debugdata large 0
858 900 version https://git-lfs.github.com/spec/v1
859 901 oid sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
860 902 size 39
861 903 x-is-binary 0
862 904
863 905 $ hg -R convert_lfs2 config --debug extensions | grep lfs
864 906 $TESTTMP/convert_lfs2/.hg/hgrc:*: extensions.lfs= (glob)
865 907
866 908 Committing deleted files works:
867 909
868 910 $ hg init $TESTTMP/repo-del
869 911 $ cd $TESTTMP/repo-del
870 912 $ echo 1 > A
871 913 $ hg commit -m 'add A' -A A
872 914 $ hg rm A
873 915 $ hg commit -m 'rm A'
874 916 $ cd ..
875 917
876 918 Unbundling adds the lfs requirement to a non-lfs repo, if necessary.
877 919
878 920 $ hg bundle -R $TESTTMP/repo-del -qr 0 --base null nolfs.hg
879 921 $ hg bundle -R convert_lfs2 -qr tip --base null lfs.hg
880 922 $ hg init unbundle
881 923 $ hg pull -R unbundle -q nolfs.hg
882 924 $ grep lfs unbundle/.hg/requires
883 925 [1]
884 926 $ hg pull -R unbundle -q lfs.hg
885 927 $ grep lfs unbundle/.hg/requires
886 928 lfs
887 929
888 930 $ hg init no_lfs
889 931 $ cat >> no_lfs/.hg/hgrc <<EOF
890 932 > [experimental]
891 933 > changegroup3 = True
892 934 > [extensions]
893 935 > lfs=!
894 936 > EOF
895 937 $ cp -R no_lfs no_lfs2
896 938
897 939 Pushing from a local lfs repo to a local repo without an lfs requirement and
898 940 with lfs disabled fails.
899 941
900 942 $ hg push -R convert_lfs2 no_lfs
901 943 pushing to no_lfs
902 944 abort: required features are not supported in the destination: lfs
903 945 [255]
904 946 $ grep lfs no_lfs/.hg/requires
905 947 [1]
906 948
907 949 Pulling from a local lfs repo into a local repo without an lfs requirement and
908 950 with lfs disabled fails.
909 951
910 952 $ hg pull -R no_lfs2 convert_lfs2
911 953 pulling from convert_lfs2
912 954 abort: required features are not supported in the destination: lfs
913 955 [255]
914 956 $ grep lfs no_lfs2/.hg/requires
915 957 [1]