##// END OF EJS Templates
lfs: allow non-lfs exchanges when the extension is only enabled on one side...
Matt Harbison -
r35521:2526579a default
parent child Browse files
Show More
@@ -1,323 +1,324
1 # wrapper.py - methods wrapping core mercurial logic
1 # wrapper.py - methods wrapping core mercurial logic
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial.node import bin, nullid, short
13 from mercurial.node import bin, nullid, short
14
14
15 from mercurial import (
15 from mercurial import (
16 error,
16 error,
17 filelog,
17 filelog,
18 revlog,
18 revlog,
19 util,
19 util,
20 )
20 )
21
21
22 from ..largefiles import lfutil
22 from ..largefiles import lfutil
23
23
24 from . import (
24 from . import (
25 blobstore,
25 blobstore,
26 pointer,
26 pointer,
27 )
27 )
28
28
def supportedoutgoingversions(orig, repo):
    """Restrict outgoing changegroup versions to '03' only when this repo
    actually requires lfs.

    Non-lfs repos keep the full version set so they can still exchange with
    peers that do not have the extension loaded.
    """
    versions = orig(repo)
    if 'lfs' in repo.requirements:
        # lfs flags need changegroup3's per-revision flag support
        versions.discard('01')
        versions.discard('02')
        versions.add('03')
    return versions
35
36
def allsupportedversions(orig, ui):
    """Add changegroup version '03' to the set of versions this client can
    handle, so lfs-flagged revisions can be received."""
    versions = orig(ui)
    versions.add('03')
    return versions
40
41
def bypasscheckhash(self, text):
    """Flag-processor hash bypass: always False, i.e. never skip the
    integrity check for lfs-flagged revisions."""
    return False
43
44
def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 2-tuple (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        # blob is missing locally; fetch it from the remote store
        p.filename = getattr(self, 'indexfile', None)
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata (rename info etc.) back into the text
    hgmeta = {}
    for k in p.keys():
        if k.startswith('x-hg-'):
            name = k[len('x-hg-'):]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith('\1\n'):
        text = filelog.packmeta(hgmeta, text)

    return (text, True)
71 return (text, True)
71
72
def writetostore(self, text):
    """Store *text* in the local lfs blobstore and return a serialized
    pointer in its place.

    Returns a 2-tuple (rawtext, validatehash=False): the pointer replaces
    the content in the filelog, so the caller must not hash-check it.
    """
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = filelog.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hashlib.sha256(text).hexdigest()
    self.opener.lfslocalblobstore.write(oid, text, verify=False)

    # replace contents with metadata
    longoid = 'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not util.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)
101 return (rawtext, False)
101
102
def _islfs(rlog, node=None, rev=None):
    """Return True if the given revlog entry carries the EXTSTORED (lfs)
    flag.

    Accepts either a node or a rev; with neither, this is assumed to be
    working-copy content (no revision yet) and returns False.
    """
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog.rev(node)
    else:
        node = rlog.node(rev)
        if node == nullid:
            return False
    flags = rlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)
114 return bool(flags & revlog.REVIDX_EXTSTORED)
114
115
def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
    """Wrap filelog.addrevision: mark revisions whose content exceeds the
    configured lfs threshold with REVIDX_EXTSTORED so the flag processor
    diverts them to the blobstore."""
    threshold = self.opener.options['lfsthreshold']
    textlen = len(text)
    # exclude hg rename meta from file size
    meta, offset = filelog.parsemeta(text)
    if offset:
        textlen -= offset

    if threshold and textlen > threshold:
        flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)
130 node=node, flags=flags, **kwds)
130
131
def filelogrenamed(orig, self, node):
    """Wrap filelog.renamed: for lfs revisions, answer rename info from the
    pointer metadata instead of parsing the (external) content."""
    if _islfs(self, node):
        rawtext = self.revision(node, raw=True)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
            return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
        else:
            return False
    return orig(self, node)
142 return orig(self, node)
142
143
def filelogsize(orig, self, rev):
    """Wrap filelog.size: for lfs revisions, answer from pointer metadata
    without downloading the blob."""
    if _islfs(self, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self.revision(rev, raw=True)
        metadata = pointer.deserialize(rawtext)
        return int(metadata['size'])
    return orig(self, rev)
150 return orig(self, rev)
150
151
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: two lfs files differ iff their oids differ
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)
160 return orig(self, fctx)
160
161
def filectxisbinary(orig, self):
    """Wrap filectx.isbinary: answer from lfs pointer metadata when the
    content lives in the blobstore."""
    if self.islfs():
        # fast path: use lfs metadata to answer isbinary
        metadata = pointer.deserialize(self.rawdata())
        # if lfs metadata says nothing, assume it's binary by default
        return bool(int(metadata.get('x-is-binary', 1)))
    return orig(self)
168 return orig(self)
168
169
def filectxislfs(self):
    """Return True if this filectx's revision is stored externally via lfs."""
    return _islfs(self.filelog(), self.filenode())
171 return _islfs(self.filelog(), self.filenode())
171
172
def convertsink(orig, sink):
    """Wrap convert's sink factory: for hg sinks, detect when a converted
    commit introduces lfs content and upgrade the destination repo
    (requirement + local hgrc) on the fly."""
    sink = orig(sink)
    if sink.repotype == 'hg':
        class lfssink(sink.__class__):
            def putcommit(self, files, copies, parents, commit, source, revmap,
                          full, cleanp2):
                pc = super(lfssink, self).putcommit
                node = pc(files, copies, parents, commit, source, revmap, full,
                          cleanp2)

                if 'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add('lfs')
                        self.repo._writerequirements()

                        # Permanently enable lfs locally
                        with self.repo.vfs('hgrc', 'a', text=True) as fp:
                            fp.write('\n[extensions]\nlfs=\n')

                return node

        sink.__class__ = lfssink

    return sink
200 return sink
200
201
def vfsinit(orig, self, othervfs):
    """Wrap vfs.__init__: propagate lfs options and blobstore handles from
    *othervfs* so derived vfs objects keep lfs working."""
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith('lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))
212 setattr(self, name, getattr(othervfs, name))
212
213
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone: if the cloned repo requires lfs, permanently enable
    the extension in the clone's local hgrc."""
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the hgrc can't be updated.
        if not repo:
            return result

        # If lfs is required for this repo, permanently enable it locally
        if 'lfs' in repo.requirements:
            with repo.vfs('hgrc', 'a', text=True) as fp:
                fp.write('\n[extensions]\nlfs=\n')

    return result
231 return result
231
232
def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Wrap hg.postshare: if the shared repo requires lfs, permanently
    enable the extension in the destination's local hgrc."""
    orig(sourcerepo, destrepo, bookmarks, defaultpath)

    # If lfs is required for this repo, permanently enable it locally
    if 'lfs' in destrepo.requirements:
        with destrepo.vfs('hgrc', 'a', text=True) as fp:
            fp.write('\n[extensions]\nlfs=\n')
239 fp.write('\n[extensions]\nlfs=\n')
239
240
def _canskipupload(repo):
    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
243 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
243
244
def candownload(repo):
    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
247 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
247
248
def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions e. g. infinitepush. avoid renaming.
    '''
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)
257 uploadblobs(repo, pointers)
257
258
def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
266 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
266
267
def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(ui, repo, source, filename, bundletype, outgoing, *args,
                **kwargs)
273 **kwargs)
273
274
def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug('lfs: computing set of blobs to upload\n')
    # deduplicate by oid: the same blob may appear in several revisions
    pointers = {}
    for r in revs:
        ctx = repo[r]
        for p in pointersfromctx(ctx).values():
            pointers[p.oid()] = p
    return sorted(pointers.values())
283 return sorted(pointers.values())
283
284
def pointersfromctx(ctx):
    """return a dict {path: pointer} for given single changectx"""
    result = {}
    for f in ctx.files():
        # file list may include removals; skip paths absent from the context
        if f not in ctx:
            continue
        fctx = ctx[f]
        if not _islfs(fctx.filelog(), fctx.filenode()):
            continue
        try:
            result[f] = pointer.deserialize(fctx.rawdata())
        except pointer.InvalidPointer as ex:
            raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
                              % (f, short(ctx.node()), ex))
    return result
299 return result
299
300
def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
307 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
307
308
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    """Wrap upgrade's data-migration finish hook: hardlink every lfs blob
    from the old store into the upgraded repo's store."""
    orig(ui, srcrepo, dstrepo, requirements)

    srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
    dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

    for dirpath, dirs, files in srclfsvfs.walk():
        for oid in files:
            ui.write(_('copying lfs blob %s\n') % oid)
            lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
318 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
318
319
def upgraderequirements(orig, repo):
    """Wrap upgrade's requirement computation: carry the 'lfs' requirement
    through a repo upgrade."""
    reqs = orig(repo)
    if 'lfs' in repo.requirements:
        reqs.add('lfs')
    return reqs
324 return reqs
@@ -1,299 +1,279
1 #testcases lfsremote-on lfsremote-off
1 #testcases lfsremote-on lfsremote-off
2 #require serve
2 #require serve
3
3
4 This test splits `hg serve` with and without using the extension into separate
4 This test splits `hg serve` with and without using the extension into separate
4 This test splits `hg serve` with and without using the extension into separate
5 test cases. The tests are broken down as follows, where "LFS"/"No-LFS"
6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
7 indicates whether or not the extension is loaded. The "X" cases are not tested
7 indicates whether or not the extension is loaded. The "X" cases are not tested
8 individually, because the lfs requirement causes the process to bail early if
8 individually, because the lfs requirement causes the process to bail early if
9 the extension is disabled.
9 the extension is disabled.
10
10
11 . Server
11 . Server
12 .
12 .
13 . No-LFS LFS
13 . No-LFS LFS
14 . +----------------------------+
14 . +----------------------------+
15 . | || D | E | D | E |
15 . | || D | E | D | E |
16 . |---++=======================|
16 . |---++=======================|
17 . C | D || N/A | #1 | X | #4 |
17 . C | D || N/A | #1 | X | #4 |
18 . l No +---++-----------------------|
18 . l No +---++-----------------------|
19 . i LFS | E || #2 | #2 | X | #5 |
19 . i LFS | E || #2 | #2 | X | #5 |
20 . e +---++-----------------------|
20 . e +---++-----------------------|
21 . n | D || X | X | X | X |
21 . n | D || X | X | X | X |
22 . t LFS |---++-----------------------|
22 . t LFS |---++-----------------------|
23 . | E || #3 | #3 | X | #6 |
23 . | E || #3 | #3 | X | #6 |
24 . |---++-----------------------+
24 . |---++-----------------------+
25
25
26 $ hg init server
26 $ hg init server
27 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
27 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
28
28
29 Skip the experimental.changegroup3=True config. Failure to agree on this comes
29 Skip the experimental.changegroup3=True config. Failure to agree on this comes
30 first, and causes a "ValueError: no common changegroup version" or "abort:
30 first, and causes a "ValueError: no common changegroup version" or "abort:
31 HTTP Error 500: Internal Server Error", if the extension is only loaded on one
31 HTTP Error 500: Internal Server Error", if the extension is only loaded on one
32 side. If that *is* enabled, the subsequent failure is "abort: missing processor
32 side. If that *is* enabled, the subsequent failure is "abort: missing processor
33 for flag '0x2000'!" if the extension is only loaded on one side (possibly also
33 for flag '0x2000'!" if the extension is only loaded on one side (possibly also
34 masked by the Internal Server Error message).
34 masked by the Internal Server Error message).
35 $ cat >> $HGRCPATH <<EOF
35 $ cat >> $HGRCPATH <<EOF
36 > [lfs]
36 > [lfs]
37 > url=file:$TESTTMP/dummy-remote/
37 > url=file:$TESTTMP/dummy-remote/
38 > threshold=10
38 > threshold=10
39 > [web]
39 > [web]
40 > allow_push=*
40 > allow_push=*
41 > push_ssl=False
41 > push_ssl=False
42 > EOF
42 > EOF
43
43
44 #if lfsremote-on
44 #if lfsremote-on
45 $ hg --config extensions.lfs= -R server \
45 $ hg --config extensions.lfs= -R server \
46 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
46 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
47 #else
47 #else
48 $ hg --config extensions.lfs=! -R server \
48 $ hg --config extensions.lfs=! -R server \
49 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
49 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
50 #endif
50 #endif
51
51
52 $ cat hg.pid >> $DAEMON_PIDS
52 $ cat hg.pid >> $DAEMON_PIDS
53 $ hg clone -q http://localhost:$HGPORT client
53 $ hg clone -q http://localhost:$HGPORT client
54 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
54 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
55 [1]
55 [1]
56
56
57 --------------------------------------------------------------------------------
57 --------------------------------------------------------------------------------
58 Case #1: client with non-lfs content and the extension disabled; server with
58 Case #1: client with non-lfs content and the extension disabled; server with
59 non-lfs content, and the extension enabled.
59 non-lfs content, and the extension enabled.
60
60
61 $ cd client
61 $ cd client
62 $ echo 'non-lfs' > nonlfs.txt
62 $ echo 'non-lfs' > nonlfs.txt
63 $ hg ci -Aqm 'non-lfs'
63 $ hg ci -Aqm 'non-lfs'
64 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
64 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
65 [1]
65 [1]
66
66
67 #if lfsremote-on
67 #if lfsremote-on
68
68
69 $ hg push -q
69 $ hg push -q
70 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
70 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
71 [1]
71 [1]
72
72
73 TODO: fail more gracefully, or don't mandate changegroup3 for non-lfs repos.
74
75 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
73 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
76 abort: HTTP Error 500: Internal Server Error
77 [255]
78 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
74 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
79 grep: $TESTTMP/client1_clone/.hg/requires: $ENOENT$
75 [1]
80 [2]
81
82 TODO: fail more gracefully, or don't mandate changegroup3 for non-lfs repos.
83
76
84 $ hg init $TESTTMP/client1_pull
77 $ hg init $TESTTMP/client1_pull
85 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
78 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
86 abort: HTTP Error 500: Internal Server Error
87 [255]
88 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
79 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
89 [1]
80 [1]
90
81
91 $ hg identify http://localhost:$HGPORT
82 $ hg identify http://localhost:$HGPORT
92 d437e1d24fbd
83 d437e1d24fbd
93
84
94 #endif
85 #endif
95
86
96 --------------------------------------------------------------------------------
87 --------------------------------------------------------------------------------
97 Case #2: client with non-lfs content and the extension enabled; server with
88 Case #2: client with non-lfs content and the extension enabled; server with
98 non-lfs content, and the extension state controlled by #testcases.
89 non-lfs content, and the extension state controlled by #testcases.
99
90
100 $ cat >> $HGRCPATH <<EOF
91 $ cat >> $HGRCPATH <<EOF
101 > [extensions]
92 > [extensions]
102 > lfs =
93 > lfs =
103 > EOF
94 > EOF
104 $ echo 'non-lfs' > nonlfs2.txt
95 $ echo 'non-lfs' > nonlfs2.txt
105 $ hg ci -Aqm 'non-lfs file with lfs client'
96 $ hg ci -Aqm 'non-lfs file with lfs client'
106
97
107 TODO: fail more gracefully here
98 Since no lfs content has been added yet, the push is allowed, even when the
108 $ hg push -q 2>&1 | grep '^[A-Z]' || true
99 extension is not enabled remotely.
109 Traceback (most recent call last): (lfsremote-off !)
100
110 ValueError: no common changegroup version (lfsremote-off !)
101 $ hg push -q
111 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
102 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
112 [1]
103 [1]
113
104
114 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
105 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
115 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
106 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
116 [1]
107 [1]
117
108
118 $ hg init $TESTTMP/client2_pull
109 $ hg init $TESTTMP/client2_pull
119 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
110 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
120 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
111 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
121 [1]
112 [1]
122
113
123 XXX: The difference here is the push failed above when the extension isn't
124 enabled on the server. The extension shouldn't need to mess with changegroup
125 versions if there is no lfs content. But the requirement needs to be
126 consistently added before that can be ratcheted back.
127 $ hg identify http://localhost:$HGPORT
114 $ hg identify http://localhost:$HGPORT
128 1477875038c6 (lfsremote-on !)
115 1477875038c6
129 000000000000 (lfsremote-off !)
130
116
131 --------------------------------------------------------------------------------
117 --------------------------------------------------------------------------------
132 Case #3: client with lfs content and the extension enabled; server with
118 Case #3: client with lfs content and the extension enabled; server with
133 non-lfs content, and the extension state controlled by #testcases. The server
119 non-lfs content, and the extension state controlled by #testcases. The server
134 should have an 'lfs' requirement after it picks up its first commit with a blob.
120 should have an 'lfs' requirement after it picks up its first commit with a blob.
135
121
136 $ echo 'this is a big lfs file' > lfs.bin
122 $ echo 'this is a big lfs file' > lfs.bin
137 $ hg ci -Aqm 'lfs'
123 $ hg ci -Aqm 'lfs'
138 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
124 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
139 .hg/requires:lfs
125 .hg/requires:lfs
140
126
141 TODO: fail more gracefully here
127 TODO: fail more gracefully here
142 $ hg push -q 2>&1 | grep '^[A-Z]' || true
128 $ hg push -q 2>&1 | grep '^[A-Z]' || true
143 Traceback (most recent call last): (lfsremote-off !)
129 Traceback (most recent call last): (lfsremote-off !)
144 ValueError: no common changegroup version (lfsremote-off !)
130 ValueError: no common changegroup version (lfsremote-off !)
145 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
131 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
146 .hg/requires:lfs
132 .hg/requires:lfs
147 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
133 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
148
134
149 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
135 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
150 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
136 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
151 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
137 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
152 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
138 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
153
139
154 $ hg init $TESTTMP/client3_pull
140 $ hg init $TESTTMP/client3_pull
155 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
141 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
156 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
142 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
157 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
143 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
158 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
144 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
159
145
160 XXX: The difference here is the push failed above when the extension isn't
146 The difference here is the push failed above when the extension isn't
161 enabled on the server. The extension shouldn't need to mess with changegroup
147 enabled on the server.
162 versions if there is no lfs content. But the requirement needs to be
163 consistently added before that can be ratcheted back.
164 $ hg identify http://localhost:$HGPORT
148 $ hg identify http://localhost:$HGPORT
165 8374dc4052cb (lfsremote-on !)
149 8374dc4052cb (lfsremote-on !)
166 000000000000 (lfsremote-off !)
150 1477875038c6 (lfsremote-off !)
167
151
168 Don't bother testing the lfsremote-off cases- the server won't be able
152 Don't bother testing the lfsremote-off cases- the server won't be able
169 to launch if there's lfs content and the extension is disabled.
153 to launch if there's lfs content and the extension is disabled.
170
154
171 #if lfsremote-on
155 #if lfsremote-on
172
156
173 --------------------------------------------------------------------------------
157 --------------------------------------------------------------------------------
174 Case #4: client with non-lfs content and the extension disabled; server with
158 Case #4: client with non-lfs content and the extension disabled; server with
175 lfs content, and the extension enabled.
159 lfs content, and the extension enabled.
176
160
177 $ cat >> $HGRCPATH <<EOF
161 $ cat >> $HGRCPATH <<EOF
178 > [extensions]
162 > [extensions]
179 > lfs = !
163 > lfs = !
180 > EOF
164 > EOF
181
165
182 $ hg init $TESTTMP/client4
166 $ hg init $TESTTMP/client4
183 $ cd $TESTTMP/client4
167 $ cd $TESTTMP/client4
184 $ cat >> .hg/hgrc <<EOF
168 $ cat >> .hg/hgrc <<EOF
185 > [paths]
169 > [paths]
186 > default = http://localhost:$HGPORT
170 > default = http://localhost:$HGPORT
187 > EOF
171 > EOF
188 $ echo 'non-lfs' > nonlfs2.txt
172 $ echo 'non-lfs' > nonlfs2.txt
189 $ hg ci -Aqm 'non-lfs'
173 $ hg ci -Aqm 'non-lfs'
190 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
174 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
191 $TESTTMP/server/.hg/requires:lfs
175 $TESTTMP/server/.hg/requires:lfs
192
176
193 $ hg push -q --force
177 $ hg push -q --force
194 warning: repository is unrelated
178 warning: repository is unrelated
195 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
179 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
196 $TESTTMP/server/.hg/requires:lfs
180 $TESTTMP/server/.hg/requires:lfs
197
181
198 TODO: fail more gracefully.
182 TODO: fail more gracefully.
199
183
200 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client4_clone
184 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client4_clone
201 abort: HTTP Error 500: Internal Server Error
185 abort: HTTP Error 500: Internal Server Error
202 [255]
186 [255]
203 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
187 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
204 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
188 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
205 $TESTTMP/server/.hg/requires:lfs
189 $TESTTMP/server/.hg/requires:lfs
206 [2]
190 [2]
207
191
208 TODO: fail more gracefully.
192 TODO: fail more gracefully.
209
193
210 $ hg init $TESTTMP/client4_pull
194 $ hg init $TESTTMP/client4_pull
211 $ hg -R $TESTTMP/client4_pull pull -q http://localhost:$HGPORT
195 $ hg -R $TESTTMP/client4_pull pull -q http://localhost:$HGPORT
212 abort: HTTP Error 500: Internal Server Error
196 abort: HTTP Error 500: Internal Server Error
213 [255]
197 [255]
214 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
198 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
215 $TESTTMP/server/.hg/requires:lfs
199 $TESTTMP/server/.hg/requires:lfs
216
200
217 $ hg identify http://localhost:$HGPORT
201 $ hg identify http://localhost:$HGPORT
218 03b080fa9d93
202 03b080fa9d93
219
203
220 --------------------------------------------------------------------------------
204 --------------------------------------------------------------------------------
221 Case #5: client with non-lfs content and the extension enabled; server with
205 Case #5: client with non-lfs content and the extension enabled; server with
222 lfs content, and the extension enabled.
206 lfs content, and the extension enabled.
223
207
224 $ cat >> $HGRCPATH <<EOF
208 $ cat >> $HGRCPATH <<EOF
225 > [extensions]
209 > [extensions]
226 > lfs =
210 > lfs =
227 > EOF
211 > EOF
228 $ echo 'non-lfs' > nonlfs3.txt
212 $ echo 'non-lfs' > nonlfs3.txt
229 $ hg ci -Aqm 'non-lfs file with lfs client'
213 $ hg ci -Aqm 'non-lfs file with lfs client'
230
214
231 $ hg push -q
215 $ hg push -q
232 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
216 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
233 $TESTTMP/server/.hg/requires:lfs
217 $TESTTMP/server/.hg/requires:lfs
234
218
235 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
219 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
236 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
220 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
237 $TESTTMP/client5_clone/.hg/requires:lfs
221 $TESTTMP/client5_clone/.hg/requires:lfs
238 $TESTTMP/server/.hg/requires:lfs
222 $TESTTMP/server/.hg/requires:lfs
239
223
240 $ hg init $TESTTMP/client5_pull
224 $ hg init $TESTTMP/client5_pull
241 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
225 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
242 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
226 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
243 $TESTTMP/client5_pull/.hg/requires:lfs
227 $TESTTMP/client5_pull/.hg/requires:lfs
244 $TESTTMP/server/.hg/requires:lfs
228 $TESTTMP/server/.hg/requires:lfs
245
229
246 $ hg identify http://localhost:$HGPORT
230 $ hg identify http://localhost:$HGPORT
247 c729025cc5e3
231 c729025cc5e3
248
232
249 --------------------------------------------------------------------------------
233 --------------------------------------------------------------------------------
250 Case #6: client with lfs content and the extension enabled; server with
234 Case #6: client with lfs content and the extension enabled; server with
251 lfs content, and the extension enabled.
235 lfs content, and the extension enabled.
252
236
253 $ echo 'this is another lfs file' > lfs2.txt
237 $ echo 'this is another lfs file' > lfs2.txt
254 $ hg ci -Aqm 'lfs file with lfs client'
238 $ hg ci -Aqm 'lfs file with lfs client'
255
239
256 $ hg push -q
240 $ hg push -q
257 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
241 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
258 .hg/requires:lfs
242 .hg/requires:lfs
259 $TESTTMP/server/.hg/requires:lfs
243 $TESTTMP/server/.hg/requires:lfs
260
244
261 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
245 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
262 $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
246 $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
263 $TESTTMP/client6_clone/.hg/requires:lfs
247 $TESTTMP/client6_clone/.hg/requires:lfs
264 $TESTTMP/server/.hg/requires:lfs
248 $TESTTMP/server/.hg/requires:lfs
265
249
266 $ hg init $TESTTMP/client6_pull
250 $ hg init $TESTTMP/client6_pull
267 $ hg -R $TESTTMP/client6_pull pull -q http://localhost:$HGPORT
251 $ hg -R $TESTTMP/client6_pull pull -q http://localhost:$HGPORT
268 $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
252 $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
269 $TESTTMP/client6_pull/.hg/requires:lfs
253 $TESTTMP/client6_pull/.hg/requires:lfs
270 $TESTTMP/server/.hg/requires:lfs
254 $TESTTMP/server/.hg/requires:lfs
271
255
272 $ hg identify http://localhost:$HGPORT
256 $ hg identify http://localhost:$HGPORT
273 d3b84d50eacb
257 d3b84d50eacb
274
258
275 --------------------------------------------------------------------------------
259 --------------------------------------------------------------------------------
276 Misc: process dies early if a requirement exists and the extension is disabled
260 Misc: process dies early if a requirement exists and the extension is disabled
277
261
278 $ hg --config extensions.lfs=! summary
262 $ hg --config extensions.lfs=! summary
279 abort: repository requires features unknown to this Mercurial: lfs!
263 abort: repository requires features unknown to this Mercurial: lfs!
280 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
264 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
281 [255]
265 [255]
282
266
283 #endif
267 #endif
284
268
285 $ $PYTHON $TESTDIR/killdaemons.py $DAEMON_PIDS
269 $ $PYTHON $TESTDIR/killdaemons.py $DAEMON_PIDS
286
270
287 #if lfsremote-on
271 #if lfsremote-on
288 $ cat $TESTTMP/errors.log | grep '^[A-Z]'
272 $ cat $TESTTMP/errors.log | grep '^[A-Z]'
289 Traceback (most recent call last):
273 Traceback (most recent call last):
290 ValueError: no common changegroup version
274 ValueError: no common changegroup version
291 Traceback (most recent call last):
275 Traceback (most recent call last):
292 ValueError: no common changegroup version
276 ValueError: no common changegroup version
293 Traceback (most recent call last):
294 ValueError: no common changegroup version
295 Traceback (most recent call last):
296 ValueError: no common changegroup version
297 #else
277 #else
298 $ cat $TESTTMP/errors.log
278 $ cat $TESTTMP/errors.log
299 #endif
279 #endif
General Comments 0
You need to be logged in to leave comments. Login now