##// END OF EJS Templates
lfs: allow non-lfs exchanges when the extension is only enabled on one side...
Matt Harbison -
r35521:2526579a default
parent child Browse files
Show More
@@ -1,323 +1,324
1 1 # wrapper.py - methods wrapping core mercurial logic
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial.node import bin, nullid, short
14 14
15 15 from mercurial import (
16 16 error,
17 17 filelog,
18 18 revlog,
19 19 util,
20 20 )
21 21
22 22 from ..largefiles import lfutil
23 23
24 24 from . import (
25 25 blobstore,
26 26 pointer,
27 27 )
28 28
def supportedoutgoingversions(orig, repo):
    """Restrict outgoing changegroup versions for lfs repos.

    A repo that requires lfs can only send changegroup3 (the version that
    carries revlog flags), so '01'/'02' are dropped.  Repos without the
    requirement keep core's versions and additionally offer '03', which
    lets them exchange with peers that do use lfs.
    """
    versions = orig(repo)
    if 'lfs' in repo.requirements:
        # only changegroup3 can represent the EXTSTORED flag
        for legacy in ('01', '02'):
            versions.discard(legacy)
    versions.add('03')
    return versions
35 36
def allsupportedversions(orig, ui):
    """Advertise changegroup3 support in addition to core's versions."""
    supported = orig(ui)
    supported.add('03')
    return supported
40 41
def bypasscheckhash(self, text):
    """Flag-processor validator stub.

    Always reports the text as not self-validating; hash verification is
    left to the caller of the transform.
    """
    return False
43 44
def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 2-tuple (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    # 'text' here is the raw revlog payload: a serialized lfs pointer file
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        # blob not available locally; fetch it into the local store.  The
        # filename is attached to the pointer for error reporting purposes.
        p.filename = getattr(self, 'indexfile', None)
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith('x-hg-'):
            name = k[len('x-hg-'):]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith('\1\n'):
        # reattach filelog metadata; also needed when the content itself
        # starts with '\1\n' so it is not misparsed as a metadata header
        text = filelog.packmeta(hgmeta, text)

    return (text, True)
71 72
def writetostore(self, text):
    """Flag-processor transform replacing filelog text with an lfs pointer.

    The content (minus hg filelog metadata) is written to the local
    blobstore keyed by its sha256, and the serialized pointer becomes the
    stored rawtext.  Returns a 2-tuple (rawtext, False): the pointer does
    not hash to the node, so hash validation must be skipped.
    """
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = filelog.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hashlib.sha256(text).hexdigest()
    self.opener.lfslocalblobstore.write(oid, text, verify=False)

    # replace contents with metadata
    longoid = 'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not util.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)
101 102
def _islfs(rlog, node=None, rev=None):
    """Return True if the given revlog entry carries the EXTSTORED flag.

    Callers pass either 'node' or 'rev'; the other is resolved from the
    revlog.  With neither given, the answer is False.
    """
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog.rev(node)
    else:
        node = rlog.node(rev)
    if node == nullid:
        # the null revision never stores content
        return False
    flags = rlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)
114 115
def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
    """addrevision override marking large revisions for lfs storage.

    When the file content (excluding hg rename metadata) exceeds the
    configured 'lfsthreshold', REVIDX_EXTSTORED is set on the revision so
    the write is routed through the lfs flag processor.
    """
    threshold = self.opener.options['lfsthreshold']
    textlen = len(text)
    # exclude hg rename meta from file size
    meta, offset = filelog.parsemeta(text)
    if offset:
        textlen -= offset

    if threshold and textlen > threshold:
        flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)
130 131
def filelogrenamed(orig, self, node):
    """renamed() override for lfs revisions.

    For an lfs revision the copy information lives in the pointer metadata
    rather than in the filelog text, so answer from there.  Non-lfs
    revisions are delegated to the original implementation.
    """
    if not _islfs(self, node):
        return orig(self, node)
    rawtext = self.revision(node, raw=True)
    if not rawtext:
        return False
    metadata = pointer.deserialize(rawtext)
    if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
        return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
    return False
142 143
def filelogsize(orig, self, rev):
    """size() override: lfs pointers record the blob size explicitly."""
    if not _islfs(self, rev=rev):
        return orig(self, rev)
    # fast path: answer from the pointer metadata without reading the blob
    metadata = pointer.deserialize(self.revision(rev, raw=True))
    return int(metadata['size'])
150 151
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
    otherislfs = getattr(fctx, 'islfs', lambda: False)
    if self.islfs() and otherislfs():
        # fast path: two lfs revisions differ exactly when their oids differ
        mine = pointer.deserialize(self.rawdata())
        theirs = pointer.deserialize(fctx.rawdata())
        return mine.oid() != theirs.oid()
    return orig(self, fctx)
160 161
def filectxisbinary(orig, self):
    """isbinary() override answering from lfs pointer metadata."""
    if not self.islfs():
        return orig(self)
    # fast path: use lfs metadata to answer isbinary; when the pointer says
    # nothing, assume binary by default
    metadata = pointer.deserialize(self.rawdata())
    return bool(int(metadata.get('x-is-binary', 1)))
168 169
def filectxislfs(self):
    """Report whether this file revision is stored as an lfs blob."""
    flog = self.filelog()
    return _islfs(flog, self.filenode())
171 172
def convertsink(orig, sink):
    """Wrap the convert extension's sink for lfs awareness.

    For an hg destination, the sink class is replaced in-place with a
    subclass whose putcommit adds the 'lfs' requirement (and permanently
    enables the extension in the destination's hgrc) the first time a
    converted commit introduces an lfs file.
    """
    sink = orig(sink)
    if sink.repotype == 'hg':
        class lfssink(sink.__class__):
            def putcommit(self, files, copies, parents, commit, source, revmap,
                          full, cleanp2):
                pc = super(lfssink, self).putcommit
                node = pc(files, copies, parents, commit, source, revmap, full,
                          cleanp2)

                if 'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add('lfs')
                        self.repo._writerequirements()

                        # Permanently enable lfs locally
                        with self.repo.vfs('hgrc', 'a', text=True) as fp:
                            fp.write('\n[extensions]\nlfs=\n')

                return node

        # rebind the live sink object to the lfs-aware subclass
        sink.__class__ = lfssink

    return sink
200 201
def vfsinit(orig, self, othervfs):
    """Propagate lfs options and blobstores to a vfs derived from another."""
    orig(self, othervfs)
    # copy lfs related options
    for key, value in othervfs.options.items():
        if key.startswith('lfs'):
            self.options[key] = value
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for attr in ['lfslocalblobstore', 'lfsremoteblobstore']:
        if util.safehasattr(othervfs, attr):
            setattr(self, attr, getattr(othervfs, attr))
212 213
def hgclone(orig, ui, opts, *args, **kwargs):
    """clone wrapper: permanently enable lfs in an lfs-requiring clone.

    After a successful clone, if the destination repo carries the 'lfs'
    requirement, the extension is enabled in its hgrc so later commands
    work without a global configuration.
    """
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return result

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer, so the hgrc can't be updated.
    if repo and 'lfs' in repo.requirements:
        # If lfs is required for this repo, permanently enable it locally
        with repo.vfs('hgrc', 'a', text=True) as fp:
            fp.write('\n[extensions]\nlfs=\n')

    return result
231 232
def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """share wrapper: permanently enable lfs in an lfs-requiring share."""
    orig(sourcerepo, destrepo, bookmarks, defaultpath)

    # If lfs is required for this repo, permanently enable it locally
    if 'lfs' not in destrepo.requirements:
        return
    with destrepo.vfs('hgrc', 'a', text=True) as fp:
        fp.write('\n[extensions]\nlfs=\n')
239 240
def _canskipupload(repo):
    # a null remote store makes uploading a no-op, so it can be skipped
    remote = repo.svfs.lfsremoteblobstore
    return isinstance(remote, blobstore._nullremote)
243 244
def candownload(repo):
    # a null remote store can serve nothing, so downloading would be futile
    remote = repo.svfs.lfsremoteblobstore
    return not isinstance(remote, blobstore._nullremote)
247 248
def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions e. g. infinitepush. avoid renaming.
    '''
    if not _canskipupload(repo):
        uploadblobs(repo, extractpointers(repo, revs))
257 258
def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    # the push proceeds only after every referenced blob has been uploaded
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
266 267
def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    # a bundle may be applied elsewhere, so its blobs must be on the remote
    # store before the bundle file is produced
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(ui, repo, source, filename, bundletype, outgoing, *args,
                **kwargs)
273 274
def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug('lfs: computing set of blobs to upload\n')
    # deduplicate by oid: pointers with the same oid refer to the same blob
    byoid = {}
    for rev in revs:
        for p in pointersfromctx(repo[rev]).values():
            byoid[p.oid()] = p
    return sorted(byoid.values())
283 284
def pointersfromctx(ctx):
    """return a dict {path: pointer} for given single changectx

    Raises error.Abort if a file flagged as lfs does not deserialize into a
    valid pointer.
    """
    result = {}
    for f in ctx.files():
        # the file list may include removed files; skip them
        if f not in ctx:
            continue
        fctx = ctx[f]
        if not _islfs(fctx.filelog(), fctx.filenode()):
            continue
        try:
            result[f] = pointer.deserialize(fctx.rawdata())
        except pointer.InvalidPointer as ex:
            # abort messages must not end with a newline; the ui layer
            # appends one when printing (previously this had a stray '\n')
            raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s')
                              % (f, short(ctx.node()), ex))
    return result
299 300
def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        # nothing to send; avoid touching the remote store at all
        return

    localblob = repo.svfs.lfslocalblobstore
    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, localblob)
307 308
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    """Upgrade hook: carry lfs blobs from the old store into the new one."""
    orig(ui, srcrepo, dstrepo, requirements)

    srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
    dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

    for dirpath, dirs, files in srclfsvfs.walk():
        for oid in files:
            # NOTE(review): join(oid) ignores dirpath, which presumes a flat
            # blob layout named by oid — confirm against the blobstore vfs
            ui.write(_('copying lfs blob %s\n') % oid)
            # hardlink instead of copying to keep the upgrade cheap
            lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
318 319
def upgraderequirements(orig, repo):
    """Preserve the 'lfs' requirement across 'hg debugupgraderepo'."""
    newreqs = orig(repo)
    if 'lfs' in repo.requirements:
        newreqs.add('lfs')
    return newreqs
@@ -1,299 +1,279
1 1 #testcases lfsremote-on lfsremote-off
2 2 #require serve
3 3
4 4 This test splits `hg serve` with and without using the extension into separate
5 5 tests cases. The tests are broken down as follows, where "LFS"/"No-LFS"
6 6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
7 7 indicates whether or not the extension is loaded. The "X" cases are not tested
8 8 individually, because the lfs requirement causes the process to bail early if
9 9 the extension is disabled.
10 10
11 11 . Server
12 12 .
13 13 . No-LFS LFS
14 14 . +----------------------------+
15 15 . | || D | E | D | E |
16 16 . |---++=======================|
17 17 . C | D || N/A | #1 | X | #4 |
18 18 . l No +---++-----------------------|
19 19 . i LFS | E || #2 | #2 | X | #5 |
20 20 . e +---++-----------------------|
21 21 . n | D || X | X | X | X |
22 22 . t LFS |---++-----------------------|
23 23 . | E || #3 | #3 | X | #6 |
24 24 . |---++-----------------------+
25 25
26 26 $ hg init server
27 27 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
28 28
29 29 Skip the experimental.changegroup3=True config. Failure to agree on this comes
30 30 first, and causes a "ValueError: no common changegroup version" or "abort:
31 31 HTTP Error 500: Internal Server Error", if the extension is only loaded on one
32 32 side. If that *is* enabled, the subsequent failure is "abort: missing processor
33 33 for flag '0x2000'!" if the extension is only loaded on one side (possibly also
34 34 masked by the Internal Server Error message).
35 35 $ cat >> $HGRCPATH <<EOF
36 36 > [lfs]
37 37 > url=file:$TESTTMP/dummy-remote/
38 38 > threshold=10
39 39 > [web]
40 40 > allow_push=*
41 41 > push_ssl=False
42 42 > EOF
43 43
44 44 #if lfsremote-on
45 45 $ hg --config extensions.lfs= -R server \
46 46 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
47 47 #else
48 48 $ hg --config extensions.lfs=! -R server \
49 49 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
50 50 #endif
51 51
52 52 $ cat hg.pid >> $DAEMON_PIDS
53 53 $ hg clone -q http://localhost:$HGPORT client
54 54 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
55 55 [1]
56 56
57 57 --------------------------------------------------------------------------------
58 58 Case #1: client with non-lfs content and the extension disabled; server with
59 59 non-lfs content, and the extension enabled.
60 60
61 61 $ cd client
62 62 $ echo 'non-lfs' > nonlfs.txt
63 63 $ hg ci -Aqm 'non-lfs'
64 64 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
65 65 [1]
66 66
67 67 #if lfsremote-on
68 68
69 69 $ hg push -q
70 70 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
71 71 [1]
72 72
73 TODO: fail more gracefully, or don't mandate changegroup3 for non-lfs repos.
74
75 73 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
76 abort: HTTP Error 500: Internal Server Error
77 [255]
78 74 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
79 grep: $TESTTMP/client1_clone/.hg/requires: $ENOENT$
80 [2]
81
82 TODO: fail more gracefully, or don't mandate changegroup3 for non-lfs repos.
75 [1]
83 76
84 77 $ hg init $TESTTMP/client1_pull
85 78 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
86 abort: HTTP Error 500: Internal Server Error
87 [255]
88 79 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
89 80 [1]
90 81
91 82 $ hg identify http://localhost:$HGPORT
92 83 d437e1d24fbd
93 84
94 85 #endif
95 86
96 87 --------------------------------------------------------------------------------
97 88 Case #2: client with non-lfs content and the extension enabled; server with
98 89 non-lfs content, and the extension state controlled by #testcases.
99 90
100 91 $ cat >> $HGRCPATH <<EOF
101 92 > [extensions]
102 93 > lfs =
103 94 > EOF
104 95 $ echo 'non-lfs' > nonlfs2.txt
105 96 $ hg ci -Aqm 'non-lfs file with lfs client'
106 97
107 TODO: fail more gracefully here
108 $ hg push -q 2>&1 | grep '^[A-Z]' || true
109 Traceback (most recent call last): (lfsremote-off !)
110 ValueError: no common changegroup version (lfsremote-off !)
98 Since no lfs content has been added yet, the push is allowed, even when the
99 extension is not enabled remotely.
100
101 $ hg push -q
111 102 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
112 103 [1]
113 104
114 105 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
115 106 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
116 107 [1]
117 108
118 109 $ hg init $TESTTMP/client2_pull
119 110 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
120 111 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
121 112 [1]
122 113
123 XXX: The difference here is the push failed above when the extension isn't
124 enabled on the server. The extension shouldn't need to mess with changegroup
125 versions if there is no lfs content. But the requirement needs to be
126 consistently added before that can be ratcheted back.
127 114 $ hg identify http://localhost:$HGPORT
128 1477875038c6 (lfsremote-on !)
129 000000000000 (lfsremote-off !)
115 1477875038c6
130 116
131 117 --------------------------------------------------------------------------------
132 118 Case #3: client with lfs content and the extension enabled; server with
133 119 non-lfs content, and the extension state controlled by #testcases. The server
134 120 should have an 'lfs' requirement after it picks up its first commit with a blob.
135 121
136 122 $ echo 'this is a big lfs file' > lfs.bin
137 123 $ hg ci -Aqm 'lfs'
138 124 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
139 125 .hg/requires:lfs
140 126
141 127 TODO: fail more gracefully here
142 128 $ hg push -q 2>&1 | grep '^[A-Z]' || true
143 129 Traceback (most recent call last): (lfsremote-off !)
144 130 ValueError: no common changegroup version (lfsremote-off !)
145 131 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
146 132 .hg/requires:lfs
147 133 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
148 134
149 135 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
150 136 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
151 137 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
152 138 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
153 139
154 140 $ hg init $TESTTMP/client3_pull
155 141 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
156 142 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
157 143 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
158 144 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
159 145
160 XXX: The difference here is the push failed above when the extension isn't
161 enabled on the server. The extension shouldn't need to mess with changegroup
162 versions if there is no lfs content. But the requirement needs to be
163 consistently added before that can be ratcheted back.
146 The difference here is the push failed above when the extension isn't
147 enabled on the server.
164 148 $ hg identify http://localhost:$HGPORT
165 149 8374dc4052cb (lfsremote-on !)
166 000000000000 (lfsremote-off !)
150 1477875038c6 (lfsremote-off !)
167 151
168 152 Don't bother testing the lfsremote-off cases- the server won't be able
169 153 to launch if there's lfs content and the extension is disabled.
170 154
171 155 #if lfsremote-on
172 156
173 157 --------------------------------------------------------------------------------
174 158 Case #4: client with non-lfs content and the extension disabled; server with
175 159 lfs content, and the extension enabled.
176 160
177 161 $ cat >> $HGRCPATH <<EOF
178 162 > [extensions]
179 163 > lfs = !
180 164 > EOF
181 165
182 166 $ hg init $TESTTMP/client4
183 167 $ cd $TESTTMP/client4
184 168 $ cat >> .hg/hgrc <<EOF
185 169 > [paths]
186 170 > default = http://localhost:$HGPORT
187 171 > EOF
188 172 $ echo 'non-lfs' > nonlfs2.txt
189 173 $ hg ci -Aqm 'non-lfs'
190 174 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
191 175 $TESTTMP/server/.hg/requires:lfs
192 176
193 177 $ hg push -q --force
194 178 warning: repository is unrelated
195 179 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
196 180 $TESTTMP/server/.hg/requires:lfs
197 181
198 182 TODO: fail more gracefully.
199 183
200 184 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client4_clone
201 185 abort: HTTP Error 500: Internal Server Error
202 186 [255]
203 187 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
204 188 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
205 189 $TESTTMP/server/.hg/requires:lfs
206 190 [2]
207 191
208 192 TODO: fail more gracefully.
209 193
210 194 $ hg init $TESTTMP/client4_pull
211 195 $ hg -R $TESTTMP/client4_pull pull -q http://localhost:$HGPORT
212 196 abort: HTTP Error 500: Internal Server Error
213 197 [255]
214 198 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
215 199 $TESTTMP/server/.hg/requires:lfs
216 200
217 201 $ hg identify http://localhost:$HGPORT
218 202 03b080fa9d93
219 203
220 204 --------------------------------------------------------------------------------
221 205 Case #5: client with non-lfs content and the extension enabled; server with
222 206 lfs content, and the extension enabled.
223 207
224 208 $ cat >> $HGRCPATH <<EOF
225 209 > [extensions]
226 210 > lfs =
227 211 > EOF
228 212 $ echo 'non-lfs' > nonlfs3.txt
229 213 $ hg ci -Aqm 'non-lfs file with lfs client'
230 214
231 215 $ hg push -q
232 216 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
233 217 $TESTTMP/server/.hg/requires:lfs
234 218
235 219 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
236 220 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
237 221 $TESTTMP/client5_clone/.hg/requires:lfs
238 222 $TESTTMP/server/.hg/requires:lfs
239 223
240 224 $ hg init $TESTTMP/client5_pull
241 225 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
242 226 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
243 227 $TESTTMP/client5_pull/.hg/requires:lfs
244 228 $TESTTMP/server/.hg/requires:lfs
245 229
246 230 $ hg identify http://localhost:$HGPORT
247 231 c729025cc5e3
248 232
249 233 --------------------------------------------------------------------------------
250 234 Case #6: client with lfs content and the extension enabled; server with
251 235 lfs content, and the extension enabled.
252 236
253 237 $ echo 'this is another lfs file' > lfs2.txt
254 238 $ hg ci -Aqm 'lfs file with lfs client'
255 239
256 240 $ hg push -q
257 241 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
258 242 .hg/requires:lfs
259 243 $TESTTMP/server/.hg/requires:lfs
260 244
261 245 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
262 246 $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
263 247 $TESTTMP/client6_clone/.hg/requires:lfs
264 248 $TESTTMP/server/.hg/requires:lfs
265 249
266 250 $ hg init $TESTTMP/client6_pull
267 251 $ hg -R $TESTTMP/client6_pull pull -q http://localhost:$HGPORT
268 252 $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
269 253 $TESTTMP/client6_pull/.hg/requires:lfs
270 254 $TESTTMP/server/.hg/requires:lfs
271 255
272 256 $ hg identify http://localhost:$HGPORT
273 257 d3b84d50eacb
274 258
275 259 --------------------------------------------------------------------------------
276 260 Misc: process dies early if a requirement exists and the extension is disabled
277 261
278 262 $ hg --config extensions.lfs=! summary
279 263 abort: repository requires features unknown to this Mercurial: lfs!
280 264 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
281 265 [255]
282 266
283 267 #endif
284 268
285 269 $ $PYTHON $TESTDIR/killdaemons.py $DAEMON_PIDS
286 270
287 271 #if lfsremote-on
288 272 $ cat $TESTTMP/errors.log | grep '^[A-Z]'
289 273 Traceback (most recent call last):
290 274 ValueError: no common changegroup version
291 275 Traceback (most recent call last):
292 276 ValueError: no common changegroup version
293 Traceback (most recent call last):
294 ValueError: no common changegroup version
295 Traceback (most recent call last):
296 ValueError: no common changegroup version
297 277 #else
298 278 $ cat $TESTTMP/errors.log
299 279 #endif
General Comments 0
You need to be logged in to leave comments. Login now