##// END OF EJS Templates
lfs: show a friendly message when pushing lfs to a server without lfs enabled...
Matt Harbison -
r35522:fa865878 default
parent child Browse files
Show More
@@ -1,214 +1,218 b''
1 # lfs - hash-preserving large file support using Git-LFS protocol
1 # lfs - hash-preserving large file support using Git-LFS protocol
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """lfs - large file support (EXPERIMENTAL)
8 """lfs - large file support (EXPERIMENTAL)
9
9
10 Configs::
10 Configs::
11
11
12 [lfs]
12 [lfs]
13 # Remote endpoint. Multiple protocols are supported:
13 # Remote endpoint. Multiple protocols are supported:
14 # - http(s)://user:pass@example.com/path
14 # - http(s)://user:pass@example.com/path
15 # git-lfs endpoint
15 # git-lfs endpoint
16 # - file:///tmp/path
16 # - file:///tmp/path
17 # local filesystem, usually for testing
17 # local filesystem, usually for testing
18 # if unset, lfs will prompt setting this when it must use this value.
18 # if unset, lfs will prompt setting this when it must use this value.
19 # (default: unset)
19 # (default: unset)
20 url = https://example.com/lfs
20 url = https://example.com/lfs
21
21
22 # size of a file to make it use LFS
22 # size of a file to make it use LFS
23 threshold = 10M
23 threshold = 10M
24
24
25 # how many times to retry before giving up on transferring an object
25 # how many times to retry before giving up on transferring an object
26 retry = 5
26 retry = 5
27
27
28 # the local directory to store lfs files for sharing across local clones.
28 # the local directory to store lfs files for sharing across local clones.
29 # If not set, the cache is located in an OS specific cache location.
29 # If not set, the cache is located in an OS specific cache location.
30 usercache = /path/to/global/cache
30 usercache = /path/to/global/cache
31 """
31 """
32
32
33 from __future__ import absolute_import
33 from __future__ import absolute_import
34
34
35 from mercurial.i18n import _
35 from mercurial.i18n import _
36
36
37 from mercurial import (
37 from mercurial import (
38 bundle2,
38 bundle2,
39 changegroup,
39 changegroup,
40 context,
40 context,
41 exchange,
41 exchange,
42 extensions,
42 extensions,
43 filelog,
43 filelog,
44 hg,
44 hg,
45 localrepo,
45 localrepo,
46 node,
46 node,
47 registrar,
47 registrar,
48 revlog,
48 revlog,
49 scmutil,
49 scmutil,
50 upgrade,
50 upgrade,
51 vfs as vfsmod,
51 vfs as vfsmod,
52 wireproto,
52 )
53 )
53
54
54 from . import (
55 from . import (
55 blobstore,
56 blobstore,
56 wrapper,
57 wrapper,
57 )
58 )
58
59
59 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
60 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
61 # be specifying the version(s) of Mercurial they are tested with, or
62 # be specifying the version(s) of Mercurial they are tested with, or
62 # leave the attribute unspecified.
63 # leave the attribute unspecified.
63 testedwith = 'ships-with-hg-core'
64 testedwith = 'ships-with-hg-core'
64
65
65 configtable = {}
66 configtable = {}
66 configitem = registrar.configitem(configtable)
67 configitem = registrar.configitem(configtable)
67
68
68 configitem('experimental', 'lfs.user-agent',
69 configitem('experimental', 'lfs.user-agent',
69 default=None,
70 default=None,
70 )
71 )
71
72
72 configitem('lfs', 'url',
73 configitem('lfs', 'url',
73 default=configitem.dynamicdefault,
74 default=configitem.dynamicdefault,
74 )
75 )
75 configitem('lfs', 'usercache',
76 configitem('lfs', 'usercache',
76 default=None,
77 default=None,
77 )
78 )
78 configitem('lfs', 'threshold',
79 configitem('lfs', 'threshold',
79 default=None,
80 default=None,
80 )
81 )
81 configitem('lfs', 'retry',
82 configitem('lfs', 'retry',
82 default=5,
83 default=5,
83 )
84 )
84 # Deprecated
85 # Deprecated
85 configitem('lfs', 'remotestore',
86 configitem('lfs', 'remotestore',
86 default=None,
87 default=None,
87 )
88 )
88 # Deprecated
89 # Deprecated
89 configitem('lfs', 'dummy',
90 configitem('lfs', 'dummy',
90 default=None,
91 default=None,
91 )
92 )
92 # Deprecated
93 # Deprecated
93 configitem('lfs', 'git-lfs',
94 configitem('lfs', 'git-lfs',
94 default=None,
95 default=None,
95 )
96 )
96
97
97 cmdtable = {}
98 cmdtable = {}
98 command = registrar.command(cmdtable)
99 command = registrar.command(cmdtable)
99
100
100 templatekeyword = registrar.templatekeyword()
101 templatekeyword = registrar.templatekeyword()
101
102
102 def featuresetup(ui, supported):
103 def featuresetup(ui, supported):
103 # don't die on seeing a repo with the lfs requirement
104 # don't die on seeing a repo with the lfs requirement
104 supported |= {'lfs'}
105 supported |= {'lfs'}
105
106
106 def uisetup(ui):
107 def uisetup(ui):
107 localrepo.localrepository.featuresetupfuncs.add(featuresetup)
108 localrepo.localrepository.featuresetupfuncs.add(featuresetup)
108
109
109 def reposetup(ui, repo):
110 def reposetup(ui, repo):
110 # Nothing to do with a remote repo
111 # Nothing to do with a remote repo
111 if not repo.local():
112 if not repo.local():
112 return
113 return
113
114
114 threshold = repo.ui.configbytes('lfs', 'threshold')
115 threshold = repo.ui.configbytes('lfs', 'threshold')
115
116
116 repo.svfs.options['lfsthreshold'] = threshold
117 repo.svfs.options['lfsthreshold'] = threshold
117 repo.svfs.lfslocalblobstore = blobstore.local(repo)
118 repo.svfs.lfslocalblobstore = blobstore.local(repo)
118 repo.svfs.lfsremoteblobstore = blobstore.remote(repo)
119 repo.svfs.lfsremoteblobstore = blobstore.remote(repo)
119
120
120 # Push hook
121 # Push hook
121 repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
122 repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
122
123
123 if 'lfs' not in repo.requirements:
124 if 'lfs' not in repo.requirements:
124 def checkrequireslfs(ui, repo, **kwargs):
125 def checkrequireslfs(ui, repo, **kwargs):
125 if 'lfs' not in repo.requirements:
126 if 'lfs' not in repo.requirements:
126 last = kwargs.get('node_last')
127 last = kwargs.get('node_last')
127 _bin = node.bin
128 _bin = node.bin
128 if last:
129 if last:
129 s = repo.set('%n:%n', _bin(kwargs['node']), _bin(last))
130 s = repo.set('%n:%n', _bin(kwargs['node']), _bin(last))
130 else:
131 else:
131 s = repo.set('%n', _bin(kwargs['node']))
132 s = repo.set('%n', _bin(kwargs['node']))
132 for ctx in s:
133 for ctx in s:
133 # TODO: is there a way to just walk the files in the commit?
134 # TODO: is there a way to just walk the files in the commit?
134 if any(ctx[f].islfs() for f in ctx.files() if f in ctx):
135 if any(ctx[f].islfs() for f in ctx.files() if f in ctx):
135 repo.requirements.add('lfs')
136 repo.requirements.add('lfs')
136 repo._writerequirements()
137 repo._writerequirements()
137 break
138 break
138
139
139 ui.setconfig('hooks', 'commit.lfs', checkrequireslfs, 'lfs')
140 ui.setconfig('hooks', 'commit.lfs', checkrequireslfs, 'lfs')
140 ui.setconfig('hooks', 'pretxnchangegroup.lfs', checkrequireslfs, 'lfs')
141 ui.setconfig('hooks', 'pretxnchangegroup.lfs', checkrequireslfs, 'lfs')
141
142
142 def wrapfilelog(filelog):
143 def wrapfilelog(filelog):
143 wrapfunction = extensions.wrapfunction
144 wrapfunction = extensions.wrapfunction
144
145
145 wrapfunction(filelog, 'addrevision', wrapper.filelogaddrevision)
146 wrapfunction(filelog, 'addrevision', wrapper.filelogaddrevision)
146 wrapfunction(filelog, 'renamed', wrapper.filelogrenamed)
147 wrapfunction(filelog, 'renamed', wrapper.filelogrenamed)
147 wrapfunction(filelog, 'size', wrapper.filelogsize)
148 wrapfunction(filelog, 'size', wrapper.filelogsize)
148
149
149 def extsetup(ui):
150 def extsetup(ui):
150 wrapfilelog(filelog.filelog)
151 wrapfilelog(filelog.filelog)
151
152
152 wrapfunction = extensions.wrapfunction
153 wrapfunction = extensions.wrapfunction
153
154
154 wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink)
155 wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink)
155
156
156 wrapfunction(upgrade, '_finishdatamigration',
157 wrapfunction(upgrade, '_finishdatamigration',
157 wrapper.upgradefinishdatamigration)
158 wrapper.upgradefinishdatamigration)
158
159
159 wrapfunction(upgrade, 'preservedrequirements',
160 wrapfunction(upgrade, 'preservedrequirements',
160 wrapper.upgraderequirements)
161 wrapper.upgraderequirements)
161
162
162 wrapfunction(upgrade, 'supporteddestrequirements',
163 wrapfunction(upgrade, 'supporteddestrequirements',
163 wrapper.upgraderequirements)
164 wrapper.upgraderequirements)
164
165
165 wrapfunction(changegroup,
166 wrapfunction(changegroup,
166 'supportedoutgoingversions',
167 'supportedoutgoingversions',
167 wrapper.supportedoutgoingversions)
168 wrapper.supportedoutgoingversions)
168 wrapfunction(changegroup,
169 wrapfunction(changegroup,
169 'allsupportedversions',
170 'allsupportedversions',
170 wrapper.allsupportedversions)
171 wrapper.allsupportedversions)
171
172
173 wrapfunction(exchange, 'push', wrapper.push)
174 wrapfunction(wireproto, '_capabilities', wrapper._capabilities)
175
172 wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp)
176 wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp)
173 wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary)
177 wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary)
174 context.basefilectx.islfs = wrapper.filectxislfs
178 context.basefilectx.islfs = wrapper.filectxislfs
175
179
176 revlog.addflagprocessor(
180 revlog.addflagprocessor(
177 revlog.REVIDX_EXTSTORED,
181 revlog.REVIDX_EXTSTORED,
178 (
182 (
179 wrapper.readfromstore,
183 wrapper.readfromstore,
180 wrapper.writetostore,
184 wrapper.writetostore,
181 wrapper.bypasscheckhash,
185 wrapper.bypasscheckhash,
182 ),
186 ),
183 )
187 )
184
188
185 wrapfunction(hg, 'clone', wrapper.hgclone)
189 wrapfunction(hg, 'clone', wrapper.hgclone)
186 wrapfunction(hg, 'postshare', wrapper.hgpostshare)
190 wrapfunction(hg, 'postshare', wrapper.hgpostshare)
187
191
188 # Make bundle choose changegroup3 instead of changegroup2. This affects
192 # Make bundle choose changegroup3 instead of changegroup2. This affects
189 # "hg bundle" command. Note: it does not cover all bundle formats like
193 # "hg bundle" command. Note: it does not cover all bundle formats like
190 # "packed1". Using "packed1" with lfs will likely cause trouble.
194 # "packed1". Using "packed1" with lfs will likely cause trouble.
191 names = [k for k, v in exchange._bundlespeccgversions.items() if v == '02']
195 names = [k for k, v in exchange._bundlespeccgversions.items() if v == '02']
192 for k in names:
196 for k in names:
193 exchange._bundlespeccgversions[k] = '03'
197 exchange._bundlespeccgversions[k] = '03'
194
198
195 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
199 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
196 # options and blob stores are passed from othervfs to the new readonlyvfs.
200 # options and blob stores are passed from othervfs to the new readonlyvfs.
197 wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit)
201 wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit)
198
202
199 # when writing a bundle via "hg bundle" command, upload related LFS blobs
203 # when writing a bundle via "hg bundle" command, upload related LFS blobs
200 wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
204 wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
201
205
202 @templatekeyword('lfs_files')
206 @templatekeyword('lfs_files')
203 def lfsfiles(repo, ctx, **args):
207 def lfsfiles(repo, ctx, **args):
204 """List of strings. LFS files added or modified by the changeset."""
208 """List of strings. LFS files added or modified by the changeset."""
205 pointers = wrapper.pointersfromctx(ctx) # {path: pointer}
209 pointers = wrapper.pointersfromctx(ctx) # {path: pointer}
206 return sorted(pointers.keys())
210 return sorted(pointers.keys())
207
211
208 @command('debuglfsupload',
212 @command('debuglfsupload',
209 [('r', 'rev', [], _('upload large files introduced by REV'))])
213 [('r', 'rev', [], _('upload large files introduced by REV'))])
210 def debuglfsupload(ui, repo, **opts):
214 def debuglfsupload(ui, repo, **opts):
211 """upload lfs blobs added by the working copy parent or given revisions"""
215 """upload lfs blobs added by the working copy parent or given revisions"""
212 revs = opts.get('rev', [])
216 revs = opts.get('rev', [])
213 pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
217 pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
214 wrapper.uploadblobs(repo, pointers)
218 wrapper.uploadblobs(repo, pointers)
@@ -1,324 +1,345 b''
1 # wrapper.py - methods wrapping core mercurial logic
1 # wrapper.py - methods wrapping core mercurial logic
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial.node import bin, nullid, short
13 from mercurial.node import bin, nullid, short
14
14
15 from mercurial import (
15 from mercurial import (
16 error,
16 error,
17 filelog,
17 filelog,
18 revlog,
18 revlog,
19 util,
19 util,
20 )
20 )
21
21
22 from ..largefiles import lfutil
22 from ..largefiles import lfutil
23
23
24 from . import (
24 from . import (
25 blobstore,
25 blobstore,
26 pointer,
26 pointer,
27 )
27 )
28
28
29 def supportedoutgoingversions(orig, repo):
29 def supportedoutgoingversions(orig, repo):
30 versions = orig(repo)
30 versions = orig(repo)
31 if 'lfs' in repo.requirements:
31 if 'lfs' in repo.requirements:
32 versions.discard('01')
32 versions.discard('01')
33 versions.discard('02')
33 versions.discard('02')
34 versions.add('03')
34 versions.add('03')
35 return versions
35 return versions
36
36
37 def allsupportedversions(orig, ui):
37 def allsupportedversions(orig, ui):
38 versions = orig(ui)
38 versions = orig(ui)
39 versions.add('03')
39 versions.add('03')
40 return versions
40 return versions
41
41
42 def _capabilities(orig, repo, proto):
43 '''Wrap server command to announce lfs server capability'''
44 caps = orig(repo, proto)
45 # XXX: change to 'lfs=serve' when separate git server isn't required?
46 caps.append('lfs')
47 return caps
48
42 def bypasscheckhash(self, text):
49 def bypasscheckhash(self, text):
43 return False
50 return False
44
51
45 def readfromstore(self, text):
52 def readfromstore(self, text):
46 """Read filelog content from local blobstore transform for flagprocessor.
53 """Read filelog content from local blobstore transform for flagprocessor.
47
54
48 Default transform for flagprocessor, returning contents from blobstore.
55 Default transform for flagprocessor, returning contents from blobstore.
49 Returns a 2-tuple (text, validatehash) where validatehash is True as the
56 Returns a 2-tuple (text, validatehash) where validatehash is True as the
50 contents of the blobstore should be checked using checkhash.
57 contents of the blobstore should be checked using checkhash.
51 """
58 """
52 p = pointer.deserialize(text)
59 p = pointer.deserialize(text)
53 oid = p.oid()
60 oid = p.oid()
54 store = self.opener.lfslocalblobstore
61 store = self.opener.lfslocalblobstore
55 if not store.has(oid):
62 if not store.has(oid):
56 p.filename = getattr(self, 'indexfile', None)
63 p.filename = getattr(self, 'indexfile', None)
57 self.opener.lfsremoteblobstore.readbatch([p], store)
64 self.opener.lfsremoteblobstore.readbatch([p], store)
58
65
59 # The caller will validate the content
66 # The caller will validate the content
60 text = store.read(oid, verify=False)
67 text = store.read(oid, verify=False)
61
68
62 # pack hg filelog metadata
69 # pack hg filelog metadata
63 hgmeta = {}
70 hgmeta = {}
64 for k in p.keys():
71 for k in p.keys():
65 if k.startswith('x-hg-'):
72 if k.startswith('x-hg-'):
66 name = k[len('x-hg-'):]
73 name = k[len('x-hg-'):]
67 hgmeta[name] = p[k]
74 hgmeta[name] = p[k]
68 if hgmeta or text.startswith('\1\n'):
75 if hgmeta or text.startswith('\1\n'):
69 text = filelog.packmeta(hgmeta, text)
76 text = filelog.packmeta(hgmeta, text)
70
77
71 return (text, True)
78 return (text, True)
72
79
73 def writetostore(self, text):
80 def writetostore(self, text):
74 # hg filelog metadata (includes rename, etc)
81 # hg filelog metadata (includes rename, etc)
75 hgmeta, offset = filelog.parsemeta(text)
82 hgmeta, offset = filelog.parsemeta(text)
76 if offset and offset > 0:
83 if offset and offset > 0:
77 # lfs blob does not contain hg filelog metadata
84 # lfs blob does not contain hg filelog metadata
78 text = text[offset:]
85 text = text[offset:]
79
86
80 # git-lfs only supports sha256
87 # git-lfs only supports sha256
81 oid = hashlib.sha256(text).hexdigest()
88 oid = hashlib.sha256(text).hexdigest()
82 self.opener.lfslocalblobstore.write(oid, text, verify=False)
89 self.opener.lfslocalblobstore.write(oid, text, verify=False)
83
90
84 # replace contents with metadata
91 # replace contents with metadata
85 longoid = 'sha256:%s' % oid
92 longoid = 'sha256:%s' % oid
86 metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))
93 metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))
87
94
88 # by default, we expect the content to be binary. however, LFS could also
95 # by default, we expect the content to be binary. however, LFS could also
89 # be used for non-binary content. add a special entry for non-binary data.
96 # be used for non-binary content. add a special entry for non-binary data.
90 # this will be used by filectx.isbinary().
97 # this will be used by filectx.isbinary().
91 if not util.binary(text):
98 if not util.binary(text):
92 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
99 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
93 metadata['x-is-binary'] = '0'
100 metadata['x-is-binary'] = '0'
94
101
95 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
102 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
96 if hgmeta is not None:
103 if hgmeta is not None:
97 for k, v in hgmeta.iteritems():
104 for k, v in hgmeta.iteritems():
98 metadata['x-hg-%s' % k] = v
105 metadata['x-hg-%s' % k] = v
99
106
100 rawtext = metadata.serialize()
107 rawtext = metadata.serialize()
101 return (rawtext, False)
108 return (rawtext, False)
102
109
103 def _islfs(rlog, node=None, rev=None):
110 def _islfs(rlog, node=None, rev=None):
104 if rev is None:
111 if rev is None:
105 if node is None:
112 if node is None:
106 # both None - likely working copy content where node is not ready
113 # both None - likely working copy content where node is not ready
107 return False
114 return False
108 rev = rlog.rev(node)
115 rev = rlog.rev(node)
109 else:
116 else:
110 node = rlog.node(rev)
117 node = rlog.node(rev)
111 if node == nullid:
118 if node == nullid:
112 return False
119 return False
113 flags = rlog.flags(rev)
120 flags = rlog.flags(rev)
114 return bool(flags & revlog.REVIDX_EXTSTORED)
121 return bool(flags & revlog.REVIDX_EXTSTORED)
115
122
116 def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
123 def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
117 cachedelta=None, node=None,
124 cachedelta=None, node=None,
118 flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
125 flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
119 threshold = self.opener.options['lfsthreshold']
126 threshold = self.opener.options['lfsthreshold']
120 textlen = len(text)
127 textlen = len(text)
121 # exclude hg rename meta from file size
128 # exclude hg rename meta from file size
122 meta, offset = filelog.parsemeta(text)
129 meta, offset = filelog.parsemeta(text)
123 if offset:
130 if offset:
124 textlen -= offset
131 textlen -= offset
125
132
126 if threshold and textlen > threshold:
133 if threshold and textlen > threshold:
127 flags |= revlog.REVIDX_EXTSTORED
134 flags |= revlog.REVIDX_EXTSTORED
128
135
129 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
136 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
130 node=node, flags=flags, **kwds)
137 node=node, flags=flags, **kwds)
131
138
132 def filelogrenamed(orig, self, node):
139 def filelogrenamed(orig, self, node):
133 if _islfs(self, node):
140 if _islfs(self, node):
134 rawtext = self.revision(node, raw=True)
141 rawtext = self.revision(node, raw=True)
135 if not rawtext:
142 if not rawtext:
136 return False
143 return False
137 metadata = pointer.deserialize(rawtext)
144 metadata = pointer.deserialize(rawtext)
138 if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
145 if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
139 return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
146 return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
140 else:
147 else:
141 return False
148 return False
142 return orig(self, node)
149 return orig(self, node)
143
150
144 def filelogsize(orig, self, rev):
151 def filelogsize(orig, self, rev):
145 if _islfs(self, rev=rev):
152 if _islfs(self, rev=rev):
146 # fast path: use lfs metadata to answer size
153 # fast path: use lfs metadata to answer size
147 rawtext = self.revision(rev, raw=True)
154 rawtext = self.revision(rev, raw=True)
148 metadata = pointer.deserialize(rawtext)
155 metadata = pointer.deserialize(rawtext)
149 return int(metadata['size'])
156 return int(metadata['size'])
150 return orig(self, rev)
157 return orig(self, rev)
151
158
152 def filectxcmp(orig, self, fctx):
159 def filectxcmp(orig, self, fctx):
153 """returns True if text is different than fctx"""
160 """returns True if text is different than fctx"""
154 # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
161 # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
155 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
162 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
156 # fast path: check LFS oid
163 # fast path: check LFS oid
157 p1 = pointer.deserialize(self.rawdata())
164 p1 = pointer.deserialize(self.rawdata())
158 p2 = pointer.deserialize(fctx.rawdata())
165 p2 = pointer.deserialize(fctx.rawdata())
159 return p1.oid() != p2.oid()
166 return p1.oid() != p2.oid()
160 return orig(self, fctx)
167 return orig(self, fctx)
161
168
162 def filectxisbinary(orig, self):
169 def filectxisbinary(orig, self):
163 if self.islfs():
170 if self.islfs():
164 # fast path: use lfs metadata to answer isbinary
171 # fast path: use lfs metadata to answer isbinary
165 metadata = pointer.deserialize(self.rawdata())
172 metadata = pointer.deserialize(self.rawdata())
166 # if lfs metadata says nothing, assume it's binary by default
173 # if lfs metadata says nothing, assume it's binary by default
167 return bool(int(metadata.get('x-is-binary', 1)))
174 return bool(int(metadata.get('x-is-binary', 1)))
168 return orig(self)
175 return orig(self)
169
176
170 def filectxislfs(self):
177 def filectxislfs(self):
171 return _islfs(self.filelog(), self.filenode())
178 return _islfs(self.filelog(), self.filenode())
172
179
173 def convertsink(orig, sink):
180 def convertsink(orig, sink):
174 sink = orig(sink)
181 sink = orig(sink)
175 if sink.repotype == 'hg':
182 if sink.repotype == 'hg':
176 class lfssink(sink.__class__):
183 class lfssink(sink.__class__):
177 def putcommit(self, files, copies, parents, commit, source, revmap,
184 def putcommit(self, files, copies, parents, commit, source, revmap,
178 full, cleanp2):
185 full, cleanp2):
179 pc = super(lfssink, self).putcommit
186 pc = super(lfssink, self).putcommit
180 node = pc(files, copies, parents, commit, source, revmap, full,
187 node = pc(files, copies, parents, commit, source, revmap, full,
181 cleanp2)
188 cleanp2)
182
189
183 if 'lfs' not in self.repo.requirements:
190 if 'lfs' not in self.repo.requirements:
184 ctx = self.repo[node]
191 ctx = self.repo[node]
185
192
186 # The file list may contain removed files, so check for
193 # The file list may contain removed files, so check for
187 # membership before assuming it is in the context.
194 # membership before assuming it is in the context.
188 if any(f in ctx and ctx[f].islfs() for f, n in files):
195 if any(f in ctx and ctx[f].islfs() for f, n in files):
189 self.repo.requirements.add('lfs')
196 self.repo.requirements.add('lfs')
190 self.repo._writerequirements()
197 self.repo._writerequirements()
191
198
192 # Permanently enable lfs locally
199 # Permanently enable lfs locally
193 with self.repo.vfs('hgrc', 'a', text=True) as fp:
200 with self.repo.vfs('hgrc', 'a', text=True) as fp:
194 fp.write('\n[extensions]\nlfs=\n')
201 fp.write('\n[extensions]\nlfs=\n')
195
202
196 return node
203 return node
197
204
198 sink.__class__ = lfssink
205 sink.__class__ = lfssink
199
206
200 return sink
207 return sink
201
208
202 def vfsinit(orig, self, othervfs):
209 def vfsinit(orig, self, othervfs):
203 orig(self, othervfs)
210 orig(self, othervfs)
204 # copy lfs related options
211 # copy lfs related options
205 for k, v in othervfs.options.items():
212 for k, v in othervfs.options.items():
206 if k.startswith('lfs'):
213 if k.startswith('lfs'):
207 self.options[k] = v
214 self.options[k] = v
208 # also copy lfs blobstores. note: this can run before reposetup, so lfs
215 # also copy lfs blobstores. note: this can run before reposetup, so lfs
209 # blobstore attributes are not always ready at this time.
216 # blobstore attributes are not always ready at this time.
210 for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
217 for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
211 if util.safehasattr(othervfs, name):
218 if util.safehasattr(othervfs, name):
212 setattr(self, name, getattr(othervfs, name))
219 setattr(self, name, getattr(othervfs, name))
213
220
214 def hgclone(orig, ui, opts, *args, **kwargs):
221 def hgclone(orig, ui, opts, *args, **kwargs):
215 result = orig(ui, opts, *args, **kwargs)
222 result = orig(ui, opts, *args, **kwargs)
216
223
217 if result is not None:
224 if result is not None:
218 sourcerepo, destrepo = result
225 sourcerepo, destrepo = result
219 repo = destrepo.local()
226 repo = destrepo.local()
220
227
221 # When cloning to a remote repo (like through SSH), no repo is available
228 # When cloning to a remote repo (like through SSH), no repo is available
222 # from the peer. Therefore the hgrc can't be updated.
229 # from the peer. Therefore the hgrc can't be updated.
223 if not repo:
230 if not repo:
224 return result
231 return result
225
232
226 # If lfs is required for this repo, permanently enable it locally
233 # If lfs is required for this repo, permanently enable it locally
227 if 'lfs' in repo.requirements:
234 if 'lfs' in repo.requirements:
228 with repo.vfs('hgrc', 'a', text=True) as fp:
235 with repo.vfs('hgrc', 'a', text=True) as fp:
229 fp.write('\n[extensions]\nlfs=\n')
236 fp.write('\n[extensions]\nlfs=\n')
230
237
231 return result
238 return result
232
239
233 def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
240 def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
234 orig(sourcerepo, destrepo, bookmarks, defaultpath)
241 orig(sourcerepo, destrepo, bookmarks, defaultpath)
235
242
236 # If lfs is required for this repo, permanently enable it locally
243 # If lfs is required for this repo, permanently enable it locally
237 if 'lfs' in destrepo.requirements:
244 if 'lfs' in destrepo.requirements:
238 with destrepo.vfs('hgrc', 'a', text=True) as fp:
245 with destrepo.vfs('hgrc', 'a', text=True) as fp:
239 fp.write('\n[extensions]\nlfs=\n')
246 fp.write('\n[extensions]\nlfs=\n')
240
247
241 def _canskipupload(repo):
248 def _canskipupload(repo):
242 # if remotestore is a null store, upload is a no-op and can be skipped
249 # if remotestore is a null store, upload is a no-op and can be skipped
243 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
250 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
244
251
245 def candownload(repo):
252 def candownload(repo):
246 # if remotestore is a null store, downloads will lead to nothing
253 # if remotestore is a null store, downloads will lead to nothing
247 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
254 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
248
255
249 def uploadblobsfromrevs(repo, revs):
256 def uploadblobsfromrevs(repo, revs):
250 '''upload lfs blobs introduced by revs
257 '''upload lfs blobs introduced by revs
251
258
252 Note: also used by other extensions e.g. infinitepush. avoid renaming.
259 Note: also used by other extensions e.g. infinitepush. avoid renaming.
253 '''
260 '''
254 if _canskipupload(repo):
261 if _canskipupload(repo):
255 return
262 return
256 pointers = extractpointers(repo, revs)
263 pointers = extractpointers(repo, revs)
257 uploadblobs(repo, pointers)
264 uploadblobs(repo, pointers)
258
265
259 def prepush(pushop):
266 def prepush(pushop):
260 """Prepush hook.
267 """Prepush hook.
261
268
262 Read through the revisions to push, looking for filelog entries that can be
269 Read through the revisions to push, looking for filelog entries that can be
263 deserialized into metadata so that we can block the push on their upload to
270 deserialized into metadata so that we can block the push on their upload to
264 the remote blobstore.
271 the remote blobstore.
265 """
272 """
266 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
273 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
267
274
275 def push(orig, repo, remote, *args, **kwargs):
276 """bail on push if the extension isn't enabled on remote when needed"""
277 if 'lfs' in repo.requirements:
278 # If the remote peer is for a local repo, the requirement tests in the
279 # base class method enforce lfs support. Otherwise, some revisions in
280 # this repo use lfs, and the remote repo needs the extension loaded.
281 if not remote.local() and not remote.capable('lfs'):
282 # This is a copy of the message in exchange.push() when requirements
283 # are missing between local repos.
284 m = _("required features are not supported in the destination: %s")
285 raise error.Abort(m % 'lfs',
286 hint=_('enable the lfs extension on the server'))
287 return orig(repo, remote, *args, **kwargs)
288
268 def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
289 def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
269 *args, **kwargs):
290 *args, **kwargs):
270 """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
291 """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
271 uploadblobsfromrevs(repo, outgoing.missing)
292 uploadblobsfromrevs(repo, outgoing.missing)
272 return orig(ui, repo, source, filename, bundletype, outgoing, *args,
293 return orig(ui, repo, source, filename, bundletype, outgoing, *args,
273 **kwargs)
294 **kwargs)
274
295
275 def extractpointers(repo, revs):
296 def extractpointers(repo, revs):
276 """return a list of lfs pointers added by given revs"""
297 """return a list of lfs pointers added by given revs"""
277 repo.ui.debug('lfs: computing set of blobs to upload\n')
298 repo.ui.debug('lfs: computing set of blobs to upload\n')
278 pointers = {}
299 pointers = {}
279 for r in revs:
300 for r in revs:
280 ctx = repo[r]
301 ctx = repo[r]
281 for p in pointersfromctx(ctx).values():
302 for p in pointersfromctx(ctx).values():
282 pointers[p.oid()] = p
303 pointers[p.oid()] = p
283 return sorted(pointers.values())
304 return sorted(pointers.values())
284
305
285 def pointersfromctx(ctx):
306 def pointersfromctx(ctx):
286 """return a dict {path: pointer} for given single changectx"""
307 """return a dict {path: pointer} for given single changectx"""
287 result = {}
308 result = {}
288 for f in ctx.files():
309 for f in ctx.files():
289 if f not in ctx:
310 if f not in ctx:
290 continue
311 continue
291 fctx = ctx[f]
312 fctx = ctx[f]
292 if not _islfs(fctx.filelog(), fctx.filenode()):
313 if not _islfs(fctx.filelog(), fctx.filenode()):
293 continue
314 continue
294 try:
315 try:
295 result[f] = pointer.deserialize(fctx.rawdata())
316 result[f] = pointer.deserialize(fctx.rawdata())
296 except pointer.InvalidPointer as ex:
317 except pointer.InvalidPointer as ex:
297 raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
318 raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
298 % (f, short(ctx.node()), ex))
319 % (f, short(ctx.node()), ex))
299 return result
320 return result
300
321
301 def uploadblobs(repo, pointers):
322 def uploadblobs(repo, pointers):
302 """upload given pointers from local blobstore"""
323 """upload given pointers from local blobstore"""
303 if not pointers:
324 if not pointers:
304 return
325 return
305
326
306 remoteblob = repo.svfs.lfsremoteblobstore
327 remoteblob = repo.svfs.lfsremoteblobstore
307 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
328 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
308
329
309 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
330 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
310 orig(ui, srcrepo, dstrepo, requirements)
331 orig(ui, srcrepo, dstrepo, requirements)
311
332
312 srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
333 srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
313 dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
334 dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
314
335
315 for dirpath, dirs, files in srclfsvfs.walk():
336 for dirpath, dirs, files in srclfsvfs.walk():
316 for oid in files:
337 for oid in files:
317 ui.write(_('copying lfs blob %s\n') % oid)
338 ui.write(_('copying lfs blob %s\n') % oid)
318 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
339 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
319
340
320 def upgraderequirements(orig, repo):
341 def upgraderequirements(orig, repo):
321 reqs = orig(repo)
342 reqs = orig(repo)
322 if 'lfs' in repo.requirements:
343 if 'lfs' in repo.requirements:
323 reqs.add('lfs')
344 reqs.add('lfs')
324 return reqs
345 return reqs
@@ -1,279 +1,283 b''
1 #testcases lfsremote-on lfsremote-off
1 #testcases lfsremote-on lfsremote-off
2 #require serve
2 #require serve
3
3
4 This test splits `hg serve` with and without using the extension into separate
4 This test splits `hg serve` with and without using the extension into separate
5 test cases. The tests are broken down as follows, where "LFS"/"No-LFS"
5 test cases. The tests are broken down as follows, where "LFS"/"No-LFS"
6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
6 indicates whether or not there are commits that use an LFS file, and "D"/"E"
7 indicates whether or not the extension is loaded. The "X" cases are not tested
7 indicates whether or not the extension is loaded. The "X" cases are not tested
8 individually, because the lfs requirement causes the process to bail early if
8 individually, because the lfs requirement causes the process to bail early if
9 the extension is disabled.
9 the extension is disabled.
10
10
11 . Server
11 . Server
12 .
12 .
13 . No-LFS LFS
13 . No-LFS LFS
14 . +----------------------------+
14 . +----------------------------+
15 . | || D | E | D | E |
15 . | || D | E | D | E |
16 . |---++=======================|
16 . |---++=======================|
17 . C | D || N/A | #1 | X | #4 |
17 . C | D || N/A | #1 | X | #4 |
18 . l No +---++-----------------------|
18 . l No +---++-----------------------|
19 . i LFS | E || #2 | #2 | X | #5 |
19 . i LFS | E || #2 | #2 | X | #5 |
20 . e +---++-----------------------|
20 . e +---++-----------------------|
21 . n | D || X | X | X | X |
21 . n | D || X | X | X | X |
22 . t LFS |---++-----------------------|
22 . t LFS |---++-----------------------|
23 . | E || #3 | #3 | X | #6 |
23 . | E || #3 | #3 | X | #6 |
24 . |---++-----------------------+
24 . |---++-----------------------+
25
25
26 $ hg init server
26 $ hg init server
27 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
27 $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
28
28
29 Skip the experimental.changegroup3=True config. Failure to agree on this comes
29 Skip the experimental.changegroup3=True config. Failure to agree on this comes
30 first, and causes a "ValueError: no common changegroup version" or "abort:
30 first, and causes a "ValueError: no common changegroup version" or "abort:
31 HTTP Error 500: Internal Server Error", if the extension is only loaded on one
31 HTTP Error 500: Internal Server Error", if the extension is only loaded on one
32 side. If that *is* enabled, the subsequent failure is "abort: missing processor
32 side. If that *is* enabled, the subsequent failure is "abort: missing processor
33 for flag '0x2000'!" if the extension is only loaded on one side (possibly also
33 for flag '0x2000'!" if the extension is only loaded on one side (possibly also
34 masked by the Internal Server Error message).
34 masked by the Internal Server Error message).
35 $ cat >> $HGRCPATH <<EOF
35 $ cat >> $HGRCPATH <<EOF
36 > [lfs]
36 > [lfs]
37 > url=file:$TESTTMP/dummy-remote/
37 > url=file:$TESTTMP/dummy-remote/
38 > threshold=10
38 > threshold=10
39 > [web]
39 > [web]
40 > allow_push=*
40 > allow_push=*
41 > push_ssl=False
41 > push_ssl=False
42 > EOF
42 > EOF
43
43
44 #if lfsremote-on
44 #if lfsremote-on
45 $ hg --config extensions.lfs= -R server \
45 $ hg --config extensions.lfs= -R server \
46 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
46 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
47 #else
47 #else
48 $ hg --config extensions.lfs=! -R server \
48 $ hg --config extensions.lfs=! -R server \
49 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
49 > serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
50 #endif
50 #endif
51
51
52 $ cat hg.pid >> $DAEMON_PIDS
52 $ cat hg.pid >> $DAEMON_PIDS
53 $ hg clone -q http://localhost:$HGPORT client
53 $ hg clone -q http://localhost:$HGPORT client
54 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
54 $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
55 [1]
55 [1]
56
56
57 --------------------------------------------------------------------------------
57 --------------------------------------------------------------------------------
58 Case #1: client with non-lfs content and the extension disabled; server with
58 Case #1: client with non-lfs content and the extension disabled; server with
59 non-lfs content, and the extension enabled.
59 non-lfs content, and the extension enabled.
60
60
61 $ cd client
61 $ cd client
62 $ echo 'non-lfs' > nonlfs.txt
62 $ echo 'non-lfs' > nonlfs.txt
63 $ hg ci -Aqm 'non-lfs'
63 $ hg ci -Aqm 'non-lfs'
64 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
64 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
65 [1]
65 [1]
66
66
67 #if lfsremote-on
67 #if lfsremote-on
68
68
69 $ hg push -q
69 $ hg push -q
70 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
70 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
71 [1]
71 [1]
72
72
73 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
73 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
74 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
74 $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
75 [1]
75 [1]
76
76
77 $ hg init $TESTTMP/client1_pull
77 $ hg init $TESTTMP/client1_pull
78 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
78 $ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
79 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
79 $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
80 [1]
80 [1]
81
81
82 $ hg identify http://localhost:$HGPORT
82 $ hg identify http://localhost:$HGPORT
83 d437e1d24fbd
83 d437e1d24fbd
84
84
85 #endif
85 #endif
86
86
87 --------------------------------------------------------------------------------
87 --------------------------------------------------------------------------------
88 Case #2: client with non-lfs content and the extension enabled; server with
88 Case #2: client with non-lfs content and the extension enabled; server with
89 non-lfs content, and the extension state controlled by #testcases.
89 non-lfs content, and the extension state controlled by #testcases.
90
90
91 $ cat >> $HGRCPATH <<EOF
91 $ cat >> $HGRCPATH <<EOF
92 > [extensions]
92 > [extensions]
93 > lfs =
93 > lfs =
94 > EOF
94 > EOF
95 $ echo 'non-lfs' > nonlfs2.txt
95 $ echo 'non-lfs' > nonlfs2.txt
96 $ hg ci -Aqm 'non-lfs file with lfs client'
96 $ hg ci -Aqm 'non-lfs file with lfs client'
97
97
98 Since no lfs content has been added yet, the push is allowed, even when the
98 Since no lfs content has been added yet, the push is allowed, even when the
99 extension is not enabled remotely.
99 extension is not enabled remotely.
100
100
101 $ hg push -q
101 $ hg push -q
102 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
102 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
103 [1]
103 [1]
104
104
105 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
105 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
106 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
106 $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
107 [1]
107 [1]
108
108
109 $ hg init $TESTTMP/client2_pull
109 $ hg init $TESTTMP/client2_pull
110 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
110 $ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
111 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
111 $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
112 [1]
112 [1]
113
113
114 $ hg identify http://localhost:$HGPORT
114 $ hg identify http://localhost:$HGPORT
115 1477875038c6
115 1477875038c6
116
116
117 --------------------------------------------------------------------------------
117 --------------------------------------------------------------------------------
118 Case #3: client with lfs content and the extension enabled; server with
118 Case #3: client with lfs content and the extension enabled; server with
119 non-lfs content, and the extension state controlled by #testcases. The server
119 non-lfs content, and the extension state controlled by #testcases. The server
120 should have an 'lfs' requirement after it picks up its first commit with a blob.
120 should have an 'lfs' requirement after it picks up its first commit with a blob.
121
121
122 $ echo 'this is a big lfs file' > lfs.bin
122 $ echo 'this is a big lfs file' > lfs.bin
123 $ hg ci -Aqm 'lfs'
123 $ hg ci -Aqm 'lfs'
124 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
124 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
125 .hg/requires:lfs
125 .hg/requires:lfs
126
126
127 TODO: fail more gracefully here
127 #if lfsremote-off
128 $ hg push -q 2>&1 | grep '^[A-Z]' || true
128 $ hg push -q
129 Traceback (most recent call last): (lfsremote-off !)
129 abort: required features are not supported in the destination: lfs
130 ValueError: no common changegroup version (lfsremote-off !)
130 (enable the lfs extension on the server)
131 [255]
132 #else
133 $ hg push -q
134 #endif
131 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
135 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
132 .hg/requires:lfs
136 .hg/requires:lfs
133 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
137 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
134
138
135 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
139 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
136 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
140 $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
137 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
141 $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
138 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
142 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
139
143
140 $ hg init $TESTTMP/client3_pull
144 $ hg init $TESTTMP/client3_pull
141 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
145 $ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
142 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
146 $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
143 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
147 $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
144 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
148 $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
145
149
146 The difference here is the push failed above when the extension isn't
150 The difference here is the push failed above when the extension isn't
147 enabled on the server.
151 enabled on the server.
148 $ hg identify http://localhost:$HGPORT
152 $ hg identify http://localhost:$HGPORT
149 8374dc4052cb (lfsremote-on !)
153 8374dc4052cb (lfsremote-on !)
150 1477875038c6 (lfsremote-off !)
154 1477875038c6 (lfsremote-off !)
151
155
152 Don't bother testing the lfsremote-off cases - the server won't be able
156 Don't bother testing the lfsremote-off cases - the server won't be able
153 to launch if there's lfs content and the extension is disabled.
157 to launch if there's lfs content and the extension is disabled.
154
158
155 #if lfsremote-on
159 #if lfsremote-on
156
160
157 --------------------------------------------------------------------------------
161 --------------------------------------------------------------------------------
158 Case #4: client with non-lfs content and the extension disabled; server with
162 Case #4: client with non-lfs content and the extension disabled; server with
159 lfs content, and the extension enabled.
163 lfs content, and the extension enabled.
160
164
161 $ cat >> $HGRCPATH <<EOF
165 $ cat >> $HGRCPATH <<EOF
162 > [extensions]
166 > [extensions]
163 > lfs = !
167 > lfs = !
164 > EOF
168 > EOF
165
169
166 $ hg init $TESTTMP/client4
170 $ hg init $TESTTMP/client4
167 $ cd $TESTTMP/client4
171 $ cd $TESTTMP/client4
168 $ cat >> .hg/hgrc <<EOF
172 $ cat >> .hg/hgrc <<EOF
169 > [paths]
173 > [paths]
170 > default = http://localhost:$HGPORT
174 > default = http://localhost:$HGPORT
171 > EOF
175 > EOF
172 $ echo 'non-lfs' > nonlfs2.txt
176 $ echo 'non-lfs' > nonlfs2.txt
173 $ hg ci -Aqm 'non-lfs'
177 $ hg ci -Aqm 'non-lfs'
174 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
178 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
175 $TESTTMP/server/.hg/requires:lfs
179 $TESTTMP/server/.hg/requires:lfs
176
180
177 $ hg push -q --force
181 $ hg push -q --force
178 warning: repository is unrelated
182 warning: repository is unrelated
179 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
183 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
180 $TESTTMP/server/.hg/requires:lfs
184 $TESTTMP/server/.hg/requires:lfs
181
185
182 TODO: fail more gracefully.
186 TODO: fail more gracefully.
183
187
184 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client4_clone
188 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client4_clone
185 abort: HTTP Error 500: Internal Server Error
189 abort: HTTP Error 500: Internal Server Error
186 [255]
190 [255]
187 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
191 $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
188 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
192 grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
189 $TESTTMP/server/.hg/requires:lfs
193 $TESTTMP/server/.hg/requires:lfs
190 [2]
194 [2]
191
195
192 TODO: fail more gracefully.
196 TODO: fail more gracefully.
193
197
194 $ hg init $TESTTMP/client4_pull
198 $ hg init $TESTTMP/client4_pull
195 $ hg -R $TESTTMP/client4_pull pull -q http://localhost:$HGPORT
199 $ hg -R $TESTTMP/client4_pull pull -q http://localhost:$HGPORT
196 abort: HTTP Error 500: Internal Server Error
200 abort: HTTP Error 500: Internal Server Error
197 [255]
201 [255]
198 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
202 $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
199 $TESTTMP/server/.hg/requires:lfs
203 $TESTTMP/server/.hg/requires:lfs
200
204
201 $ hg identify http://localhost:$HGPORT
205 $ hg identify http://localhost:$HGPORT
202 03b080fa9d93
206 03b080fa9d93
203
207
204 --------------------------------------------------------------------------------
208 --------------------------------------------------------------------------------
205 Case #5: client with non-lfs content and the extension enabled; server with
209 Case #5: client with non-lfs content and the extension enabled; server with
206 lfs content, and the extension enabled.
210 lfs content, and the extension enabled.
207
211
208 $ cat >> $HGRCPATH <<EOF
212 $ cat >> $HGRCPATH <<EOF
209 > [extensions]
213 > [extensions]
210 > lfs =
214 > lfs =
211 > EOF
215 > EOF
212 $ echo 'non-lfs' > nonlfs3.txt
216 $ echo 'non-lfs' > nonlfs3.txt
213 $ hg ci -Aqm 'non-lfs file with lfs client'
217 $ hg ci -Aqm 'non-lfs file with lfs client'
214
218
215 $ hg push -q
219 $ hg push -q
216 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
220 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
217 $TESTTMP/server/.hg/requires:lfs
221 $TESTTMP/server/.hg/requires:lfs
218
222
219 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
223 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
220 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
224 $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
221 $TESTTMP/client5_clone/.hg/requires:lfs
225 $TESTTMP/client5_clone/.hg/requires:lfs
222 $TESTTMP/server/.hg/requires:lfs
226 $TESTTMP/server/.hg/requires:lfs
223
227
224 $ hg init $TESTTMP/client5_pull
228 $ hg init $TESTTMP/client5_pull
225 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
229 $ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
226 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
230 $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
227 $TESTTMP/client5_pull/.hg/requires:lfs
231 $TESTTMP/client5_pull/.hg/requires:lfs
228 $TESTTMP/server/.hg/requires:lfs
232 $TESTTMP/server/.hg/requires:lfs
229
233
230 $ hg identify http://localhost:$HGPORT
234 $ hg identify http://localhost:$HGPORT
231 c729025cc5e3
235 c729025cc5e3
232
236
233 --------------------------------------------------------------------------------
237 --------------------------------------------------------------------------------
234 Case #6: client with lfs content and the extension enabled; server with
238 Case #6: client with lfs content and the extension enabled; server with
235 lfs content, and the extension enabled.
239 lfs content, and the extension enabled.
236
240
237 $ echo 'this is another lfs file' > lfs2.txt
241 $ echo 'this is another lfs file' > lfs2.txt
238 $ hg ci -Aqm 'lfs file with lfs client'
242 $ hg ci -Aqm 'lfs file with lfs client'
239
243
240 $ hg push -q
244 $ hg push -q
241 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
245 $ grep 'lfs' .hg/requires $SERVER_REQUIRES
242 .hg/requires:lfs
246 .hg/requires:lfs
243 $TESTTMP/server/.hg/requires:lfs
247 $TESTTMP/server/.hg/requires:lfs
244
248
245 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
249 $ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
246 $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
250 $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
247 $TESTTMP/client6_clone/.hg/requires:lfs
251 $TESTTMP/client6_clone/.hg/requires:lfs
248 $TESTTMP/server/.hg/requires:lfs
252 $TESTTMP/server/.hg/requires:lfs
249
253
250 $ hg init $TESTTMP/client6_pull
254 $ hg init $TESTTMP/client6_pull
251 $ hg -R $TESTTMP/client6_pull pull -q http://localhost:$HGPORT
255 $ hg -R $TESTTMP/client6_pull pull -q http://localhost:$HGPORT
252 $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
256 $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
253 $TESTTMP/client6_pull/.hg/requires:lfs
257 $TESTTMP/client6_pull/.hg/requires:lfs
254 $TESTTMP/server/.hg/requires:lfs
258 $TESTTMP/server/.hg/requires:lfs
255
259
256 $ hg identify http://localhost:$HGPORT
260 $ hg identify http://localhost:$HGPORT
257 d3b84d50eacb
261 d3b84d50eacb
258
262
259 --------------------------------------------------------------------------------
263 --------------------------------------------------------------------------------
260 Misc: process dies early if a requirement exists and the extension is disabled
264 Misc: process dies early if a requirement exists and the extension is disabled
261
265
262 $ hg --config extensions.lfs=! summary
266 $ hg --config extensions.lfs=! summary
263 abort: repository requires features unknown to this Mercurial: lfs!
267 abort: repository requires features unknown to this Mercurial: lfs!
264 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
268 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
265 [255]
269 [255]
266
270
267 #endif
271 #endif
268
272
269 $ $PYTHON $TESTDIR/killdaemons.py $DAEMON_PIDS
273 $ $PYTHON $TESTDIR/killdaemons.py $DAEMON_PIDS
270
274
271 #if lfsremote-on
275 #if lfsremote-on
272 $ cat $TESTTMP/errors.log | grep '^[A-Z]'
276 $ cat $TESTTMP/errors.log | grep '^[A-Z]'
273 Traceback (most recent call last):
277 Traceback (most recent call last):
274 ValueError: no common changegroup version
278 ValueError: no common changegroup version
275 Traceback (most recent call last):
279 Traceback (most recent call last):
276 ValueError: no common changegroup version
280 ValueError: no common changegroup version
277 #else
281 #else
278 $ cat $TESTTMP/errors.log
282 $ cat $TESTTMP/errors.log
279 #endif
283 #endif
General Comments 0
You need to be logged in to leave comments. Login now