# wrapper.py - methods wrapping core mercurial logic
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid, short

from mercurial import (
    bundle2,
    changegroup,
    cmdutil,
    context,
    error,
    exchange,
    exthelper,
    localrepo,
    repository,
    revlog,
    scmutil,
    upgrade,
    util,
    vfs as vfsmod,
    wireprotov1server,
)

from mercurial.utils import (
    storageutil,
    stringutil,
)

from ..largefiles import lfutil

from . import (
    blobstore,
    pointer,
)

eh = exthelper.exthelper()

@eh.wrapfunction(localrepo, 'makefilestorage')
def localrepomakefilestorage(orig, requirements, features, **kwargs):
    if b'lfs' in requirements:
        features.add(repository.REPO_FEATURE_LFS)

    return orig(requirements=requirements, features=features, **kwargs)

@eh.wrapfunction(changegroup, 'allsupportedversions')
def allsupportedversions(orig, ui):
    versions = orig(ui)
    versions.add('03')
    return versions

@eh.wrapfunction(wireprotov1server, '_capabilities')
def _capabilities(orig, repo, proto):
    '''Wrap server command to announce lfs server capability'''
    caps = orig(repo, proto)
    if util.safehasattr(repo.svfs, 'lfslocalblobstore'):
        # Advertise a slightly different capability when lfs is *required*, so
        # that the client knows it MUST load the extension. If lfs is not
        # required on the server, there's no reason to autoload the extension
        # on the client.
        if b'lfs' in repo.requirements:
            caps.append('lfs-serve')

        caps.append('lfs')
    return caps

def bypasscheckhash(self, text):
    return False

def readfromstore(self, text):
    """Read filelog content from the local blobstore, as a transform for the
    flagprocessor.

    Default transform for the flagprocessor, returning contents from the
    blobstore.  Returns a 2-tuple (text, validatehash) where validatehash is
    True, as the contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        p.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith('x-hg-'):
            name = k[len('x-hg-'):]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith('\1\n'):
        text = storageutil.packmeta(hgmeta, text)

    return (text, True)
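
# Illustration (added for clarity; not part of the original module): hg
# filelog metadata is framed between \x01\n markers, e.g. a rename records
# roughly:
#
#   \x01\ncopy: <old path>\ncopyrev: <40 hex digits>\n\x01\n<file contents>
#
# which is why contents that merely *start* with the marker must be re-packed
# with empty metadata above, to escape it:
#
#   >>> text = '\1\nlooks like metadata but is not'
#   >>> text.startswith('\1\n')
#   True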

def writetostore(self, text):
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = storageutil.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hex(hashlib.sha256(text).digest())
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = 'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size='%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not stringutil.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)
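
# Illustration (hedged sketch, not part of the original module): the oid is
# the sha256 of the blob contents, and the serialized pointer that replaces
# those contents in the filelog follows the git-lfs spec, roughly:
#
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<64 hex digits>
#   size <length in bytes>
#
#   >>> import hashlib
#   >>> len(hashlib.sha256(b'large file contents').hexdigest())
#   64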

def _islfs(rlog, node=None, rev=None):
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog._revlog.rev(node)
    else:
        node = rlog._revlog.node(rev)
    if node == nullid:
        return False
    flags = rlog._revlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)
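
# Illustration (flag value assumed here, for the sketch only):
# REVIDX_EXTSTORED is a single bit in a revision's flag word, so the test
# above is a plain mask:
#
#   >>> REVIDX_EXTSTORED = 1 << 13
#   >>> flags = REVIDX_EXTSTORED
#   >>> bool(flags & REVIDX_EXTSTORED)
#   True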

# Wrapping may also be applied by remotefilelog
def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
    # The matcher isn't available if reposetup() wasn't called.
    lfstrack = self._revlog.opener.options.get('lfstrack')

    if lfstrack:
        textlen = len(text)
        # exclude hg rename meta from file size
        meta, offset = storageutil.parsemeta(text)
        if offset:
            textlen -= offset

        if lfstrack(self._revlog.filename, textlen):
            flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)
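
# Illustration (hedged): 'lfstrack' is a predicate that reposetup() stores in
# the opener options; it decides from the path and the metadata-free length
# whether a revision belongs in the blobstore.  A minimal stand-in:
#
#   >>> lfstrack = lambda path, size: size > 10 * 1024 * 1024
#   >>> lfstrack('big.bin', 20 * 1024 * 1024)
#   True
#   >>> lfstrack('small.txt', 42)
#   False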

# Wrapping may also be applied by remotefilelog
def filelogrenamed(orig, self, node):
    if _islfs(self, node):
        rawtext = self._revlog.rawdata(node)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
            return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
        else:
            return False
    return orig(self, node)

# Wrapping may also be applied by remotefilelog
def filelogsize(orig, self, rev):
    if _islfs(self, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self._revlog.rawdata(rev)
        metadata = pointer.deserialize(rawtext)
        return int(metadata['size'])
    return orig(self, rev)

@eh.wrapfunction(context.basefilectx, 'cmp')
def filectxcmp(orig, self, fctx):
    """returns True if text is different from fctx"""
    # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: check LFS oid
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)

@eh.wrapfunction(context.basefilectx, 'isbinary')
def filectxisbinary(orig, self):
    if self.islfs():
        # fast path: use lfs metadata to answer isbinary
        metadata = pointer.deserialize(self.rawdata())
        # if lfs metadata says nothing, assume it's binary by default
        return bool(int(metadata.get('x-is-binary', 1)))
    return orig(self)
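
# Illustration: a pointer without an 'x-is-binary' entry is treated as
# binary, matching the default in filectxisbinary() above:
#
#   >>> bool(int({}.get('x-is-binary', 1)))
#   True
#   >>> bool(int({'x-is-binary': '0'}.get('x-is-binary', 1)))
#   False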

def filectxislfs(self):
    return _islfs(self.filelog(), self.filenode())

@eh.wrapfunction(cmdutil, '_updatecatformatter')
def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
    orig(fm, ctx, matcher, path, decode)
    fm.data(rawdata=ctx[path].rawdata())

@eh.wrapfunction(scmutil, 'wrapconvertsink')
def convertsink(orig, sink):
    sink = orig(sink)
    if sink.repotype == 'hg':
        class lfssink(sink.__class__):
            def putcommit(self, files, copies, parents, commit, source, revmap,
                          full, cleanp2):
                pc = super(lfssink, self).putcommit
                node = pc(files, copies, parents, commit, source, revmap, full,
                          cleanp2)

                if 'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add('lfs')
                        self.repo._writerequirements()

                return node

        sink.__class__ = lfssink

    return sink

# bundlerepo uses "vfsmod.readonlyvfs(othervfs)", so we need to make sure lfs
# options and blob stores are passed from othervfs to the new readonlyvfs.
@eh.wrapfunction(vfsmod.readonlyvfs, '__init__')
def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith('lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))

def _prefetchfiles(repo, revs, match):
    """Ensure that required LFS blobs are present, fetching them as a group if
    needed."""
    if not util.safehasattr(repo.svfs, 'lfslocalblobstore'):
        return

    pointers = []
    oids = set()
    localstore = repo.svfs.lfslocalblobstore

    for rev in revs:
        ctx = repo[rev]
        for f in ctx.walk(match):
            p = pointerfromctx(ctx, f)
            if p and p.oid() not in oids and not localstore.has(p.oid()):
                p.filename = f
                pointers.append(p)
                oids.add(p.oid())

    if pointers:
        # Recalculating the repo store here allows 'paths.default' that is set
        # on the repo by a clone command to be used for the update.
        blobstore.remote(repo).readbatch(pointers, localstore)
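
# Illustration: blobs are deduplicated by oid before the single readbatch()
# call above, so a blob shared by many files or revisions is fetched once:
#
#   >>> oids, batch = set(), []
#   >>> for oid in ('a' * 64, 'b' * 64, 'a' * 64):
#   ...     if oid not in oids:
#   ...         oids.add(oid)
#   ...         batch.append(oid)
#   >>> len(batch)
#   2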

def _canskipupload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
        return True

    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def candownload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
        return False

    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions, e.g. infinitepush. avoid renaming.
    '''
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)

def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)

@eh.wrapfunction(exchange, 'push')
def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed, and
    update the remote store based on the destination path."""
    if 'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable('lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _("required features are not supported in the destination: %s")
            raise error.Abort(m % 'lfs',
                              hint=_('enable the lfs extension on the server'))

        # Repositories where this extension is disabled won't have the field.
        # But if there's a requirement, then the extension must be loaded AND
        # there may be blobs to push.
        remotestore = repo.svfs.lfsremoteblobstore
        try:
            repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
            return orig(repo, remote, *args, **kwargs)
        finally:
            repo.svfs.lfsremoteblobstore = remotestore
    else:
        return orig(repo, remote, *args, **kwargs)

# when writing a bundle via "hg bundle" command, upload related LFS blobs
@eh.wrapfunction(bundle2, 'writenewbundle')
def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(ui, repo, source, filename, bundletype, outgoing, *args,
                **kwargs)

def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug('lfs: computing set of blobs to upload\n')
    pointers = {}

    makeprogress = repo.ui.makeprogress
    with makeprogress(_('lfs search'), _('changesets'), len(revs)) as progress:
        for r in revs:
            ctx = repo[r]
            for p in pointersfromctx(ctx).values():
                pointers[p.oid()] = p
            progress.increment()
    return sorted(pointers.values(), key=lambda p: p.oid())

def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish from a non-LFS
    file, this pointer is represented by an empty dict.
    """
    _ctx = ctx
    if f not in ctx:
        if not removed:
            return None
        if f in ctx.p1():
            _ctx = ctx.p1()
        elif f in ctx.p2():
            _ctx = ctx.p2()
        else:
            return None
    fctx = _ctx[f]
    if not _islfs(fctx.filelog(), fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        if ctx == _ctx:
            return p
        return {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s')
                          % (f, short(_ctx.node()), ex))

def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    result = {}
    m = ctx.repo().narrowmatch()

    # TODO: consider manifest.fastread() instead
    for f in ctx.files():
        if not m(f):
            continue
        p = pointerfromctx(ctx, f, removed=removed)
        if p is not None:
            result[f] = p
    return result

def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)

@eh.wrapfunction(upgrade, '_finishdatamigration')
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup()
    if (util.safehasattr(srcrepo.svfs, 'lfslocalblobstore') and
        util.safehasattr(dstrepo.svfs, 'lfslocalblobstore')):
        srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
        dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

        for dirpath, dirs, files in srclfsvfs.walk():
            for oid in files:
                ui.write(_('copying lfs blob %s\n') % oid)
                lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))

@eh.wrapfunction(upgrade, 'preservedrequirements')
@eh.wrapfunction(upgrade, 'supporteddestrequirements')
def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if 'lfs' in repo.requirements:
        reqs.add('lfs')
    return reqs
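
# Usage sketch (assumed hgrc configuration; not part of this file): the
# wrappers above only take effect when the extension is enabled and a
# tracking rule is configured, e.g.:
#
#   [extensions]
#   lfs =
#
#   [lfs]
#   track = size(">10MB")
#   url = https://lfs.example.com/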