@@ -1,527 +1,527 @@
# wrapper.py - methods wrapping core mercurial logic
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid, short
from mercurial.pycompat import (
    getattr,
    setattr,
)

from mercurial import (
    bundle2,
    changegroup,
    cmdutil,
    context,
    error,
    exchange,
    exthelper,
    localrepo,
    pycompat,
    revlog,
    scmutil,
    upgrade,
    util,
    vfs as vfsmod,
    wireprotov1server,
)

from mercurial.interfaces import repository

from mercurial.utils import (
    storageutil,
    stringutil,
)

from ..largefiles import lfutil

from . import (
    blobstore,
    pointer,
)

eh = exthelper.exthelper()


@eh.wrapfunction(localrepo, b'makefilestorage')
def localrepomakefilestorage(orig, requirements, features, **kwargs):
    if b'lfs' in requirements:
        features.add(repository.REPO_FEATURE_LFS)

    return orig(requirements=requirements, features=features, **kwargs)


@eh.wrapfunction(changegroup, b'allsupportedversions')
def allsupportedversions(orig, ui):
    versions = orig(ui)
    versions.add(b'03')
    return versions


@eh.wrapfunction(wireprotov1server, b'_capabilities')
def _capabilities(orig, repo, proto):
    '''Wrap server command to announce lfs server capability'''
    caps = orig(repo, proto)
    if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
        # Advertise a slightly different capability when lfs is *required*, so
        # that the client knows it MUST load the extension. If lfs is not
        # required on the server, there's no reason to autoload the extension
        # on the client.
        if b'lfs' in repo.requirements:
            caps.append(b'lfs-serve')

        caps.append(b'lfs')
    return caps
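The 'lfs-serve' / 'lfs' distinction above is what the push() wrapper later in this file relies on via remote.capable(b'lfs'). As a rough client-side sketch (the helper name and the surrounding setup are assumptions, not part of this change), the two capabilities can be told apart like this:

# Hypothetical helper, for illustration only: `peer` is a wire peer such as one
# returned by hg.peer().  'lfs-serve' is only advertised when the server repo
# has the 'lfs' requirement, i.e. the client MUST load the extension; a bare
# 'lfs' capability just means blobs can be served on request.
def lfs_server_support(ui, peer):
    if peer.capable(b'lfs-serve'):
        ui.status(b'server requires the lfs extension\n')
    elif peer.capable(b'lfs'):
        ui.status(b'server supports lfs but does not require it\n')
    else:
        ui.status(b'server has no lfs support\n')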


def bypasscheckhash(self, text):
    return False


def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 2-tuple (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        p.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith(b'x-hg-'):
            name = k[len(b'x-hg-') :]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith(b'\1\n'):
        text = storageutil.packmeta(hgmeta, text)

    return (text, True, {})
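The text.startswith(b'\1\n') check and the storageutil.packmeta() call refer to Mercurial's filelog metadata envelope: keys such as copy/copyrev are framed by \1\n markers in front of the revision text. A minimal stand-alone sketch of that framing (an assumption about the envelope layout, not the storageutil implementation):

# Sketch only: frame metadata the way readfromstore() expects before handing
# the text back to the flag processor.  The real packer lives in
# mercurial.utils.storageutil.
def packmeta_sketch(meta, text):
    header = b''.join(b'%s: %s\n' % (k, v) for k, v in sorted(meta.items()))
    return b'\x01\n' + header + b'\x01\n' + text

packed = packmeta_sketch({b'copy': b'a.bin', b'copyrev': b'0' * 40}, b'payload')
assert packed.startswith(b'\x01\n')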


def writetostore(self, text, sidedata):
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = storageutil.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hex(hashlib.sha256(text).digest())
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = b'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not stringutil.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata[b'x-is-binary'] = b'0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in pycompat.iteritems(hgmeta):
            metadata[b'x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)
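For a concrete picture of what replaces the file content in the filelog, here is an illustrative computation of a pointer text like the one writetostore() produces. The real serialization is done by this extension's pointer.gitlfspointer; the helper below only approximates its visible output, and its key ordering is an assumption:

import hashlib

# Illustration only: a git-lfs style pointer for a small blob, with the extra
# keys this wrapper adds (x-is-binary, x-hg-*).  Formatting details of the
# real serializer may differ.
def pointer_text_sketch(blob, hgmeta=None, is_binary=True):
    lines = [
        b'version https://git-lfs.github.com/spec/v1',
        b'oid sha256:%s' % hashlib.sha256(blob).hexdigest().encode('ascii'),
        b'size %d' % len(blob),
    ]
    if not is_binary:
        lines.append(b'x-is-binary 0')
    for k, v in sorted((hgmeta or {}).items()):
        lines.append(b'x-hg-%s %s' % (k, v))
    return b'\n'.join(lines) + b'\n'

print(pointer_text_sketch(b'hello world\n', {b'copy': b'old.txt'}, is_binary=False).decode('ascii'))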


def _islfs(rlog, node=None, rev=None):
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
-        rev = rlog._revlog.rev(node)
+        rev = rlog.rev(node)
    else:
-        node = rlog._revlog.node(rev)
+        node = rlog.node(rev)
    if node == nullid:
        return False
-    flags = rlog._revlog.flags(rev)
+    flags = rlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)


# Wrapping may also be applied by remotefilelog
def filelogaddrevision(
    orig,
    self,
    text,
    transaction,
    link,
    p1,
    p2,
    cachedelta=None,
    node=None,
    flags=revlog.REVIDX_DEFAULT_FLAGS,
    **kwds
):
    # The matcher isn't available if reposetup() wasn't called.
    lfstrack = self._revlog.opener.options.get(b'lfstrack')

    if lfstrack:
        textlen = len(text)
        # exclude hg rename meta from file size
        meta, offset = storageutil.parsemeta(text)
        if offset:
            textlen -= offset

        if lfstrack(self._revlog.filename, textlen):
            flags |= revlog.REVIDX_EXTSTORED

    return orig(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=cachedelta,
        node=node,
        flags=flags,
        **kwds
    )


# Wrapping may also be applied by remotefilelog
def filelogrenamed(orig, self, node):
-    if _islfs(self, node):
+    if _islfs(self._revlog, node):
        rawtext = self._revlog.rawdata(node)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
            return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
        else:
            return False
    return orig(self, node)


# Wrapping may also be applied by remotefilelog
def filelogsize(orig, self, rev):
-    if _islfs(self, rev=rev):
+    if _islfs(self._revlog, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self._revlog.rawdata(rev)
        metadata = pointer.deserialize(rawtext)
        return int(metadata[b'size'])
    return orig(self, rev)


@eh.wrapfunction(context.basefilectx, b'cmp')
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: check LFS oid
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)


@eh.wrapfunction(context.basefilectx, b'isbinary')
def filectxisbinary(orig, self):
    if self.islfs():
        # fast path: use lfs metadata to answer isbinary
        metadata = pointer.deserialize(self.rawdata())
        # if lfs metadata says nothing, assume it's binary by default
        return bool(int(metadata.get(b'x-is-binary', 1)))
    return orig(self)


def filectxislfs(self):
-    return _islfs(self.filelog(), self.filenode())
+    return _islfs(self.filelog()._revlog, self.filenode())


@eh.wrapfunction(cmdutil, b'_updatecatformatter')
def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
    orig(fm, ctx, matcher, path, decode)
    fm.data(rawdata=ctx[path].rawdata())


@eh.wrapfunction(scmutil, b'wrapconvertsink')
def convertsink(orig, sink):
    sink = orig(sink)
    if sink.repotype == b'hg':

        class lfssink(sink.__class__):
            def putcommit(
                self,
                files,
                copies,
                parents,
                commit,
                source,
                revmap,
                full,
                cleanp2,
            ):
                pc = super(lfssink, self).putcommit
                node = pc(
                    files,
                    copies,
                    parents,
                    commit,
                    source,
                    revmap,
                    full,
                    cleanp2,
                )

                if b'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add(b'lfs')
                        self.repo._writerequirements()

                return node

        sink.__class__ = lfssink

    return sink


# bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
# options and blob stores are passed from othervfs to the new readonlyvfs.
@eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith(b'lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))


def _prefetchfiles(repo, revs, match):
    """Ensure that required LFS blobs are present, fetching them as a group if
    needed."""
    if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
        return

    pointers = []
    oids = set()
    localstore = repo.svfs.lfslocalblobstore

    for rev in revs:
        ctx = repo[rev]
        for f in ctx.walk(match):
            p = pointerfromctx(ctx, f)
            if p and p.oid() not in oids and not localstore.has(p.oid()):
                p.filename = f
                pointers.append(p)
                oids.add(p.oid())

    if pointers:
        # Recalculating the repo store here allows 'paths.default' that is set
        # on the repo by a clone command to be used for the update.
        blobstore.remote(repo).readbatch(pointers, localstore)
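_prefetchfiles() carries no @eh.wrapfunction decorator, so it is registered elsewhere in the extension. As a hedged assumption (the registration site is outside this diff), it plugs into Mercurial's file prefetch hooks roughly like this:

# Assumed registration, for illustration only: scmutil.fileprefetchhooks is a
# hook list run before operations that materialize files in the working copy,
# which lets the needed LFS blobs be fetched as one batch.
from mercurial import scmutil

scmutil.fileprefetchhooks.add(b'lfs', _prefetchfiles)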


def _canskipupload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return True

    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)


def candownload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return False

    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)


def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions e. g. infinitepush. avoid renaming.
    '''
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)


def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)


@eh.wrapfunction(exchange, b'push')
def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed, and
    update the remote store based on the destination path."""
    if b'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable(b'lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _(b"required features are not supported in the destination: %s")
            raise error.Abort(
                m % b'lfs', hint=_(b'enable the lfs extension on the server')
            )

        # Repositories where this extension is disabled won't have the field.
        # But if there's a requirement, then the extension must be loaded AND
        # there may be blobs to push.
        remotestore = repo.svfs.lfsremoteblobstore
        try:
            repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
            return orig(repo, remote, *args, **kwargs)
        finally:
            repo.svfs.lfsremoteblobstore = remotestore
    else:
        return orig(repo, remote, *args, **kwargs)


# when writing a bundle via "hg bundle" command, upload related LFS blobs
@eh.wrapfunction(bundle2, b'writenewbundle')
def writenewbundle(
    orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(
        ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
    )


def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug(b'lfs: computing set of blobs to upload\n')
    pointers = {}

    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'lfs search'), _(b'changesets'), len(revs)
    ) as progress:
        for r in revs:
            ctx = repo[r]
            for p in pointersfromctx(ctx).values():
                pointers[p.oid()] = p
            progress.increment()
        return sorted(pointers.values(), key=lambda p: p.oid())


def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish from a non LFS
    file, this pointer is represented by an empty dict.
    """
    _ctx = ctx
    if f not in ctx:
        if not removed:
            return None
        if f in ctx.p1():
            _ctx = ctx.p1()
        elif f in ctx.p2():
            _ctx = ctx.p2()
        else:
            return None
    fctx = _ctx[f]
-    if not _islfs(fctx.filelog(), fctx.filenode()):
+    if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        if ctx == _ctx:
            return p
        return {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(
            _(b'lfs: corrupted pointer (%s@%s): %s\n')
            % (f, short(_ctx.node()), ex)
        )


def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    result = {}
    m = ctx.repo().narrowmatch()

    # TODO: consider manifest.fastread() instead
    for f in ctx.files():
        if not m(f):
            continue
        p = pointerfromctx(ctx, f, removed=removed)
        if p is not None:
            result[f] = p
    return result
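A short usage sketch of the two helpers above, limited to names defined in this module; the traversal itself (walking the tip changeset) is only an illustration of the removed=True convention, where an empty dict marks an LFS file deleted by the revision:

# Illustration only: report the LFS pointers touched by the tip changeset,
# distinguishing removed files ({}) from live ones (pointer objects).
def describe_tip_pointers(repo):
    ctx = repo[b'tip']
    for path, p in sorted(pointersfromctx(ctx, removed=True).items()):
        if p == {}:
            repo.ui.write(b'%s: lfs file removed in this revision\n' % path)
        else:
            repo.ui.write(b'%s: %s (%s bytes)\n' % (path, p.oid(), p[b'size']))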


def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)


@eh.wrapfunction(upgrade, b'_finishdatamigration')
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup()
    if util.safehasattr(
        srcrepo.svfs, b'lfslocalblobstore'
    ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
        srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
        dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

        for dirpath, dirs, files in srclfsvfs.walk():
            for oid in files:
                ui.write(_(b'copying lfs blob %s\n') % oid)
                lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))


@eh.wrapfunction(upgrade, b'preservedrequirements')
@eh.wrapfunction(upgrade, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if b'lfs' in repo.requirements:
        reqs.add(b'lfs')
    return reqs