new file 100644 (empty)

diff --git a/hgext/lfs/wrapper.py b/hgext/lfs/wrapper.py
@@ -1,546 +1,548 @@
 # wrapper.py - methods wrapping core mercurial logic
 #
 # Copyright 2017 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import hashlib

 from mercurial.i18n import _
 from mercurial.node import bin, hex, nullid, short
 from mercurial.pycompat import (
     getattr,
     setattr,
 )

 from mercurial import (
     bundle2,
     changegroup,
     cmdutil,
     context,
     error,
     exchange,
     exthelper,
     localrepo,
     pycompat,
     revlog,
     scmutil,
     upgrade,
     util,
     vfs as vfsmod,
     wireprotov1server,
 )

+from mercurial.upgrade_utils import engine as upgrade_engine
+
 from mercurial.interfaces import repository

 from mercurial.utils import (
     storageutil,
     stringutil,
 )

 from ..largefiles import lfutil

 from . import (
     blobstore,
     pointer,
 )

 eh = exthelper.exthelper()


 @eh.wrapfunction(localrepo, b'makefilestorage')
 def localrepomakefilestorage(orig, requirements, features, **kwargs):
     if b'lfs' in requirements:
         features.add(repository.REPO_FEATURE_LFS)

     return orig(requirements=requirements, features=features, **kwargs)


 @eh.wrapfunction(changegroup, b'allsupportedversions')
 def allsupportedversions(orig, ui):
     versions = orig(ui)
     versions.add(b'03')
     return versions


 @eh.wrapfunction(wireprotov1server, b'_capabilities')
 def _capabilities(orig, repo, proto):
     '''Wrap server command to announce lfs server capability'''
     caps = orig(repo, proto)
     if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
         # Advertise a slightly different capability when lfs is *required*, so
         # that the client knows it MUST load the extension. If lfs is not
         # required on the server, there's no reason to autoload the extension
         # on the client.
         if b'lfs' in repo.requirements:
             caps.append(b'lfs-serve')

         caps.append(b'lfs')
     return caps


 def bypasscheckhash(self, text):
     return False


 def readfromstore(self, text):
     """Read filelog content from local blobstore transform for flagprocessor.

     Default transform for flagprocessor, returning contents from blobstore.
     Returns a 2-tuple (text, validatehash) where validatehash is True as the
     contents of the blobstore should be checked using checkhash.
     """
     p = pointer.deserialize(text)
     oid = p.oid()
     store = self.opener.lfslocalblobstore
     if not store.has(oid):
         p.filename = self.filename
         self.opener.lfsremoteblobstore.readbatch([p], store)

     # The caller will validate the content
     text = store.read(oid, verify=False)

     # pack hg filelog metadata
     hgmeta = {}
     for k in p.keys():
         if k.startswith(b'x-hg-'):
             name = k[len(b'x-hg-') :]
             hgmeta[name] = p[k]
     if hgmeta or text.startswith(b'\1\n'):
         text = storageutil.packmeta(hgmeta, text)

     return (text, True, {})


 def writetostore(self, text, sidedata):
     # hg filelog metadata (includes rename, etc)
     hgmeta, offset = storageutil.parsemeta(text)
     if offset and offset > 0:
         # lfs blob does not contain hg filelog metadata
         text = text[offset:]

     # git-lfs only supports sha256
     oid = hex(hashlib.sha256(text).digest())
     self.opener.lfslocalblobstore.write(oid, text)

     # replace contents with metadata
     longoid = b'sha256:%s' % oid
     metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))

     # by default, we expect the content to be binary. however, LFS could also
     # be used for non-binary content. add a special entry for non-binary data.
     # this will be used by filectx.isbinary().
     if not stringutil.binary(text):
         # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
         metadata[b'x-is-binary'] = b'0'

     # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
     if hgmeta is not None:
         for k, v in pycompat.iteritems(hgmeta):
             metadata[b'x-hg-%s' % k] = v

     rawtext = metadata.serialize()
     return (rawtext, False)


 def _islfs(rlog, node=None, rev=None):
     if rev is None:
         if node is None:
             # both None - likely working copy content where node is not ready
             return False
         rev = rlog.rev(node)
     else:
         node = rlog.node(rev)
     if node == nullid:
         return False
     flags = rlog.flags(rev)
     return bool(flags & revlog.REVIDX_EXTSTORED)


 # Wrapping may also be applied by remotefilelog
 def filelogaddrevision(
     orig,
     self,
     text,
     transaction,
     link,
     p1,
     p2,
     cachedelta=None,
     node=None,
     flags=revlog.REVIDX_DEFAULT_FLAGS,
     **kwds
 ):
     # The matcher isn't available if reposetup() wasn't called.
     lfstrack = self._revlog.opener.options.get(b'lfstrack')

     if lfstrack:
         textlen = len(text)
         # exclude hg rename meta from file size
         meta, offset = storageutil.parsemeta(text)
         if offset:
             textlen -= offset

         if lfstrack(self._revlog.filename, textlen):
             flags |= revlog.REVIDX_EXTSTORED

     return orig(
         self,
         text,
         transaction,
         link,
         p1,
         p2,
         cachedelta=cachedelta,
         node=node,
         flags=flags,
         **kwds
     )


 # Wrapping may also be applied by remotefilelog
 def filelogrenamed(orig, self, node):
     if _islfs(self._revlog, node):
         rawtext = self._revlog.rawdata(node)
         if not rawtext:
             return False
         metadata = pointer.deserialize(rawtext)
         if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
             return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
         else:
             return False
     return orig(self, node)


 # Wrapping may also be applied by remotefilelog
 def filelogsize(orig, self, rev):
     if _islfs(self._revlog, rev=rev):
         # fast path: use lfs metadata to answer size
         rawtext = self._revlog.rawdata(rev)
         metadata = pointer.deserialize(rawtext)
         return int(metadata[b'size'])
     return orig(self, rev)


 @eh.wrapfunction(revlog, b'_verify_revision')
 def _verify_revision(orig, rl, skipflags, state, node):
     if _islfs(rl, node=node):
         rawtext = rl.rawdata(node)
         metadata = pointer.deserialize(rawtext)

         # Don't skip blobs that are stored locally, as local verification is
         # relatively cheap and there's no other way to verify the raw data in
         # the revlog.
         if rl.opener.lfslocalblobstore.has(metadata.oid()):
             skipflags &= ~revlog.REVIDX_EXTSTORED
         elif skipflags & revlog.REVIDX_EXTSTORED:
             # The wrapped method will set `skipread`, but there's enough local
             # info to check renames.
             state[b'safe_renamed'].add(node)

     orig(rl, skipflags, state, node)


 @eh.wrapfunction(context.basefilectx, b'cmp')
 def filectxcmp(orig, self, fctx):
     """returns True if text is different than fctx"""
     # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
     if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
         # fast path: check LFS oid
         p1 = pointer.deserialize(self.rawdata())
         p2 = pointer.deserialize(fctx.rawdata())
         return p1.oid() != p2.oid()
     return orig(self, fctx)


 @eh.wrapfunction(context.basefilectx, b'isbinary')
 def filectxisbinary(orig, self):
     if self.islfs():
         # fast path: use lfs metadata to answer isbinary
         metadata = pointer.deserialize(self.rawdata())
         # if lfs metadata says nothing, assume it's binary by default
         return bool(int(metadata.get(b'x-is-binary', 1)))
     return orig(self)


 def filectxislfs(self):
     return _islfs(self.filelog()._revlog, self.filenode())


 @eh.wrapfunction(cmdutil, b'_updatecatformatter')
 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
     orig(fm, ctx, matcher, path, decode)
     fm.data(rawdata=ctx[path].rawdata())


 @eh.wrapfunction(scmutil, b'wrapconvertsink')
 def convertsink(orig, sink):
     sink = orig(sink)
     if sink.repotype == b'hg':

         class lfssink(sink.__class__):
             def putcommit(
                 self,
                 files,
                 copies,
                 parents,
                 commit,
                 source,
                 revmap,
                 full,
                 cleanp2,
             ):
                 pc = super(lfssink, self).putcommit
                 node = pc(
                     files,
                     copies,
                     parents,
                     commit,
                     source,
                     revmap,
                     full,
                     cleanp2,
                 )

                 if b'lfs' not in self.repo.requirements:
                     ctx = self.repo[node]

                     # The file list may contain removed files, so check for
                     # membership before assuming it is in the context.
                     if any(f in ctx and ctx[f].islfs() for f, n in files):
                         self.repo.requirements.add(b'lfs')
                         scmutil.writereporequirements(self.repo)

                 return node

         sink.__class__ = lfssink

     return sink


 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
 # options and blob stores are passed from othervfs to the new readonlyvfs.
 @eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
 def vfsinit(orig, self, othervfs):
     orig(self, othervfs)
     # copy lfs related options
     for k, v in othervfs.options.items():
         if k.startswith(b'lfs'):
             self.options[k] = v
     # also copy lfs blobstores. note: this can run before reposetup, so lfs
     # blobstore attributes are not always ready at this time.
     for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
         if util.safehasattr(othervfs, name):
             setattr(self, name, getattr(othervfs, name))


 def _prefetchfiles(repo, revmatches):
     """Ensure that required LFS blobs are present, fetching them as a group if
     needed."""
     if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
         return

     pointers = []
     oids = set()
     localstore = repo.svfs.lfslocalblobstore

     for rev, match in revmatches:
         ctx = repo[rev]
         for f in ctx.walk(match):
             p = pointerfromctx(ctx, f)
             if p and p.oid() not in oids and not localstore.has(p.oid()):
                 p.filename = f
                 pointers.append(p)
                 oids.add(p.oid())

     if pointers:
         # Recalculating the repo store here allows 'paths.default' that is set
         # on the repo by a clone command to be used for the update.
         blobstore.remote(repo).readbatch(pointers, localstore)


 def _canskipupload(repo):
     # Skip if this hasn't been passed to reposetup()
     if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
         return True

     # if remotestore is a null store, upload is a no-op and can be skipped
     return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)


 def candownload(repo):
     # Skip if this hasn't been passed to reposetup()
     if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
         return False

     # if remotestore is a null store, downloads will lead to nothing
     return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)


 def uploadblobsfromrevs(repo, revs):
     """upload lfs blobs introduced by revs

     Note: also used by other extensions e. g. infinitepush. avoid renaming.
     """
     if _canskipupload(repo):
         return
     pointers = extractpointers(repo, revs)
     uploadblobs(repo, pointers)


 def prepush(pushop):
     """Prepush hook.

     Read through the revisions to push, looking for filelog entries that can be
     deserialized into metadata so that we can block the push on their upload to
     the remote blobstore.
     """
     return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)


 @eh.wrapfunction(exchange, b'push')
 def push(orig, repo, remote, *args, **kwargs):
     """bail on push if the extension isn't enabled on remote when needed, and
     update the remote store based on the destination path."""
     if b'lfs' in repo.requirements:
         # If the remote peer is for a local repo, the requirement tests in the
         # base class method enforce lfs support. Otherwise, some revisions in
         # this repo use lfs, and the remote repo needs the extension loaded.
         if not remote.local() and not remote.capable(b'lfs'):
             # This is a copy of the message in exchange.push() when requirements
             # are missing between local repos.
             m = _(b"required features are not supported in the destination: %s")
             raise error.Abort(
                 m % b'lfs', hint=_(b'enable the lfs extension on the server')
             )

         # Repositories where this extension is disabled won't have the field.
         # But if there's a requirement, then the extension must be loaded AND
         # there may be blobs to push.
         remotestore = repo.svfs.lfsremoteblobstore
         try:
             repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
             return orig(repo, remote, *args, **kwargs)
         finally:
             repo.svfs.lfsremoteblobstore = remotestore
     else:
         return orig(repo, remote, *args, **kwargs)


 # when writing a bundle via "hg bundle" command, upload related LFS blobs
 @eh.wrapfunction(bundle2, b'writenewbundle')
 def writenewbundle(
     orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
 ):
     """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
     uploadblobsfromrevs(repo, outgoing.missing)
     return orig(
         ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
     )


 def extractpointers(repo, revs):
     """return a list of lfs pointers added by given revs"""
     repo.ui.debug(b'lfs: computing set of blobs to upload\n')
     pointers = {}

     makeprogress = repo.ui.makeprogress
     with makeprogress(
         _(b'lfs search'), _(b'changesets'), len(revs)
     ) as progress:
         for r in revs:
             ctx = repo[r]
             for p in pointersfromctx(ctx).values():
                 pointers[p.oid()] = p
             progress.increment()
     return sorted(pointers.values(), key=lambda p: p.oid())


 def pointerfromctx(ctx, f, removed=False):
     """return a pointer for the named file from the given changectx, or None if
     the file isn't LFS.

     Optionally, the pointer for a file deleted from the context can be returned.
     Since no such pointer is actually stored, and to distinguish from a non LFS
     file, this pointer is represented by an empty dict.
     """
     _ctx = ctx
     if f not in ctx:
         if not removed:
             return None
         if f in ctx.p1():
             _ctx = ctx.p1()
         elif f in ctx.p2():
             _ctx = ctx.p2()
         else:
             return None
     fctx = _ctx[f]
     if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
         return None
     try:
         p = pointer.deserialize(fctx.rawdata())
         if ctx == _ctx:
             return p
         return {}
     except pointer.InvalidPointer as ex:
         raise error.Abort(
             _(b'lfs: corrupted pointer (%s@%s): %s\n')
             % (f, short(_ctx.node()), ex)
         )


 def pointersfromctx(ctx, removed=False):
     """return a dict {path: pointer} for given single changectx.

     If ``removed`` == True and the LFS file was removed from ``ctx``, the value
     stored for the path is an empty dict.
     """
     result = {}
     m = ctx.repo().narrowmatch()

     # TODO: consider manifest.fastread() instead
     for f in ctx.files():
         if not m(f):
             continue
         p = pointerfromctx(ctx, f, removed=removed)
         if p is not None:
             result[f] = p
     return result


 def uploadblobs(repo, pointers):
     """upload given pointers from local blobstore"""
     if not pointers:
         return

     remoteblob = repo.svfs.lfsremoteblobstore
     remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)


-@eh.wrapfunction(upgrade, b'_finishdatamigration')
+@eh.wrapfunction(upgrade_engine, b'_finishdatamigration')
 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
     orig(ui, srcrepo, dstrepo, requirements)

     # Skip if this hasn't been passed to reposetup()
     if util.safehasattr(
         srcrepo.svfs, b'lfslocalblobstore'
     ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
         srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
         dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

         for dirpath, dirs, files in srclfsvfs.walk():
             for oid in files:
                 ui.write(_(b'copying lfs blob %s\n') % oid)
                 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))


 @eh.wrapfunction(upgrade, b'preservedrequirements')
 @eh.wrapfunction(upgrade, b'supporteddestrequirements')
 def upgraderequirements(orig, repo):
     reqs = orig(repo)
     if b'lfs' in repo.requirements:
         reqs.add(b'lfs')
     return reqs
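The only behavioral change in wrapper.py above is the final hunk: `_finishdatamigration` now lives in `mercurial/upgrade_utils/engine.py`, and `wrapfunction` patches an attribute on the container object it is given, so the lfs extension must name the module that now owns the function; wrapping the old `upgrade` module would replace a name that its callers no longer look up. A minimal standalone sketch of the same pattern (the extension module name and the `ui.note` message are hypothetical, not part of this change):

# myext.py - hypothetical extension wrapping the relocated upgrade hook
from mercurial import exthelper
from mercurial.upgrade_utils import engine as upgrade_engine

eh = exthelper.exthelper()

# the container must be the module that defines the function after the move;
# its callers resolve it through upgrade_utils.engine, so that is what we patch
@eh.wrapfunction(upgrade_engine, b'_finishdatamigration')
def finishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)  # run the wrapped original first
    ui.note(b'extra post-migration step ran\n')

# standard exthelper boilerplate so Mercurial applies the wrapper at load time
uisetup = eh.finaluisetup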
diff --git a/mercurial/upgrade.py b/mercurial/upgrade.py
@@ -1,1492 +1,1012 @@
 # upgrade.py - functions for in place upgrade of Mercurial repository
 #
 # Copyright (c) 2016-present, Gregory Szorc
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

-import stat
-
 from .i18n import _
-from .pycompat import getattr
 from . import (
-    changelog,
     error,
-    filelog,
     hg,
     localrepo,
-    manifest,
-    metadata,
     pycompat,
     requirements,
-    revlog,
-    scmutil,
     util,
-    vfs as vfsmod,
 )
+
+from .upgrade_utils import (
+    engine as upgrade_engine,
+)

 from .utils import compression

 # list of requirements that request a clone of all revlog if added/removed
 RECLONES_REQUIREMENTS = {
     b'generaldelta',
     requirements.SPARSEREVLOG_REQUIREMENT,
 }


 def requiredsourcerequirements(repo):
     """Obtain requirements required to be present to upgrade a repo.

     An upgrade will not be allowed if the repository doesn't have the
     requirements returned by this function.
     """
     return {
         # Introduced in Mercurial 0.9.2.
         b'revlogv1',
         # Introduced in Mercurial 0.9.2.
         b'store',
     }


 def blocksourcerequirements(repo):
     """Obtain requirements that will prevent an upgrade from occurring.

     An upgrade cannot be performed if the source repository contains a
     requirement in the returned set.
     """
     return {
         # The upgrade code does not yet support these experimental features.
         # This is an artificial limitation.
         requirements.TREEMANIFEST_REQUIREMENT,
         # This was a precursor to generaldelta and was never enabled by default.
         # It should (hopefully) not exist in the wild.
         b'parentdelta',
         # Upgrade should operate on the actual store, not the shared link.
         requirements.SHARED_REQUIREMENT,
     }


 def supportremovedrequirements(repo):
     """Obtain requirements that can be removed during an upgrade.

     If an upgrade were to create a repository that dropped a requirement,
     the dropped requirement must appear in the returned set for the upgrade
     to be allowed.
     """
     supported = {
         requirements.SPARSEREVLOG_REQUIREMENT,
         requirements.SIDEDATA_REQUIREMENT,
         requirements.COPIESSDC_REQUIREMENT,
         requirements.NODEMAP_REQUIREMENT,
         requirements.SHARESAFE_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
         if engine.available() and engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)
             if engine.name() == b'zstd':
                 supported.add(b'revlog-compression-zstd')
     return supported


 def supporteddestrequirements(repo):
     """Obtain requirements that upgrade supports in the destination.

     If the result of the upgrade would create requirements not in this set,
     the upgrade is disallowed.

     Extensions should monkeypatch this to add their custom requirements.
     """
     supported = {
         b'dotencode',
         b'fncache',
         b'generaldelta',
         b'revlogv1',
         b'store',
         requirements.SPARSEREVLOG_REQUIREMENT,
         requirements.SIDEDATA_REQUIREMENT,
         requirements.COPIESSDC_REQUIREMENT,
         requirements.NODEMAP_REQUIREMENT,
         requirements.SHARESAFE_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
         if engine.available() and engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)
             if engine.name() == b'zstd':
                 supported.add(b'revlog-compression-zstd')
     return supported


 def allowednewrequirements(repo):
     """Obtain requirements that can be added to a repository during upgrade.

     This is used to disallow proposed requirements from being added when
     they weren't present before.

     We use a list of allowed requirement additions instead of a list of known
     bad additions because the whitelist approach is safer and will prevent
     future, unknown requirements from accidentally being added.
     """
     supported = {
         b'dotencode',
         b'fncache',
         b'generaldelta',
         requirements.SPARSEREVLOG_REQUIREMENT,
         requirements.SIDEDATA_REQUIREMENT,
         requirements.COPIESSDC_REQUIREMENT,
         requirements.NODEMAP_REQUIREMENT,
         requirements.SHARESAFE_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
         if engine.available() and engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)
             if engine.name() == b'zstd':
                 supported.add(b'revlog-compression-zstd')
     return supported


 def preservedrequirements(repo):
     return set()


 DEFICIENCY = b'deficiency'
 OPTIMISATION = b'optimization'


 class improvement(object):
     """Represents an improvement that can be made as part of an upgrade.

     The following attributes are defined on each instance:

     name
        Machine-readable string uniquely identifying this improvement. It
        will be mapped to an action later in the upgrade process.

     type
        Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
        problem. An optimization is an action (sometimes optional) that
        can be taken to further improve the state of the repository.

     description
        Message intended for humans explaining the improvement in more detail,
        including the implications of it. For ``DEFICIENCY`` types, should be
        worded in the present tense. For ``OPTIMISATION`` types, should be
        worded in the future tense.

     upgrademessage
        Message intended for humans explaining what an upgrade addressing this
        issue will do. Should be worded in the future tense.
     """

     def __init__(self, name, type, description, upgrademessage):
         self.name = name
         self.type = type
         self.description = description
         self.upgrademessage = upgrademessage

     def __eq__(self, other):
         if not isinstance(other, improvement):
             # This is what python tells us to do
             return NotImplemented
         return self.name == other.name

     def __ne__(self, other):
         return not (self == other)

     def __hash__(self):
         return hash(self.name)


 allformatvariant = []


 def registerformatvariant(cls):
     allformatvariant.append(cls)
     return cls


 class formatvariant(improvement):
     """an improvement subclass dedicated to repository format"""

     type = DEFICIENCY
     ### The following attributes should be defined for each class:

     # machine-readable string uniquely identifying this improvement. it will be
     # mapped to an action later in the upgrade process.
     name = None

     # message intended for humans explaining the improvement in more detail,
     # including the implications of it. for ``DEFICIENCY`` types, should be worded
     # in the present tense.
     description = None

     # message intended for humans explaining what an upgrade addressing this
     # issue will do. should be worded in the future tense.
     upgrademessage = None

     # value of current Mercurial default for new repository
     default = None

     def __init__(self):
         raise NotImplementedError()

     @staticmethod
     def fromrepo(repo):
         """current value of the variant in the repository"""
         raise NotImplementedError()

     @staticmethod
     def fromconfig(repo):
         """current value of the variant in the configuration"""
         raise NotImplementedError()


 class requirementformatvariant(formatvariant):
     """formatvariant based on a 'requirement' name.

251 | Many format variants are controlled by a 'requirement'. We define a small |
|
245 | Many format variants are controlled by a 'requirement'. We define a small | |
252 | subclass to factor the code. |
|
246 | subclass to factor the code. | |
253 | """ |
|
247 | """ | |
254 |
|
248 | |||
255 | # the requirement that controls this format variant |
|
249 | # the requirement that controls this format variant | |
256 | _requirement = None |
|
250 | _requirement = None | |
257 |
|
251 | |||
258 | @staticmethod |
|
252 | @staticmethod | |
259 | def _newreporequirements(ui): |
|
253 | def _newreporequirements(ui): | |
260 | return localrepo.newreporequirements( |
|
254 | return localrepo.newreporequirements( | |
261 | ui, localrepo.defaultcreateopts(ui) |
|
255 | ui, localrepo.defaultcreateopts(ui) | |
262 | ) |
|
256 | ) | |
263 |
|
257 | |||
264 | @classmethod |
|
258 | @classmethod | |
265 | def fromrepo(cls, repo): |
|
259 | def fromrepo(cls, repo): | |
266 | assert cls._requirement is not None |
|
260 | assert cls._requirement is not None | |
267 | return cls._requirement in repo.requirements |
|
261 | return cls._requirement in repo.requirements | |
268 |
|
262 | |||
269 | @classmethod |
|
263 | @classmethod | |
270 | def fromconfig(cls, repo): |
|
264 | def fromconfig(cls, repo): | |
271 | assert cls._requirement is not None |
|
265 | assert cls._requirement is not None | |
272 | return cls._requirement in cls._newreporequirements(repo.ui) |
|
266 | return cls._requirement in cls._newreporequirements(repo.ui) | |
273 |
|
267 | |||
274 |
|
268 | |||
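A requirement-backed variant only has to name its requirement string: fromrepo() and fromconfig() then reduce to set-membership tests against the repository's current requirements and against the requirements a freshly created repository would receive. A standalone sketch of that contract, where FakeRepo and NEW_REPO_REQUIREMENTS are invented stand-ins for a real repo object and localrepo.newreporequirements():

class FakeRepo(object):
    def __init__(self, reqs):
        self.requirements = set(reqs)

# stand-in for what localrepo.newreporequirements() would return
NEW_REPO_REQUIREMENTS = {b'fncache', b'dotencode', b'generaldelta'}

class fncachevariant(object):
    _requirement = b'fncache'

    @classmethod
    def fromrepo(cls, repo):
        # current value of the variant in the repository
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        # value a newly created repository would get
        return cls._requirement in NEW_REPO_REQUIREMENTS

old = FakeRepo([b'revlogv1', b'store'])
print(fncachevariant.fromrepo(old))    # False: this repo lacks fncache
print(fncachevariant.fromconfig(old))  # True: new repos would have it

A False/True pair like this is exactly what flags the variant as a deficiency worth upgrading.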
275 | @registerformatvariant |
|
269 | @registerformatvariant | |
276 | class fncache(requirementformatvariant): |
|
270 | class fncache(requirementformatvariant): | |
277 | name = b'fncache' |
|
271 | name = b'fncache' | |
278 |
|
272 | |||
279 | _requirement = b'fncache' |
|
273 | _requirement = b'fncache' | |
280 |
|
274 | |||
281 | default = True |
|
275 | default = True | |
282 |
|
276 | |||
283 | description = _( |
|
277 | description = _( | |
284 | b'long and reserved filenames may not work correctly; ' |
|
278 | b'long and reserved filenames may not work correctly; ' | |
285 | b'repository performance is sub-optimal' |
|
279 | b'repository performance is sub-optimal' | |
286 | ) |
|
280 | ) | |
287 |
|
281 | |||
288 | upgrademessage = _( |
|
282 | upgrademessage = _( | |
289 | b'repository will be more resilient to storing ' |
|
283 | b'repository will be more resilient to storing ' | |
290 | b'certain paths and performance of certain ' |
|
284 | b'certain paths and performance of certain ' | |
291 | b'operations should be improved' |
|
285 | b'operations should be improved' | |
292 | ) |
|
286 | ) | |
293 |
|
287 | |||
294 |
|
288 | |||
295 | @registerformatvariant |
|
289 | @registerformatvariant | |
296 | class dotencode(requirementformatvariant): |
|
290 | class dotencode(requirementformatvariant): | |
297 | name = b'dotencode' |
|
291 | name = b'dotencode' | |
298 |
|
292 | |||
299 | _requirement = b'dotencode' |
|
293 | _requirement = b'dotencode' | |
300 |
|
294 | |||
301 | default = True |
|
295 | default = True | |
302 |
|
296 | |||
303 | description = _( |
|
297 | description = _( | |
304 | b'storage of filenames beginning with a period or ' |
|
298 | b'storage of filenames beginning with a period or ' | |
305 | b'space may not work correctly' |
|
299 | b'space may not work correctly' | |
306 | ) |
|
300 | ) | |
307 |
|
301 | |||
308 | upgrademessage = _( |
|
302 | upgrademessage = _( | |
309 | b'repository will be better able to store files ' |
|
303 | b'repository will be better able to store files ' | |
310 | b'beginning with a space or period' |
|
304 | b'beginning with a space or period' | |
311 | ) |
|
305 | ) | |
312 |
|
306 | |||
313 |
|
307 | |||
314 | @registerformatvariant |
|
308 | @registerformatvariant | |
315 | class generaldelta(requirementformatvariant): |
|
309 | class generaldelta(requirementformatvariant): | |
316 | name = b'generaldelta' |
|
310 | name = b'generaldelta' | |
317 |
|
311 | |||
318 | _requirement = b'generaldelta' |
|
312 | _requirement = b'generaldelta' | |
319 |
|
313 | |||
320 | default = True |
|
314 | default = True | |
321 |
|
315 | |||
322 | description = _( |
|
316 | description = _( | |
323 | b'deltas within internal storage are unable to ' |
|
317 | b'deltas within internal storage are unable to ' | |
324 | b'choose optimal revisions; repository is larger and ' |
|
318 | b'choose optimal revisions; repository is larger and ' | |
325 | b'slower than it could be; interaction with other ' |
|
319 | b'slower than it could be; interaction with other ' | |
326 | b'repositories may require extra network and CPU ' |
|
320 | b'repositories may require extra network and CPU ' | |
327 | b'resources, making "hg push" and "hg pull" slower' |
|
321 | b'resources, making "hg push" and "hg pull" slower' | |
328 | ) |
|
322 | ) | |
329 |
|
323 | |||
330 | upgrademessage = _( |
|
324 | upgrademessage = _( | |
331 | b'repository storage will be able to create ' |
|
325 | b'repository storage will be able to create ' | |
332 | b'optimal deltas; new repository data will be ' |
|
326 | b'optimal deltas; new repository data will be ' | |
333 | b'smaller and read times should decrease; ' |
|
327 | b'smaller and read times should decrease; ' | |
334 | b'interacting with other repositories using this ' |
|
328 | b'interacting with other repositories using this ' | |
335 | b'storage model should require less network and ' |
|
329 | b'storage model should require less network and ' | |
336 | b'CPU resources, making "hg push" and "hg pull" ' |
|
330 | b'CPU resources, making "hg push" and "hg pull" ' | |
337 | b'faster' |
|
331 | b'faster' | |
338 | ) |
|
332 | ) | |
339 |
|
333 | |||
340 |
|
334 | |||
341 | @registerformatvariant |
|
335 | @registerformatvariant | |
342 | class sharedsafe(requirementformatvariant): |
|
336 | class sharedsafe(requirementformatvariant): | |
343 | name = b'exp-sharesafe' |
|
337 | name = b'exp-sharesafe' | |
344 | _requirement = requirements.SHARESAFE_REQUIREMENT |
|
338 | _requirement = requirements.SHARESAFE_REQUIREMENT | |
345 |
|
339 | |||
346 | default = False |
|
340 | default = False | |
347 |
|
341 | |||
348 | description = _( |
|
342 | description = _( | |
349 | b'old shared repositories do not share source repository ' |
|
343 | b'old shared repositories do not share source repository ' | |
350 | b'requirements and config. This leads to various problems ' |
|
344 | b'requirements and config. This leads to various problems ' | |
351 | b'when the source repository format is upgraded or some new ' |
|
345 | b'when the source repository format is upgraded or some new ' | |
352 | b'extensions are enabled.' |
|
346 | b'extensions are enabled.' | |
353 | ) |
|
347 | ) | |
354 |
|
348 | |||
355 | upgrademessage = _( |
|
349 | upgrademessage = _( | |
356 | b'Upgrades a repository to share-safe format so that future ' |
|
350 | b'Upgrades a repository to share-safe format so that future ' | |
357 | b'shares of this repository share its requirements and configs.' |
|
351 | b'shares of this repository share its requirements and configs.' | |
358 | ) |
|
352 | ) | |
359 |
|
353 | |||
360 |
|
354 | |||
361 | @registerformatvariant |
|
355 | @registerformatvariant | |
362 | class sparserevlog(requirementformatvariant): |
|
356 | class sparserevlog(requirementformatvariant): | |
363 | name = b'sparserevlog' |
|
357 | name = b'sparserevlog' | |
364 |
|
358 | |||
365 | _requirement = requirements.SPARSEREVLOG_REQUIREMENT |
|
359 | _requirement = requirements.SPARSEREVLOG_REQUIREMENT | |
366 |
|
360 | |||
367 | default = True |
|
361 | default = True | |
368 |
|
362 | |||
369 | description = _( |
|
363 | description = _( | |
370 | b'in order to limit disk reading and memory usage on older ' |
|
364 | b'in order to limit disk reading and memory usage on older ' | |
371 | b'versions, the span of a delta chain from its root to its ' |
|
365 | b'versions, the span of a delta chain from its root to its ' | |
372 | b'end is limited, whatever the relevant data in this span. ' |
|
366 | b'end is limited, whatever the relevant data in this span. ' | |
373 | b'This can severely limit Mercurial\'s ability to build good ' |
|
367 | b'This can severely limit Mercurial\'s ability to build good ' | |
374 | b'chains of deltas, resulting in much more storage space being ' |
|
368 | b'chains of deltas, resulting in much more storage space being ' | |
375 | b'taken and limiting reusability of on-disk deltas during ' |
|
369 | b'taken and limiting reusability of on-disk deltas during ' | |
376 | b'exchange.' |
|
370 | b'exchange.' | |
377 | ) |
|
371 | ) | |
378 |
|
372 | |||
379 | upgrademessage = _( |
|
373 | upgrademessage = _( | |
380 | b'Revlog supports delta chains with more unused data ' |
|
374 | b'Revlog supports delta chains with more unused data ' | |
381 | b'between payloads. These gaps will be skipped at read ' |
|
375 | b'between payloads. These gaps will be skipped at read ' | |
382 | b'time. This allows for better delta chains, yielding ' |
|
376 | b'time. This allows for better delta chains, yielding ' | |
383 | b'better compression and faster exchange with the server.' |
|
377 | b'better compression and faster exchange with the server.' | |
384 | ) |
|
378 | ) | |
385 |
|
379 | |||
386 |
|
380 | |||
387 | @registerformatvariant |
|
381 | @registerformatvariant | |
388 | class sidedata(requirementformatvariant): |
|
382 | class sidedata(requirementformatvariant): | |
389 | name = b'sidedata' |
|
383 | name = b'sidedata' | |
390 |
|
384 | |||
391 | _requirement = requirements.SIDEDATA_REQUIREMENT |
|
385 | _requirement = requirements.SIDEDATA_REQUIREMENT | |
392 |
|
386 | |||
393 | default = False |
|
387 | default = False | |
394 |
|
388 | |||
395 | description = _( |
|
389 | description = _( | |
396 | b'Allows storage of extra data alongside a revision, ' |
|
390 | b'Allows storage of extra data alongside a revision, ' | |
397 | b'unlocking various caching options.' |
|
391 | b'unlocking various caching options.' | |
398 | ) |
|
392 | ) | |
399 |
|
393 | |||
400 | upgrademessage = _(b'Allows storage of extra data alongside a revision.') |
|
394 | upgrademessage = _(b'Allows storage of extra data alongside a revision.') | |
401 |
|
395 | |||
402 |
|
396 | |||
403 | @registerformatvariant |
|
397 | @registerformatvariant | |
404 | class persistentnodemap(requirementformatvariant): |
|
398 | class persistentnodemap(requirementformatvariant): | |
405 | name = b'persistent-nodemap' |
|
399 | name = b'persistent-nodemap' | |
406 |
|
400 | |||
407 | _requirement = requirements.NODEMAP_REQUIREMENT |
|
401 | _requirement = requirements.NODEMAP_REQUIREMENT | |
408 |
|
402 | |||
409 | default = False |
|
403 | default = False | |
410 |
|
404 | |||
411 | description = _( |
|
405 | description = _( | |
412 | b'persist the node -> rev mapping on disk to speed up lookups' |
|
406 | b'persist the node -> rev mapping on disk to speed up lookups' | |
413 | ) |
|
407 | ) | |
414 |
|
408 | |||
415 | upgrademessage = _(b'Speed up revision lookup by node id.') |
|
409 | upgrademessage = _(b'Speed up revision lookup by node id.') | |
416 |
|
410 | |||
417 |
|
411 | |||
418 | @registerformatvariant |
|
412 | @registerformatvariant | |
419 | class copiessdc(requirementformatvariant): |
|
413 | class copiessdc(requirementformatvariant): | |
420 | name = b'copies-sdc' |
|
414 | name = b'copies-sdc' | |
421 |
|
415 | |||
422 | _requirement = requirements.COPIESSDC_REQUIREMENT |
|
416 | _requirement = requirements.COPIESSDC_REQUIREMENT | |
423 |
|
417 | |||
424 | default = False |
|
418 | default = False | |
425 |
|
419 | |||
426 | description = _(b'Stores copies information alongside changesets.') |
|
420 | description = _(b'Stores copies information alongside changesets.') | |
427 |
|
421 | |||
428 | upgrademessage = _( |
|
422 | upgrademessage = _( | |
429 | b'Allows using a more efficient algorithm to deal with ' b'copy tracing.' |
|
423 | b'Allows using a more efficient algorithm to deal with ' b'copy tracing.' | |
430 | ) |
|
424 | ) | |
431 |
|
425 | |||
432 |
|
426 | |||
433 | @registerformatvariant |
|
427 | @registerformatvariant | |
434 | class removecldeltachain(formatvariant): |
|
428 | class removecldeltachain(formatvariant): | |
435 | name = b'plain-cl-delta' |
|
429 | name = b'plain-cl-delta' | |
436 |
|
430 | |||
437 | default = True |
|
431 | default = True | |
438 |
|
432 | |||
439 | description = _( |
|
433 | description = _( | |
440 | b'changelog storage is using deltas instead of ' |
|
434 | b'changelog storage is using deltas instead of ' | |
441 | b'raw entries; changelog reading and any ' |
|
435 | b'raw entries; changelog reading and any ' | |
442 | b'operation relying on changelog data are slower ' |
|
436 | b'operation relying on changelog data are slower ' | |
443 | b'than they could be' |
|
437 | b'than they could be' | |
444 | ) |
|
438 | ) | |
445 |
|
439 | |||
446 | upgrademessage = _( |
|
440 | upgrademessage = _( | |
447 | b'changelog storage will be reformatted to ' |
|
441 | b'changelog storage will be reformatted to ' | |
448 | b'store raw entries; changelog reading will be ' |
|
442 | b'store raw entries; changelog reading will be ' | |
449 | b'faster; changelog size may be reduced' |
|
443 | b'faster; changelog size may be reduced' | |
450 | ) |
|
444 | ) | |
451 |
|
445 | |||
452 | @staticmethod |
|
446 | @staticmethod | |
453 | def fromrepo(repo): |
|
447 | def fromrepo(repo): | |
454 | # Mercurial 4.0 changed changelogs to not use delta chains. Search for |
|
448 | # Mercurial 4.0 changed changelogs to not use delta chains. Search for | |
455 | # changelogs with deltas. |
|
449 | # changelogs with deltas. | |
456 | cl = repo.changelog |
|
450 | cl = repo.changelog | |
457 | chainbase = cl.chainbase |
|
451 | chainbase = cl.chainbase | |
458 | return all(rev == chainbase(rev) for rev in cl) |
|
452 | return all(rev == chainbase(rev) for rev in cl) | |
459 |
|
453 | |||
460 | @staticmethod |
|
454 | @staticmethod | |
461 | def fromconfig(repo): |
|
455 | def fromconfig(repo): | |
462 | return True |
|
456 | return True | |
463 |
|
457 | |||
464 |
|
458 | |||
465 | @registerformatvariant |
|
459 | @registerformatvariant | |
466 | class compressionengine(formatvariant): |
|
460 | class compressionengine(formatvariant): | |
467 | name = b'compression' |
|
461 | name = b'compression' | |
468 | default = b'zlib' |
|
462 | default = b'zlib' | |
469 |
|
463 | |||
470 | description = _( |
|
464 | description = _( | |
471 | b'Compression algorithm used to compress data. ' |
|
465 | b'Compression algorithm used to compress data. ' | |
472 | b'Some engines are faster than others' |
|
466 | b'Some engines are faster than others' | |
473 | ) |
|
467 | ) | |
474 |
|
468 | |||
475 | upgrademessage = _( |
|
469 | upgrademessage = _( | |
476 | b'revlog content will be recompressed with the new algorithm.' |
|
470 | b'revlog content will be recompressed with the new algorithm.' | |
477 | ) |
|
471 | ) | |
478 |
|
472 | |||
479 | @classmethod |
|
473 | @classmethod | |
480 | def fromrepo(cls, repo): |
|
474 | def fromrepo(cls, repo): | |
481 | # we allow multiple compression engine requirements to co-exist because |
|
475 | # we allow multiple compression engine requirements to co-exist because | |
482 | # strictly speaking, revlogs seem to support mixed compression styles. |
|
476 | # strictly speaking, revlogs seem to support mixed compression styles. | |
483 | # |
|
477 | # | |
484 | # The compression used for new entries will be "the last one" |
|
478 | # The compression used for new entries will be "the last one" | |
485 | compression = b'zlib' |
|
479 | compression = b'zlib' | |
486 | for req in repo.requirements: |
|
480 | for req in repo.requirements: | |
487 | prefix = req.startswith |
|
481 | prefix = req.startswith | |
488 | if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): |
|
482 | if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): | |
489 | compression = req.split(b'-', 2)[2] |
|
483 | compression = req.split(b'-', 2)[2] | |
490 | return compression |
|
484 | return compression | |
491 |
|
485 | |||
492 | @classmethod |
|
486 | @classmethod | |
493 | def fromconfig(cls, repo): |
|
487 | def fromconfig(cls, repo): | |
494 | compengines = repo.ui.configlist(b'format', b'revlog-compression') |
|
488 | compengines = repo.ui.configlist(b'format', b'revlog-compression') | |
495 | # return the first valid value as the selection code would do |
|
489 | # return the first valid value as the selection code would do | |
496 | for comp in compengines: |
|
490 | for comp in compengines: | |
497 | if comp in util.compengines: |
|
491 | if comp in util.compengines: | |
498 | return comp |
|
492 | return comp | |
499 |
|
493 | |||
500 | # no valid compression found; let's display them all for clarity |
|
494 | # no valid compression found; let's display them all for clarity | |
501 | return b','.join(compengines) |
|
495 | return b','.join(compengines) | |
502 |
|
496 | |||
503 |
|
497 | |||
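The req.split(b'-', 2)[2] in fromrepo() above recovers the engine name from requirements such as revlog-compression-zstd: splitting on only the first two dashes keeps the remainder intact even when the engine name itself contains a dash. A quick check with illustrative requirement values:

for req in (b'revlog-compression-zstd', b'exp-compression-zstd-rc1'):
    print(req.split(b'-', 2)[2])
# b'zstd'
# b'zstd-rc1'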
504 | @registerformatvariant |
|
498 | @registerformatvariant | |
505 | class compressionlevel(formatvariant): |
|
499 | class compressionlevel(formatvariant): | |
506 | name = b'compression-level' |
|
500 | name = b'compression-level' | |
507 | default = b'default' |
|
501 | default = b'default' | |
508 |
|
502 | |||
509 | description = _(b'compression level') |
|
503 | description = _(b'compression level') | |
510 |
|
504 | |||
511 | upgrademessage = _(b'revlog content will be recompressed') |
|
505 | upgrademessage = _(b'revlog content will be recompressed') | |
512 |
|
506 | |||
513 | @classmethod |
|
507 | @classmethod | |
514 | def fromrepo(cls, repo): |
|
508 | def fromrepo(cls, repo): | |
515 | comp = compressionengine.fromrepo(repo) |
|
509 | comp = compressionengine.fromrepo(repo) | |
516 | level = None |
|
510 | level = None | |
517 | if comp == b'zlib': |
|
511 | if comp == b'zlib': | |
518 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') |
|
512 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') | |
519 | elif comp == b'zstd': |
|
513 | elif comp == b'zstd': | |
520 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') |
|
514 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') | |
521 | if level is None: |
|
515 | if level is None: | |
522 | return b'default' |
|
516 | return b'default' | |
523 | return bytes(level) |
|
517 | return bytes(level) | |
524 |
|
518 | |||
525 | @classmethod |
|
519 | @classmethod | |
526 | def fromconfig(cls, repo): |
|
520 | def fromconfig(cls, repo): | |
527 | comp = compressionengine.fromconfig(repo) |
|
521 | comp = compressionengine.fromconfig(repo) | |
528 | level = None |
|
522 | level = None | |
529 | if comp == b'zlib': |
|
523 | if comp == b'zlib': | |
530 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') |
|
524 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') | |
531 | elif comp == b'zstd': |
|
525 | elif comp == b'zstd': | |
532 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') |
|
526 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') | |
533 | if level is None: |
|
527 | if level is None: | |
534 | return b'default' |
|
528 | return b'default' | |
535 | return bytes(level) |
|
529 | return bytes(level) | |
536 |
|
530 | |||
537 |
|
531 | |||
538 | def finddeficiencies(repo): |
|
532 | def finddeficiencies(repo): | |
539 | """returns a list of deficiencies that the repo suffers from""" |
|
533 | """returns a list of deficiencies that the repo suffers from""" | |
540 | deficiencies = [] |
|
534 | deficiencies = [] | |
541 |
|
535 | |||
542 | # We could detect lack of revlogv1 and store here, but they were added |
|
536 | # We could detect lack of revlogv1 and store here, but they were added | |
543 | # in 0.9.2 and we don't support upgrading repos without these |
|
537 | # in 0.9.2 and we don't support upgrading repos without these | |
544 | # requirements, so let's not bother. |
|
538 | # requirements, so let's not bother. | |
545 |
|
539 | |||
546 | for fv in allformatvariant: |
|
540 | for fv in allformatvariant: | |
547 | if not fv.fromrepo(repo): |
|
541 | if not fv.fromrepo(repo): | |
548 | deficiencies.append(fv) |
|
542 | deficiencies.append(fv) | |
549 |
|
543 | |||
550 | return deficiencies |
|
544 | return deficiencies | |
551 |
|
545 | |||
552 |
|
546 | |||
553 | # search without '-' to support older form on newer client. |
|
547 | # search without '-' to support older form on newer client. | |
554 | # |
|
548 | # | |
555 | # We don't enforce backward compatibility for debug commands, so this |
|
549 | # We don't enforce backward compatibility for debug commands, so this | |
556 | # might eventually be dropped. However, having to use two different |
|
550 | # might eventually be dropped. However, having to use two different | |
557 | # forms in scripts when comparing results is annoying enough to add |
|
551 | # forms in scripts when comparing results is annoying enough to add | |
558 | # backward compatibility for a while. |
|
552 | # backward compatibility for a while. | |
559 | legacy_opts_map = { |
|
553 | legacy_opts_map = { | |
560 | b'redeltaparent': b're-delta-parent', |
|
554 | b'redeltaparent': b're-delta-parent', | |
561 | b'redeltamultibase': b're-delta-multibase', |
|
555 | b'redeltamultibase': b're-delta-multibase', | |
562 | b'redeltaall': b're-delta-all', |
|
556 | b'redeltaall': b're-delta-all', | |
563 | b'redeltafulladd': b're-delta-fulladd', |
|
557 | b'redeltafulladd': b're-delta-fulladd', | |
564 | } |
|
558 | } | |
565 |
|
559 | |||
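upgraderepo() below applies this map with a set comprehension, so the legacy and current spellings normalize to the same action names; a standalone check with a hypothetical option list:

legacy_opts_map = {
    b'redeltaparent': b're-delta-parent',
    b'redeltamultibase': b're-delta-multibase',
    b'redeltaall': b're-delta-all',
    b'redeltafulladd': b're-delta-fulladd',
}

optimize = [b'redeltaparent', b're-delta-all']  # mixed old and new forms
print({legacy_opts_map.get(o, o) for o in optimize})
# {b're-delta-parent', b're-delta-all'} (set order may vary)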
566 | ALL_OPTIMISATIONS = [] |
|
560 | ALL_OPTIMISATIONS = [] | |
567 |
|
561 | |||
568 |
|
562 | |||
569 | def register_optimization(obj): |
|
563 | def register_optimization(obj): | |
570 | ALL_OPTIMISATIONS.append(obj) |
|
564 | ALL_OPTIMISATIONS.append(obj) | |
571 | return obj |
|
565 | return obj | |
572 |
|
566 | |||
573 |
|
567 | |||
574 | register_optimization( |
|
568 | register_optimization( | |
575 | improvement( |
|
569 | improvement( | |
576 | name=b're-delta-parent', |
|
570 | name=b're-delta-parent', | |
577 | type=OPTIMISATION, |
|
571 | type=OPTIMISATION, | |
578 | description=_( |
|
572 | description=_( | |
579 | b'deltas within internal storage will be recalculated to ' |
|
573 | b'deltas within internal storage will be recalculated to ' | |
580 | b'choose an optimal base revision where this was not ' |
|
574 | b'choose an optimal base revision where this was not ' | |
581 | b'already done; the size of the repository may shrink and ' |
|
575 | b'already done; the size of the repository may shrink and ' | |
582 | b'various operations may become faster; the first time ' |
|
576 | b'various operations may become faster; the first time ' | |
583 | b'this optimization is performed could slow down upgrade ' |
|
577 | b'this optimization is performed could slow down upgrade ' | |
584 | b'execution considerably; subsequent invocations should ' |
|
578 | b'execution considerably; subsequent invocations should ' | |
585 | b'not run noticeably slower' |
|
579 | b'not run noticeably slower' | |
586 | ), |
|
580 | ), | |
587 | upgrademessage=_( |
|
581 | upgrademessage=_( | |
588 | b'deltas within internal storage will choose a new ' |
|
582 | b'deltas within internal storage will choose a new ' | |
589 | b'base revision if needed' |
|
583 | b'base revision if needed' | |
590 | ), |
|
584 | ), | |
591 | ) |
|
585 | ) | |
592 | ) |
|
586 | ) | |
593 |
|
587 | |||
594 | register_optimization( |
|
588 | register_optimization( | |
595 | improvement( |
|
589 | improvement( | |
596 | name=b're-delta-multibase', |
|
590 | name=b're-delta-multibase', | |
597 | type=OPTIMISATION, |
|
591 | type=OPTIMISATION, | |
598 | description=_( |
|
592 | description=_( | |
599 | b'deltas within internal storage will be recalculated ' |
|
593 | b'deltas within internal storage will be recalculated ' | |
600 | b'against multiple base revision and the smallest ' |
|
594 | b'against multiple base revision and the smallest ' | |
601 | b'difference will be used; the size of the repository may ' |
|
595 | b'difference will be used; the size of the repository may ' | |
602 | b'shrink significantly when there are many merges; this ' |
|
596 | b'shrink significantly when there are many merges; this ' | |
603 | b'optimization will slow down execution in proportion to ' |
|
597 | b'optimization will slow down execution in proportion to ' | |
604 | b'the number of merges in the repository and the amount ' |
|
598 | b'the number of merges in the repository and the amount ' | |
605 | b'of files in the repository; this slowdown should not ' |
|
599 | b'of files in the repository; this slowdown should not ' | |
606 | b'be significant unless there are tens of thousands of ' |
|
600 | b'be significant unless there are tens of thousands of ' | |
607 | b'files and thousands of merges' |
|
601 | b'files and thousands of merges' | |
608 | ), |
|
602 | ), | |
609 | upgrademessage=_( |
|
603 | upgrademessage=_( | |
610 | b'deltas within internal storage will choose an ' |
|
604 | b'deltas within internal storage will choose an ' | |
611 | b'optimal delta by computing deltas against multiple ' |
|
605 | b'optimal delta by computing deltas against multiple ' | |
612 | b'parents; may slow down execution time ' |
|
606 | b'parents; may slow down execution time ' | |
613 | b'significantly' |
|
607 | b'significantly' | |
614 | ), |
|
608 | ), | |
615 | ) |
|
609 | ) | |
616 | ) |
|
610 | ) | |
617 |
|
611 | |||
618 | register_optimization( |
|
612 | register_optimization( | |
619 | improvement( |
|
613 | improvement( | |
620 | name=b're-delta-all', |
|
614 | name=b're-delta-all', | |
621 | type=OPTIMISATION, |
|
615 | type=OPTIMISATION, | |
622 | description=_( |
|
616 | description=_( | |
623 | b'deltas within internal storage will always be ' |
|
617 | b'deltas within internal storage will always be ' | |
624 | b'recalculated without reusing prior deltas; this will ' |
|
618 | b'recalculated without reusing prior deltas; this will ' | |
625 | b'likely make execution run several times slower; this ' |
|
619 | b'likely make execution run several times slower; this ' | |
626 | b'optimization is typically not needed' |
|
620 | b'optimization is typically not needed' | |
627 | ), |
|
621 | ), | |
628 | upgrademessage=_( |
|
622 | upgrademessage=_( | |
629 | b'deltas within internal storage will be fully ' |
|
623 | b'deltas within internal storage will be fully ' | |
630 | b'recomputed; this will likely drastically slow down ' |
|
624 | b'recomputed; this will likely drastically slow down ' | |
631 | b'execution time' |
|
625 | b'execution time' | |
632 | ), |
|
626 | ), | |
633 | ) |
|
627 | ) | |
634 | ) |
|
628 | ) | |
635 |
|
629 | |||
636 | register_optimization( |
|
630 | register_optimization( | |
637 | improvement( |
|
631 | improvement( | |
638 | name=b're-delta-fulladd', |
|
632 | name=b're-delta-fulladd', | |
639 | type=OPTIMISATION, |
|
633 | type=OPTIMISATION, | |
640 | description=_( |
|
634 | description=_( | |
641 | b'every revision will be re-added as if it was new ' |
|
635 | b'every revision will be re-added as if it was new ' | |
642 | b'content. It will go through the full storage ' |
|
636 | b'content. It will go through the full storage ' | |
643 | b'mechanism giving extensions a chance to process it ' |
|
637 | b'mechanism giving extensions a chance to process it ' | |
644 | b'(eg. lfs). This is similar to "re-delta-all" but even ' |
|
638 | b'(eg. lfs). This is similar to "re-delta-all" but even ' | |
645 | b'slower since more logic is involved.' |
|
639 | b'slower since more logic is involved.' | |
646 | ), |
|
640 | ), | |
647 | upgrademessage=_( |
|
641 | upgrademessage=_( | |
648 | b'each revision will be added as new content to the ' |
|
642 | b'each revision will be added as new content to the ' | |
649 | b'internal storage; this will likely drastically slow ' |
|
643 | b'internal storage; this will likely drastically slow ' | |
650 | b'down execution time, but some extensions might need ' |
|
644 | b'down execution time, but some extensions might need ' | |
651 | b'it' |
|
645 | b'it' | |
652 | ), |
|
646 | ), | |
653 | ) |
|
647 | ) | |
654 | ) |
|
648 | ) | |
655 |
|
649 | |||
656 |
|
650 | |||
657 | def findoptimizations(repo): |
|
651 | def findoptimizations(repo): | |
658 | """Determine optimisations that could be used during upgrade""" |
|
652 | """Determine optimisations that could be used during upgrade""" | |
659 | # These are unconditionally added. There is logic later that figures out |
|
653 | # These are unconditionally added. There is logic later that figures out | |
660 | # which ones to apply. |
|
654 | # which ones to apply. | |
661 | return list(ALL_OPTIMISATIONS) |
|
655 | return list(ALL_OPTIMISATIONS) | |
662 |
|
656 | |||
663 |
|
657 | |||
664 | def determineactions(repo, deficiencies, sourcereqs, destreqs): |
|
658 | def determineactions(repo, deficiencies, sourcereqs, destreqs): | |
665 | """Determine upgrade actions that will be performed. |
|
659 | """Determine upgrade actions that will be performed. | |
666 |
|
660 | |||
667 | Given a list of improvements as returned by ``finddeficiencies`` and |
|
661 | Given a list of improvements as returned by ``finddeficiencies`` and | |
668 | ``findoptimizations``, determine the list of upgrade actions that |
|
662 | ``findoptimizations``, determine the list of upgrade actions that | |
669 | will be performed. |
|
663 | will be performed. | |
670 |
|
664 | |||
671 | The role of this function is to filter improvements if needed, apply |
|
665 | The role of this function is to filter improvements if needed, apply | |
672 | recommended optimizations from the improvements list that make sense, |
|
666 | recommended optimizations from the improvements list that make sense, | |
673 | etc. |
|
667 | etc. | |
674 |
|
668 | |||
675 | Returns a list of action names. |
|
669 | Returns a list of action names. | |
676 | """ |
|
670 | """ | |
677 | newactions = [] |
|
671 | newactions = [] | |
678 |
|
672 | |||
679 | for d in deficiencies: |
|
673 | for d in deficiencies: | |
680 | name = d._requirement |
|
674 | name = d._requirement | |
681 |
|
675 | |||
682 | # If the action is a requirement that doesn't show up in the |
|
676 | # If the action is a requirement that doesn't show up in the | |
683 | # destination requirements, prune the action. |
|
677 | # destination requirements, prune the action. | |
684 | if name is not None and name not in destreqs: |
|
678 | if name is not None and name not in destreqs: | |
685 | continue |
|
679 | continue | |
686 |
|
680 | |||
687 | newactions.append(d) |
|
681 | newactions.append(d) | |
688 |
|
682 | |||
689 | # FUTURE consider adding some optimizations here for certain transitions. |
|
683 | # FUTURE consider adding some optimizations here for certain transitions. | |
690 | # e.g. adding generaldelta could schedule parent redeltas. |
|
684 | # e.g. adding generaldelta could schedule parent redeltas. | |
691 |
|
685 | |||
692 | return newactions |
|
686 | return newactions | |
693 |
|
687 | |||
694 |
|
688 | |||
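The pruning rule in determineactions() is small enough to check in isolation: a deficiency whose requirement does not appear in the destination requirements is dropped, everything else is kept. A sketch using an invented FakeDeficiency stand-in:

class FakeDeficiency(object):
    def __init__(self, requirement):
        self._requirement = requirement

destreqs = {b'fncache', b'generaldelta'}
deficiencies = [FakeDeficiency(b'fncache'), FakeDeficiency(b'exp-sidedata')]

# kept iff the requirement is None or present in the destination
newactions = [
    d for d in deficiencies
    if d._requirement is None or d._requirement in destreqs
]
print([d._requirement for d in newactions])  # [b'fncache']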
695 | def _revlogfrompath(repo, path): |
|
|||
696 | """Obtain a revlog from a repo path. |
|
|||
697 |
|
||||
698 | An instance of the appropriate class is returned. |
|
|||
699 | """ |
|
|||
700 | if path == b'00changelog.i': |
|
|||
701 | return changelog.changelog(repo.svfs) |
|
|||
702 | elif path.endswith(b'00manifest.i'): |
|
|||
703 | mandir = path[: -len(b'00manifest.i')] |
|
|||
704 | return manifest.manifestrevlog(repo.svfs, tree=mandir) |
|
|||
705 | else: |
|
|||
706 | # reverse of "/".join(("data", path + ".i")) |
|
|||
707 | return filelog.filelog(repo.svfs, path[5:-2]) |
|
|||
708 |
|
||||
709 |
|
||||
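The dispatch in _revlogfrompath() is pure string handling; the following self-contained illustration traces the three branches (classify() is an invented helper and the sample paths are hypothetical store entries):

def classify(path):
    if path == b'00changelog.i':
        return (b'changelog', None)
    elif path.endswith(b'00manifest.i'):
        # everything before '00manifest.i' selects the manifest tree
        return (b'manifest', path[: -len(b'00manifest.i')])
    else:
        # reverse of "/".join(("data", path + ".i"))
        return (b'filelog', path[5:-2])

print(classify(b'00changelog.i'))          # (b'changelog', None)
print(classify(b'meta/foo/00manifest.i'))  # (b'manifest', b'meta/foo/')
print(classify(b'data/src/main.py.i'))     # (b'filelog', b'src/main.py')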
710 | def _copyrevlog(tr, destrepo, oldrl, unencodedname): |
|
|||
711 | """copy all relevant files for `oldrl` into `destrepo` store |
|
|||
712 |
|
||||
713 | Files are copied "as is" without any transformation. The copy is performed |
|
|||
714 | without extra checks. Callers are responsible for making sure the copied |
|
|||
715 | content is compatible with format of the destination repository. |
|
|||
716 | """ |
|
|||
717 | oldrl = getattr(oldrl, '_revlog', oldrl) |
|
|||
718 | newrl = _revlogfrompath(destrepo, unencodedname) |
|
|||
719 | newrl = getattr(newrl, '_revlog', newrl) |
|
|||
720 |
|
||||
721 | oldvfs = oldrl.opener |
|
|||
722 | newvfs = newrl.opener |
|
|||
723 | oldindex = oldvfs.join(oldrl.indexfile) |
|
|||
724 | newindex = newvfs.join(newrl.indexfile) |
|
|||
725 | olddata = oldvfs.join(oldrl.datafile) |
|
|||
726 | newdata = newvfs.join(newrl.datafile) |
|
|||
727 |
|
||||
728 | with newvfs(newrl.indexfile, b'w'): |
|
|||
729 | pass # create all the directories |
|
|||
730 |
|
||||
731 | util.copyfile(oldindex, newindex) |
|
|||
732 | copydata = oldrl.opener.exists(oldrl.datafile) |
|
|||
733 | if copydata: |
|
|||
734 | util.copyfile(olddata, newdata) |
|
|||
735 |
|
||||
736 | if not ( |
|
|||
737 | unencodedname.endswith(b'00changelog.i') |
|
|||
738 | or unencodedname.endswith(b'00manifest.i') |
|
|||
739 | ): |
|
|||
740 | destrepo.svfs.fncache.add(unencodedname) |
|
|||
741 | if copydata: |
|
|||
742 | destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d') |
|
|||
743 |
|
||||
744 |
|
||||
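The fncache bookkeeping at the end of _copyrevlog() derives the data-file name from the index name by swapping the trailing '.i' for '.d'; with a hypothetical entry:

name = b'data/src/main.py.i'
print(name[:-2] + b'.d')  # b'data/src/main.py.d'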
745 | UPGRADE_CHANGELOG = b"changelog" |
|
|||
746 | UPGRADE_MANIFEST = b"manifest" |
|
|||
747 | UPGRADE_FILELOGS = b"all-filelogs" |
|
|||
748 |
|
||||
749 | UPGRADE_ALL_REVLOGS = frozenset( |
|
|||
750 | [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS] |
|
|||
751 | ) |
|
|||
752 |
|
||||
753 |
|
||||
754 | def getsidedatacompanion(srcrepo, dstrepo): |
|
|||
755 | sidedatacompanion = None |
|
|||
756 | removedreqs = srcrepo.requirements - dstrepo.requirements |
|
|||
757 | addedreqs = dstrepo.requirements - srcrepo.requirements |
|
|||
758 | if requirements.SIDEDATA_REQUIREMENT in removedreqs: |
|
|||
759 |
|
||||
760 | def sidedatacompanion(rl, rev): |
|
|||
761 | rl = getattr(rl, '_revlog', rl) |
|
|||
762 | if rl.flags(rev) & revlog.REVIDX_SIDEDATA: |
|
|||
763 | return True, (), {}, 0, 0 |
|
|||
764 | return False, (), {}, 0, 0 |
|
|||
765 |
|
||||
766 | elif requirements.COPIESSDC_REQUIREMENT in addedreqs: |
|
|||
767 | sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo) |
|
|||
768 | elif requirements.COPIESSDC_REQUIREMENT in removedreqs: |
|
|||
769 | sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo) |
|
|||
770 | return sidedatacompanion |
|
|||
771 |
|
||||
772 |
|
||||
773 | def matchrevlog(revlogfilter, entry): |
|
|||
774 | """check if a revlog is selected for cloning. |
|
|||
775 |
|
||||
776 | In other words, are there any updates which need to be done on the revlog, |
|
|||
777 | or can it be blindly copied? |
|
|||
778 |
|
||||
779 | The store entry is checked against the passed filter""" |
|
|||
780 | if entry.endswith(b'00changelog.i'): |
|
|||
781 | return UPGRADE_CHANGELOG in revlogfilter |
|
|||
782 | elif entry.endswith(b'00manifest.i'): |
|
|||
783 | return UPGRADE_MANIFEST in revlogfilter |
|
|||
784 | return UPGRADE_FILELOGS in revlogfilter |
|
|||
785 |
|
||||
786 |
|
||||
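Usage of matchrevlog() with the constants above, restated verbatim so the snippet runs standalone (the sample entries are hypothetical); an entry that fails the test takes the blind-copy path in _clonerevlogs() below instead of being recloned:

UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

def matchrevlog(revlogfilter, entry):
    if entry.endswith(b'00changelog.i'):
        return UPGRADE_CHANGELOG in revlogfilter
    elif entry.endswith(b'00manifest.i'):
        return UPGRADE_MANIFEST in revlogfilter
    return UPGRADE_FILELOGS in revlogfilter

# reclone only filelogs: the changelog entry is excluded
print(matchrevlog({UPGRADE_FILELOGS}, b'data/a.txt.i'))   # True
print(matchrevlog({UPGRADE_FILELOGS}, b'00changelog.i'))  # False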
787 | def _clonerevlogs( |
|
|||
788 | ui, |
|
|||
789 | srcrepo, |
|
|||
790 | dstrepo, |
|
|||
791 | tr, |
|
|||
792 | deltareuse, |
|
|||
793 | forcedeltabothparents, |
|
|||
794 | revlogs=UPGRADE_ALL_REVLOGS, |
|
|||
795 | ): |
|
|||
796 | """Copy revlogs between 2 repos.""" |
|
|||
797 | revcount = 0 |
|
|||
798 | srcsize = 0 |
|
|||
799 | srcrawsize = 0 |
|
|||
800 | dstsize = 0 |
|
|||
801 | fcount = 0 |
|
|||
802 | frevcount = 0 |
|
|||
803 | fsrcsize = 0 |
|
|||
804 | frawsize = 0 |
|
|||
805 | fdstsize = 0 |
|
|||
806 | mcount = 0 |
|
|||
807 | mrevcount = 0 |
|
|||
808 | msrcsize = 0 |
|
|||
809 | mrawsize = 0 |
|
|||
810 | mdstsize = 0 |
|
|||
811 | crevcount = 0 |
|
|||
812 | csrcsize = 0 |
|
|||
813 | crawsize = 0 |
|
|||
814 | cdstsize = 0 |
|
|||
815 |
|
||||
816 | alldatafiles = list(srcrepo.store.walk()) |
|
|||
817 |
|
||||
818 | # Perform a pass to collect metadata. This validates we can open all |
|
|||
819 | # source files and allows a unified progress bar to be displayed. |
|
|||
820 | for unencoded, encoded, size in alldatafiles: |
|
|||
821 | if unencoded.endswith(b'.d'): |
|
|||
822 | continue |
|
|||
823 |
|
||||
824 | rl = _revlogfrompath(srcrepo, unencoded) |
|
|||
825 |
|
||||
826 | info = rl.storageinfo( |
|
|||
827 | exclusivefiles=True, |
|
|||
828 | revisionscount=True, |
|
|||
829 | trackedsize=True, |
|
|||
830 | storedsize=True, |
|
|||
831 | ) |
|
|||
832 |
|
||||
833 | revcount += info[b'revisionscount'] or 0 |
|
|||
834 | datasize = info[b'storedsize'] or 0 |
|
|||
835 | rawsize = info[b'trackedsize'] or 0 |
|
|||
836 |
|
||||
837 | srcsize += datasize |
|
|||
838 | srcrawsize += rawsize |
|
|||
839 |
|
||||
840 | # This is for the separate progress bars. |
|
|||
841 | if isinstance(rl, changelog.changelog): |
|
|||
842 | crevcount += len(rl) |
|
|||
843 | csrcsize += datasize |
|
|||
844 | crawsize += rawsize |
|
|||
845 | elif isinstance(rl, manifest.manifestrevlog): |
|
|||
846 | mcount += 1 |
|
|||
847 | mrevcount += len(rl) |
|
|||
848 | msrcsize += datasize |
|
|||
849 | mrawsize += rawsize |
|
|||
850 | elif isinstance(rl, filelog.filelog): |
|
|||
851 | fcount += 1 |
|
|||
852 | frevcount += len(rl) |
|
|||
853 | fsrcsize += datasize |
|
|||
854 | frawsize += rawsize |
|
|||
855 | else: |
|
|||
856 | raise error.ProgrammingError(b'unknown revlog type') |
|
|||
857 |
|
||||
858 | if not revcount: |
|
|||
859 | return |
|
|||
860 |
|
||||
861 | ui.status( |
|
|||
862 | _( |
|
|||
863 | b'migrating %d total revisions (%d in filelogs, %d in manifests, ' |
|
|||
864 | b'%d in changelog)\n' |
|
|||
865 | ) |
|
|||
866 | % (revcount, frevcount, mrevcount, crevcount) |
|
|||
867 | ) |
|
|||
868 | ui.status( |
|
|||
869 | _(b'migrating %s in store; %s tracked data\n') |
|
|||
870 | % ((util.bytecount(srcsize), util.bytecount(srcrawsize))) |
|
|||
871 | ) |
|
|||
872 |
|
||||
873 | # Used to keep track of progress. |
|
|||
874 | progress = None |
|
|||
875 |
|
||||
876 | def oncopiedrevision(rl, rev, node): |
|
|||
877 | progress.increment() |
|
|||
878 |
|
||||
879 | sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo) |
|
|||
880 |
|
||||
881 | # Do the actual copying. |
|
|||
882 | # FUTURE this operation can be farmed off to worker processes. |
|
|||
883 | seen = set() |
|
|||
884 | for unencoded, encoded, size in alldatafiles: |
|
|||
885 | if unencoded.endswith(b'.d'): |
|
|||
886 | continue |
|
|||
887 |
|
||||
888 | oldrl = _revlogfrompath(srcrepo, unencoded) |
|
|||
889 |
|
||||
890 | if isinstance(oldrl, changelog.changelog) and b'c' not in seen: |
|
|||
891 | ui.status( |
|
|||
892 | _( |
|
|||
893 | b'finished migrating %d manifest revisions across %d ' |
|
|||
894 | b'manifests; change in size: %s\n' |
|
|||
895 | ) |
|
|||
896 | % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) |
|
|||
897 | ) |
|
|||
898 |
|
||||
899 | ui.status( |
|
|||
900 | _( |
|
|||
901 | b'migrating changelog containing %d revisions ' |
|
|||
902 | b'(%s in store; %s tracked data)\n' |
|
|||
903 | ) |
|
|||
904 | % ( |
|
|||
905 | crevcount, |
|
|||
906 | util.bytecount(csrcsize), |
|
|||
907 | util.bytecount(crawsize), |
|
|||
908 | ) |
|
|||
909 | ) |
|
|||
910 | seen.add(b'c') |
|
|||
911 | progress = srcrepo.ui.makeprogress( |
|
|||
912 | _(b'changelog revisions'), total=crevcount |
|
|||
913 | ) |
|
|||
914 | elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen: |
|
|||
915 | ui.status( |
|
|||
916 | _( |
|
|||
917 | b'finished migrating %d filelog revisions across %d ' |
|
|||
918 | b'filelogs; change in size: %s\n' |
|
|||
919 | ) |
|
|||
920 | % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) |
|
|||
921 | ) |
|
|||
922 |
|
||||
923 | ui.status( |
|
|||
924 | _( |
|
|||
925 | b'migrating %d manifests containing %d revisions ' |
|
|||
926 | b'(%s in store; %s tracked data)\n' |
|
|||
927 | ) |
|
|||
928 | % ( |
|
|||
929 | mcount, |
|
|||
930 | mrevcount, |
|
|||
931 | util.bytecount(msrcsize), |
|
|||
932 | util.bytecount(mrawsize), |
|
|||
933 | ) |
|
|||
934 | ) |
|
|||
935 | seen.add(b'm') |
|
|||
936 | if progress: |
|
|||
937 | progress.complete() |
|
|||
938 | progress = srcrepo.ui.makeprogress( |
|
|||
939 | _(b'manifest revisions'), total=mrevcount |
|
|||
940 | ) |
|
|||
941 | elif b'f' not in seen: |
|
|||
942 | ui.status( |
|
|||
943 | _( |
|
|||
944 | b'migrating %d filelogs containing %d revisions ' |
|
|||
945 | b'(%s in store; %s tracked data)\n' |
|
|||
946 | ) |
|
|||
947 | % ( |
|
|||
948 | fcount, |
|
|||
949 | frevcount, |
|
|||
950 | util.bytecount(fsrcsize), |
|
|||
951 | util.bytecount(frawsize), |
|
|||
952 | ) |
|
|||
953 | ) |
|
|||
954 | seen.add(b'f') |
|
|||
955 | if progress: |
|
|||
956 | progress.complete() |
|
|||
957 | progress = srcrepo.ui.makeprogress( |
|
|||
958 | _(b'file revisions'), total=frevcount |
|
|||
959 | ) |
|
|||
960 |
|
||||
961 | if matchrevlog(revlogs, unencoded): |
|
|||
962 | ui.note( |
|
|||
963 | _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded) |
|
|||
964 | ) |
|
|||
965 | newrl = _revlogfrompath(dstrepo, unencoded) |
|
|||
966 | oldrl.clone( |
|
|||
967 | tr, |
|
|||
968 | newrl, |
|
|||
969 | addrevisioncb=oncopiedrevision, |
|
|||
970 | deltareuse=deltareuse, |
|
|||
971 | forcedeltabothparents=forcedeltabothparents, |
|
|||
972 | sidedatacompanion=sidedatacompanion, |
|
|||
973 | ) |
|
|||
974 | else: |
|
|||
975 | msg = _(b'blindly copying %s containing %i revisions\n') |
|
|||
976 | ui.note(msg % (unencoded, len(oldrl))) |
|
|||
977 | _copyrevlog(tr, dstrepo, oldrl, unencoded) |
|
|||
978 |
|
||||
979 | newrl = _revlogfrompath(dstrepo, unencoded) |
|
|||
980 |
|
||||
981 | info = newrl.storageinfo(storedsize=True) |
|
|||
982 | datasize = info[b'storedsize'] or 0 |
|
|||
983 |
|
||||
984 | dstsize += datasize |
|
|||
985 |
|
||||
986 | if isinstance(newrl, changelog.changelog): |
|
|||
987 | cdstsize += datasize |
|
|||
988 | elif isinstance(newrl, manifest.manifestrevlog): |
|
|||
989 | mdstsize += datasize |
|
|||
990 | else: |
|
|||
991 | fdstsize += datasize |
|
|||
992 |
|
||||
993 | progress.complete() |
|
|||
994 |
|
||||
995 | ui.status( |
|
|||
996 | _( |
|
|||
997 | b'finished migrating %d changelog revisions; change in size: ' |
|
|||
998 | b'%s\n' |
|
|||
999 | ) |
|
|||
1000 | % (crevcount, util.bytecount(cdstsize - csrcsize)) |
|
|||
1001 | ) |
|
|||
1002 |
|
||||
1003 | ui.status( |
|
|||
1004 | _( |
|
|||
1005 | b'finished migrating %d total revisions; total change in store ' |
|
|||
1006 | b'size: %s\n' |
|
|||
1007 | ) |
|
|||
1008 | % (revcount, util.bytecount(dstsize - srcsize)) |
|
|||
1009 | ) |
|
|||
1010 |
|
||||
1011 |
|
||||
1012 | def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st): |
|
|||
1013 | """Determine whether to copy a store file during upgrade. |
|
|||
1014 |
|
||||
1015 | This function is called when migrating store files from ``srcrepo`` to |
|
|||
1016 | ``dstrepo`` as part of upgrading a repository. |
|
|||
1017 |
|
||||
1018 | Args: |
|
|||
1019 | srcrepo: repo we are copying from |
|
|||
1020 | dstrepo: repo we are copying to |
|
|||
1021 | requirements: set of requirements for ``dstrepo`` |
|
|||
1022 | path: store file being examined |
|
|||
1023 | mode: the ``ST_MODE`` file type of ``path`` |
|
|||
1024 | st: ``stat`` data structure for ``path`` |
|
|||
1025 |
|
||||
1026 | Function should return ``True`` if the file is to be copied. |
|
|||
1027 | """ |
|
|||
1028 | # Skip revlogs. |
|
|||
1029 | if path.endswith((b'.i', b'.d', b'.n', b'.nd')): |
|
|||
1030 | return False |
|
|||
1031 | # Skip transaction related files. |
|
|||
1032 | if path.startswith(b'undo'): |
|
|||
1033 | return False |
|
|||
1034 | # Only copy regular files. |
|
|||
1035 | if mode != stat.S_IFREG: |
|
|||
1036 | return False |
|
|||
1037 | # Skip other skipped files. |
|
|||
1038 | if path in (b'lock', b'fncache'): |
|
|||
1039 | return False |
|
|||
1040 |
|
||||
1041 | return True |
|
|||
1042 |
|
||||
1043 |
|
||||
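A standalone sketch of the filter above against a hypothetical store listing; keep() mirrors the four tests (srcrepo, dstrepo and requirements are unused by them and omitted here). Only 'phaseroots' survives: revlogs are cloned separately, 'undo*' files are transaction leftovers, and the lock and fncache are regenerated.

import stat

def keep(path, mode):
    if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
        return False  # revlogs: handled by _clonerevlogs()
    if path.startswith(b'undo'):
        return False  # transaction leftovers
    if mode != stat.S_IFREG:
        return False  # only regular files
    if path in (b'lock', b'fncache'):
        return False  # recreated as needed
    return True

listing = [b'00changelog.i', b'undo.backupfiles', b'lock', b'phaseroots']
print([p for p in listing if keep(p, stat.S_IFREG)])  # [b'phaseroots']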
1044 | def _finishdatamigration(ui, srcrepo, dstrepo, requirements): |
|
|||
1045 | """Hook point for extensions to perform additional actions during upgrade. |
|
|||
1046 |
|
||||
1047 | This function is called after revlogs and store files have been copied but |
|
|||
1048 | before the new store is swapped into the original location. |
|
|||
1049 | """ |
|
|||
1050 |
|
||||
1051 |
|
||||
1052 | def _upgraderepo( |
|
|||
1053 | ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS |
|
|||
1054 | ): |
|
|||
1055 | """Do the low-level work of upgrading a repository. |
|
|||
1056 |
|
||||
1057 | The upgrade is effectively performed as a copy between a source |
|
|||
1058 | repository and a temporary destination repository. |
|
|||
1059 |
|
||||
1060 | The source repository is unmodified for as long as possible so the |
|
|||
1061 | upgrade can abort at any time without causing loss of service for |
|
|||
1062 | readers and without corrupting the source repository. |
|
|||
1063 | """ |
|
|||
1064 | assert srcrepo.currentwlock() |
|
|||
1065 | assert dstrepo.currentwlock() |
|
|||
1066 |
|
||||
1067 | ui.status( |
|
|||
1068 | _( |
|
|||
1069 | b'(it is safe to interrupt this process any time before ' |
|
|||
1070 | b'data migration completes)\n' |
|
|||
1071 | ) |
|
|||
1072 | ) |
|
|||
1073 |
|
||||
1074 | if b're-delta-all' in actions: |
|
|||
1075 | deltareuse = revlog.revlog.DELTAREUSENEVER |
|
|||
1076 | elif b're-delta-parent' in actions: |
|
|||
1077 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS |
|
|||
1078 | elif b're-delta-multibase' in actions: |
|
|||
1079 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS |
|
|||
1080 | elif b're-delta-fulladd' in actions: |
|
|||
1081 | deltareuse = revlog.revlog.DELTAREUSEFULLADD |
|
|||
1082 | else: |
|
|||
1083 | deltareuse = revlog.revlog.DELTAREUSEALWAYS |
|
|||
1084 |
|
||||
1085 | with dstrepo.transaction(b'upgrade') as tr: |
|
|||
1086 | _clonerevlogs( |
|
|||
1087 | ui, |
|
|||
1088 | srcrepo, |
|
|||
1089 | dstrepo, |
|
|||
1090 | tr, |
|
|||
1091 | deltareuse, |
|
|||
1092 | b're-delta-multibase' in actions, |
|
|||
1093 | revlogs=revlogs, |
|
|||
1094 | ) |
|
|||
1095 |
|
||||
1096 | # Now copy other files in the store directory. |
|
|||
1097 | # The sorted() makes execution deterministic. |
|
|||
1098 | for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)): |
|
|||
1099 | if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st): |
|
|||
1100 | continue |
|
|||
1101 |
|
||||
1102 | srcrepo.ui.status(_(b'copying %s\n') % p) |
|
|||
1103 | src = srcrepo.store.rawvfs.join(p) |
|
|||
1104 | dst = dstrepo.store.rawvfs.join(p) |
|
|||
1105 | util.copyfile(src, dst, copystat=True) |
|
|||
1106 |
|
||||
1107 | _finishdatamigration(ui, srcrepo, dstrepo, requirements) |
|
|||
1108 |
|
||||
1109 | ui.status(_(b'data fully migrated to temporary repository\n')) |
|
|||
1110 |
|
||||
1111 | backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path) |
|
|||
1112 | backupvfs = vfsmod.vfs(backuppath) |
|
|||
1113 |
|
||||
1114 | # Make a backup of requires file first, as it is the first to be modified. |
|
|||
1115 | util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')) |
|
|||
1116 |
|
||||
1117 | # We install an arbitrary requirement that clients must not support |
|
|||
1118 | # as a mechanism to lock out new clients during the data swap. This is |
|
|||
1119 | # better than allowing a client to continue while the repository is in |
|
|||
1120 | # an inconsistent state. |
|
|||
1121 | ui.status( |
|
|||
1122 | _( |
|
|||
1123 | b'marking source repository as being upgraded; clients will be ' |
|
|||
1124 | b'unable to read from repository\n' |
|
|||
1125 | ) |
|
|||
1126 | ) |
|
|||
1127 | scmutil.writereporequirements( |
|
|||
1128 | srcrepo, srcrepo.requirements | {b'upgradeinprogress'} |
|
|||
1129 | ) |
|
|||
1130 |
|
||||
1131 | ui.status(_(b'starting in-place swap of repository data\n')) |
|
|||
1132 | ui.status(_(b'replaced files will be backed up at %s\n') % backuppath) |
|
|||
1133 |
|
||||
1134 | # Now swap in the new store directory. Doing it as a rename should make |
|
|||
1135 | # the operation nearly instantaneous and atomic (at least in well-behaved |
|
|||
1136 | # environments). |
|
|||
1137 | ui.status(_(b'replacing store...\n')) |
|
|||
1138 | tstart = util.timer() |
|
|||
1139 | util.rename(srcrepo.spath, backupvfs.join(b'store')) |
|
|||
1140 | util.rename(dstrepo.spath, srcrepo.spath) |
|
|||
1141 | elapsed = util.timer() - tstart |
|
|||
1142 | ui.status( |
|
|||
1143 | _( |
|
|||
1144 | b'store replacement complete; repository was inconsistent for ' |
|
|||
1145 | b'%0.1fs\n' |
|
|||
1146 | ) |
|
|||
1147 | % elapsed |
|
|||
1148 | ) |
|
|||
1149 |
|
||||
1150 | # We first write the requirements file. Any new requirements will lock |
|
|||
1151 | # out legacy clients. |
|
|||
1152 | ui.status( |
|
|||
1153 | _( |
|
|||
1154 | b'finalizing requirements file and making repository readable ' |
|
|||
1155 | b'again\n' |
|
|||
1156 | ) |
|
|||
1157 | ) |
|
|||
1158 | scmutil.writereporequirements(srcrepo, requirements) |
|
|||
1159 |
|
||||
1160 | # The lock file from the old store won't be removed because nothing has a |
|
|||
1161 | # reference to its new location. So clean it up manually. Alternatively, we |
|
|||
1162 | # could update srcrepo.svfs and other variables to point to the new |
|
|||
1163 | # location. This is simpler. |
|
|||
1164 | backupvfs.unlink(b'store/lock') |
|
|||
1165 |
|
||||
1166 | return backuppath |
|
|||
1167 |
|
||||
1168 |
|
||||
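The action-to-deltareuse mapping at the top of _upgraderepo() is worth isolating: re-delta-parent and re-delta-multibase both select DELTAREUSESAMEREVS, and multibase is distinguished only by the forcedeltabothparents flag passed to _clonerevlogs(). A sketch with plain strings standing in for the revlog.revlog constants:

def pick_deltareuse(actions):
    if b're-delta-all' in actions:
        return 'DELTAREUSENEVER'
    elif b're-delta-parent' in actions:
        return 'DELTAREUSESAMEREVS'
    elif b're-delta-multibase' in actions:
        return 'DELTAREUSESAMEREVS'
    elif b're-delta-fulladd' in actions:
        return 'DELTAREUSEFULLADD'
    else:
        return 'DELTAREUSEALWAYS'

print(pick_deltareuse({b're-delta-multibase'}))  # DELTAREUSESAMEREVS
print(pick_deltareuse(set()))                    # DELTAREUSEALWAYS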
1169 | def upgraderepo( |
|
689 | def upgraderepo( | |
1170 | ui, |
|
690 | ui, | |
1171 | repo, |
|
691 | repo, | |
1172 | run=False, |
|
692 | run=False, | |
1173 | optimize=None, |
|
693 | optimize=None, | |
1174 | backup=True, |
|
694 | backup=True, | |
1175 | manifest=None, |
|
695 | manifest=None, | |
1176 | changelog=None, |
|
696 | changelog=None, | |
1177 | filelogs=None, |
|
697 | filelogs=None, | |
1178 | ): |
|
698 | ): | |
1179 | """Upgrade a repository in place.""" |
|
699 | """Upgrade a repository in place.""" | |
1180 | if optimize is None: |
|
700 | if optimize is None: | |
1181 | optimize = [] |
|
701 | optimize = [] | |
1182 | optimize = {legacy_opts_map.get(o, o) for o in optimize} |
|
702 | optimize = {legacy_opts_map.get(o, o) for o in optimize} | |
1183 | repo = repo.unfiltered() |
|
703 | repo = repo.unfiltered() | |
1184 |
|
704 | |||
1185 | revlogs = set(UPGRADE_ALL_REVLOGS) |
|
705 | revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS) | |
1186 | specentries = ( |
|
706 | specentries = ( | |
1187 | (UPGRADE_CHANGELOG, changelog), |
|
707 | (upgrade_engine.UPGRADE_CHANGELOG, changelog), | |
1188 | (UPGRADE_MANIFEST, manifest), |
|
708 | (upgrade_engine.UPGRADE_MANIFEST, manifest), | |
1189 | (UPGRADE_FILELOGS, filelogs), |
|
709 | (upgrade_engine.UPGRADE_FILELOGS, filelogs), | |
1190 | ) |
|
710 | ) | |
1191 | specified = [(y, x) for (y, x) in specentries if x is not None] |
|
711 | specified = [(y, x) for (y, x) in specentries if x is not None] | |
1192 | if specified: |
|
712 | if specified: | |
1193 | # we have some limitation on revlogs to be recloned |
|
713 | # we have some limitation on revlogs to be recloned | |
1194 | if any(x for y, x in specified): |
|
714 | if any(x for y, x in specified): | |
1195 | revlogs = set() |
|
715 | revlogs = set() | |
1196 | for upgrade, enabled in specified: |
|
716 | for upgrade, enabled in specified: | |
1197 | if enabled: |
|
717 | if enabled: | |
1198 | revlogs.add(upgrade) |
|
718 | revlogs.add(upgrade) | |
1199 | else: |
|
719 | else: | |
1200 | # none are enabled |
|
720 | # none are enabled | |
1201 | for upgrade, __ in specified: |
|
721 | for upgrade, __ in specified: | |
1202 | revlogs.discard(upgrade) |
|
722 | revlogs.discard(upgrade) | |
1203 |
|
723 | |||
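The changelog/manifest/filelogs arguments handled above are tri-state: None means unspecified, True/False are explicit selections. Isolated as an invented helper with hypothetical inputs, the selection behaves as follows:

def select_revlogs(changelog, manifest, filelogs):
    revlogs = {b'changelog', b'manifest', b'all-filelogs'}
    specentries = (
        (b'changelog', changelog),
        (b'manifest', manifest),
        (b'all-filelogs', filelogs),
    )
    specified = [(y, x) for (y, x) in specentries if x is not None]
    if specified:
        if any(x for y, x in specified):
            # at least one flag enabled: reclone only those
            revlogs = {y for y, x in specified if x}
        else:
            # only disabled flags given: drop them, keep the rest
            for y, flag in specified:
                revlogs.discard(y)
    return revlogs

print(select_revlogs(changelog=True, manifest=None, filelogs=None))
# {b'changelog'}
print(select_revlogs(changelog=False, manifest=None, filelogs=None))
# {b'manifest', b'all-filelogs'} (set order may vary)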
1204 | # Ensure the repository can be upgraded. |
|
724 | # Ensure the repository can be upgraded. | |
1205 | missingreqs = requiredsourcerequirements(repo) - repo.requirements |
|
725 | missingreqs = requiredsourcerequirements(repo) - repo.requirements | |
1206 | if missingreqs: |
|
726 | if missingreqs: | |
1207 | raise error.Abort( |
|
727 | raise error.Abort( | |
1208 | _(b'cannot upgrade repository; requirement missing: %s') |
|
728 | _(b'cannot upgrade repository; requirement missing: %s') | |
1209 | % _(b', ').join(sorted(missingreqs)) |
|
729 | % _(b', ').join(sorted(missingreqs)) | |
1210 | ) |
|
730 | ) | |
1211 |
|
731 | |||
1212 | blockedreqs = blocksourcerequirements(repo) & repo.requirements |
|
732 | blockedreqs = blocksourcerequirements(repo) & repo.requirements | |
1213 | if blockedreqs: |
|
733 | if blockedreqs: | |
1214 | raise error.Abort( |
|
734 | raise error.Abort( | |
1215 | _( |
|
735 | _( | |
1216 | b'cannot upgrade repository; unsupported source ' |
|
736 | b'cannot upgrade repository; unsupported source ' | |
1217 | b'requirement: %s' |
|
737 | b'requirement: %s' | |
1218 | ) |
|
738 | ) | |
1219 | % _(b', ').join(sorted(blockedreqs)) |
|
739 | % _(b', ').join(sorted(blockedreqs)) | |
1220 | ) |
|
740 | ) | |
1221 |
|
741 | |||
1222 | # FUTURE there is potentially a need to control the wanted requirements via |
|
742 | # FUTURE there is potentially a need to control the wanted requirements via | |
1223 | # command arguments or via an extension hook point. |
|
743 | # command arguments or via an extension hook point. | |
1224 | newreqs = localrepo.newreporequirements( |
|
744 | newreqs = localrepo.newreporequirements( | |
1225 | repo.ui, localrepo.defaultcreateopts(repo.ui) |
|
745 | repo.ui, localrepo.defaultcreateopts(repo.ui) | |
1226 | ) |
|
746 | ) | |
1227 | newreqs.update(preservedrequirements(repo)) |
|
747 | newreqs.update(preservedrequirements(repo)) | |
1228 |
|
748 | |||
1229 | noremovereqs = ( |
|
749 | noremovereqs = ( | |
1230 | repo.requirements - newreqs - supportremovedrequirements(repo) |
|
750 | repo.requirements - newreqs - supportremovedrequirements(repo) | |
1231 | ) |
|
751 | ) | |
1232 | if noremovereqs: |
|
752 | if noremovereqs: | |
1233 | raise error.Abort( |
|
753 | raise error.Abort( | |
1234 | _( |
|
754 | _( | |
1235 | b'cannot upgrade repository; requirement would be ' |
|
755 | b'cannot upgrade repository; requirement would be ' | |
1236 | b'removed: %s' |
|
756 | b'removed: %s' | |
1237 | ) |
|
757 | ) | |
1238 | % _(b', ').join(sorted(noremovereqs)) |
|
758 | % _(b', ').join(sorted(noremovereqs)) | |
1239 | ) |
|
759 | ) | |
1240 |
|
760 | |||
1241 | noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo) |
|
761 | noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo) | |
1242 | if noaddreqs: |
|
762 | if noaddreqs: | |
1243 | raise error.Abort( |
|
763 | raise error.Abort( | |
1244 | _( |
|
764 | _( | |
1245 | b'cannot upgrade repository; do not support adding ' |
|
765 | b'cannot upgrade repository; do not support adding ' | |
1246 | b'requirement: %s' |
|
766 | b'requirement: %s' | |
1247 | ) |
|
767 | ) | |
1248 | % _(b', ').join(sorted(noaddreqs)) |
|
768 | % _(b', ').join(sorted(noaddreqs)) | |
1249 | ) |
|
769 | ) | |
1250 |
|
770 | |||
1251 | unsupportedreqs = newreqs - supporteddestrequirements(repo) |
|
771 | unsupportedreqs = newreqs - supporteddestrequirements(repo) | |
1252 | if unsupportedreqs: |
|
772 | if unsupportedreqs: | |
1253 | raise error.Abort( |
|
773 | raise error.Abort( | |
1254 | _( |
|
774 | _( | |
1255 | b'cannot upgrade repository; do not support ' |
|
775 | b'cannot upgrade repository; do not support ' | |
1256 | b'destination requirement: %s' |
|
776 | b'destination requirement: %s' | |
1257 | ) |
|
777 | ) | |
1258 | % _(b', ').join(sorted(unsupportedreqs)) |
|
778 | % _(b', ').join(sorted(unsupportedreqs)) | |
1259 | ) |
|
779 | ) | |

     # Find and validate all improvements that can be made.
     alloptimizations = findoptimizations(repo)

     # Apply and Validate arguments.
     optimizations = []
     for o in alloptimizations:
         if o.name in optimize:
             optimizations.append(o)
             optimize.discard(o.name)

     if optimize:  # anything left is unknown
         raise error.Abort(
             _(b'unknown optimization action requested: %s')
             % b', '.join(sorted(optimize)),
             hint=_(b'run without arguments to see valid optimizations'),
         )

     deficiencies = finddeficiencies(repo)
     actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
     actions.extend(
         o
         for o in sorted(optimizations)
         # determineactions could have added optimisation
         if o not in actions
     )

     removedreqs = repo.requirements - newreqs
     addedreqs = newreqs - repo.requirements

-    if revlogs != UPGRADE_ALL_REVLOGS:
+    if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
         incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
         if incompatible:
             msg = _(
                 b'ignoring revlogs selection flags, format requirements '
                 b'change: %s\n'
             )
             ui.warn(msg % b', '.join(sorted(incompatible)))
-            revlogs = UPGRADE_ALL_REVLOGS
+            revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS

     def write_labeled(l, label):
         first = True
         for r in sorted(l):
             if not first:
                 ui.write(b', ')
             ui.write(r, label=label)
             first = False

     def printrequirements():
         ui.write(_(b'requirements\n'))
         ui.write(_(b'   preserved: '))
         write_labeled(
             newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
         )
         ui.write((b'\n'))
         removed = repo.requirements - newreqs
         if repo.requirements - newreqs:
             ui.write(_(b'   removed: '))
             write_labeled(removed, "upgrade-repo.requirement.removed")
             ui.write((b'\n'))
         added = newreqs - repo.requirements
         if added:
             ui.write(_(b'   added: '))
             write_labeled(added, "upgrade-repo.requirement.added")
             ui.write((b'\n'))
         ui.write(b'\n')

     def printoptimisations():
         optimisations = [a for a in actions if a.type == OPTIMISATION]
         optimisations.sort(key=lambda a: a.name)
         if optimisations:
             ui.write(_(b'optimisations: '))
             write_labeled(
                 [a.name for a in optimisations],
                 "upgrade-repo.optimisation.performed",
             )
             ui.write(b'\n\n')

     def printupgradeactions():
         for a in actions:
             ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

     def print_affected_revlogs():
         if not revlogs:
             ui.write((b'no revlogs to process\n'))
         else:
             ui.write((b'processed revlogs:\n'))
             for r in sorted(revlogs):
                 ui.write((b'  - %s\n' % r))
         ui.write((b'\n'))

     if not run:
         fromconfig = []
         onlydefault = []

         for d in deficiencies:
             if d.fromconfig(repo):
                 fromconfig.append(d)
             elif d.default:
                 onlydefault.append(d)

         if fromconfig or onlydefault:

             if fromconfig:
                 ui.status(
                     _(
                         b'repository lacks features recommended by '
                         b'current config options:\n\n'
                     )
                 )
                 for i in fromconfig:
                     ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

             if onlydefault:
                 ui.status(
                     _(
                         b'repository lacks features used by the default '
                         b'config options:\n\n'
                     )
                 )
                 for i in onlydefault:
                     ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

             ui.status(b'\n')
         else:
             ui.status(
                 _(
                     b'(no feature deficiencies found in existing '
                     b'repository)\n'
                 )
             )

         ui.status(
             _(
                 b'performing an upgrade with "--run" will make the following '
                 b'changes:\n\n'
             )
         )

         printrequirements()
         printoptimisations()
         printupgradeactions()
         print_affected_revlogs()

         unusedoptimize = [i for i in alloptimizations if i not in actions]

         if unusedoptimize:
             ui.status(
                 _(
                     b'additional optimizations are available by specifying '
                     b'"--optimize <name>":\n\n'
                 )
             )
             for i in unusedoptimize:
                 ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))
         return

     # Else we're in the run=true case.
     ui.write(_(b'upgrade will perform the following actions:\n\n'))
     printrequirements()
     printoptimisations()
     printupgradeactions()
     print_affected_revlogs()

     upgradeactions = [a.name for a in actions]

     ui.status(_(b'beginning upgrade...\n'))
     with repo.wlock(), repo.lock():
         ui.status(_(b'repository locked and read-only\n'))
         # Our strategy for upgrading the repository is to create a new,
         # temporary repository, write data to it, then do a swap of the
         # data. There are less heavyweight ways to do this, but it is easier
         # to create a new repo object than to instantiate all the components
         # (like the store) separately.
         tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
         backuppath = None
         try:
             ui.status(
                 _(
                     b'creating temporary repository to stage migrated '
                     b'data: %s\n'
                 )
                 % tmppath
             )

             # clone ui without using ui.copy because repo.ui is protected
             repoui = repo.ui.__class__(repo.ui)
             dstrepo = hg.repository(repoui, path=tmppath, create=True)

             with dstrepo.wlock(), dstrepo.lock():
-                backuppath = upgrade(
+                backuppath = upgrade_engine.upgrade(
                     ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
                 )
             if not (backup or backuppath is None):
                 ui.status(
                     _(b'removing old repository content%s\n') % backuppath
                 )
                 repo.vfs.rmtree(backuppath, forcibly=True)
                 backuppath = None

         finally:
             ui.status(_(b'removing temporary repository %s\n') % tmppath)
             repo.vfs.rmtree(tmppath, forcibly=True)

         if backuppath and not ui.quiet:
             ui.warn(
                 _(b'copy of old repository backed up at %s\n') % backuppath
             )
             ui.warn(
                 _(
                     b'the old repository will not be deleted; remove '
                     b'it to free up disk space once the upgraded '
                     b'repository is verified\n'
                 )
             )

         if sharedsafe.name in addedreqs:
             ui.warn(
                 _(
                     b'repository upgraded to share safe mode, existing'
                     b' shares will still work in old non-safe mode. '
                     b'Re-share existing shares to use them in safe mode'
                     b' New shares will be created in safe mode.\n'
                 )
             )
         if sharedsafe.name in removedreqs:
             ui.warn(
                 _(
                     b'repository downgraded to not use share safe mode, '
                     b'existing shares will not work and needs to'
                     b' be reshared.\n'
                 )
             )
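The comment block above describes the core strategy: stage the migrated data in a throwaway repository inside the real one, then swap it into place and keep the old store as a backup. A stdlib-only sketch of that pattern (hypothetical helper, not this changeset's code):

    import os
    import shutil
    import tempfile

    def staged_rewrite(repo_path, rewrite):
        """Stage rewritten data in a temp dir, then swap it into place."""
        tmp = tempfile.mkdtemp(prefix='upgrade.', dir=repo_path)
        try:
            rewrite(tmp)  # write the migrated store into tmp/store
            backup = os.path.join(repo_path, 'upgradebackup')
            os.rename(os.path.join(repo_path, 'store'), backup)  # keep old data
            os.rename(os.path.join(tmp, 'store'),
                      os.path.join(repo_path, 'store'))
            return backup  # caller decides whether to delete it
        finally:
            shutil.rmtree(tmp, ignore_errors=True)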
@@ -1,1492 +1,500 @@
 # upgrade.py - functions for in place upgrade of Mercurial repository
 #
 # Copyright (c) 2016-present, Gregory Szorc
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import stat

-from .i18n import _
-from .pycompat import getattr
-from . import (
+from ..i18n import _
+from ..pycompat import getattr
+from .. import (
     changelog,
     error,
     filelog,
-    hg,
-    localrepo,
     manifest,
     metadata,
     pycompat,
     requirements,
     revlog,
     scmutil,
     util,
     vfs as vfsmod,
 )

-from .utils import compression
-
-# list of requirements that request a clone of all revlog if added/removed
-RECLONES_REQUIREMENTS = {
-    b'generaldelta',
-    requirements.SPARSEREVLOG_REQUIREMENT,
-}
-
-
-def requiredsourcerequirements(repo):
-    """Obtain requirements required to be present to upgrade a repo.
-
-    An upgrade will not be allowed if the repository doesn't have the
-    requirements returned by this function.
-    """
-    return {
-        # Introduced in Mercurial 0.9.2.
-        b'revlogv1',
-        # Introduced in Mercurial 0.9.2.
-        b'store',
-    }
-
-
-def blocksourcerequirements(repo):
-    """Obtain requirements that will prevent an upgrade from occurring.
-
-    An upgrade cannot be performed if the source repository contains a
-    requirements in the returned set.
-    """
-    return {
-        # The upgrade code does not yet support these experimental features.
-        # This is an artificial limitation.
-        requirements.TREEMANIFEST_REQUIREMENT,
-        # This was a precursor to generaldelta and was never enabled by default.
-        # It should (hopefully) not exist in the wild.
-        b'parentdelta',
-        # Upgrade should operate on the actual store, not the shared link.
-        requirements.SHARED_REQUIREMENT,
-    }
-
-
-def supportremovedrequirements(repo):
-    """Obtain requirements that can be removed during an upgrade.
-
-    If an upgrade were to create a repository that dropped a requirement,
-    the dropped requirement must appear in the returned set for the upgrade
-    to be allowed.
-    """
-    supported = {
-        requirements.SPARSEREVLOG_REQUIREMENT,
-        requirements.SIDEDATA_REQUIREMENT,
-        requirements.COPIESSDC_REQUIREMENT,
-        requirements.NODEMAP_REQUIREMENT,
-        requirements.SHARESAFE_REQUIREMENT,
-    }
-    for name in compression.compengines:
-        engine = compression.compengines[name]
-        if engine.available() and engine.revlogheader():
-            supported.add(b'exp-compression-%s' % name)
-            if engine.name() == b'zstd':
-                supported.add(b'revlog-compression-zstd')
-    return supported
-
-
-def supporteddestrequirements(repo):
-    """Obtain requirements that upgrade supports in the destination.
-
-    If the result of the upgrade would create requirements not in this set,
-    the upgrade is disallowed.
-
-    Extensions should monkeypatch this to add their custom requirements.
-    """
-    supported = {
-        b'dotencode',
-        b'fncache',
-        b'generaldelta',
-        b'revlogv1',
-        b'store',
-        requirements.SPARSEREVLOG_REQUIREMENT,
-        requirements.SIDEDATA_REQUIREMENT,
-        requirements.COPIESSDC_REQUIREMENT,
-        requirements.NODEMAP_REQUIREMENT,
-        requirements.SHARESAFE_REQUIREMENT,
-    }
-    for name in compression.compengines:
-        engine = compression.compengines[name]
-        if engine.available() and engine.revlogheader():
-            supported.add(b'exp-compression-%s' % name)
-            if engine.name() == b'zstd':
-                supported.add(b'revlog-compression-zstd')
-    return supported
-
-
-def allowednewrequirements(repo):
-    """Obtain requirements that can be added to a repository during upgrade.
-
-    This is used to disallow proposed requirements from being added when
-    they weren't present before.
-
-    We use a list of allowed requirement additions instead of a list of known
-    bad additions because the whitelist approach is safer and will prevent
-    future, unknown requirements from accidentally being added.
-    """
-    supported = {
-        b'dotencode',
-        b'fncache',
-        b'generaldelta',
-        requirements.SPARSEREVLOG_REQUIREMENT,
-        requirements.SIDEDATA_REQUIREMENT,
-        requirements.COPIESSDC_REQUIREMENT,
-        requirements.NODEMAP_REQUIREMENT,
-        requirements.SHARESAFE_REQUIREMENT,
-    }
-    for name in compression.compengines:
-        engine = compression.compengines[name]
-        if engine.available() and engine.revlogheader():
-            supported.add(b'exp-compression-%s' % name)
-            if engine.name() == b'zstd':
-                supported.add(b'revlog-compression-zstd')
-    return supported
-
-
-def preservedrequirements(repo):
-    return set()
-
-
-DEFICIENCY = b'deficiency'
-OPTIMISATION = b'optimization'
-
-
-class improvement(object):
-    """Represents an improvement that can be made as part of an upgrade.
-
-    The following attributes are defined on each instance:
-
-    name
-       Machine-readable string uniquely identifying this improvement. It
-       will be mapped to an action later in the upgrade process.
-
-    type
-       Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
-       problem. An optimization is an action (sometimes optional) that
-       can be taken to further improve the state of the repository.
-
-    description
-       Message intended for humans explaining the improvement in more detail,
-       including the implications of it. For ``DEFICIENCY`` types, should be
-       worded in the present tense. For ``OPTIMISATION`` types, should be
-       worded in the future tense.
-
-    upgrademessage
-       Message intended for humans explaining what an upgrade addressing this
-       issue will do. Should be worded in the future tense.
-    """
-
-    def __init__(self, name, type, description, upgrademessage):
-        self.name = name
-        self.type = type
-        self.description = description
-        self.upgrademessage = upgrademessage
-
-    def __eq__(self, other):
-        if not isinstance(other, improvement):
-            # This is what python tell use to do
-            return NotImplemented
-        return self.name == other.name
-
-    def __ne__(self, other):
-        return not (self == other)
-
-    def __hash__(self):
-        return hash(self.name)
-
-
-allformatvariant = []
-
-
-def registerformatvariant(cls):
-    allformatvariant.append(cls)
-    return cls
-
-
-class formatvariant(improvement):
-    """an improvement subclass dedicated to repository format"""
-
-    type = DEFICIENCY
-    ### The following attributes should be defined for each class:
-
-    # machine-readable string uniquely identifying this improvement. it will be
-    # mapped to an action later in the upgrade process.
-    name = None
-
-    # message intended for humans explaining the improvement in more detail,
-    # including the implications of it ``DEFICIENCY`` types, should be worded
-    # in the present tense.
-    description = None
-
-    # message intended for humans explaining what an upgrade addressing this
-    # issue will do. should be worded in the future tense.
-    upgrademessage = None
-
-    # value of current Mercurial default for new repository
-    default = None
-
-    def __init__(self):
-        raise NotImplementedError()
-
-    @staticmethod
-    def fromrepo(repo):
-        """current value of the variant in the repository"""
-        raise NotImplementedError()
-
-    @staticmethod
-    def fromconfig(repo):
-        """current value of the variant in the configuration"""
-        raise NotImplementedError()
-
-
-class requirementformatvariant(formatvariant):
-    """formatvariant based on a 'requirement' name.
-
-    Many format variant are controlled by a 'requirement'. We define a small
-    subclass to factor the code.
-    """
-
-    # the requirement that control this format variant
-    _requirement = None
-
-    @staticmethod
-    def _newreporequirements(ui):
-        return localrepo.newreporequirements(
-            ui, localrepo.defaultcreateopts(ui)
-        )
-
-    @classmethod
-    def fromrepo(cls, repo):
-        assert cls._requirement is not None
-        return cls._requirement in repo.requirements
-
-    @classmethod
-    def fromconfig(cls, repo):
-        assert cls._requirement is not None
-        return cls._requirement in cls._newreporequirements(repo.ui)
-
-
-@registerformatvariant
-class fncache(requirementformatvariant):
-    name = b'fncache'
-
-    _requirement = b'fncache'
-
-    default = True
-
-    description = _(
-        b'long and reserved filenames may not work correctly; '
-        b'repository performance is sub-optimal'
-    )
-
-    upgrademessage = _(
-        b'repository will be more resilient to storing '
-        b'certain paths and performance of certain '
-        b'operations should be improved'
-    )
-
-
-@registerformatvariant
-class dotencode(requirementformatvariant):
-    name = b'dotencode'
-
-    _requirement = b'dotencode'
-
-    default = True
-
-    description = _(
-        b'storage of filenames beginning with a period or '
-        b'space may not work correctly'
-    )
-
-    upgrademessage = _(
-        b'repository will be better able to store files '
-        b'beginning with a space or period'
-    )
-
-
-@registerformatvariant
-class generaldelta(requirementformatvariant):
-    name = b'generaldelta'
-
-    _requirement = b'generaldelta'
-
-    default = True
-
-    description = _(
-        b'deltas within internal storage are unable to '
-        b'choose optimal revisions; repository is larger and '
-        b'slower than it could be; interaction with other '
-        b'repositories may require extra network and CPU '
-        b'resources, making "hg push" and "hg pull" slower'
-    )
-
-    upgrademessage = _(
-        b'repository storage will be able to create '
-        b'optimal deltas; new repository data will be '
-        b'smaller and read times should decrease; '
-        b'interacting with other repositories using this '
-        b'storage model should require less network and '
-        b'CPU resources, making "hg push" and "hg pull" '
-        b'faster'
-    )
-
-
-@registerformatvariant
-class sharedsafe(requirementformatvariant):
-    name = b'exp-sharesafe'
-    _requirement = requirements.SHARESAFE_REQUIREMENT
-
-    default = False
-
-    description = _(
-        b'old shared repositories do not share source repository '
-        b'requirements and config. This leads to various problems '
-        b'when the source repository format is upgraded or some new '
-        b'extensions are enabled.'
-    )
-
-    upgrademessage = _(
-        b'Upgrades a repository to share-safe format so that future '
-        b'shares of this repository share its requirements and configs.'
-    )
-
-
-@registerformatvariant
-class sparserevlog(requirementformatvariant):
-    name = b'sparserevlog'
-
-    _requirement = requirements.SPARSEREVLOG_REQUIREMENT
-
-    default = True
-
-    description = _(
-        b'in order to limit disk reading and memory usage on older '
-        b'version, the span of a delta chain from its root to its '
-        b'end is limited, whatever the relevant data in this span. '
-        b'This can severly limit Mercurial ability to build good '
-        b'chain of delta resulting is much more storage space being '
-        b'taken and limit reusability of on disk delta during '
-        b'exchange.'
-    )
-
-    upgrademessage = _(
-        b'Revlog supports delta chain with more unused data '
-        b'between payload. These gaps will be skipped at read '
-        b'time. This allows for better delta chains, making a '
-        b'better compression and faster exchange with server.'
-    )
-
-
-@registerformatvariant
-class sidedata(requirementformatvariant):
-    name = b'sidedata'
-
-    _requirement = requirements.SIDEDATA_REQUIREMENT
-
-    default = False
-
-    description = _(
-        b'Allows storage of extra data alongside a revision, '
-        b'unlocking various caching options.'
-    )
-
-    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
-
-
-@registerformatvariant
-class persistentnodemap(requirementformatvariant):
-    name = b'persistent-nodemap'
-
-    _requirement = requirements.NODEMAP_REQUIREMENT
-
-    default = False
-
-    description = _(
-        b'persist the node -> rev mapping on disk to speedup lookup'
-    )
-
-    upgrademessage = _(b'Speedup revision lookup by node id.')
-
-
-@registerformatvariant
-class copiessdc(requirementformatvariant):
-    name = b'copies-sdc'
-
-    _requirement = requirements.COPIESSDC_REQUIREMENT
-
-    default = False
-
-    description = _(b'Stores copies information alongside changesets.')
-
-    upgrademessage = _(
-        b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
-    )
-
-
-@registerformatvariant
-class removecldeltachain(formatvariant):
-    name = b'plain-cl-delta'
-
-    default = True
-
-    description = _(
-        b'changelog storage is using deltas instead of '
-        b'raw entries; changelog reading and any '
-        b'operation relying on changelog data are slower '
-        b'than they could be'
-    )
-
-    upgrademessage = _(
-        b'changelog storage will be reformated to '
-        b'store raw entries; changelog reading will be '
-        b'faster; changelog size may be reduced'
-    )
-
-    @staticmethod
-    def fromrepo(repo):
-        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
-        # changelogs with deltas.
-        cl = repo.changelog
-        chainbase = cl.chainbase
-        return all(rev == chainbase(rev) for rev in cl)
-
-    @staticmethod
-    def fromconfig(repo):
-        return True
-
-
-@registerformatvariant
-class compressionengine(formatvariant):
-    name = b'compression'
-    default = b'zlib'
-
-    description = _(
-        b'Compresion algorithm used to compress data. '
-        b'Some engine are faster than other'
-    )
-
-    upgrademessage = _(
-        b'revlog content will be recompressed with the new algorithm.'
-    )
-
-    @classmethod
-    def fromrepo(cls, repo):
-        # we allow multiple compression engine requirement to co-exist because
-        # strickly speaking, revlog seems to support mixed compression style.
-        #
-        # The compression used for new entries will be "the last one"
-        compression = b'zlib'
-        for req in repo.requirements:
-            prefix = req.startswith
-            if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
-                compression = req.split(b'-', 2)[2]
-        return compression
-
-    @classmethod
-    def fromconfig(cls, repo):
-        compengines = repo.ui.configlist(b'format', b'revlog-compression')
-        # return the first valid value as the selection code would do
-        for comp in compengines:
-            if comp in util.compengines:
-                return comp
-
-        # no valide compression found lets display it all for clarity
-        return b','.join(compengines)
-
-
-@registerformatvariant
-class compressionlevel(formatvariant):
-    name = b'compression-level'
-    default = b'default'
-
-    description = _(b'compression level')
-
-    upgrademessage = _(b'revlog content will be recompressed')
-
-    @classmethod
-    def fromrepo(cls, repo):
-        comp = compressionengine.fromrepo(repo)
-        level = None
-        if comp == b'zlib':
-            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
-        elif comp == b'zstd':
-            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
-        if level is None:
-            return b'default'
-        return bytes(level)
-
-    @classmethod
-    def fromconfig(cls, repo):
-        comp = compressionengine.fromconfig(repo)
-        level = None
-        if comp == b'zlib':
-            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
-        elif comp == b'zstd':
-            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
-        if level is None:
-            return b'default'
-        return bytes(level)
-
-
-def finddeficiencies(repo):
-    """returns a list of deficiencies that the repo suffer from"""
-    deficiencies = []
-
-    # We could detect lack of revlogv1 and store here, but they were added
-    # in 0.9.2 and we don't support upgrading repos without these
-    # requirements, so let's not bother.
-
-    for fv in allformatvariant:
-        if not fv.fromrepo(repo):
-            deficiencies.append(fv)
-
-    return deficiencies
-
-
-# search without '-' to support older form on newer client.
-#
-# We don't enforce backward compatibility for debug command so this
-# might eventually be dropped. However, having to use two different
-# forms in script when comparing result is anoying enough to add
-# backward compatibility for a while.
-legacy_opts_map = {
-    b'redeltaparent': b're-delta-parent',
-    b'redeltamultibase': b're-delta-multibase',
-    b'redeltaall': b're-delta-all',
-    b'redeltafulladd': b're-delta-fulladd',
-}
-
-ALL_OPTIMISATIONS = []
-
-
-def register_optimization(obj):
-    ALL_OPTIMISATIONS.append(obj)
-    return obj
-
-
-register_optimization(
-    improvement(
-        name=b're-delta-parent',
-        type=OPTIMISATION,
-        description=_(
-            b'deltas within internal storage will be recalculated to '
-            b'choose an optimal base revision where this was not '
-            b'already done; the size of the repository may shrink and '
-            b'various operations may become faster; the first time '
-            b'this optimization is performed could slow down upgrade '
-            b'execution considerably; subsequent invocations should '
-            b'not run noticeably slower'
-        ),
-        upgrademessage=_(
-            b'deltas within internal storage will choose a new '
-            b'base revision if needed'
-        ),
-    )
-)
-
-register_optimization(
-    improvement(
-        name=b're-delta-multibase',
-        type=OPTIMISATION,
-        description=_(
-            b'deltas within internal storage will be recalculated '
-            b'against multiple base revision and the smallest '
-            b'difference will be used; the size of the repository may '
-            b'shrink significantly when there are many merges; this '
-            b'optimization will slow down execution in proportion to '
-            b'the number of merges in the repository and the amount '
-            b'of files in the repository; this slow down should not '
-            b'be significant unless there are tens of thousands of '
-            b'files and thousands of merges'
-        ),
-        upgrademessage=_(
-            b'deltas within internal storage will choose an '
-            b'optimal delta by computing deltas against multiple '
-            b'parents; may slow down execution time '
-            b'significantly'
-        ),
-    )
-)
-
-register_optimization(
-    improvement(
-        name=b're-delta-all',
-        type=OPTIMISATION,
-        description=_(
-            b'deltas within internal storage will always be '
-            b'recalculated without reusing prior deltas; this will '
-            b'likely make execution run several times slower; this '
-            b'optimization is typically not needed'
-        ),
-        upgrademessage=_(
-            b'deltas within internal storage will be fully '
-            b'recomputed; this will likely drastically slow down '
-            b'execution time'
-        ),
-    )
-)
-
-register_optimization(
-    improvement(
-        name=b're-delta-fulladd',
-        type=OPTIMISATION,
-        description=_(
-            b'every revision will be re-added as if it was new '
-            b'content. It will go through the full storage '
-            b'mechanism giving extensions a chance to process it '
-            b'(eg. lfs). This is similar to "re-delta-all" but even '
-            b'slower since more logic is involved.'
-        ),
-        upgrademessage=_(
-            b'each revision will be added as new content to the '
-            b'internal storage; this will likely drastically slow '
-            b'down execution time, but some extensions might need '
-            b'it'
-        ),
-    )
-)
-
-
-def findoptimizations(repo):
-    """Determine optimisation that could be used during upgrade"""
-    # These are unconditionally added. There is logic later that figures out
-    # which ones to apply.
-    return list(ALL_OPTIMISATIONS)
-
-
-def determineactions(repo, deficiencies, sourcereqs, destreqs):
-    """Determine upgrade actions that will be performed.
-
-    Given a list of improvements as returned by ``finddeficiencies`` and
-    ``findoptimizations``, determine the list of upgrade actions that
-    will be performed.
-
-    The role of this function is to filter improvements if needed, apply
-    recommended optimizations from the improvements list that make sense,
-    etc.
-
-    Returns a list of action names.
-    """
-    newactions = []
-
-    for d in deficiencies:
-        name = d._requirement
-
-        # If the action is a requirement that doesn't show up in the
-        # destination requirements, prune the action.
-        if name is not None and name not in destreqs:
-            continue
-
-        newactions.append(d)
-
-    # FUTURE consider adding some optimizations here for certain transitions.
-    # e.g. adding generaldelta could schedule parent redeltas.
-
-    return newactions
-

 def _revlogfrompath(repo, path):
     """Obtain a revlog from a repo path.

     An instance of the appropriate class is returned.
     """
     if path == b'00changelog.i':
         return changelog.changelog(repo.svfs)
     elif path.endswith(b'00manifest.i'):
         mandir = path[: -len(b'00manifest.i')]
         return manifest.manifestrevlog(repo.svfs, tree=mandir)
     else:
         # reverse of "/".join(("data", path + ".i"))
         return filelog.filelog(repo.svfs, path[5:-2])

 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
     """copy all relevant files for `oldrl` into `destrepo` store

     Files are copied "as is" without any transformation. The copy is performed
     without extra checks. Callers are responsible for making sure the copied
     content is compatible with format of the destination repository.
     """
     oldrl = getattr(oldrl, '_revlog', oldrl)
     newrl = _revlogfrompath(destrepo, unencodedname)
     newrl = getattr(newrl, '_revlog', newrl)

     oldvfs = oldrl.opener
     newvfs = newrl.opener
     oldindex = oldvfs.join(oldrl.indexfile)
     newindex = newvfs.join(newrl.indexfile)
     olddata = oldvfs.join(oldrl.datafile)
     newdata = newvfs.join(newrl.datafile)

     with newvfs(newrl.indexfile, b'w'):
         pass  # create all the directories

     util.copyfile(oldindex, newindex)
     copydata = oldrl.opener.exists(oldrl.datafile)
     if copydata:
         util.copyfile(olddata, newdata)

     if not (
         unencodedname.endswith(b'00changelog.i')
         or unencodedname.endswith(b'00manifest.i')
     ):
         destrepo.svfs.fncache.add(unencodedname)
         if copydata:
             destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')

 UPGRADE_CHANGELOG = b"changelog"
 UPGRADE_MANIFEST = b"manifest"
 UPGRADE_FILELOGS = b"all-filelogs"

 UPGRADE_ALL_REVLOGS = frozenset(
     [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
 )


 def getsidedatacompanion(srcrepo, dstrepo):
     sidedatacompanion = None
     removedreqs = srcrepo.requirements - dstrepo.requirements
     addedreqs = dstrepo.requirements - srcrepo.requirements
     if requirements.SIDEDATA_REQUIREMENT in removedreqs:

         def sidedatacompanion(rl, rev):
             rl = getattr(rl, '_revlog', rl)
             if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                 return True, (), {}, 0, 0
             return False, (), {}, 0, 0

     elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
         sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
     elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
         sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
     return sidedatacompanion

 def matchrevlog(revlogfilter, entry):
     """check if a revlog is selected for cloning.

     In other words, are there any updates which need to be done on revlog
     or it can be blindly copied.

     The store entry is checked against the passed filter"""
     if entry.endswith(b'00changelog.i'):
         return UPGRADE_CHANGELOG in revlogfilter
     elif entry.endswith(b'00manifest.i'):
         return UPGRADE_MANIFEST in revlogfilter
     return UPGRADE_FILELOGS in revlogfilter

787 | def _clonerevlogs( |
|
121 | def _clonerevlogs( | |
788 | ui, |
|
122 | ui, | |
789 | srcrepo, |
|
123 | srcrepo, | |
790 | dstrepo, |
|
124 | dstrepo, | |
791 | tr, |
|
125 | tr, | |
792 | deltareuse, |
|
126 | deltareuse, | |
793 | forcedeltabothparents, |
|
127 | forcedeltabothparents, | |
794 | revlogs=UPGRADE_ALL_REVLOGS, |
|
128 | revlogs=UPGRADE_ALL_REVLOGS, | |
795 | ): |
|
129 | ): | |
796 | """Copy revlogs between 2 repos.""" |
|
130 | """Copy revlogs between 2 repos.""" | |
797 | revcount = 0 |
|
131 | revcount = 0 | |
798 | srcsize = 0 |
|
132 | srcsize = 0 | |
799 | srcrawsize = 0 |
|
133 | srcrawsize = 0 | |
800 | dstsize = 0 |
|
134 | dstsize = 0 | |
801 | fcount = 0 |
|
135 | fcount = 0 | |
802 | frevcount = 0 |
|
136 | frevcount = 0 | |
803 | fsrcsize = 0 |
|
137 | fsrcsize = 0 | |
804 | frawsize = 0 |
|
138 | frawsize = 0 | |
805 | fdstsize = 0 |
|
139 | fdstsize = 0 | |
806 | mcount = 0 |
|
140 | mcount = 0 | |
807 | mrevcount = 0 |
|
141 | mrevcount = 0 | |
808 | msrcsize = 0 |
|
142 | msrcsize = 0 | |
809 | mrawsize = 0 |
|
143 | mrawsize = 0 | |
810 | mdstsize = 0 |
|
144 | mdstsize = 0 | |
811 | crevcount = 0 |
|
145 | crevcount = 0 | |
812 | csrcsize = 0 |
|
146 | csrcsize = 0 | |
813 | crawsize = 0 |
|
147 | crawsize = 0 | |
814 | cdstsize = 0 |
|
148 | cdstsize = 0 | |
815 |
|
149 | |||
816 | alldatafiles = list(srcrepo.store.walk()) |
|
150 | alldatafiles = list(srcrepo.store.walk()) | |
817 |
|
151 | |||
818 | # Perform a pass to collect metadata. This validates we can open all |
|
152 | # Perform a pass to collect metadata. This validates we can open all | |
819 | # source files and allows a unified progress bar to be displayed. |
|
153 | # source files and allows a unified progress bar to be displayed. | |
820 | for unencoded, encoded, size in alldatafiles: |
|
154 | for unencoded, encoded, size in alldatafiles: | |
821 | if unencoded.endswith(b'.d'): |
|
155 | if unencoded.endswith(b'.d'): | |
822 | continue |
|
156 | continue | |
823 |
|
157 | |||
824 | rl = _revlogfrompath(srcrepo, unencoded) |
|
158 | rl = _revlogfrompath(srcrepo, unencoded) | |
825 |
|
159 | |||
826 | info = rl.storageinfo( |
|
160 | info = rl.storageinfo( | |
827 | exclusivefiles=True, |
|
161 | exclusivefiles=True, | |
828 | revisionscount=True, |
|
162 | revisionscount=True, | |
829 | trackedsize=True, |
|
163 | trackedsize=True, | |
830 | storedsize=True, |
|
164 | storedsize=True, | |
831 | ) |
|
165 | ) | |
832 |
|
166 | |||
833 | revcount += info[b'revisionscount'] or 0 |
|
167 | revcount += info[b'revisionscount'] or 0 | |
834 | datasize = info[b'storedsize'] or 0 |
|
168 | datasize = info[b'storedsize'] or 0 | |
835 | rawsize = info[b'trackedsize'] or 0 |
|
169 | rawsize = info[b'trackedsize'] or 0 | |
836 |
|
170 | |||
837 | srcsize += datasize |
|
171 | srcsize += datasize | |
838 | srcrawsize += rawsize |
|
172 | srcrawsize += rawsize | |
839 |
|
173 | |||
840 | # This is for the separate progress bars. |
|
174 | # This is for the separate progress bars. | |
841 | if isinstance(rl, changelog.changelog): |
|
175 | if isinstance(rl, changelog.changelog): | |
842 | crevcount += len(rl) |
|
176 | crevcount += len(rl) | |
843 | csrcsize += datasize |
|
177 | csrcsize += datasize | |
844 | crawsize += rawsize |
|
178 | crawsize += rawsize | |
845 | elif isinstance(rl, manifest.manifestrevlog): |
|
179 | elif isinstance(rl, manifest.manifestrevlog): | |
846 | mcount += 1 |
|
180 | mcount += 1 | |
847 | mrevcount += len(rl) |
|
181 | mrevcount += len(rl) | |
848 | msrcsize += datasize |
|
182 | msrcsize += datasize | |
849 | mrawsize += rawsize |
|
183 | mrawsize += rawsize | |
850 | elif isinstance(rl, filelog.filelog): |
|
184 | elif isinstance(rl, filelog.filelog): | |
851 | fcount += 1 |
|
185 | fcount += 1 | |
852 | frevcount += len(rl) |
|
186 | frevcount += len(rl) | |
853 | fsrcsize += datasize |
|
187 | fsrcsize += datasize | |
854 | frawsize += rawsize |
|
188 | frawsize += rawsize | |
855 | else: |
|
189 | else: | |
856 | error.ProgrammingError(b'unknown revlog type') |
|
190 | error.ProgrammingError(b'unknown revlog type') | |

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

        if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
            ui.status(
                _(
                    b'finished migrating %d manifest revisions across %d '
                    b'manifests; change in size: %s\n'
                )
                % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
            )

            ui.status(
                _(
                    b'migrating changelog containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    crevcount,
                    util.bytecount(csrcsize),
                    util.bytecount(crawsize),
                )
            )
            seen.add(b'c')
            progress = srcrepo.ui.makeprogress(
                _(b'changelog revisions'), total=crevcount
            )
        elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
            ui.status(
                _(
                    b'finished migrating %d filelog revisions across %d '
                    b'filelogs; change in size: %s\n'
                )
                % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
            )

            ui.status(
                _(
                    b'migrating %d manifests containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    mcount,
                    mrevcount,
                    util.bytecount(msrcsize),
                    util.bytecount(mrawsize),
                )
            )
            seen.add(b'm')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'manifest revisions'), total=mrevcount
            )
        elif b'f' not in seen:
            ui.status(
                _(
                    b'migrating %d filelogs containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    fcount,
                    frevcount,
                    util.bytecount(fsrcsize),
                    util.bytecount(frawsize),
                )
            )
            seen.add(b'f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'file revisions'), total=frevcount
            )

        if matchrevlog(revlogs, unencoded):
            ui.note(
                _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
            )
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(
                tr,
                newrl,
                addrevisioncb=oncopiedrevision,
                deltareuse=deltareuse,
                forcedeltabothparents=forcedeltabothparents,
                sidedatacompanion=sidedatacompanion,
            )
        else:
            msg = _(b'blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info[b'storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )

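# A standalone sketch of the progress pattern used above (hypothetical
# helper, not part of this changeset): one bar per phase, and the previous
# bar is completed before the next phase begins.
def _progress_pattern_example(ui, items, topic):
    progress = ui.makeprogress(topic, total=len(items))
    for item in items:
        # ... copy `item` here ...
        progress.increment()
    progress.complete()
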
def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
    """Determine whether to copy a store file during upgrade.

    This function is called when migrating store files from ``srcrepo`` to
    ``dstrepo`` as part of upgrading a repository.

    Args:
      srcrepo: repo we are copying from
      dstrepo: repo we are copying to
      requirements: set of requirements for ``dstrepo``
      path: store file being examined
      mode: the ``ST_MODE`` file type of ``path``
      st: ``stat`` data structure for ``path``

    Function should return ``True`` if the file is to be copied.
    """
    # Skip revlogs.
    if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
        return False
    # Skip transaction related files.
    if path.startswith(b'undo'):
        return False
    # Only copy regular files.
    if mode != stat.S_IFREG:
        return False
    # Skip other skipped files.
    if path in (b'lock', b'fncache'):
        return False

    return True

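# A hedged sketch (not part of this changeset) making the filter concrete;
# the store paths are examples, and None placeholders suffice because the
# current implementation ignores the repo and stat arguments.
def _filterstorefile_examples():
    def probe(path):
        return _filterstorefile(None, None, set(), path, stat.S_IFREG, None)

    assert not probe(b'00changelog.i')  # revlog: cloned, never blindly copied
    assert not probe(b'undo.backupfiles')  # transaction leftover: skipped
    assert probe(b'phaseroots')  # auxiliary store file: copied verbatim
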
def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """

def upgrade(
    ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

    if b're-delta-all' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif b're-delta-parent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-multibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-fulladd' in actions:
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

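    # Summary of the mapping above (the DELTAREUSE* policies are defined on
    # revlog.revlog):
    #   re-delta-all       -> DELTAREUSENEVER    (recompute every delta)
    #   re-delta-parent    -> DELTAREUSESAMEREVS
    #   re-delta-multibase -> DELTAREUSESAMEREVS (and forcedeltabothparents
    #                         is passed to _clonerevlogs below)
    #   re-delta-fulladd   -> DELTAREUSEFULLADD
    #   default            -> DELTAREUSEALWAYS   (reuse stored deltas as-is)
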
    with dstrepo.transaction(b'upgrade') as tr:
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            deltareuse,
            b're-delta-multibase' in actions,
            revlogs=revlogs,
        )

    # Now copy other files in the store directory.
    # The sorted() makes execution deterministic.
    for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
        if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
            continue

        srcrepo.ui.status(_(b'copying %s\n') % p)
        src = srcrepo.store.rawvfs.join(p)
        dst = dstrepo.store.rawvfs.join(p)
        util.copyfile(src, dst, copystat=True)

    _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath

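# A minimal sketch of the swap ordering above (hypothetical stand-ins for
# the rename plumbing, not part of this changeset). Each rename is assumed
# atomic on a single filesystem, so a concurrent reader observes either the
# complete old store or the complete new one, never a mixture.
def _swap_stores_sketch(rename, src_store, tmp_store, backup_store):
    rename(src_store, backup_store)  # step 1: old store becomes the backup
    rename(tmp_store, src_store)  # step 2: migrated store slots into place
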
def upgraderepo(
    ui,
    repo,
    run=False,
    optimize=None,
    backup=True,
    manifest=None,
    changelog=None,
    filelogs=None,
):
    """Upgrade a repository in place."""
    if optimize is None:
        optimize = []
    optimize = {legacy_opts_map.get(o, o) for o in optimize}
    repo = repo.unfiltered()

    revlogs = set(UPGRADE_ALL_REVLOGS)
    specentries = (
        (UPGRADE_CHANGELOG, changelog),
        (UPGRADE_MANIFEST, manifest),
        (UPGRADE_FILELOGS, filelogs),
    )
    specified = [(y, x) for (y, x) in specentries if x is not None]
    if specified:
        # we have some limitation on revlogs to be recloned
        if any(x for y, x in specified):
            revlogs = set()
            for upgrade, enabled in specified:
                if enabled:
                    revlogs.add(upgrade)
        else:
            # none are enabled
            for upgrade, __ in specified:
                revlogs.discard(upgrade)

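    # Net effect of the selection above: if at least one of the
    # changelog/manifest/filelogs arguments was explicitly set to True, only
    # those revlogs are recloned; if arguments were only explicitly set to
    # False, those revlogs are subtracted from the full set.
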
    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(
            _(b'cannot upgrade repository; requirement missing: %s')
            % _(b', ').join(sorted(missingreqs))
        )

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; unsupported source '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(blockedreqs))
        )

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui)
    )
    newreqs.update(preservedrequirements(repo))

    noremovereqs = (
        repo.requirements - newreqs - supportremovedrequirements(repo)
    )
    if noremovereqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; requirement would be '
                b'removed: %s'
            )
            % _(b', ').join(sorted(noremovereqs))
        )

    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
    if noaddreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support adding '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(noaddreqs))
        )

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support '
                b'destination requirement: %s'
            )
            % _(b', ').join(sorted(unsupportedreqs))
        )

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize:  # anything left is unknown
        raise error.Abort(
            _(b'unknown optimization action requested: %s')
            % b', '.join(sorted(optimize)),
            hint=_(b'run without arguments to see valid optimizations'),
        )

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(
        o
        for o in sorted(optimizations)
        # determineactions could have added optimisation
        if o not in actions
    )

    removedreqs = repo.requirements - newreqs
    addedreqs = newreqs - repo.requirements

    if revlogs != UPGRADE_ALL_REVLOGS:
        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
        if incompatible:
            msg = _(
                b'ignoring revlogs selection flags, format requirements '
                b'change: %s\n'
            )
            ui.warn(msg % b', '.join(sorted(incompatible)))
            revlogs = UPGRADE_ALL_REVLOGS

    def write_labeled(l, label):
        first = True
        for r in sorted(l):
            if not first:
                ui.write(b', ')
            ui.write(r, label=label)
            first = False

    def printrequirements():
        ui.write(_(b'requirements\n'))
        ui.write(_(b'   preserved: '))
        write_labeled(
            newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
        )
        ui.write((b'\n'))
        removed = repo.requirements - newreqs
        if removed:
            ui.write(_(b'   removed: '))
            write_labeled(removed, "upgrade-repo.requirement.removed")
            ui.write((b'\n'))
        added = newreqs - repo.requirements
        if added:
            ui.write(_(b'   added: '))
            write_labeled(added, "upgrade-repo.requirement.added")
            ui.write((b'\n'))
        ui.write(b'\n')

    def printoptimisations():
        optimisations = [a for a in actions if a.type == OPTIMISATION]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            ui.write(_(b'optimisations: '))
            write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            ui.write(b'\n\n')

    def printupgradeactions():
        for a in actions:
            ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs():
        if not revlogs:
            ui.write((b'no revlogs to process\n'))
        else:
            ui.write((b'processed revlogs:\n'))
            for r in sorted(revlogs):
                ui.write((b'  - %s\n' % r))
        ui.write((b'\n'))

    if not run:
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.status(
                    _(
                        b'repository lacks features recommended by '
                        b'current config options:\n\n'
                    )
                )
                for i in fromconfig:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.status(
                    _(
                        b'repository lacks features used by the default '
                        b'config options:\n\n'
                    )
                )
                for i in onlydefault:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            ui.status(b'\n')
        else:
            ui.status(
                _(
                    b'(no feature deficiencies found in existing '
                    b'repository)\n'
                )
            )

        ui.status(
            _(
                b'performing an upgrade with "--run" will make the following '
                b'changes:\n\n'
            )
        )

        printrequirements()
        printoptimisations()
        printupgradeactions()
        print_affected_revlogs()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.status(
                _(
                    b'additional optimizations are available by specifying '
                    b'"--optimize <name>":\n\n'
                )
            )
            for i in unusedoptimize:
                ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_(b'upgrade will perform the following actions:\n\n'))
    printrequirements()
    printoptimisations()
    printupgradeactions()
    print_affected_revlogs()

    upgradeactions = [a.name for a in actions]

    ui.status(_(b'beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.status(_(b'repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.status(
                _(
                    b'creating temporary repository to stage migrated '
                    b'data: %s\n'
                )
                % tmppath
            )

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(
                    ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
                )
            if not (backup or backuppath is None):
                ui.status(
                    _(b'removing old repository content%s\n') % backuppath
                )
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None

        finally:
            ui.status(_(b'removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath and not ui.quiet:
                ui.warn(
                    _(b'copy of old repository backed up at %s\n') % backuppath
                )
                ui.warn(
                    _(
                        b'the old repository will not be deleted; remove '
                        b'it to free up disk space once the upgraded '
                        b'repository is verified\n'
                    )
                )

            if sharedsafe.name in addedreqs:
                ui.warn(
                    _(
                        b'repository upgraded to share safe mode, existing'
                        b' shares will still work in old non-safe mode. '
                        b'Re-share existing shares to use them in safe mode.'
                        b' New shares will be created in safe mode.\n'
                    )
                )
            if sharedsafe.name in removedreqs:
                ui.warn(
                    _(
                        b'repository downgraded to not use share safe mode, '
                        b'existing shares will not work and need to'
                        b' be reshared.\n'
                    )
                )
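
# A hedged sketch of driving this entry point programmatically (module
# paths assume the surrounding mercurial package; end users would normally
# run `hg debugupgraderepo` instead). run=False prints the dry-run report,
# run=True performs the migration.
def _upgraderepo_example():
    from mercurial import hg, ui as uimod

    u = uimod.ui.load()
    repo = hg.repository(u, b'.')
    upgraderepo(u, repo, run=False, optimize={b're-delta-parent'})
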
@@ -1,1827 +1,1828 @@
#
# This is the mercurial setup script.
#
# 'python setup.py install', or
# 'python setup.py --help' for more options
import os

# Mercurial will never work on Python 3 before 3.5 due to a lack
# of % formatting on bytestrings, and can't work on 3.6.0 or 3.6.1
# due to a bug in % formatting in bytestrings.
# We cannot support Python 3.5.0, 3.5.1, 3.5.2 because of bug in
# codecs.escape_encode() where it raises SystemError on empty bytestring
# bug link: https://bugs.python.org/issue25270
supportedpy = ','.join(
    [
        '>=2.7.4',
        '!=3.0.*',
        '!=3.1.*',
        '!=3.2.*',
        '!=3.3.*',
        '!=3.4.*',
        '!=3.5.0',
        '!=3.5.1',
        '!=3.5.2',
        '!=3.6.0',
        '!=3.6.1',
    ]
)

import sys, platform
import sysconfig

if sys.version_info[0] >= 3:
    printf = eval('print')
    libdir_escape = 'unicode_escape'

    def sysstr(s):
        return s.decode('latin-1')


else:
    libdir_escape = 'string_escape'

    def printf(*args, **kwargs):
        f = kwargs.get('file', sys.stdout)
        end = kwargs.get('end', '\n')
        f.write(b' '.join(args) + end)

    def sysstr(s):
        return s


# Attempt to guide users to a modern pip - this means that 2.6 users
# should have a chance of getting a 4.2 release, and when we ratchet
# the version requirement forward again hopefully everyone will get
# something that works for them.
if sys.version_info < (2, 7, 4, 'final'):
    pip_message = (
        'This may be due to an out of date pip. '
        'Make sure you have pip >= 9.0.1.'
    )
    try:
        import pip

        pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]])
        if pip_version < (9, 0, 1):
            pip_message = (
                'Your pip version is out of date, please install '
                'pip >= 9.0.1. pip {} detected.'.format(pip.__version__)
            )
        else:
            # pip is new enough - it must be something else
            pip_message = ''
    except Exception:
        pass
    error = """
Mercurial does not support Python older than 2.7.4.
Python {py} detected.
{pip}
""".format(
        py=sys.version_info, pip=pip_message
    )
    printf(error, file=sys.stderr)
    sys.exit(1)

import ssl

try:
    ssl.SSLContext
except AttributeError:
    error = """
The `ssl` module does not have the `SSLContext` class. This indicates an old
Python version which does not support modern security features (which were
added to Python 2.7 as part of "PEP 466"). Please make sure you have installed
at least Python 2.7.9 or a Python version with backports of these security
features.
"""
    printf(error, file=sys.stderr)
    sys.exit(1)

# ssl.HAS_TLSv1* are preferred to check support but they were added in Python
# 3.7. Prior to CPython commit 6e8cda91d92da72800d891b2fc2073ecbc134d98
# (backported to the 3.7 branch), ssl.PROTOCOL_TLSv1_1 / ssl.PROTOCOL_TLSv1_2
# were defined only if compiled against a OpenSSL version with TLS 1.1 / 1.2
# support. At the mentioned commit, they were unconditionally defined.
_notset = object()
has_tlsv1_1 = getattr(ssl, 'HAS_TLSv1_1', _notset)
if has_tlsv1_1 is _notset:
    has_tlsv1_1 = getattr(ssl, 'PROTOCOL_TLSv1_1', _notset) is not _notset
has_tlsv1_2 = getattr(ssl, 'HAS_TLSv1_2', _notset)
if has_tlsv1_2 is _notset:
    has_tlsv1_2 = getattr(ssl, 'PROTOCOL_TLSv1_2', _notset) is not _notset
if not (has_tlsv1_1 or has_tlsv1_2):
    error = """
The `ssl` module does not advertise support for TLS 1.1 or TLS 1.2.
Please make sure that your Python installation was compiled against an OpenSSL
version enabling these features (likely this requires the OpenSSL version to
be at least 1.0.1).
"""
    printf(error, file=sys.stderr)
    sys.exit(1)

if sys.version_info[0] >= 3:
    DYLIB_SUFFIX = sysconfig.get_config_vars()['EXT_SUFFIX']
else:
    # deprecated in Python 3
    DYLIB_SUFFIX = sysconfig.get_config_vars()['SO']

# Solaris Python packaging brain damage
try:
    import hashlib

    sha = hashlib.sha1()
except ImportError:
    try:
        import sha

        sha.sha  # silence unused import warning
    except ImportError:
        raise SystemExit(
            "Couldn't import standard hashlib (incomplete Python install)."
        )

try:
    import zlib

    zlib.compressobj  # silence unused import warning
except ImportError:
    raise SystemExit(
        "Couldn't import standard zlib (incomplete Python install)."
    )

# The base IronPython distribution (as of 2.7.1) doesn't support bz2
isironpython = False
try:
    isironpython = (
        platform.python_implementation().lower().find("ironpython") != -1
    )
except AttributeError:
    pass

if isironpython:
    sys.stderr.write("warning: IronPython detected (no bz2 support)\n")
else:
    try:
        import bz2

        bz2.BZ2Compressor  # silence unused import warning
    except ImportError:
        raise SystemExit(
            "Couldn't import standard bz2 (incomplete Python install)."
        )

ispypy = "PyPy" in sys.version

import ctypes
import errno
import stat, subprocess, time
import re
import shutil
import tempfile
from distutils import log

# We have issues with setuptools on some platforms and builders. Until
# those are resolved, setuptools is opt-in except for platforms where
# we don't have issues.
issetuptools = os.name == 'nt' or 'FORCE_SETUPTOOLS' in os.environ
if issetuptools:
    from setuptools import setup
else:
    from distutils.core import setup
from distutils.ccompiler import new_compiler
from distutils.core import Command, Extension
from distutils.dist import Distribution
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.build_py import build_py
from distutils.command.build_scripts import build_scripts
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.command.install_scripts import install_scripts
from distutils.spawn import spawn, find_executable
from distutils import file_util
from distutils.errors import (
    CCompilerError,
    DistutilsError,
    DistutilsExecError,
)
from distutils.sysconfig import get_python_inc, get_config_var
from distutils.version import StrictVersion

# Explain to distutils.StrictVersion how our release candidates are versioned
StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$')


def write_if_changed(path, content):
    """Write content to a file iff the content hasn't changed."""
    if os.path.exists(path):
        with open(path, 'rb') as fh:
            current = fh.read()
    else:
        current = b''

    if current != content:
        with open(path, 'wb') as fh:
            fh.write(content)

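# Usage sketch (hypothetical path and payload): because the file is only
# rewritten when the bytes differ, a no-op regeneration keeps the old mtime
# and does not retrigger mtime-based build steps.
def _write_if_changed_example():
    write_if_changed('hypothetical_version.py', b"version = b'unknown'\n")
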
scripts = ['hg']
if os.name == 'nt':
    # We remove hg.bat if we are able to build hg.exe.
    scripts.append('contrib/win32/hg.bat')


def cancompile(cc, code):
    tmpdir = tempfile.mkdtemp(prefix='hg-install-')
    devnull = oldstderr = None
    try:
        fname = os.path.join(tmpdir, 'testcomp.c')
        f = open(fname, 'w')
        f.write(code)
        f.close()
        # Redirect stderr to /dev/null to hide any error messages
        # from the compiler.
        # This will have to be changed if we ever have to check
        # for a function on Windows.
        devnull = open('/dev/null', 'w')
        oldstderr = os.dup(sys.stderr.fileno())
        os.dup2(devnull.fileno(), sys.stderr.fileno())
        objects = cc.compile([fname], output_dir=tmpdir)
        cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
        return True
    except Exception:
        return False
    finally:
        if oldstderr is not None:
            os.dup2(oldstderr, sys.stderr.fileno())
        if devnull is not None:
            devnull.close()
        shutil.rmtree(tmpdir)


# simplified version of distutils.ccompiler.CCompiler.has_function
# that actually removes its temporary files.
def hasfunction(cc, funcname):
    code = 'int main(void) { %s(); }\n' % funcname
    return cancompile(cc, code)


def hasheader(cc, headername):
    code = '#include <%s>\nint main(void) { return 0; }\n' % headername
    return cancompile(cc, code)

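# Illustrative only: how these probes are typically wired up. The compiler
# object comes from distutils; the function and header names below are
# examples, not necessarily what this setup.py actually checks.
def _probe_examples():
    cc = new_compiler()
    return hasfunction(cc, 'setproctitle'), hasheader(cc, 'sys/mount.h')
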
# py2exe needs to be installed to work
try:
    import py2exe

    py2exe.Distribution  # silence unused import warning
    py2exeloaded = True
    # import py2exe's patched Distribution class
    from distutils.core import Distribution
except ImportError:
    py2exeloaded = False


def runcmd(cmd, env, cwd=None):
    p = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=cwd
    )
    out, err = p.communicate()
    return p.returncode, out, err

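# Example invocation (hypothetical command and environment): both streams
# are captured as bytes, and the caller inspects the returncode itself.
def _runcmd_example():
    returncode, out, err = runcmd(['hg', 'version'], {'HGPLAIN': '1'})
    return returncode == 0 and not err
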
class hgcommand(object):
    def __init__(self, cmd, env):
        self.cmd = cmd
        self.env = env

    def run(self, args):
        cmd = self.cmd + args
        returncode, out, err = runcmd(cmd, self.env)
        err = filterhgerr(err)
        if err or returncode != 0:
            printf("stderr from '%s':" % (' '.join(cmd)), file=sys.stderr)
            printf(err, file=sys.stderr)
            return b''
        return out


def filterhgerr(err):
    # If root is executing setup.py, but the repository is owned by
    # another user (as in "sudo python setup.py install") we will get
    # trust warnings since the .hg/hgrc file is untrusted. That is
    # fine, we don't want to load it anyway. Python may warn about
    # a missing __init__.py in mercurial/locale, we also ignore that.
    err = [
        e
        for e in err.splitlines()
        if (
            not e.startswith(b'not trusting file')
            and not e.startswith(b'warning: Not importing')
            and not e.startswith(b'obsolete feature not enabled')
            and not e.startswith(b'*** failed to import extension')
            and not e.startswith(b'devel-warn:')
            and not (
                e.startswith(b'(third party extension')
                and e.endswith(b'or newer of Mercurial; disabling)')
            )
        )
    ]
    return b'\n'.join(b'  ' + e for e in err)

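# A sketch of the filtering in action (the error strings are fabricated):
# environment noise vanishes entirely, real errors come back indented.
def _filterhgerr_examples():
    noise = b'not trusting file /repo/.hg/hgrc from untrusted user root\n'
    real = b'abort: no repository found\n'
    assert filterhgerr(noise) == b''
    assert filterhgerr(real) == b'  abort: no repository found'
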
def findhg():
    """Try to figure out how we should invoke hg for examining the local
    repository contents.

    Returns an hgcommand object."""
    # By default, prefer the "hg" command in the user's path. This was
    # presumably the hg command that the user used to create this repository.
    #
    # This repository may require extensions or other settings that would not
    # be enabled by running the hg script directly from this local repository.
    hgenv = os.environ.copy()
    # Use HGPLAIN to disable hgrc settings that would change output formatting,
    # and disable localization for the same reasons.
    hgenv['HGPLAIN'] = '1'
    hgenv['LANGUAGE'] = 'C'
    hgcmd = ['hg']
    # Run a simple "hg log" command just to see if using hg from the user's
    # path works and can successfully interact with this repository. Windows
    # gives precedence to hg.exe in the current directory, so fall back to the
    # python invocation of local hg, where pythonXY.dll can always be found.
    check_cmd = ['log', '-r.', '-Ttest']
    if os.name != 'nt' or not os.path.exists("hg.exe"):
        try:
            retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
        except EnvironmentError:
            retcode = -1
        if retcode == 0 and not filterhgerr(err):
            return hgcommand(hgcmd, hgenv)

    # Fall back to trying the local hg installation.
    hgenv = localhgenv()
    hgcmd = [sys.executable, 'hg']
    try:
        retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
    except EnvironmentError:
        retcode = -1
    if retcode == 0 and not filterhgerr(err):
        return hgcommand(hgcmd, hgenv)

    raise SystemExit(
        'Unable to find a working hg binary to extract the '
        'version from the repository tags'
    )

def localhgenv():
    """Get an environment dictionary to use for invoking or importing
    mercurial from the local repository."""
    # Execute hg out of this directory with a custom environment which takes
    # care to not use any hgrc files and do no localization.
    env = {
        'HGMODULEPOLICY': 'py',
        'HGRCPATH': '',
        'LANGUAGE': 'C',
        'PATH': '',
    }  # make pypi modules that use os.environ['PATH'] happy
    if 'LD_LIBRARY_PATH' in os.environ:
        env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
    if 'SystemRoot' in os.environ:
        # SystemRoot is required by Windows to load various DLLs. See:
        # https://bugs.python.org/issue13524#msg148850
        env['SystemRoot'] = os.environ['SystemRoot']
    return env


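# Illustration (not used by the build): localhgenv() pairs with runcmd() to
# invoke the in-tree hg under a scrubbed environment, e.g.:
#
#     retcode, out, err = runcmd(
#         [sys.executable, 'hg', 'version'], localhgenv()
#     )
#
# The empty HGRCPATH and PATH keep user configuration and stray binaries
# from influencing the result.

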
version = ''

if os.path.isdir('.hg'):
    hg = findhg()
    cmd = ['log', '-r', '.', '--template', '{tags}\n']
    numerictags = [t for t in sysstr(hg.run(cmd)).split() if t[0:1].isdigit()]
    hgid = sysstr(hg.run(['id', '-i'])).strip()
    if not hgid:
        # Bail out if hg is having problems interacting with this repository,
        # rather than falling through and producing a bogus version number.
        # Continuing with an invalid version number will break extensions
        # that define minimumhgversion.
        raise SystemExit('Unable to determine hg version from local repository')
    if numerictags:  # tag(s) found
        version = numerictags[-1]
        if hgid.endswith('+'):  # propagate the dirty status to the tag
            version += '+'
    else:  # no tag found
        ltagcmd = ['parents', '--template', '{latesttag}']
        ltag = sysstr(hg.run(ltagcmd))
        changessincecmd = ['log', '-T', 'x\n', '-r', "only(.,'%s')" % ltag]
        changessince = len(hg.run(changessincecmd).splitlines())
        version = '%s+%s-%s' % (ltag, changessince, hgid)
    if version.endswith('+'):
        version += time.strftime('%Y%m%d')
elif os.path.exists('.hg_archival.txt'):
    kw = dict(
        [[t.strip() for t in l.split(':', 1)] for l in open('.hg_archival.txt')]
    )
    if 'tag' in kw:
        version = kw['tag']
    elif 'latesttag' in kw:
        if 'changessincelatesttag' in kw:
            version = '%(latesttag)s+%(changessincelatesttag)s-%(node).12s' % kw
        else:
            version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
    else:
        version = kw.get('node', '')[:12]

if version:
    versionb = version
    if not isinstance(versionb, bytes):
        versionb = versionb.encode('ascii')

    write_if_changed(
        'mercurial/__version__.py',
        b''.join(
            [
                b'# this file is autogenerated by setup.py\n'
                b'version = b"%s"\n' % versionb,
            ]
        ),
    )

try:
    oldpolicy = os.environ.get('HGMODULEPOLICY', None)
    os.environ['HGMODULEPOLICY'] = 'py'
    from mercurial import __version__

    version = __version__.version
except ImportError:
    version = b'unknown'
finally:
    if oldpolicy is None:
        del os.environ['HGMODULEPOLICY']
    else:
        os.environ['HGMODULEPOLICY'] = oldpolicy


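# Illustration: version strings the logic above can produce (values are made
# up). On a clean tagged revision: '5.9'. On a dirty tagged revision:
# '5.9+20210101' (tag plus build date). Between tags: '5.9+2-1a2b3c4d5e6f'
# (latest tag, commits since it, short node), with the date appended as well
# when the working directory is dirty.

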
class hgbuild(build):
    # Insert hgbuildmo first so that files in mercurial/locale/ are found
    # when build_py is run next.
    sub_commands = [('build_mo', None)] + build.sub_commands


class hgbuildmo(build):

    description = "build translations (.mo files)"

    def run(self):
        if not find_executable('msgfmt'):
            self.warn(
                "could not find msgfmt executable, no translations "
                "will be built"
            )
            return

        podir = 'i18n'
        if not os.path.isdir(podir):
            self.warn("could not find %s/ directory" % podir)
            return

        join = os.path.join
        for po in os.listdir(podir):
            if not po.endswith('.po'):
                continue
            pofile = join(podir, po)
            modir = join('locale', po[:-3], 'LC_MESSAGES')
            mofile = join(modir, 'hg.mo')
            mobuildfile = join('mercurial', mofile)
            cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile]
            if sys.platform != 'sunos5':
                # msgfmt on Solaris does not know about -c
                cmd.append('-c')
            self.mkpath(join('mercurial', modir))
            self.make_file([pofile], mobuildfile, spawn, (cmd,))


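# Illustration: for a hypothetical i18n/ja.po, the loop above runs roughly
#
#     msgfmt -v -o mercurial/locale/ja/LC_MESSAGES/hg.mo i18n/ja.po -c
#
# so the compiled catalog lands where the mercurial package looks for
# translations at runtime.

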
class hgdist(Distribution):
    pure = False
    rust = False
    no_rust = False
    cffi = ispypy

    global_options = Distribution.global_options + [
        ('pure', None, "use pure (slow) Python code instead of C extensions"),
        ('rust', None, "use Rust extensions additionally to C extensions"),
        (
            'no-rust',
            None,
            "do not use Rust extensions additionally to C extensions",
        ),
    ]

    negative_opt = Distribution.negative_opt.copy()
    boolean_options = ['pure', 'rust', 'no-rust']
    negative_opt['no-rust'] = 'rust'

    def _set_command_options(self, command_obj, option_dict=None):
        # Not all distutils versions in the wild have boolean_options.
        # This should be cleaned up when we're Python 3 only.
        command_obj.boolean_options = (
            getattr(command_obj, 'boolean_options', []) + self.boolean_options
        )
        return Distribution._set_command_options(
            self, command_obj, option_dict=option_dict
        )

    def parse_command_line(self):
        ret = Distribution.parse_command_line(self)
        if not (self.rust or self.no_rust):
            hgrustext = os.environ.get('HGWITHRUSTEXT')
            # TODO record it for proper rebuild upon changes
            # (see mercurial/__modulepolicy__.py)
            if hgrustext != 'cpython' and hgrustext is not None:
                if hgrustext:
                    msg = 'unknown HGWITHRUSTEXT value: %s' % hgrustext
                    printf(msg, file=sys.stderr)
                hgrustext = None
            self.rust = hgrustext is not None
            self.no_rust = not self.rust
        return ret

    def has_ext_modules(self):
        # self.ext_modules is emptied in hgbuildpy.finalize_options which is
        # too late for some cases
        return not self.pure and Distribution.has_ext_modules(self)


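# Illustration: the Rust toggles above can come from the command line or the
# environment (the commands are examples, not prescriptions):
#
#     python setup.py --rust build       # force the Rust extensions on
#     python setup.py --no-rust build    # force them off
#     HGWITHRUSTEXT=cpython python setup.py build   # behaves like --rust
#
# Any other non-empty HGWITHRUSTEXT value is reported and then ignored.

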
# This is ugly as a one-liner. So use a variable.
buildextnegops = dict(getattr(build_ext, 'negative_options', {}))
buildextnegops['no-zstd'] = 'zstd'
buildextnegops['no-rust'] = 'rust'


class hgbuildext(build_ext):
    user_options = build_ext.user_options + [
        ('zstd', None, 'compile zstd bindings [default]'),
        ('no-zstd', None, 'do not compile zstd bindings'),
        (
            'rust',
            None,
            'compile Rust extensions if they are in use '
            '(requires Cargo) [default]',
        ),
        ('no-rust', None, 'do not compile Rust extensions'),
    ]

    boolean_options = build_ext.boolean_options + ['zstd', 'rust']
    negative_opt = buildextnegops

    def initialize_options(self):
        self.zstd = True
        self.rust = True

        return build_ext.initialize_options(self)

    def finalize_options(self):
        # Unless overridden by the end user, build extensions in parallel.
        # Only influences behavior on Python 3.5+.
        if getattr(self, 'parallel', None) is None:
            self.parallel = True

        return build_ext.finalize_options(self)

    def build_extensions(self):
        ruststandalones = [
            e for e in self.extensions if isinstance(e, RustStandaloneExtension)
        ]
        self.extensions = [
            e for e in self.extensions if e not in ruststandalones
        ]
        # Filter out zstd if disabled via argument.
        if not self.zstd:
            self.extensions = [
                e for e in self.extensions if e.name != 'mercurial.zstd'
            ]

        # Build Rust standalone extensions if they'll be used
        # and their build is not explicitly disabled (for external builds,
        # as Linux distributions would do)
        if self.distribution.rust and self.rust:
            for rustext in ruststandalones:
                rustext.build('' if self.inplace else self.build_lib)

        return build_ext.build_extensions(self)

    def build_extension(self, ext):
        if (
            self.distribution.rust
            and self.rust
            and isinstance(ext, RustExtension)
        ):
            ext.rustbuild()
        try:
            build_ext.build_extension(self, ext)
        except CCompilerError:
            if not getattr(ext, 'optional', False):
                raise
            log.warn(
                "Failed to build optional extension '%s' (skipping)", ext.name
            )


class hgbuildscripts(build_scripts):
    def run(self):
        if os.name != 'nt' or self.distribution.pure:
            return build_scripts.run(self)

        exebuilt = False
        try:
            self.run_command('build_hgexe')
            exebuilt = True
        except (DistutilsError, CCompilerError):
            log.warn('failed to build optional hg.exe')

        if exebuilt:
            # Copying hg.exe to the scripts build directory ensures it is
            # installed by the install_scripts command.
            hgexecommand = self.get_finalized_command('build_hgexe')
            dest = os.path.join(self.build_dir, 'hg.exe')
            self.mkpath(self.build_dir)
            self.copy_file(hgexecommand.hgexepath, dest)

            # Remove hg.bat because it is redundant with hg.exe.
            self.scripts.remove('contrib/win32/hg.bat')

        return build_scripts.run(self)


class hgbuildpy(build_py):
    def finalize_options(self):
        build_py.finalize_options(self)

        if self.distribution.pure:
            self.distribution.ext_modules = []
        elif self.distribution.cffi:
            from mercurial.cffi import (
                bdiffbuild,
                mpatchbuild,
            )

            exts = [
                mpatchbuild.ffi.distutils_extension(),
                bdiffbuild.ffi.distutils_extension(),
            ]
            # cffi modules go here
            if sys.platform == 'darwin':
                from mercurial.cffi import osutilbuild

                exts.append(osutilbuild.ffi.distutils_extension())
            self.distribution.ext_modules = exts
        else:
            h = os.path.join(get_python_inc(), 'Python.h')
            if not os.path.exists(h):
                raise SystemExit(
                    'Python headers are required to build '
                    'Mercurial but weren\'t found in %s' % h
                )

    def run(self):
        basepath = os.path.join(self.build_lib, 'mercurial')
        self.mkpath(basepath)

        rust = self.distribution.rust
        if self.distribution.pure:
            modulepolicy = 'py'
        elif self.build_lib == '.':
            # an in-place build should run without rebuilding the C and Rust
            # extensions
            modulepolicy = 'rust+c-allow' if rust else 'allow'
        else:
            modulepolicy = 'rust+c' if rust else 'c'

        content = b''.join(
            [
                b'# this file is autogenerated by setup.py\n',
                b'modulepolicy = b"%s"\n' % modulepolicy.encode('ascii'),
            ]
        )
        write_if_changed(os.path.join(basepath, '__modulepolicy__.py'), content)

        build_py.run(self)


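# Illustration: for a default build (not --pure, no Rust), the generated
# mercurial/__modulepolicy__.py would contain:
#
#     # this file is autogenerated by setup.py
#     modulepolicy = b"c"

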
class buildhgextindex(Command):
    description = 'generate prebuilt index of hgext (for frozen package)'
    user_options = []
    _indexfilename = 'hgext/__index__.py'

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        if os.path.exists(self._indexfilename):
            with open(self._indexfilename, 'w') as f:
                f.write('# empty\n')

        # no extensions are enabled here, so disabled() lists everything
        code = (
            'import pprint; from mercurial import extensions; '
            'ext = extensions.disabled();'
            'ext.pop("__index__", None);'
            'pprint.pprint(ext)'
        )
        returncode, out, err = runcmd(
            [sys.executable, '-c', code], localhgenv()
        )
        if err or returncode != 0:
            raise DistutilsExecError(err)

        with open(self._indexfilename, 'wb') as f:
            f.write(b'# this file is autogenerated by setup.py\n')
            f.write(b'docs = ')
            f.write(out)


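# Illustration: the generated hgext/__index__.py holds a plain mapping from
# extension name to its synopsis, roughly (entries are hypothetical):
#
#     # this file is autogenerated by setup.py
#     docs = {'churn': 'command to display statistics about repository '
#                      'history',
#             ...}

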
class buildhgexe(build_ext):
    description = 'compile hg.exe from mercurial/exewrapper.c'
    user_options = build_ext.user_options + [
        (
            'long-paths-support',
            None,
            'enable support for long paths on '
            'Windows (off by default and '
            'experimental)',
        ),
    ]

    LONG_PATHS_MANIFEST = """
    <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
        <application>
            <windowsSettings
            xmlns:ws2="http://schemas.microsoft.com/SMI/2016/WindowsSettings">
                <ws2:longPathAware>true</ws2:longPathAware>
            </windowsSettings>
        </application>
    </assembly>"""

    def initialize_options(self):
        build_ext.initialize_options(self)
        self.long_paths_support = False

    def build_extensions(self):
        if os.name != 'nt':
            return
        if isinstance(self.compiler, HackedMingw32CCompiler):
            self.compiler.compiler_so = self.compiler.compiler  # no -mdll
            self.compiler.dll_libraries = []  # no -lmsrvc90

        pythonlib = None

        dir = os.path.dirname(self.get_ext_fullpath('dummy'))
        self.hgtarget = os.path.join(dir, 'hg')

        if getattr(sys, 'dllhandle', None):
            # Different Python installs can have different Python library
            # names. e.g. the official CPython distribution uses pythonXY.dll
            # and MinGW uses libpythonX.Y.dll.
            _kernel32 = ctypes.windll.kernel32
            _kernel32.GetModuleFileNameA.argtypes = [
                ctypes.c_void_p,
                ctypes.c_void_p,
                ctypes.c_ulong,
            ]
            _kernel32.GetModuleFileNameA.restype = ctypes.c_ulong
            size = 1000
            buf = ctypes.create_string_buffer(size + 1)
            filelen = _kernel32.GetModuleFileNameA(
                sys.dllhandle, ctypes.byref(buf), size
            )

            if filelen > 0 and filelen != size:
                dllbasename = os.path.basename(buf.value)
                if not dllbasename.lower().endswith(b'.dll'):
                    raise SystemExit(
                        'Python DLL does not end with .dll: %s' % dllbasename
                    )
                pythonlib = dllbasename[:-4]

                # Copy the pythonXY.dll next to the binary so that it runs
                # without tampering with PATH.
                fsdecode = lambda x: x
                if sys.version_info[0] >= 3:
                    fsdecode = os.fsdecode
                dest = os.path.join(
                    os.path.dirname(self.hgtarget),
                    fsdecode(dllbasename),
                )

                if not os.path.exists(dest):
                    shutil.copy(buf.value, dest)

        if not pythonlib:
            log.warn(
                'could not determine Python DLL filename; assuming pythonXY'
            )

            hv = sys.hexversion
            pythonlib = b'python%d%d' % (hv >> 24, (hv >> 16) & 0xFF)

        log.info('using %s as Python library name' % pythonlib)
        with open('mercurial/hgpythonlib.h', 'wb') as f:
            f.write(b'/* this file is autogenerated by setup.py */\n')
            f.write(b'#define HGPYTHONLIB "%s"\n' % pythonlib)

        macros = None
        if sys.version_info[0] >= 3:
            macros = [('_UNICODE', None), ('UNICODE', None)]

        objects = self.compiler.compile(
            ['mercurial/exewrapper.c'],
            output_dir=self.build_temp,
            macros=macros,
        )
        self.compiler.link_executable(
            objects, self.hgtarget, libraries=[], output_dir=self.build_temp
        )
        if self.long_paths_support:
            self.addlongpathsmanifest()

    def addlongpathsmanifest(self):
        r"""Add manifest pieces so that hg.exe understands long paths

        This is an EXPERIMENTAL feature, use with care.
        To enable long paths support, one needs to do two things:
        - build Mercurial with the --long-paths-support option
        - change HKLM\SYSTEM\CurrentControlSet\Control\FileSystem\
          LongPathsEnabled to have value 1.

        Please ignore 'warning 81010002: Unrecognized Element "longPathAware"';
        it happens because Mercurial uses mt.exe circa 2008, which is not
        yet aware of long paths support in the manifest (I think so at least).
        This does not stop mt.exe from embedding/merging the XML properly.

        Why should resource #1 be used for .exe manifests? I don't know and
        wasn't able to find an explanation for mortals. But it seems to work.
        """
        exefname = self.compiler.executable_filename(self.hgtarget)
        fdauto, manfname = tempfile.mkstemp(suffix='.hg.exe.manifest')
        os.close(fdauto)
        with open(manfname, 'w') as f:
            f.write(self.LONG_PATHS_MANIFEST)
        log.info("long paths manifest is written to '%s'" % manfname)
        inputresource = '-inputresource:%s;#1' % exefname
        outputresource = '-outputresource:%s;#1' % exefname
        log.info("running mt.exe to update hg.exe's manifest in-place")
        # supplying both -manifest and -inputresource to mt.exe makes
        # it merge the embedded and supplied manifests in the -outputresource
        self.spawn(
            [
                'mt.exe',
                '-nologo',
                '-manifest',
                manfname,
                inputresource,
                outputresource,
            ]
        )
        log.info("done updating hg.exe's manifest")
        os.remove(manfname)

    @property
    def hgexepath(self):
        dir = os.path.dirname(self.get_ext_fullpath('dummy'))
        return os.path.join(self.build_temp, dir, 'hg.exe')


class hgbuilddoc(Command):
    description = 'build documentation'
    user_options = [
        ('man', None, 'generate man pages'),
        ('html', None, 'generate html pages'),
    ]

    def initialize_options(self):
        self.man = None
        self.html = None

    def finalize_options(self):
        # If --man or --html are set, only generate what we're told to.
        # Otherwise generate everything.
        have_subset = self.man is not None or self.html is not None

        if have_subset:
            self.man = True if self.man else False
            self.html = True if self.html else False
        else:
            self.man = True
            self.html = True

    def run(self):
        def normalizecrlf(p):
            with open(p, 'rb') as fh:
                orig = fh.read()

            if b'\r\n' not in orig:
                return

            log.info('normalizing %s to LF line endings' % p)
            with open(p, 'wb') as fh:
                fh.write(orig.replace(b'\r\n', b'\n'))

        def gentxt(root):
            txt = 'doc/%s.txt' % root
            log.info('generating %s' % txt)
            res, out, err = runcmd(
                [sys.executable, 'gendoc.py', root], os.environ, cwd='doc'
            )
            if res:
                raise SystemExit(
                    'error running gendoc.py: %s'
                    % '\n'.join([sysstr(out), sysstr(err)])
                )

            with open(txt, 'wb') as fh:
                fh.write(out)

        def gengendoc(root):
            gendoc = 'doc/%s.gendoc.txt' % root

            log.info('generating %s' % gendoc)
            res, out, err = runcmd(
                [sys.executable, 'gendoc.py', '%s.gendoc' % root],
                os.environ,
                cwd='doc',
            )
            if res:
                raise SystemExit(
                    'error running gendoc: %s'
                    % '\n'.join([sysstr(out), sysstr(err)])
                )

            with open(gendoc, 'wb') as fh:
                fh.write(out)

        def genman(root):
            log.info('generating doc/%s' % root)
            res, out, err = runcmd(
                [
                    sys.executable,
                    'runrst',
                    'hgmanpage',
                    '--halt',
                    'warning',
                    '--strip-elements-with-class',
                    'htmlonly',
                    '%s.txt' % root,
                    root,
                ],
                os.environ,
                cwd='doc',
            )
            if res:
                raise SystemExit(
                    'error running runrst: %s'
                    % '\n'.join([sysstr(out), sysstr(err)])
                )

            normalizecrlf('doc/%s' % root)

        def genhtml(root):
            log.info('generating doc/%s.html' % root)
            res, out, err = runcmd(
                [
                    sys.executable,
                    'runrst',
                    'html',
                    '--halt',
                    'warning',
                    '--link-stylesheet',
                    '--stylesheet-path',
                    'style.css',
                    '%s.txt' % root,
                    '%s.html' % root,
                ],
                os.environ,
                cwd='doc',
            )
            if res:
                raise SystemExit(
                    'error running runrst: %s'
                    % '\n'.join([sysstr(out), sysstr(err)])
                )

            normalizecrlf('doc/%s.html' % root)

        # This logic is duplicated in doc/Makefile.
        sources = {
            f
            for f in os.listdir('mercurial/helptext')
            if re.search(r'[0-9]\.txt$', f)
        }

        # common.txt is a one-off.
        gentxt('common')

        for source in sorted(sources):
            assert source[-4:] == '.txt'
            root = source[:-4]

            gentxt(root)
            gengendoc(root)

            if self.man:
                genman(root)
            if self.html:
                genhtml(root)


class hginstall(install):

    user_options = install.user_options + [
        (
            'old-and-unmanageable',
            None,
            'noop, present for eggless setuptools compat',
        ),
        (
            'single-version-externally-managed',
            None,
            'noop, present for eggless setuptools compat',
        ),
    ]

    # Also helps setuptools not be sad while we refuse to create eggs.
    single_version_externally_managed = True

    def get_sub_commands(self):
        # Screen out egg related commands to prevent egg generation. But allow
        # mercurial.egg-info generation, since that is part of modern
        # packaging.
        excl = {'bdist_egg'}
        return filter(lambda x: x not in excl, install.get_sub_commands(self))


class hginstalllib(install_lib):
    """
    This is a specialization of install_lib that replaces the copy_file used
    there so that it supports setting the mode of files after copying them,
    instead of just preserving the mode that the files originally had. If your
    system has a umask of something like 027, preserving the permissions when
    copying will lead to a broken install.

    Note that just passing keep_permissions=False to copy_file would be
    insufficient, as it might still be applying a umask.
    """

    def run(self):
        realcopyfile = file_util.copy_file

        def copyfileandsetmode(*args, **kwargs):
            src, dst = args[0], args[1]
            dst, copied = realcopyfile(*args, **kwargs)
            if copied:
                st = os.stat(src)
                # Persist executable bit (apply it to group and other if user
                # has it)
                if st[stat.ST_MODE] & stat.S_IXUSR:
                    setmode = int('0755', 8)
                else:
                    setmode = int('0644', 8)
                m = stat.S_IMODE(st[stat.ST_MODE])
                m = (m & ~int('0777', 8)) | setmode
                os.chmod(dst, m)

        file_util.copy_file = copyfileandsetmode
        try:
            install_lib.run(self)
        finally:
            file_util.copy_file = realcopyfile


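# Illustration: with the fixup above, a source file copied with mode 0700 is
# installed as 0755, and one copied with mode 0600 is installed as 0644,
# regardless of the build user's umask.

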
1106 | class hginstallscripts(install_scripts):
1107 |     """
1108 |     This is a specialization of install_scripts that replaces the @LIBDIR@ with
1109 |     the configured directory for modules. If possible, the path is made relative
1110 |     to the directory for scripts.
1111 |     """
1112 |
1113 |     def initialize_options(self):
1114 |         install_scripts.initialize_options(self)
1115 |
1116 |         self.install_lib = None
1117 |
1118 |     def finalize_options(self):
1119 |         install_scripts.finalize_options(self)
1120 |         self.set_undefined_options('install', ('install_lib', 'install_lib'))
1121 |
1122 |     def run(self):
1123 |         install_scripts.run(self)
1124 |
1125 |         # It only makes sense to replace @LIBDIR@ with the install path if
1126 |         # the install path is known. For wheels, the logic below calculates
1127 |         # the libdir to be "../..". This is because the internal layout of a
1128 |         # wheel archive looks like:
1129 |         #
1130 |         #   mercurial-3.6.1.data/scripts/hg
1131 |         #   mercurial/__init__.py
1132 |         #
1133 |         # When installing wheels, the subdirectories of the "<pkg>.data"
1134 |         # directory are translated to system local paths and files therein
1135 |         # are copied in place. The mercurial/* files are installed into the
1136 |         # site-packages directory. However, the site-packages directory
1137 |         # isn't known until wheel install time. This means we have no clue
1138 |         # at wheel generation time what the installed site-packages directory
1139 |         # will be. And, wheels don't appear to provide the ability to register
1140 |         # custom code to run during wheel installation. This all means that
1141 |         # we can't reliably set the libdir in wheels: the default behavior
1142 |         # of looking in sys.path must do.
1143 |
1144 |         if (
1145 |             os.path.splitdrive(self.install_dir)[0]
1146 |             != os.path.splitdrive(self.install_lib)[0]
1147 |         ):
1148 |             # can't make relative paths from one drive to another, so use an
1149 |             # absolute path instead
1150 |             libdir = self.install_lib
1151 |         else:
1152 |             libdir = os.path.relpath(self.install_lib, self.install_dir)
1153 |
1154 |         for outfile in self.outfiles:
1155 |             with open(outfile, 'rb') as fp:
1156 |                 data = fp.read()
1157 |
1158 |             # skip binary files
1159 |             if b'\0' in data:
1160 |                 continue
1161 |
1162 |             # During local installs, the shebang will be rewritten to the final
1163 |             # install path. During wheel packaging, the shebang has a special
1164 |             # value.
1165 |             if data.startswith(b'#!python'):
1166 |                 log.info(
1167 |                     'not rewriting @LIBDIR@ in %s because install path '
1168 |                     'not known' % outfile
1169 |                 )
1170 |                 continue
1171 |
1172 |             data = data.replace(b'@LIBDIR@', libdir.encode(libdir_escape))
1173 |             with open(outfile, 'wb') as fp:
1174 |                 fp.write(data)
1175 |
1176 |
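Aside, for illustration only (not part of this changeset): a sketch of the @LIBDIR@ substitution run() performs, on hypothetical install locations. 'unicode_escape' stands in here for the libdir_escape codec chosen earlier in setup.py.

import os

install_dir = '/usr/local/bin'                       # hypothetical scripts dir
install_lib = '/usr/local/lib/python/site-packages'  # hypothetical modules dir

if os.path.splitdrive(install_dir)[0] != os.path.splitdrive(install_lib)[0]:
    libdir = install_lib  # different drives: fall back to an absolute path
else:
    libdir = os.path.relpath(install_lib, install_dir)

script = b"libdir = '@LIBDIR@'\n"
script = script.replace(b'@LIBDIR@', libdir.encode('unicode_escape'))
# script is now b"libdir = '../lib/python/site-packages'\n"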
1177 | # virtualenv installs custom distutils/__init__.py and
1178 | # distutils/distutils.cfg files which essentially proxy back to the
1179 | # "real" distutils in the main Python install. The presence of this
1180 | # directory causes py2exe to pick up the "hacked" distutils package
1181 | # from the virtualenv and "import distutils" will fail from the py2exe
1182 | # build because the "real" distutils files can't be located.
1183 | #
1184 | # We work around this by monkeypatching the py2exe code finding Python
1185 | # modules to replace the found virtualenv distutils modules with the
1186 | # original versions via filesystem scanning. This is a bit hacky. But
1187 | # it allows us to use virtualenvs for py2exe packaging, which is more
1188 | # deterministic and reproducible.
1189 | #
1190 | # It's worth noting that the common StackOverflow suggestions for this
1191 | # problem involve copying the original distutils files into the
1192 | # virtualenv or into the staging directory after setup() is invoked.
1193 | # The former is very brittle and can easily break setup(). Our hacking
1194 | # of the found modules routine has a similar result as copying the files
1195 | # manually. But it makes fewer assumptions about how py2exe works and
1196 | # is less brittle.
1197 |
1198 | # This only catches virtualenvs made with virtualenv (as opposed to
1199 | # venv, which is likely what Python 3 uses).
1200 | py2exehacked = py2exeloaded and getattr(sys, 'real_prefix', None) is not None
1201 |
1202 | if py2exehacked:
1203 |     from distutils.command.py2exe import py2exe as buildpy2exe
1204 |     from py2exe.mf import Module as py2exemodule
1205 |
1206 |     class hgbuildpy2exe(buildpy2exe):
1207 |         def find_needed_modules(self, mf, files, modules):
1208 |             res = buildpy2exe.find_needed_modules(self, mf, files, modules)
1209 |
1210 |             # Replace virtualenv's distutils modules with the real ones.
1211 |             modules = {}
1212 |             for k, v in res.modules.items():
1213 |                 if k != 'distutils' and not k.startswith('distutils.'):
1214 |                     modules[k] = v
1215 |
1216 |             res.modules = modules
1217 |
1218 |             import opcode
1219 |
1220 |             distutilsreal = os.path.join(
1221 |                 os.path.dirname(opcode.__file__), 'distutils'
1222 |             )
1223 |
1224 |             for root, dirs, files in os.walk(distutilsreal):
1225 |                 for f in sorted(files):
1226 |                     if not f.endswith('.py'):
1227 |                         continue
1228 |
1229 |                     full = os.path.join(root, f)
1230 |
1231 |                     parents = ['distutils']
1232 |
1233 |                     if root != distutilsreal:
1234 |                         rel = os.path.relpath(root, distutilsreal)
1235 |                         parents.extend(p for p in rel.split(os.sep))
1236 |
1237 |                     modname = '%s.%s' % ('.'.join(parents), f[:-3])
1238 |
1239 |                     if modname.startswith('distutils.tests.'):
1240 |                         continue
1241 |
1242 |                     if modname.endswith('.__init__'):
1243 |                         modname = modname[: -len('.__init__')]
1244 |                         path = os.path.dirname(full)
1245 |                     else:
1246 |                         path = None
1247 |
1248 |                     res.modules[modname] = py2exemodule(
1249 |                         modname, full, path=path
1250 |                     )
1251 |
1252 |             if 'distutils' not in res.modules:
1253 |                 raise SystemExit('could not find distutils modules')
1254 |
1255 |             return res
1256 |
1257 |
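A side note, not part of the changeset: the path-to-module-name mapping in the loop above, restated as a hypothetical helper to make the derivation easier to follow.

import os

def modname_for(distutilsreal, full):
    # 'distutils/command/build.py' -> 'distutils.command.build';
    # a package's __init__.py collapses to the package name itself.
    parents = ['distutils']
    root = os.path.dirname(full)
    if root != distutilsreal:
        rel = os.path.relpath(root, distutilsreal)
        parents.extend(rel.split(os.sep))
    modname = '%s.%s' % ('.'.join(parents), os.path.basename(full)[:-3])
    if modname.endswith('.__init__'):
        modname = modname[: -len('.__init__')]
    return modname

# modname_for('/py/distutils', '/py/distutils/command/build.py')
# -> 'distutils.command.build'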
1258 | cmdclass = {
1259 |     'build': hgbuild,
1260 |     'build_doc': hgbuilddoc,
1261 |     'build_mo': hgbuildmo,
1262 |     'build_ext': hgbuildext,
1263 |     'build_py': hgbuildpy,
1264 |     'build_scripts': hgbuildscripts,
1265 |     'build_hgextindex': buildhgextindex,
1266 |     'install': hginstall,
1267 |     'install_lib': hginstalllib,
1268 |     'install_scripts': hginstallscripts,
1269 |     'build_hgexe': buildhgexe,
1270 | }
1271 |
1272 | if py2exehacked:
1273 |     cmdclass['py2exe'] = hgbuildpy2exe
1274 |
1275 | packages = [
1276 |     'mercurial',
1277 |     'mercurial.cext',
1278 |     'mercurial.cffi',
1279 |     'mercurial.defaultrc',
1280 |     'mercurial.helptext',
1281 |     'mercurial.helptext.internals',
1282 |     'mercurial.hgweb',
1283 |     'mercurial.interfaces',
1284 |     'mercurial.pure',
1285 |     'mercurial.templates',
1286 |     'mercurial.thirdparty',
1287 |     'mercurial.thirdparty.attr',
1288 |     'mercurial.thirdparty.zope',
1289 |     'mercurial.thirdparty.zope.interface',
+ 1290 |     'mercurial.upgrade_utils',
1291 |     'mercurial.utils',
1292 |     'mercurial.revlogutils',
1293 |     'mercurial.testing',
1294 |     'hgext',
1295 |     'hgext.convert',
1296 |     'hgext.fsmonitor',
1297 |     'hgext.fastannotate',
1298 |     'hgext.fsmonitor.pywatchman',
1299 |     'hgext.git',
1300 |     'hgext.highlight',
1301 |     'hgext.hooklib',
1302 |     'hgext.infinitepush',
1303 |     'hgext.largefiles',
1304 |     'hgext.lfs',
1305 |     'hgext.narrow',
1306 |     'hgext.remotefilelog',
1307 |     'hgext.zeroconf',
1308 |     'hgext3rd',
1309 |     'hgdemandimport',
1310 | ]
1311 |
1312 | for name in os.listdir(os.path.join('mercurial', 'templates')):
1313 |     if name != '__pycache__' and os.path.isdir(
1314 |         os.path.join('mercurial', 'templates', name)
1315 |     ):
1316 |         packages.append('mercurial.templates.%s' % name)
1317 |
1318 | if sys.version_info[0] == 2:
1319 |     packages.extend(
1320 |         [
1321 |             'mercurial.thirdparty.concurrent',
1322 |             'mercurial.thirdparty.concurrent.futures',
1323 |         ]
1324 |     )
1325 |
1326 | if 'HG_PY2EXE_EXTRA_INSTALL_PACKAGES' in os.environ:
1327 |     # py2exe can't cope with namespace packages very well, so we have to
1328 |     # install any hgext3rd.* extensions that we want in the final py2exe
1329 |     # image here. This is gross, but you gotta do what you gotta do.
1330 |     packages.extend(os.environ['HG_PY2EXE_EXTRA_INSTALL_PACKAGES'].split(' '))
1331 |
1332 | common_depends = [
1333 |     'mercurial/bitmanipulation.h',
1334 |     'mercurial/compat.h',
1335 |     'mercurial/cext/util.h',
1336 | ]
1337 | common_include_dirs = ['mercurial']
1338 |
1339 | common_cflags = []
1340 |
1341 | # MSVC 2008 still needs declarations at the top of the scope, but Python 3.9
1342 | # makes declarations not at the top of a scope in the headers.
1343 | if os.name != 'nt' and sys.version_info[1] < 9:
1344 |     common_cflags = ['-Werror=declaration-after-statement']
1345 |
1346 | osutil_cflags = []
1347 | osutil_ldflags = []
1348 |
1349 | # platform specific macros
1350 | for plat, func in [('bsd', 'setproctitle')]:
1351 |     if re.search(plat, sys.platform) and hasfunction(new_compiler(), func):
1352 |         osutil_cflags.append('-DHAVE_%s' % func.upper())
1353 |
1354 | for plat, macro, code in [
1355 |     (
1356 |         'bsd|darwin',
1357 |         'BSD_STATFS',
1358 |         '''
1359 |      #include <sys/param.h>
1360 |      #include <sys/mount.h>
1361 |      int main() { struct statfs s; return sizeof(s.f_fstypename); }
1362 |      ''',
1363 |     ),
1364 |     (
1365 |         'linux',
1366 |         'LINUX_STATFS',
1367 |         '''
1368 |      #include <linux/magic.h>
1369 |      #include <sys/vfs.h>
1370 |      int main() { struct statfs s; return sizeof(s.f_type); }
1371 |      ''',
1372 |     ),
1373 | ]:
1374 |     if re.search(plat, sys.platform) and cancompile(new_compiler(), code):
1375 |         osutil_cflags.append('-DHAVE_%s' % macro)
1376 |
1377 | if sys.platform == 'darwin':
1378 |     osutil_ldflags += ['-framework', 'ApplicationServices']
1379 |
1380 | if sys.platform == 'sunos5':
1381 |     osutil_ldflags += ['-lsocket']
1382 |
1383 | xdiff_srcs = [
1384 |     'mercurial/thirdparty/xdiff/xdiffi.c',
1385 |     'mercurial/thirdparty/xdiff/xprepare.c',
1386 |     'mercurial/thirdparty/xdiff/xutils.c',
1387 | ]
1388 |
1389 | xdiff_headers = [
1390 |     'mercurial/thirdparty/xdiff/xdiff.h',
1391 |     'mercurial/thirdparty/xdiff/xdiffi.h',
1392 |     'mercurial/thirdparty/xdiff/xinclude.h',
1393 |     'mercurial/thirdparty/xdiff/xmacros.h',
1394 |     'mercurial/thirdparty/xdiff/xprepare.h',
1395 |     'mercurial/thirdparty/xdiff/xtypes.h',
1396 |     'mercurial/thirdparty/xdiff/xutils.h',
1397 | ]
1398 |
1399 |
1400 | class RustCompilationError(CCompilerError):
1401 |     """Exception class for Rust compilation errors."""
1402 |
1403 |
1404 | class RustExtension(Extension):
1405 |     """Base classes for concrete Rust Extension classes."""
1406 |
1407 |     rusttargetdir = os.path.join('rust', 'target', 'release')
1408 |
1409 |     def __init__(
1410 |         self, mpath, sources, rustlibname, subcrate, py3_features=None, **kw
1411 |     ):
1412 |         Extension.__init__(self, mpath, sources, **kw)
1413 |         srcdir = self.rustsrcdir = os.path.join('rust', subcrate)
1414 |         self.py3_features = py3_features
1415 |
1416 |         # adding Rust source and control files to depends so that the extension
1417 |         # gets rebuilt if they've changed
1418 |         self.depends.append(os.path.join(srcdir, 'Cargo.toml'))
1419 |         cargo_lock = os.path.join(srcdir, 'Cargo.lock')
1420 |         if os.path.exists(cargo_lock):
1421 |             self.depends.append(cargo_lock)
1422 |         for dirpath, subdir, fnames in os.walk(os.path.join(srcdir, 'src')):
1423 |             self.depends.extend(
1424 |                 os.path.join(dirpath, fname)
1425 |                 for fname in fnames
1426 |                 if os.path.splitext(fname)[1] == '.rs'
1427 |             )
1428 |
1429 |     @staticmethod
1430 |     def rustdylibsuffix():
1431 |         """Return the suffix for shared libraries produced by rustc.
1432 |
1433 |         See also: https://doc.rust-lang.org/reference/linkage.html
1434 |         """
1435 |         if sys.platform == 'darwin':
1436 |             return '.dylib'
1437 |         elif os.name == 'nt':
1438 |             return '.dll'
1439 |         else:
1440 |             return '.so'
1441 |
1442 |     def rustbuild(self):
1443 |         env = os.environ.copy()
1444 |         if 'HGTEST_RESTOREENV' in env:
1445 |             # Mercurial tests change HOME to a temporary directory,
1446 |             # but, if installed with rustup, the Rust toolchain needs
1447 |             # HOME to be correct (otherwise the 'no default toolchain'
1448 |             # error message is issued and the build fails).
1449 |             # This happens currently with test-hghave.t, which does
1450 |             # invoke this build.
1451 |
1452 |             # Unix only fix (os.path.expanduser not really reliable if
1453 |             # HOME is shadowed like this)
1454 |             import pwd
1455 |
1456 |             env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
1457 |
1458 |         cargocmd = ['cargo', 'rustc', '--release']
1459 |
1460 |         feature_flags = []
1461 |
1462 |         if sys.version_info[0] == 3 and self.py3_features is not None:
1463 |             feature_flags.append(self.py3_features)
1464 |             cargocmd.append('--no-default-features')
1465 |
1466 |         rust_features = env.get("HG_RUST_FEATURES")
1467 |         if rust_features:
1468 |             feature_flags.append(rust_features)
1469 |
1470 |         cargocmd.extend(('--features', " ".join(feature_flags)))
1471 |
1472 |         cargocmd.append('--')
1473 |         if sys.platform == 'darwin':
1474 |             cargocmd.extend(
1475 |                 ("-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup")
1476 |             )
1477 |         try:
1478 |             subprocess.check_call(cargocmd, env=env, cwd=self.rustsrcdir)
1479 |         except OSError as exc:
1480 |             if exc.errno == errno.ENOENT:
1481 |                 raise RustCompilationError("Cargo not found")
1482 |             elif exc.errno == errno.EACCES:
1483 |                 raise RustCompilationError(
1483 | "Cargo found, but permisssion to execute it is denied" |
|
1484 | "Cargo found, but permisssion to execute it is denied" | |
1485 |                 )
1486 |             else:
1487 |                 raise
1488 |         except subprocess.CalledProcessError:
1489 |             raise RustCompilationError(
1490 |                 "Cargo failed. Working directory: %r, "
1491 |                 "command: %r, environment: %r"
1492 |                 % (self.rustsrcdir, cargocmd, env)
1493 |             )
1494 |
1495 |
1496 | class RustStandaloneExtension(RustExtension):
1497 |     def __init__(self, pydottedname, rustcrate, dylibname, **kw):
1498 |         RustExtension.__init__(
1499 |             self, pydottedname, [], dylibname, rustcrate, **kw
1500 |         )
1501 |         self.dylibname = dylibname
1502 |
1503 |     def build(self, target_dir):
1504 |         self.rustbuild()
1505 |         target = [target_dir]
1506 |         target.extend(self.name.split('.'))
1507 |         target[-1] += DYLIB_SUFFIX
1508 |         shutil.copy2(
1509 |             os.path.join(
1510 |                 self.rusttargetdir, self.dylibname + self.rustdylibsuffix()
1511 |             ),
1512 |             os.path.join(*target),
1513 |         )
1514 |
1515 |
1516 | extmodules = [
1517 |     Extension(
1518 |         'mercurial.cext.base85',
1519 |         ['mercurial/cext/base85.c'],
1520 |         include_dirs=common_include_dirs,
1521 |         extra_compile_args=common_cflags,
1522 |         depends=common_depends,
1523 |     ),
1524 |     Extension(
1525 |         'mercurial.cext.bdiff',
1526 |         ['mercurial/bdiff.c', 'mercurial/cext/bdiff.c'] + xdiff_srcs,
1527 |         include_dirs=common_include_dirs,
1528 |         extra_compile_args=common_cflags,
1529 |         depends=common_depends + ['mercurial/bdiff.h'] + xdiff_headers,
1530 |     ),
1531 |     Extension(
1532 |         'mercurial.cext.mpatch',
1533 |         ['mercurial/mpatch.c', 'mercurial/cext/mpatch.c'],
1534 |         include_dirs=common_include_dirs,
1535 |         extra_compile_args=common_cflags,
1536 |         depends=common_depends,
1537 |     ),
1538 |     Extension(
1539 |         'mercurial.cext.parsers',
1540 |         [
1541 |             'mercurial/cext/charencode.c',
1542 |             'mercurial/cext/dirs.c',
1543 |             'mercurial/cext/manifest.c',
1544 |             'mercurial/cext/parsers.c',
1545 |             'mercurial/cext/pathencode.c',
1546 |             'mercurial/cext/revlog.c',
1547 |         ],
1548 |         include_dirs=common_include_dirs,
1549 |         extra_compile_args=common_cflags,
1550 |         depends=common_depends
1551 |         + [
1552 |             'mercurial/cext/charencode.h',
1553 |             'mercurial/cext/revlog.h',
1554 |         ],
1555 |     ),
1556 |     Extension(
1557 |         'mercurial.cext.osutil',
1558 |         ['mercurial/cext/osutil.c'],
1559 |         include_dirs=common_include_dirs,
1560 |         extra_compile_args=common_cflags + osutil_cflags,
1561 |         extra_link_args=osutil_ldflags,
1562 |         depends=common_depends,
1563 |     ),
1564 |     Extension(
1565 |         'mercurial.thirdparty.zope.interface._zope_interface_coptimizations',
1566 |         [
1567 |             'mercurial/thirdparty/zope/interface/_zope_interface_coptimizations.c',
1568 |         ],
1569 |         extra_compile_args=common_cflags,
1570 |     ),
1571 |     Extension(
1572 |         'mercurial.thirdparty.sha1dc',
1573 |         [
1574 |             'mercurial/thirdparty/sha1dc/cext.c',
1575 |             'mercurial/thirdparty/sha1dc/lib/sha1.c',
1576 |             'mercurial/thirdparty/sha1dc/lib/ubc_check.c',
1577 |         ],
1578 |         extra_compile_args=common_cflags,
1579 |     ),
1580 |     Extension(
1581 |         'hgext.fsmonitor.pywatchman.bser',
1582 |         ['hgext/fsmonitor/pywatchman/bser.c'],
1583 |         extra_compile_args=common_cflags,
1584 |     ),
1585 |     RustStandaloneExtension(
1586 |         'mercurial.rustext', 'hg-cpython', 'librusthg', py3_features='python3'
1587 |     ),
1588 | ]
1589 |
1590 |
1591 | sys.path.insert(0, 'contrib/python-zstandard')
1592 | import setup_zstd
1593 |
1594 | zstd = setup_zstd.get_c_extension(
1595 |     name='mercurial.zstd', root=os.path.abspath(os.path.dirname(__file__))
1596 | )
1597 | zstd.extra_compile_args += common_cflags
1598 | extmodules.append(zstd)
1599 |
1600 | try:
1601 |     from distutils import cygwinccompiler
1602 |
1603 |     # the -mno-cygwin option has been deprecated for years
1604 |     mingw32compilerclass = cygwinccompiler.Mingw32CCompiler
1605 |
1606 |     class HackedMingw32CCompiler(cygwinccompiler.Mingw32CCompiler):
1607 |         def __init__(self, *args, **kwargs):
1608 |             mingw32compilerclass.__init__(self, *args, **kwargs)
1609 |             for i in 'compiler compiler_so linker_exe linker_so'.split():
1610 |                 try:
1611 |                     getattr(self, i).remove('-mno-cygwin')
1612 |                 except ValueError:
1613 |                     pass
1614 |
1615 |     cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler
1616 | except ImportError:
1617 |     # the cygwinccompiler package is not available on some Python
1618 |     # distributions like the ones from the optware project for Synology
1619 |     # DiskStation boxes
1620 |     class HackedMingw32CCompiler(object):
1621 |         pass
1622 |
1623 |
1624 | if os.name == 'nt':
1625 |     # Allow compiler/linker flags to be added to Visual Studio builds. Passing
1626 |     # extra_link_args to distutils.extensions.Extension() doesn't have any
1627 |     # effect.
1628 |     from distutils import msvccompiler
1629 |
1630 |     msvccompilerclass = msvccompiler.MSVCCompiler
1631 |
1632 |     class HackedMSVCCompiler(msvccompiler.MSVCCompiler):
1633 |         def initialize(self):
1634 |             msvccompilerclass.initialize(self)
1635 |             # "warning LNK4197: export 'func' specified multiple times"
1636 |             self.ldflags_shared.append('/ignore:4197')
1637 |             self.ldflags_shared_debug.append('/ignore:4197')
1638 |
1639 |     msvccompiler.MSVCCompiler = HackedMSVCCompiler
1640 |
1641 | packagedata = {
1642 |     'mercurial': [
1643 |         'locale/*/LC_MESSAGES/hg.mo',
1644 |         'dummycert.pem',
1645 |     ],
1646 |     'mercurial.defaultrc': [
1647 |         '*.rc',
1648 |     ],
1649 |     'mercurial.helptext': [
1650 |         '*.txt',
1651 |     ],
1652 |     'mercurial.helptext.internals': [
1653 |         '*.txt',
1654 |     ],
1655 | }
1656 |
1657 |
1658 | def ordinarypath(p):
1659 |     return p and p[0] != '.' and p[-1] != '~'
1660 |
1661 |
1662 | for root in ('templates',):
1663 |     for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
1664 |         packagename = curdir.replace(os.sep, '.')
1665 |         packagedata[packagename] = list(filter(ordinarypath, files))
1666 |
1667 | datafiles = []
1668 |
1669 | # distutils expects version to be str/unicode. Converting it to
1670 | # unicode on Python 2 still works because it won't contain any
1671 | # non-ascii bytes and will be implicitly converted back to bytes
1672 | # when operated on.
1673 | assert isinstance(version, bytes)
1674 | setupversion = version.decode('ascii')
1675 |
1676 | extra = {}
1677 |
1678 | py2exepackages = [
1679 |     'hgdemandimport',
1680 |     'hgext3rd',
1681 |     'hgext',
1682 |     'email',
1683 |     # implicitly imported per module policy
1684 |     # (cffi wouldn't be used as a frozen exe)
1685 |     'mercurial.cext',
1686 |     #'mercurial.cffi',
1687 |     'mercurial.pure',
1688 | ]
1689 |
1690 | py2exeexcludes = []
1691 | py2exedllexcludes = ['crypt32.dll']
1692 |
1693 | if issetuptools:
1694 |     extra['python_requires'] = supportedpy
1695 |
1696 | if py2exeloaded:
1697 |     extra['console'] = [
1698 |         {
1699 |             'script': 'hg',
1700 |             'copyright': 'Copyright (C) 2005-2020 Matt Mackall and others',
1701 |             'product_version': version,
1702 |         }
1703 |     ]
1704 |     # Sub command of 'build' because 'py2exe' does not handle sub_commands.
1705 |     # Need to override hgbuild because it has a private copy of
1706 |     # build.sub_commands.
1707 |     hgbuild.sub_commands.insert(0, ('build_hgextindex', None))
1708 |     # put dlls in sub directory so that they won't pollute PATH
1709 |     extra['zipfile'] = 'lib/library.zip'
1710 |
1711 |     # We allow some configuration to be supplemented via environment
1712 |     # variables. This is better than setup.cfg files because it allows
1713 |     # supplementing configs instead of replacing them.
1714 |     extrapackages = os.environ.get('HG_PY2EXE_EXTRA_PACKAGES')
1715 |     if extrapackages:
1716 |         py2exepackages.extend(extrapackages.split(' '))
1717 |
1718 |     excludes = os.environ.get('HG_PY2EXE_EXTRA_EXCLUDES')
1719 |     if excludes:
1720 |         py2exeexcludes.extend(excludes.split(' '))
1721 |
1722 |     dllexcludes = os.environ.get('HG_PY2EXE_EXTRA_DLL_EXCLUDES')
1723 |     if dllexcludes:
1724 |         py2exedllexcludes.extend(dllexcludes.split(' '))
1725 |
1726 | if os.environ.get('PYOXIDIZER'):
1727 |     hgbuild.sub_commands.insert(0, ('build_hgextindex', None))
1728 |
1729 | if os.name == 'nt':
1730 |     # Windows binary file versions for exe/dll files must have the
1731 |     # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
1732 |     setupversion = setupversion.split(r'+', 1)[0]
1733 |
1734 | if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
1735 |     version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[1].splitlines()
1736 |     if version:
1737 |         version = version[0]
1738 |         if sys.version_info[0] == 3:
1739 |             version = version.decode('utf-8')
1740 |         xcode4 = version.startswith('Xcode') and StrictVersion(
1741 |             version.split()[1]
1742 |         ) >= StrictVersion('4.0')
1743 |         xcode51 = re.match(r'^Xcode\s+5\.1', version) is not None
1744 |     else:
1745 |         # xcodebuild returns empty on OS X Lion with XCode 4.3 not
1746 |         # installed, but instead with only command-line tools. Assume
1747 |         # that only happens on >= Lion, thus no PPC support.
1748 |         xcode4 = True
1749 |         xcode51 = False
1750 |
1751 |     # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
1752 |     # distutils.sysconfig
1753 |     if xcode4:
1754 |         os.environ['ARCHFLAGS'] = ''
1755 |
1756 |     # XCode 5.1 changes clang such that it now fails to compile if the
1757 |     # -mno-fused-madd flag is passed, but the version of Python shipped with
1758 |     # OS X 10.9 Mavericks includes this flag. This causes problems in all
1759 |     # C extension modules, and a bug has been filed upstream at
1760 |     # http://bugs.python.org/issue21244. We also need to patch this here
1761 |     # so Mercurial can continue to compile in the meantime.
1762 |     if xcode51:
1763 |         cflags = get_config_var('CFLAGS')
1764 |         if cflags and re.search(r'-mno-fused-madd\b', cflags) is not None:
1765 |             os.environ['CFLAGS'] = (
1766 |                 os.environ.get('CFLAGS', '') + ' -Qunused-arguments'
1767 |             )
1768 |
1769 | setup(
1770 |     name='mercurial',
1771 |     version=setupversion,
1772 |     author='Matt Mackall and many others',
1773 |     author_email='mercurial@mercurial-scm.org',
1774 |     url='https://mercurial-scm.org/',
1775 |     download_url='https://mercurial-scm.org/release/',
1776 |     description=(
1777 |         'Fast scalable distributed SCM (revision control, version '
1778 |         'control) system'
1779 |     ),
1780 |     long_description=(
1781 |         'Mercurial is a distributed SCM tool written in Python.'
1782 |         ' It is used by a number of large projects that require'
1783 |         ' fast, reliable distributed revision control, such as '
1784 |         'Mozilla.'
1785 |     ),
1786 |     license='GNU GPLv2 or any later version',
1787 |     classifiers=[
1788 |         'Development Status :: 6 - Mature',
1789 |         'Environment :: Console',
1790 |         'Intended Audience :: Developers',
1791 |         'Intended Audience :: System Administrators',
1792 |         'License :: OSI Approved :: GNU General Public License (GPL)',
1793 |         'Natural Language :: Danish',
1794 |         'Natural Language :: English',
1795 |         'Natural Language :: German',
1796 |         'Natural Language :: Italian',
1797 |         'Natural Language :: Japanese',
1798 |         'Natural Language :: Portuguese (Brazilian)',
1799 |         'Operating System :: Microsoft :: Windows',
1800 |         'Operating System :: OS Independent',
1801 |         'Operating System :: POSIX',
1802 |         'Programming Language :: C',
1803 |         'Programming Language :: Python',
1804 |         'Topic :: Software Development :: Version Control',
1805 |     ],
1806 |     scripts=scripts,
1807 |     packages=packages,
1808 |     ext_modules=extmodules,
1809 |     data_files=datafiles,
1810 |     package_data=packagedata,
1811 |     cmdclass=cmdclass,
1812 |     distclass=hgdist,
1813 |     options={
1814 |         'py2exe': {
1815 |             'bundle_files': 3,
1816 |             'dll_excludes': py2exedllexcludes,
1817 |             'excludes': py2exeexcludes,
1818 |             'packages': py2exepackages,
1819 |         },
1820 |         'bdist_mpkg': {
1821 |             'zipdist': False,
1822 |             'license': 'COPYING',
1823 |             'readme': 'contrib/packaging/macosx/Readme.html',
1824 |             'welcome': 'contrib/packaging/macosx/Welcome.html',
1825 |         },
1826 |     },
1827 |     **extra
1828 | )
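Worked example (illustration, not part of the changeset): for a Python 3 build of hg-cpython with py3_features='python3' and HG_RUST_FEATURES unset, RustExtension.rustbuild() above assembles roughly this command:

cargocmd = ['cargo', 'rustc', '--release']
cargocmd.append('--no-default-features')     # py3_features is set on Python 3
cargocmd.extend(('--features', 'python3'))   # " ".join(['python3'])
cargocmd.append('--')
# i.e. cargo rustc --release --no-default-features --features python3 --
# executed with cwd='rust/hg-cpython'; on darwin, two extra -C link-arg
# options follow the '--' separator.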
@@ -1,83 +1,84 @@
1 | # ext-sidedata.py - small extension to test the sidedata logic
2 | #
3 | # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
4 | #
5 | # This software may be used and distributed according to the terms of the
6 | # GNU General Public License version 2 or any later version.
7 |
8 | from __future__ import absolute_import
9 |
10 | import hashlib
11 | import struct
12 |
13 | from mercurial import (
14 |     extensions,
15 |     node,
16 |     requirements,
17 |     revlog,
- 18 |     upgrade,
18 | )
19 |
+ 20 | from mercurial.upgrade_utils import engine as upgrade_engine
+ 21 |
22 | from mercurial.revlogutils import sidedata
23 |
24 |
25 | def wrapaddrevision(
26 |     orig, self, text, transaction, link, p1, p2, *args, **kwargs
27 | ):
28 |     if kwargs.get('sidedata') is None:
29 |         kwargs['sidedata'] = {}
30 |     sd = kwargs['sidedata']
31 |     ## let's store some arbitrary data just for testing
32 |     # text length
33 |     sd[sidedata.SD_TEST1] = struct.pack('>I', len(text))
34 |     # and sha2 hashes
35 |     sha256 = hashlib.sha256(text).digest()
36 |     sd[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
37 |     return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
38 |
39 |
40 | def wraprevision(orig, self, nodeorrev, *args, **kwargs):
41 |     text = orig(self, nodeorrev, *args, **kwargs)
42 |     if getattr(self, 'sidedatanocheck', False):
43 |         return text
44 |     if nodeorrev != node.nullrev and nodeorrev != node.nullid:
45 |         sd = self.sidedata(nodeorrev)
46 |         if len(text) != struct.unpack('>I', sd[sidedata.SD_TEST1])[0]:
47 |             raise RuntimeError('text size mismatch')
48 |         expected = sd[sidedata.SD_TEST2]
49 |         got = hashlib.sha256(text).digest()
50 |         if got != expected:
51 |             raise RuntimeError('sha256 mismatch')
52 |     return text
53 |
54 |
55 | def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
56 |     sidedatacompanion = orig(srcrepo, dstrepo)
57 |     addedreqs = dstrepo.requirements - srcrepo.requirements
58 |     if requirements.SIDEDATA_REQUIREMENT in addedreqs:
59 |         assert sidedatacompanion is None  # deal with composition later
60 |
61 |         def sidedatacompanion(revlog, rev):
62 |             update = {}
63 |             revlog.sidedatanocheck = True
64 |             try:
65 |                 text = revlog.revision(rev)
66 |             finally:
67 |                 del revlog.sidedatanocheck
68 |             ## let's store some arbitrary data just for testing
69 |             # text length
70 |             update[sidedata.SD_TEST1] = struct.pack('>I', len(text))
71 |             # and sha2 hashes
72 |             sha256 = hashlib.sha256(text).digest()
73 |             update[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
74 |             return False, (), update, 0, 0
75 |
76 |     return sidedatacompanion
77 |
78 |
79 | def extsetup(ui):
80 |     extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
81 |     extensions.wrapfunction(revlog.revlog, 'revision', wraprevision)
82 |     extensions.wrapfunction(
- 82 |         upgrade, 'getsidedatacompanion', wrapgetsidedatacompanion
+ 83 |         upgrade_engine, 'getsidedatacompanion', wrapgetsidedatacompanion
84 |     )
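For reference, a minimal standalone sketch (not part of the changeset) of the round trip the two wrappers above exercise: pack the text length and sha256 when a revision is added, recompute and compare them when it is read back.

import hashlib
import struct

text = b'some revision text'

# what wrapaddrevision stores as sidedata
stored_len = struct.pack('>I', len(text))
stored_sha = struct.pack('>32s', hashlib.sha256(text).digest())

# what wraprevision verifies on read
assert struct.unpack('>I', stored_len)[0] == len(text)
assert stored_sha == hashlib.sha256(text).digest()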