|
1 | NO CONTENT: new file 100644 |
@@ -1,546 +1,548 b'' | |||
|
1 | 1 | # wrapper.py - methods wrapping core mercurial logic |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2017 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import hashlib |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | from mercurial.node import bin, hex, nullid, short |
|
14 | 14 | from mercurial.pycompat import ( |
|
15 | 15 | getattr, |
|
16 | 16 | setattr, |
|
17 | 17 | ) |
|
18 | 18 | |
|
19 | 19 | from mercurial import ( |
|
20 | 20 | bundle2, |
|
21 | 21 | changegroup, |
|
22 | 22 | cmdutil, |
|
23 | 23 | context, |
|
24 | 24 | error, |
|
25 | 25 | exchange, |
|
26 | 26 | exthelper, |
|
27 | 27 | localrepo, |
|
28 | 28 | pycompat, |
|
29 | 29 | revlog, |
|
30 | 30 | scmutil, |
|
31 | 31 | upgrade, |
|
32 | 32 | util, |
|
33 | 33 | vfs as vfsmod, |
|
34 | 34 | wireprotov1server, |
|
35 | 35 | ) |
|
36 | 36 | |
|
37 | from mercurial.upgrade_utils import engine as upgrade_engine | |
|
38 | ||
|
37 | 39 | from mercurial.interfaces import repository |
|
38 | 40 | |
|
39 | 41 | from mercurial.utils import ( |
|
40 | 42 | storageutil, |
|
41 | 43 | stringutil, |
|
42 | 44 | ) |
|
43 | 45 | |
|
44 | 46 | from ..largefiles import lfutil |
|
45 | 47 | |
|
46 | 48 | from . import ( |
|
47 | 49 | blobstore, |
|
48 | 50 | pointer, |
|
49 | 51 | ) |
|
50 | 52 | |
|
51 | 53 | eh = exthelper.exthelper() |
|
52 | 54 | |
|
53 | 55 | |
|
54 | 56 | @eh.wrapfunction(localrepo, b'makefilestorage') |
|
55 | 57 | def localrepomakefilestorage(orig, requirements, features, **kwargs): |
|
56 | 58 | if b'lfs' in requirements: |
|
57 | 59 | features.add(repository.REPO_FEATURE_LFS) |
|
58 | 60 | |
|
59 | 61 | return orig(requirements=requirements, features=features, **kwargs) |
|
60 | 62 | |
|
61 | 63 | |
|
62 | 64 | @eh.wrapfunction(changegroup, b'allsupportedversions') |
|
63 | 65 | def allsupportedversions(orig, ui): |
|
64 | 66 | versions = orig(ui) |
|
65 | 67 | versions.add(b'03') |
|
66 | 68 | return versions |
|
67 | 69 | |
|
68 | 70 | |
|
69 | 71 | @eh.wrapfunction(wireprotov1server, b'_capabilities') |
|
70 | 72 | def _capabilities(orig, repo, proto): |
|
71 | 73 | '''Wrap server command to announce lfs server capability''' |
|
72 | 74 | caps = orig(repo, proto) |
|
73 | 75 | if util.safehasattr(repo.svfs, b'lfslocalblobstore'): |
|
74 | 76 | # Advertise a slightly different capability when lfs is *required*, so |
|
75 | 77 | # that the client knows it MUST load the extension. If lfs is not |
|
76 | 78 | # required on the server, there's no reason to autoload the extension |
|
77 | 79 | # on the client. |
|
78 | 80 | if b'lfs' in repo.requirements: |
|
79 | 81 | caps.append(b'lfs-serve') |
|
80 | 82 | |
|
81 | 83 | caps.append(b'lfs') |
|
82 | 84 | return caps |
|
83 | 85 | |
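For illustration only, here is a hypothetical client-side reading of the two capability strings advertised above (this is not code from the extension): b'lfs-serve' tells the client it must load the lfs extension, while plain b'lfs' only says the server can speak the protocol.

    # Hypothetical client-side check, assuming a capability list as sent by a
    # server that has 'lfs' in its requirements.
    caps = [b'lfs-serve', b'lfs']
    must_load_lfs = b'lfs-serve' in caps   # client MUST enable the lfs extension
    may_use_lfs = b'lfs' in caps           # server understands lfs either way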
|
84 | 86 | |
|
85 | 87 | def bypasscheckhash(self, text): |
|
86 | 88 | return False |
|
87 | 89 | |
|
88 | 90 | |
|
89 | 91 | def readfromstore(self, text): |
|
90 | 92 | """Read filelog content from local blobstore transform for flagprocessor. |
|
91 | 93 | |
|
92 | 94 | Default transform for flagprocessor, returning contents from blobstore. |
|
93 | 95 | Returns a 2-tuple (text, validatehash) where validatehash is True as the |
|
94 | 96 | contents of the blobstore should be checked using checkhash. |
|
95 | 97 | """ |
|
96 | 98 | p = pointer.deserialize(text) |
|
97 | 99 | oid = p.oid() |
|
98 | 100 | store = self.opener.lfslocalblobstore |
|
99 | 101 | if not store.has(oid): |
|
100 | 102 | p.filename = self.filename |
|
101 | 103 | self.opener.lfsremoteblobstore.readbatch([p], store) |
|
102 | 104 | |
|
103 | 105 | # The caller will validate the content |
|
104 | 106 | text = store.read(oid, verify=False) |
|
105 | 107 | |
|
106 | 108 | # pack hg filelog metadata |
|
107 | 109 | hgmeta = {} |
|
108 | 110 | for k in p.keys(): |
|
109 | 111 | if k.startswith(b'x-hg-'): |
|
110 | 112 | name = k[len(b'x-hg-') :] |
|
111 | 113 | hgmeta[name] = p[k] |
|
112 | 114 | if hgmeta or text.startswith(b'\1\n'): |
|
113 | 115 | text = storageutil.packmeta(hgmeta, text) |
|
114 | 116 | |
|
115 | 117 | return (text, True, {}) |
|
116 | 118 | |
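For orientation, here is a standalone sketch (not the extension's own parser) of how the b'x-hg-' keys carried inside a git-lfs pointer map back to hg filelog metadata names, which is what the loop above does before repacking the metadata. The sample oid and copyrev values are placeholders.

    SAMPLE_POINTER = (
        b'version https://git-lfs.github.com/spec/v1\n'
        b'oid sha256:' + b'0' * 64 + b'\n'      # placeholder oid
        b'size 12\n'
        b'x-hg-copy foo.txt\n'
        b'x-hg-copyrev ' + b'1' * 40 + b'\n'    # placeholder copy source revision
    )

    def hgmeta_from_pointer(data):
        """Return {name: value} for every 'x-hg-' key in a pointer blob."""
        meta = {}
        for line in data.splitlines():
            key, _, value = line.partition(b' ')
            if key.startswith(b'x-hg-'):
                meta[key[len(b'x-hg-'):]] = value
        return meta

    print(hgmeta_from_pointer(SAMPLE_POINTER))  # {b'copy': b'foo.txt', b'copyrev': ...}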
|
117 | 119 | |
|
118 | 120 | def writetostore(self, text, sidedata): |
|
119 | 121 | # hg filelog metadata (includes rename, etc) |
|
120 | 122 | hgmeta, offset = storageutil.parsemeta(text) |
|
121 | 123 | if offset and offset > 0: |
|
122 | 124 | # lfs blob does not contain hg filelog metadata |
|
123 | 125 | text = text[offset:] |
|
124 | 126 | |
|
125 | 127 | # git-lfs only supports sha256 |
|
126 | 128 | oid = hex(hashlib.sha256(text).digest()) |
|
127 | 129 | self.opener.lfslocalblobstore.write(oid, text) |
|
128 | 130 | |
|
129 | 131 | # replace contents with metadata |
|
130 | 132 | longoid = b'sha256:%s' % oid |
|
131 | 133 | metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text)) |
|
132 | 134 | |
|
133 | 135 | # by default, we expect the content to be binary. however, LFS could also |
|
134 | 136 | # be used for non-binary content. add a special entry for non-binary data. |
|
135 | 137 | # this will be used by filectx.isbinary(). |
|
136 | 138 | if not stringutil.binary(text): |
|
137 | 139 | # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix |
|
138 | 140 | metadata[b'x-is-binary'] = b'0' |
|
139 | 141 | |
|
140 | 142 | # translate hg filelog metadata to lfs metadata with "x-hg-" prefix |
|
141 | 143 | if hgmeta is not None: |
|
142 | 144 | for k, v in pycompat.iteritems(hgmeta): |
|
143 | 145 | metadata[b'x-hg-%s' % k] = v |
|
144 | 146 | |
|
145 | 147 | rawtext = metadata.serialize() |
|
146 | 148 | return (rawtext, False) |
|
147 | 149 | |
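The stored replacement is a plain git-lfs pointer keyed by the sha256 of the payload. A minimal standalone sketch of that shape follows; the extension itself uses pointer.gitlfspointer and also records the x-hg-* and x-is-binary keys shown above.

    import hashlib

    def make_pointer(text):
        """Minimal git-lfs pointer for ``text`` (sketch only)."""
        oid = hashlib.sha256(text).hexdigest().encode('ascii')
        return (
            b'version https://git-lfs.github.com/spec/v1\n'
            b'oid sha256:%s\n'
            b'size %d\n'
        ) % (oid, len(text))

    print(make_pointer(b'some large payload\n').decode('ascii'))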
|
148 | 150 | |
|
149 | 151 | def _islfs(rlog, node=None, rev=None): |
|
150 | 152 | if rev is None: |
|
151 | 153 | if node is None: |
|
152 | 154 | # both None - likely working copy content where node is not ready |
|
153 | 155 | return False |
|
154 | 156 | rev = rlog.rev(node) |
|
155 | 157 | else: |
|
156 | 158 | node = rlog.node(rev) |
|
157 | 159 | if node == nullid: |
|
158 | 160 | return False |
|
159 | 161 | flags = rlog.flags(rev) |
|
160 | 162 | return bool(flags & revlog.REVIDX_EXTSTORED) |
|
161 | 163 | |
|
162 | 164 | |
|
163 | 165 | # Wrapping may also be applied by remotefilelog |
|
164 | 166 | def filelogaddrevision( |
|
165 | 167 | orig, |
|
166 | 168 | self, |
|
167 | 169 | text, |
|
168 | 170 | transaction, |
|
169 | 171 | link, |
|
170 | 172 | p1, |
|
171 | 173 | p2, |
|
172 | 174 | cachedelta=None, |
|
173 | 175 | node=None, |
|
174 | 176 | flags=revlog.REVIDX_DEFAULT_FLAGS, |
|
175 | 177 | **kwds |
|
176 | 178 | ): |
|
177 | 179 | # The matcher isn't available if reposetup() wasn't called. |
|
178 | 180 | lfstrack = self._revlog.opener.options.get(b'lfstrack') |
|
179 | 181 | |
|
180 | 182 | if lfstrack: |
|
181 | 183 | textlen = len(text) |
|
182 | 184 | # exclude hg rename meta from file size |
|
183 | 185 | meta, offset = storageutil.parsemeta(text) |
|
184 | 186 | if offset: |
|
185 | 187 | textlen -= offset |
|
186 | 188 | |
|
187 | 189 | if lfstrack(self._revlog.filename, textlen): |
|
188 | 190 | flags |= revlog.REVIDX_EXTSTORED |
|
189 | 191 | |
|
190 | 192 | return orig( |
|
191 | 193 | self, |
|
192 | 194 | text, |
|
193 | 195 | transaction, |
|
194 | 196 | link, |
|
195 | 197 | p1, |
|
196 | 198 | p2, |
|
197 | 199 | cachedelta=cachedelta, |
|
198 | 200 | node=node, |
|
199 | 201 | flags=flags, |
|
200 | 202 | **kwds |
|
201 | 203 | ) |
|
202 | 204 | |
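The b'lfstrack' option holds a predicate built from the [lfs] track configuration; it receives the file name and the payload length with rename metadata excluded. A hypothetical predicate of the same shape, for illustration only:

    # Hypothetical stand-in for the matcher stored under opener.options[b'lfstrack']:
    # track anything over 10 MiB, plus all Photoshop files regardless of size.
    def lfstrack(path, size):
        return size > 10 * 1024 * 1024 or path.endswith(b'.psd')

    assert lfstrack(b'art/logo.psd', 4096)
    assert not lfstrack(b'README.txt', 4096)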
|
203 | 205 | |
|
204 | 206 | # Wrapping may also be applied by remotefilelog |
|
205 | 207 | def filelogrenamed(orig, self, node): |
|
206 | 208 | if _islfs(self._revlog, node): |
|
207 | 209 | rawtext = self._revlog.rawdata(node) |
|
208 | 210 | if not rawtext: |
|
209 | 211 | return False |
|
210 | 212 | metadata = pointer.deserialize(rawtext) |
|
211 | 213 | if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata: |
|
212 | 214 | return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev']) |
|
213 | 215 | else: |
|
214 | 216 | return False |
|
215 | 217 | return orig(self, node) |
|
216 | 218 | |
|
217 | 219 | |
|
218 | 220 | # Wrapping may also be applied by remotefilelog |
|
219 | 221 | def filelogsize(orig, self, rev): |
|
220 | 222 | if _islfs(self._revlog, rev=rev): |
|
221 | 223 | # fast path: use lfs metadata to answer size |
|
222 | 224 | rawtext = self._revlog.rawdata(rev) |
|
223 | 225 | metadata = pointer.deserialize(rawtext) |
|
224 | 226 | return int(metadata[b'size']) |
|
225 | 227 | return orig(self, rev) |
|
226 | 228 | |
|
227 | 229 | |
|
228 | 230 | @eh.wrapfunction(revlog, b'_verify_revision') |
|
229 | 231 | def _verify_revision(orig, rl, skipflags, state, node): |
|
230 | 232 | if _islfs(rl, node=node): |
|
231 | 233 | rawtext = rl.rawdata(node) |
|
232 | 234 | metadata = pointer.deserialize(rawtext) |
|
233 | 235 | |
|
234 | 236 | # Don't skip blobs that are stored locally, as local verification is |
|
235 | 237 | # relatively cheap and there's no other way to verify the raw data in |
|
236 | 238 | # the revlog. |
|
237 | 239 | if rl.opener.lfslocalblobstore.has(metadata.oid()): |
|
238 | 240 | skipflags &= ~revlog.REVIDX_EXTSTORED |
|
239 | 241 | elif skipflags & revlog.REVIDX_EXTSTORED: |
|
240 | 242 | # The wrapped method will set `skipread`, but there's enough local |
|
241 | 243 | # info to check renames. |
|
242 | 244 | state[b'safe_renamed'].add(node) |
|
243 | 245 | |
|
244 | 246 | orig(rl, skipflags, state, node) |
|
245 | 247 | |
|
246 | 248 | |
|
247 | 249 | @eh.wrapfunction(context.basefilectx, b'cmp') |
|
248 | 250 | def filectxcmp(orig, self, fctx): |
|
249 | 251 | """returns True if text is different than fctx""" |
|
250 | 252 | # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs |
|
251 | 253 | if self.islfs() and getattr(fctx, 'islfs', lambda: False)(): |
|
252 | 254 | # fast path: check LFS oid |
|
253 | 255 | p1 = pointer.deserialize(self.rawdata()) |
|
254 | 256 | p2 = pointer.deserialize(fctx.rawdata()) |
|
255 | 257 | return p1.oid() != p2.oid() |
|
256 | 258 | return orig(self, fctx) |
|
257 | 259 | |
|
258 | 260 | |
|
259 | 261 | @eh.wrapfunction(context.basefilectx, b'isbinary') |
|
260 | 262 | def filectxisbinary(orig, self): |
|
261 | 263 | if self.islfs(): |
|
262 | 264 | # fast path: use lfs metadata to answer isbinary |
|
263 | 265 | metadata = pointer.deserialize(self.rawdata()) |
|
264 | 266 | # if lfs metadata says nothing, assume it's binary by default |
|
265 | 267 | return bool(int(metadata.get(b'x-is-binary', 1))) |
|
266 | 268 | return orig(self) |
|
267 | 269 | |
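The convention above is that a missing x-is-binary key means the blob is binary. A tiny standalone mirror of that default:

    def isbinary(meta):
        # mirrors the default above: an absent x-is-binary key means "binary"
        return bool(int(meta.get(b'x-is-binary', 1)))

    assert isbinary({})
    assert not isbinary({b'x-is-binary': b'0'})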
|
268 | 270 | |
|
269 | 271 | def filectxislfs(self): |
|
270 | 272 | return _islfs(self.filelog()._revlog, self.filenode()) |
|
271 | 273 | |
|
272 | 274 | |
|
273 | 275 | @eh.wrapfunction(cmdutil, b'_updatecatformatter') |
|
274 | 276 | def _updatecatformatter(orig, fm, ctx, matcher, path, decode): |
|
275 | 277 | orig(fm, ctx, matcher, path, decode) |
|
276 | 278 | fm.data(rawdata=ctx[path].rawdata()) |
|
277 | 279 | |
|
278 | 280 | |
|
279 | 281 | @eh.wrapfunction(scmutil, b'wrapconvertsink') |
|
280 | 282 | def convertsink(orig, sink): |
|
281 | 283 | sink = orig(sink) |
|
282 | 284 | if sink.repotype == b'hg': |
|
283 | 285 | |
|
284 | 286 | class lfssink(sink.__class__): |
|
285 | 287 | def putcommit( |
|
286 | 288 | self, |
|
287 | 289 | files, |
|
288 | 290 | copies, |
|
289 | 291 | parents, |
|
290 | 292 | commit, |
|
291 | 293 | source, |
|
292 | 294 | revmap, |
|
293 | 295 | full, |
|
294 | 296 | cleanp2, |
|
295 | 297 | ): |
|
296 | 298 | pc = super(lfssink, self).putcommit |
|
297 | 299 | node = pc( |
|
298 | 300 | files, |
|
299 | 301 | copies, |
|
300 | 302 | parents, |
|
301 | 303 | commit, |
|
302 | 304 | source, |
|
303 | 305 | revmap, |
|
304 | 306 | full, |
|
305 | 307 | cleanp2, |
|
306 | 308 | ) |
|
307 | 309 | |
|
308 | 310 | if b'lfs' not in self.repo.requirements: |
|
309 | 311 | ctx = self.repo[node] |
|
310 | 312 | |
|
311 | 313 | # The file list may contain removed files, so check for |
|
312 | 314 | # membership before assuming it is in the context. |
|
313 | 315 | if any(f in ctx and ctx[f].islfs() for f, n in files): |
|
314 | 316 | self.repo.requirements.add(b'lfs') |
|
315 | 317 | scmutil.writereporequirements(self.repo) |
|
316 | 318 | |
|
317 | 319 | return node |
|
318 | 320 | |
|
319 | 321 | sink.__class__ = lfssink |
|
320 | 322 | |
|
321 | 323 | return sink |
|
322 | 324 | |
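The dynamic subclassing above (swapping sink.__class__) is a common wrapping trick when the concrete class is only known at runtime. A minimal standalone sketch of the same pattern, assuming nothing from Mercurial:

    class sink(object):
        def putcommit(self, rev):
            return b'node-for-%d' % rev

    def wrapsink(s):
        class lfssink(s.__class__):
            def putcommit(self, rev):
                node = super(lfssink, self).putcommit(rev)
                # post-processing goes here (the real sink adds the 'lfs' requirement)
                return node
        s.__class__ = lfssink
        return s

    s = wrapsink(sink())
    assert s.putcommit(0) == b'node-for-0'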
|
323 | 325 | |
|
324 | 326 | # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs |
|
325 | 327 | # options and blob stores are passed from othervfs to the new readonlyvfs. |
|
326 | 328 | @eh.wrapfunction(vfsmod.readonlyvfs, b'__init__') |
|
327 | 329 | def vfsinit(orig, self, othervfs): |
|
328 | 330 | orig(self, othervfs) |
|
329 | 331 | # copy lfs related options |
|
330 | 332 | for k, v in othervfs.options.items(): |
|
331 | 333 | if k.startswith(b'lfs'): |
|
332 | 334 | self.options[k] = v |
|
333 | 335 | # also copy lfs blobstores. note: this can run before reposetup, so lfs |
|
334 | 336 | # blobstore attributes are not always ready at this time. |
|
335 | 337 | for name in [b'lfslocalblobstore', b'lfsremoteblobstore']: |
|
336 | 338 | if util.safehasattr(othervfs, name): |
|
337 | 339 | setattr(self, name, getattr(othervfs, name)) |
|
338 | 340 | |
|
339 | 341 | |
|
340 | 342 | def _prefetchfiles(repo, revmatches): |
|
341 | 343 | """Ensure that required LFS blobs are present, fetching them as a group if |
|
342 | 344 | needed.""" |
|
343 | 345 | if not util.safehasattr(repo.svfs, b'lfslocalblobstore'): |
|
344 | 346 | return |
|
345 | 347 | |
|
346 | 348 | pointers = [] |
|
347 | 349 | oids = set() |
|
348 | 350 | localstore = repo.svfs.lfslocalblobstore |
|
349 | 351 | |
|
350 | 352 | for rev, match in revmatches: |
|
351 | 353 | ctx = repo[rev] |
|
352 | 354 | for f in ctx.walk(match): |
|
353 | 355 | p = pointerfromctx(ctx, f) |
|
354 | 356 | if p and p.oid() not in oids and not localstore.has(p.oid()): |
|
355 | 357 | p.filename = f |
|
356 | 358 | pointers.append(p) |
|
357 | 359 | oids.add(p.oid()) |
|
358 | 360 | |
|
359 | 361 | if pointers: |
|
360 | 362 | # Recalculating the repo store here allows 'paths.default' that is set |
|
361 | 363 | # on the repo by a clone command to be used for the update. |
|
362 | 364 | blobstore.remote(repo).readbatch(pointers, localstore) |
|
363 | 365 | |
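Collecting pointers first and de-duplicating by oid lets the remote store fetch everything in one batch. A tiny standalone sketch of that de-duplication; the dict-shaped pointer here is hypothetical:

    def dedupe_by_oid(pointers):
        """Keep one pointer per oid, preserving order (same idea as the loop above)."""
        seen, batch = set(), []
        for p in pointers:
            oid = p[b'oid']              # hypothetical dict-shaped pointer
            if oid not in seen:
                seen.add(oid)
                batch.append(p)
        return batch

    ps = [{b'oid': b'aa'}, {b'oid': b'bb'}, {b'oid': b'aa'}]
    assert [p[b'oid'] for p in dedupe_by_oid(ps)] == [b'aa', b'bb']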
|
364 | 366 | |
|
365 | 367 | def _canskipupload(repo): |
|
366 | 368 | # Skip if this hasn't been passed to reposetup() |
|
367 | 369 | if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): |
|
368 | 370 | return True |
|
369 | 371 | |
|
370 | 372 | # if remotestore is a null store, upload is a no-op and can be skipped |
|
371 | 373 | return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) |
|
372 | 374 | |
|
373 | 375 | |
|
374 | 376 | def candownload(repo): |
|
375 | 377 | # Skip if this hasn't been passed to reposetup() |
|
376 | 378 | if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): |
|
377 | 379 | return False |
|
378 | 380 | |
|
379 | 381 | # if remotestore is a null store, downloads will lead to nothing |
|
380 | 382 | return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) |
|
381 | 383 | |
|
382 | 384 | |
|
383 | 385 | def uploadblobsfromrevs(repo, revs): |
|
384 | 386 | """upload lfs blobs introduced by revs |
|
385 | 387 | |
|
386 | 388 | Note: also used by other extensions, e.g. infinitepush. Avoid renaming. |
|
387 | 389 | """ |
|
388 | 390 | if _canskipupload(repo): |
|
389 | 391 | return |
|
390 | 392 | pointers = extractpointers(repo, revs) |
|
391 | 393 | uploadblobs(repo, pointers) |
|
392 | 394 | |
|
393 | 395 | |
|
394 | 396 | def prepush(pushop): |
|
395 | 397 | """Prepush hook. |
|
396 | 398 | |
|
397 | 399 | Read through the revisions to push, looking for filelog entries that can be |
|
398 | 400 | deserialized into metadata so that we can block the push on their upload to |
|
399 | 401 | the remote blobstore. |
|
400 | 402 | """ |
|
401 | 403 | return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) |
|
402 | 404 | |
|
403 | 405 | |
|
404 | 406 | @eh.wrapfunction(exchange, b'push') |
|
405 | 407 | def push(orig, repo, remote, *args, **kwargs): |
|
406 | 408 | """bail on push if the extension isn't enabled on remote when needed, and |
|
407 | 409 | update the remote store based on the destination path.""" |
|
408 | 410 | if b'lfs' in repo.requirements: |
|
409 | 411 | # If the remote peer is for a local repo, the requirement tests in the |
|
410 | 412 | # base class method enforce lfs support. Otherwise, some revisions in |
|
411 | 413 | # this repo use lfs, and the remote repo needs the extension loaded. |
|
412 | 414 | if not remote.local() and not remote.capable(b'lfs'): |
|
413 | 415 | # This is a copy of the message in exchange.push() when requirements |
|
414 | 416 | # are missing between local repos. |
|
415 | 417 | m = _(b"required features are not supported in the destination: %s") |
|
416 | 418 | raise error.Abort( |
|
417 | 419 | m % b'lfs', hint=_(b'enable the lfs extension on the server') |
|
418 | 420 | ) |
|
419 | 421 | |
|
420 | 422 | # Repositories where this extension is disabled won't have the field. |
|
421 | 423 | # But if there's a requirement, then the extension must be loaded AND |
|
422 | 424 | # there may be blobs to push. |
|
423 | 425 | remotestore = repo.svfs.lfsremoteblobstore |
|
424 | 426 | try: |
|
425 | 427 | repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url()) |
|
426 | 428 | return orig(repo, remote, *args, **kwargs) |
|
427 | 429 | finally: |
|
428 | 430 | repo.svfs.lfsremoteblobstore = remotestore |
|
429 | 431 | else: |
|
430 | 432 | return orig(repo, remote, *args, **kwargs) |
|
431 | 433 | |
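The try/finally above temporarily points repo.svfs.lfsremoteblobstore at a store derived from the push destination and then restores it. As a standalone illustration of that save/replace/restore pattern (not code from the extension):

    import contextlib

    @contextlib.contextmanager
    def swapped(obj, name, value):
        """Temporarily replace an attribute, restoring it afterwards."""
        saved = getattr(obj, name)
        setattr(obj, name, value)
        try:
            yield
        finally:
            setattr(obj, name, saved)

    class box(object):
        store = b'default'

    b = box()
    with swapped(b, 'store', b'push-specific'):
        assert b.store == b'push-specific'
    assert b.store == b'default'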
|
432 | 434 | |
|
433 | 435 | # when writing a bundle via "hg bundle" command, upload related LFS blobs |
|
434 | 436 | @eh.wrapfunction(bundle2, b'writenewbundle') |
|
435 | 437 | def writenewbundle( |
|
436 | 438 | orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs |
|
437 | 439 | ): |
|
438 | 440 | """upload LFS blobs added by outgoing revisions on 'hg bundle'""" |
|
439 | 441 | uploadblobsfromrevs(repo, outgoing.missing) |
|
440 | 442 | return orig( |
|
441 | 443 | ui, repo, source, filename, bundletype, outgoing, *args, **kwargs |
|
442 | 444 | ) |
|
443 | 445 | |
|
444 | 446 | |
|
445 | 447 | def extractpointers(repo, revs): |
|
446 | 448 | """return a list of lfs pointers added by given revs""" |
|
447 | 449 | repo.ui.debug(b'lfs: computing set of blobs to upload\n') |
|
448 | 450 | pointers = {} |
|
449 | 451 | |
|
450 | 452 | makeprogress = repo.ui.makeprogress |
|
451 | 453 | with makeprogress( |
|
452 | 454 | _(b'lfs search'), _(b'changesets'), len(revs) |
|
453 | 455 | ) as progress: |
|
454 | 456 | for r in revs: |
|
455 | 457 | ctx = repo[r] |
|
456 | 458 | for p in pointersfromctx(ctx).values(): |
|
457 | 459 | pointers[p.oid()] = p |
|
458 | 460 | progress.increment() |
|
459 | 461 | return sorted(pointers.values(), key=lambda p: p.oid()) |
|
460 | 462 | |
|
461 | 463 | |
|
462 | 464 | def pointerfromctx(ctx, f, removed=False): |
|
463 | 465 | """return a pointer for the named file from the given changectx, or None if |
|
464 | 466 | the file isn't LFS. |
|
465 | 467 | |
|
466 | 468 | Optionally, the pointer for a file deleted from the context can be returned. |
|
467 | 469 | Since no such pointer is actually stored, and to distinguish from a non LFS |
|
468 | 470 | file, this pointer is represented by an empty dict. |
|
469 | 471 | """ |
|
470 | 472 | _ctx = ctx |
|
471 | 473 | if f not in ctx: |
|
472 | 474 | if not removed: |
|
473 | 475 | return None |
|
474 | 476 | if f in ctx.p1(): |
|
475 | 477 | _ctx = ctx.p1() |
|
476 | 478 | elif f in ctx.p2(): |
|
477 | 479 | _ctx = ctx.p2() |
|
478 | 480 | else: |
|
479 | 481 | return None |
|
480 | 482 | fctx = _ctx[f] |
|
481 | 483 | if not _islfs(fctx.filelog()._revlog, fctx.filenode()): |
|
482 | 484 | return None |
|
483 | 485 | try: |
|
484 | 486 | p = pointer.deserialize(fctx.rawdata()) |
|
485 | 487 | if ctx == _ctx: |
|
486 | 488 | return p |
|
487 | 489 | return {} |
|
488 | 490 | except pointer.InvalidPointer as ex: |
|
489 | 491 | raise error.Abort( |
|
490 | 492 | _(b'lfs: corrupted pointer (%s@%s): %s\n') |
|
491 | 493 | % (f, short(_ctx.node()), ex) |
|
492 | 494 | ) |
|
493 | 495 | |
|
494 | 496 | |
|
495 | 497 | def pointersfromctx(ctx, removed=False): |
|
496 | 498 | """return a dict {path: pointer} for given single changectx. |
|
497 | 499 | |
|
498 | 500 | If ``removed`` == True and the LFS file was removed from ``ctx``, the value |
|
499 | 501 | stored for the path is an empty dict. |
|
500 | 502 | """ |
|
501 | 503 | result = {} |
|
502 | 504 | m = ctx.repo().narrowmatch() |
|
503 | 505 | |
|
504 | 506 | # TODO: consider manifest.fastread() instead |
|
505 | 507 | for f in ctx.files(): |
|
506 | 508 | if not m(f): |
|
507 | 509 | continue |
|
508 | 510 | p = pointerfromctx(ctx, f, removed=removed) |
|
509 | 511 | if p is not None: |
|
510 | 512 | result[f] = p |
|
511 | 513 | return result |
|
512 | 514 | |
|
513 | 515 | |
|
514 | 516 | def uploadblobs(repo, pointers): |
|
515 | 517 | """upload given pointers from local blobstore""" |
|
516 | 518 | if not pointers: |
|
517 | 519 | return |
|
518 | 520 | |
|
519 | 521 | remoteblob = repo.svfs.lfsremoteblobstore |
|
520 | 522 | remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) |
|
521 | 523 | |
|
522 | 524 | |
|
523 | @eh.wrapfunction(upgrade, b'_finishdatamigration') | |
|
525 | @eh.wrapfunction(upgrade_engine, b'_finishdatamigration') | |
|
524 | 526 | def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements): |
|
525 | 527 | orig(ui, srcrepo, dstrepo, requirements) |
|
526 | 528 | |
|
527 | 529 | # Skip if this hasn't been passed to reposetup() |
|
528 | 530 | if util.safehasattr( |
|
529 | 531 | srcrepo.svfs, b'lfslocalblobstore' |
|
530 | 532 | ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'): |
|
531 | 533 | srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs |
|
532 | 534 | dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs |
|
533 | 535 | |
|
534 | 536 | for dirpath, dirs, files in srclfsvfs.walk(): |
|
535 | 537 | for oid in files: |
|
536 | 538 | ui.write(_(b'copying lfs blob %s\n') % oid) |
|
537 | 539 | lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid)) |
|
538 | 540 | |
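Blobs are carried over by name (oid) using lfutil.link. As a rough standalone sketch of a hardlink-with-copy-fallback in the same spirit (this is not lfutil's actual implementation):

    import os, shutil

    def link_or_copy(src, dst):
        """Hardlink ``src`` to ``dst``, copying when hardlinks are unavailable."""
        try:
            os.link(src, dst)
        except OSError:
            shutil.copyfile(src, dst)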
|
539 | 541 | |
|
540 | 542 | @eh.wrapfunction(upgrade, b'preservedrequirements') |
|
541 | 543 | @eh.wrapfunction(upgrade, b'supporteddestrequirements') |
|
542 | 544 | def upgraderequirements(orig, repo): |
|
543 | 545 | reqs = orig(repo) |
|
544 | 546 | if b'lfs' in repo.requirements: |
|
545 | 547 | reqs.add(b'lfs') |
|
546 | 548 | return reqs |
This diff has been collapsed as it changes many lines (502 lines changed).
@@ -1,1492 +1,1012 b'' | |||
|
1 | 1 | # upgrade.py - functions for in place upgrade of Mercurial repository |
|
2 | 2 | # |
|
3 | 3 | # Copyright (c) 2016-present, Gregory Szorc |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | import stat | |
|
11 | ||
|
12 | 10 | from .i18n import _ |
|
13 | from .pycompat import getattr | |
|
14 | 11 | from . import ( |
|
15 | changelog, | |
|
16 | 12 | error, |
|
17 | filelog, | |
|
18 | 13 | hg, |
|
19 | 14 | localrepo, |
|
20 | manifest, | |
|
21 | metadata, | |
|
22 | 15 | pycompat, |
|
23 | 16 | requirements, |
|
24 | revlog, | |
|
25 | scmutil, | |
|
26 | 17 | util, |
|
27 | vfs as vfsmod, | |
|
18 | ) | |
|
19 | ||
|
20 | from .upgrade_utils import ( | |
|
21 | engine as upgrade_engine, | |
|
28 | 22 | ) |
|
29 | 23 | |
|
30 | 24 | from .utils import compression |
|
31 | 25 | |
|
32 | 26 | # list of requirements that request a clone of all revlog if added/removed |
|
33 | 27 | RECLONES_REQUIREMENTS = { |
|
34 | 28 | b'generaldelta', |
|
35 | 29 | requirements.SPARSEREVLOG_REQUIREMENT, |
|
36 | 30 | } |
|
37 | 31 | |
|
38 | 32 | |
|
39 | 33 | def requiredsourcerequirements(repo): |
|
40 | 34 | """Obtain requirements required to be present to upgrade a repo. |
|
41 | 35 | |
|
42 | 36 | An upgrade will not be allowed if the repository doesn't have the |
|
43 | 37 | requirements returned by this function. |
|
44 | 38 | """ |
|
45 | 39 | return { |
|
46 | 40 | # Introduced in Mercurial 0.9.2. |
|
47 | 41 | b'revlogv1', |
|
48 | 42 | # Introduced in Mercurial 0.9.2. |
|
49 | 43 | b'store', |
|
50 | 44 | } |
|
51 | 45 | |
|
52 | 46 | |
|
53 | 47 | def blocksourcerequirements(repo): |
|
54 | 48 | """Obtain requirements that will prevent an upgrade from occurring. |
|
55 | 49 | |
|
56 | 50 | An upgrade cannot be performed if the source repository contains a |
|
57 | 51 | requirement in the returned set. |
|
58 | 52 | """ |
|
59 | 53 | return { |
|
60 | 54 | # The upgrade code does not yet support these experimental features. |
|
61 | 55 | # This is an artificial limitation. |
|
62 | 56 | requirements.TREEMANIFEST_REQUIREMENT, |
|
63 | 57 | # This was a precursor to generaldelta and was never enabled by default. |
|
64 | 58 | # It should (hopefully) not exist in the wild. |
|
65 | 59 | b'parentdelta', |
|
66 | 60 | # Upgrade should operate on the actual store, not the shared link. |
|
67 | 61 | requirements.SHARED_REQUIREMENT, |
|
68 | 62 | } |
|
69 | 63 | |
|
70 | 64 | |
|
71 | 65 | def supportremovedrequirements(repo): |
|
72 | 66 | """Obtain requirements that can be removed during an upgrade. |
|
73 | 67 | |
|
74 | 68 | If an upgrade were to create a repository that dropped a requirement, |
|
75 | 69 | the dropped requirement must appear in the returned set for the upgrade |
|
76 | 70 | to be allowed. |
|
77 | 71 | """ |
|
78 | 72 | supported = { |
|
79 | 73 | requirements.SPARSEREVLOG_REQUIREMENT, |
|
80 | 74 | requirements.SIDEDATA_REQUIREMENT, |
|
81 | 75 | requirements.COPIESSDC_REQUIREMENT, |
|
82 | 76 | requirements.NODEMAP_REQUIREMENT, |
|
83 | 77 | requirements.SHARESAFE_REQUIREMENT, |
|
84 | 78 | } |
|
85 | 79 | for name in compression.compengines: |
|
86 | 80 | engine = compression.compengines[name] |
|
87 | 81 | if engine.available() and engine.revlogheader(): |
|
88 | 82 | supported.add(b'exp-compression-%s' % name) |
|
89 | 83 | if engine.name() == b'zstd': |
|
90 | 84 | supported.add(b'revlog-compression-zstd') |
|
91 | 85 | return supported |
|
92 | 86 | |
|
93 | 87 | |
|
94 | 88 | def supporteddestrequirements(repo): |
|
95 | 89 | """Obtain requirements that upgrade supports in the destination. |
|
96 | 90 | |
|
97 | 91 | If the result of the upgrade would create requirements not in this set, |
|
98 | 92 | the upgrade is disallowed. |
|
99 | 93 | |
|
100 | 94 | Extensions should monkeypatch this to add their custom requirements. |
|
101 | 95 | """ |
|
102 | 96 | supported = { |
|
103 | 97 | b'dotencode', |
|
104 | 98 | b'fncache', |
|
105 | 99 | b'generaldelta', |
|
106 | 100 | b'revlogv1', |
|
107 | 101 | b'store', |
|
108 | 102 | requirements.SPARSEREVLOG_REQUIREMENT, |
|
109 | 103 | requirements.SIDEDATA_REQUIREMENT, |
|
110 | 104 | requirements.COPIESSDC_REQUIREMENT, |
|
111 | 105 | requirements.NODEMAP_REQUIREMENT, |
|
112 | 106 | requirements.SHARESAFE_REQUIREMENT, |
|
113 | 107 | } |
|
114 | 108 | for name in compression.compengines: |
|
115 | 109 | engine = compression.compengines[name] |
|
116 | 110 | if engine.available() and engine.revlogheader(): |
|
117 | 111 | supported.add(b'exp-compression-%s' % name) |
|
118 | 112 | if engine.name() == b'zstd': |
|
119 | 113 | supported.add(b'revlog-compression-zstd') |
|
120 | 114 | return supported |
|
121 | 115 | |
|
122 | 116 | |
|
123 | 117 | def allowednewrequirements(repo): |
|
124 | 118 | """Obtain requirements that can be added to a repository during upgrade. |
|
125 | 119 | |
|
126 | 120 | This is used to disallow proposed requirements from being added when |
|
127 | 121 | they weren't present before. |
|
128 | 122 | |
|
129 | 123 | We use a list of allowed requirement additions instead of a list of known |
|
130 | 124 | bad additions because the whitelist approach is safer and will prevent |
|
131 | 125 | future, unknown requirements from accidentally being added. |
|
132 | 126 | """ |
|
133 | 127 | supported = { |
|
134 | 128 | b'dotencode', |
|
135 | 129 | b'fncache', |
|
136 | 130 | b'generaldelta', |
|
137 | 131 | requirements.SPARSEREVLOG_REQUIREMENT, |
|
138 | 132 | requirements.SIDEDATA_REQUIREMENT, |
|
139 | 133 | requirements.COPIESSDC_REQUIREMENT, |
|
140 | 134 | requirements.NODEMAP_REQUIREMENT, |
|
141 | 135 | requirements.SHARESAFE_REQUIREMENT, |
|
142 | 136 | } |
|
143 | 137 | for name in compression.compengines: |
|
144 | 138 | engine = compression.compengines[name] |
|
145 | 139 | if engine.available() and engine.revlogheader(): |
|
146 | 140 | supported.add(b'exp-compression-%s' % name) |
|
147 | 141 | if engine.name() == b'zstd': |
|
148 | 142 | supported.add(b'revlog-compression-zstd') |
|
149 | 143 | return supported |
|
150 | 144 | |
|
151 | 145 | |
|
152 | 146 | def preservedrequirements(repo): |
|
153 | 147 | return set() |
|
154 | 148 | |
|
155 | 149 | |
|
156 | 150 | DEFICIENCY = b'deficiency' |
|
157 | 151 | OPTIMISATION = b'optimization' |
|
158 | 152 | |
|
159 | 153 | |
|
160 | 154 | class improvement(object): |
|
161 | 155 | """Represents an improvement that can be made as part of an upgrade. |
|
162 | 156 | |
|
163 | 157 | The following attributes are defined on each instance: |
|
164 | 158 | |
|
165 | 159 | name |
|
166 | 160 | Machine-readable string uniquely identifying this improvement. It |
|
167 | 161 | will be mapped to an action later in the upgrade process. |
|
168 | 162 | |
|
169 | 163 | type |
|
170 | 164 | Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious |
|
171 | 165 | problem. An optimization is an action (sometimes optional) that |
|
172 | 166 | can be taken to further improve the state of the repository. |
|
173 | 167 | |
|
174 | 168 | description |
|
175 | 169 | Message intended for humans explaining the improvement in more detail, |
|
176 | 170 | including the implications of it. For ``DEFICIENCY`` types, should be |
|
177 | 171 | worded in the present tense. For ``OPTIMISATION`` types, should be |
|
178 | 172 | worded in the future tense. |
|
179 | 173 | |
|
180 | 174 | upgrademessage |
|
181 | 175 | Message intended for humans explaining what an upgrade addressing this |
|
182 | 176 | issue will do. Should be worded in the future tense. |
|
183 | 177 | """ |
|
184 | 178 | |
|
185 | 179 | def __init__(self, name, type, description, upgrademessage): |
|
186 | 180 | self.name = name |
|
187 | 181 | self.type = type |
|
188 | 182 | self.description = description |
|
189 | 183 | self.upgrademessage = upgrademessage |
|
190 | 184 | |
|
191 | 185 | def __eq__(self, other): |
|
192 | 186 | if not isinstance(other, improvement): |
|
193 | 187 | # This is what Python tells us to do |
|
194 | 188 | return NotImplemented |
|
195 | 189 | return self.name == other.name |
|
196 | 190 | |
|
197 | 191 | def __ne__(self, other): |
|
198 | 192 | return not (self == other) |
|
199 | 193 | |
|
200 | 194 | def __hash__(self): |
|
201 | 195 | return hash(self.name) |
|
202 | 196 | |
|
203 | 197 | |
|
204 | 198 | allformatvariant = [] |
|
205 | 199 | |
|
206 | 200 | |
|
207 | 201 | def registerformatvariant(cls): |
|
208 | 202 | allformatvariant.append(cls) |
|
209 | 203 | return cls |
|
210 | 204 | |
|
211 | 205 | |
|
212 | 206 | class formatvariant(improvement): |
|
213 | 207 | """an improvement subclass dedicated to repository format""" |
|
214 | 208 | |
|
215 | 209 | type = DEFICIENCY |
|
216 | 210 | ### The following attributes should be defined for each class: |
|
217 | 211 | |
|
218 | 212 | # machine-readable string uniquely identifying this improvement. it will be |
|
219 | 213 | # mapped to an action later in the upgrade process. |
|
220 | 214 | name = None |
|
221 | 215 | |
|
222 | 216 | # message intended for humans explaining the improvement in more detail, |
|
223 | 217 | # including the implications of it ``DEFICIENCY`` types, should be worded |
|
224 | 218 | # in the present tense. |
|
225 | 219 | description = None |
|
226 | 220 | |
|
227 | 221 | # message intended for humans explaining what an upgrade addressing this |
|
228 | 222 | # issue will do. should be worded in the future tense. |
|
229 | 223 | upgrademessage = None |
|
230 | 224 | |
|
231 | 225 | # value of current Mercurial default for new repository |
|
232 | 226 | default = None |
|
233 | 227 | |
|
234 | 228 | def __init__(self): |
|
235 | 229 | raise NotImplementedError() |
|
236 | 230 | |
|
237 | 231 | @staticmethod |
|
238 | 232 | def fromrepo(repo): |
|
239 | 233 | """current value of the variant in the repository""" |
|
240 | 234 | raise NotImplementedError() |
|
241 | 235 | |
|
242 | 236 | @staticmethod |
|
243 | 237 | def fromconfig(repo): |
|
244 | 238 | """current value of the variant in the configuration""" |
|
245 | 239 | raise NotImplementedError() |
|
246 | 240 | |
|
247 | 241 | |
|
248 | 242 | class requirementformatvariant(formatvariant): |
|
249 | 243 | """formatvariant based on a 'requirement' name. |
|
250 | 244 | |
|
251 | 245 | Many format variants are controlled by a 'requirement'. We define a small |
|
252 | 246 | subclass to factor the code. |
|
253 | 247 | """ |
|
254 | 248 | |
|
255 | 249 | # the requirement that control this format variant |
|
256 | 250 | _requirement = None |
|
257 | 251 | |
|
258 | 252 | @staticmethod |
|
259 | 253 | def _newreporequirements(ui): |
|
260 | 254 | return localrepo.newreporequirements( |
|
261 | 255 | ui, localrepo.defaultcreateopts(ui) |
|
262 | 256 | ) |
|
263 | 257 | |
|
264 | 258 | @classmethod |
|
265 | 259 | def fromrepo(cls, repo): |
|
266 | 260 | assert cls._requirement is not None |
|
267 | 261 | return cls._requirement in repo.requirements |
|
268 | 262 | |
|
269 | 263 | @classmethod |
|
270 | 264 | def fromconfig(cls, repo): |
|
271 | 265 | assert cls._requirement is not None |
|
272 | 266 | return cls._requirement in cls._newreporequirements(repo.ui) |
|
273 | 267 | |
|
274 | 268 | |
|
275 | 269 | @registerformatvariant |
|
276 | 270 | class fncache(requirementformatvariant): |
|
277 | 271 | name = b'fncache' |
|
278 | 272 | |
|
279 | 273 | _requirement = b'fncache' |
|
280 | 274 | |
|
281 | 275 | default = True |
|
282 | 276 | |
|
283 | 277 | description = _( |
|
284 | 278 | b'long and reserved filenames may not work correctly; ' |
|
285 | 279 | b'repository performance is sub-optimal' |
|
286 | 280 | ) |
|
287 | 281 | |
|
288 | 282 | upgrademessage = _( |
|
289 | 283 | b'repository will be more resilient to storing ' |
|
290 | 284 | b'certain paths and performance of certain ' |
|
291 | 285 | b'operations should be improved' |
|
292 | 286 | ) |
|
293 | 287 | |
|
294 | 288 | |
|
295 | 289 | @registerformatvariant |
|
296 | 290 | class dotencode(requirementformatvariant): |
|
297 | 291 | name = b'dotencode' |
|
298 | 292 | |
|
299 | 293 | _requirement = b'dotencode' |
|
300 | 294 | |
|
301 | 295 | default = True |
|
302 | 296 | |
|
303 | 297 | description = _( |
|
304 | 298 | b'storage of filenames beginning with a period or ' |
|
305 | 299 | b'space may not work correctly' |
|
306 | 300 | ) |
|
307 | 301 | |
|
308 | 302 | upgrademessage = _( |
|
309 | 303 | b'repository will be better able to store files ' |
|
310 | 304 | b'beginning with a space or period' |
|
311 | 305 | ) |
|
312 | 306 | |
|
313 | 307 | |
|
314 | 308 | @registerformatvariant |
|
315 | 309 | class generaldelta(requirementformatvariant): |
|
316 | 310 | name = b'generaldelta' |
|
317 | 311 | |
|
318 | 312 | _requirement = b'generaldelta' |
|
319 | 313 | |
|
320 | 314 | default = True |
|
321 | 315 | |
|
322 | 316 | description = _( |
|
323 | 317 | b'deltas within internal storage are unable to ' |
|
324 | 318 | b'choose optimal revisions; repository is larger and ' |
|
325 | 319 | b'slower than it could be; interaction with other ' |
|
326 | 320 | b'repositories may require extra network and CPU ' |
|
327 | 321 | b'resources, making "hg push" and "hg pull" slower' |
|
328 | 322 | ) |
|
329 | 323 | |
|
330 | 324 | upgrademessage = _( |
|
331 | 325 | b'repository storage will be able to create ' |
|
332 | 326 | b'optimal deltas; new repository data will be ' |
|
333 | 327 | b'smaller and read times should decrease; ' |
|
334 | 328 | b'interacting with other repositories using this ' |
|
335 | 329 | b'storage model should require less network and ' |
|
336 | 330 | b'CPU resources, making "hg push" and "hg pull" ' |
|
337 | 331 | b'faster' |
|
338 | 332 | ) |
|
339 | 333 | |
|
340 | 334 | |
|
341 | 335 | @registerformatvariant |
|
342 | 336 | class sharedsafe(requirementformatvariant): |
|
343 | 337 | name = b'exp-sharesafe' |
|
344 | 338 | _requirement = requirements.SHARESAFE_REQUIREMENT |
|
345 | 339 | |
|
346 | 340 | default = False |
|
347 | 341 | |
|
348 | 342 | description = _( |
|
349 | 343 | b'old shared repositories do not share source repository ' |
|
350 | 344 | b'requirements and config. This leads to various problems ' |
|
351 | 345 | b'when the source repository format is upgraded or some new ' |
|
352 | 346 | b'extensions are enabled.' |
|
353 | 347 | ) |
|
354 | 348 | |
|
355 | 349 | upgrademessage = _( |
|
356 | 350 | b'Upgrades a repository to share-safe format so that future ' |
|
357 | 351 | b'shares of this repository share its requirements and configs.' |
|
358 | 352 | ) |
|
359 | 353 | |
|
360 | 354 | |
|
361 | 355 | @registerformatvariant |
|
362 | 356 | class sparserevlog(requirementformatvariant): |
|
363 | 357 | name = b'sparserevlog' |
|
364 | 358 | |
|
365 | 359 | _requirement = requirements.SPARSEREVLOG_REQUIREMENT |
|
366 | 360 | |
|
367 | 361 | default = True |
|
368 | 362 | |
|
369 | 363 | description = _( |
|
370 | 364 | b'in order to limit disk reading and memory usage on older ' |
|
371 | 365 | b'version, the span of a delta chain from its root to its ' |
|
372 | 366 | b'end is limited, whatever the relevant data in this span. ' |
|
373 | 367 | b'This can severly limit Mercurial ability to build good ' |
|
374 | 368 | b'chain of delta resulting is much more storage space being ' |
|
375 | 369 | b'taken and limit reusability of on disk delta during ' |
|
376 | 370 | b'exchange.' |
|
377 | 371 | ) |
|
378 | 372 | |
|
379 | 373 | upgrademessage = _( |
|
380 | 374 | b'Revlog supports delta chain with more unused data ' |
|
381 | 375 | b'between payload. These gaps will be skipped at read ' |
|
382 | 376 | b'time. This allows for better delta chains, making a ' |
|
383 | 377 | b'better compression and faster exchange with server.' |
|
384 | 378 | ) |
|
385 | 379 | |
|
386 | 380 | |
|
387 | 381 | @registerformatvariant |
|
388 | 382 | class sidedata(requirementformatvariant): |
|
389 | 383 | name = b'sidedata' |
|
390 | 384 | |
|
391 | 385 | _requirement = requirements.SIDEDATA_REQUIREMENT |
|
392 | 386 | |
|
393 | 387 | default = False |
|
394 | 388 | |
|
395 | 389 | description = _( |
|
396 | 390 | b'Allows storage of extra data alongside a revision, ' |
|
397 | 391 | b'unlocking various caching options.' |
|
398 | 392 | ) |
|
399 | 393 | |
|
400 | 394 | upgrademessage = _(b'Allows storage of extra data alongside a revision.') |
|
401 | 395 | |
|
402 | 396 | |
|
403 | 397 | @registerformatvariant |
|
404 | 398 | class persistentnodemap(requirementformatvariant): |
|
405 | 399 | name = b'persistent-nodemap' |
|
406 | 400 | |
|
407 | 401 | _requirement = requirements.NODEMAP_REQUIREMENT |
|
408 | 402 | |
|
409 | 403 | default = False |
|
410 | 404 | |
|
411 | 405 | description = _( |
|
412 | 406 | b'persist the node -> rev mapping on disk to speedup lookup' |
|
413 | 407 | ) |
|
414 | 408 | |
|
415 | 409 | upgrademessage = _(b'Speedup revision lookup by node id.') |
|
416 | 410 | |
|
417 | 411 | |
|
418 | 412 | @registerformatvariant |
|
419 | 413 | class copiessdc(requirementformatvariant): |
|
420 | 414 | name = b'copies-sdc' |
|
421 | 415 | |
|
422 | 416 | _requirement = requirements.COPIESSDC_REQUIREMENT |
|
423 | 417 | |
|
424 | 418 | default = False |
|
425 | 419 | |
|
426 | 420 | description = _(b'Stores copies information alongside changesets.') |
|
427 | 421 | |
|
428 | 422 | upgrademessage = _( |
|
429 | 423 | b'Allows using a more efficient algorithm to deal with ' b'copy tracing.' |
|
430 | 424 | ) |
|
431 | 425 | |
|
432 | 426 | |
|
433 | 427 | @registerformatvariant |
|
434 | 428 | class removecldeltachain(formatvariant): |
|
435 | 429 | name = b'plain-cl-delta' |
|
436 | 430 | |
|
437 | 431 | default = True |
|
438 | 432 | |
|
439 | 433 | description = _( |
|
440 | 434 | b'changelog storage is using deltas instead of ' |
|
441 | 435 | b'raw entries; changelog reading and any ' |
|
442 | 436 | b'operation relying on changelog data are slower ' |
|
443 | 437 | b'than they could be' |
|
444 | 438 | ) |
|
445 | 439 | |
|
446 | 440 | upgrademessage = _( |
|
447 | 441 | b'changelog storage will be reformatted to ' |
|
448 | 442 | b'store raw entries; changelog reading will be ' |
|
449 | 443 | b'faster; changelog size may be reduced' |
|
450 | 444 | ) |
|
451 | 445 | |
|
452 | 446 | @staticmethod |
|
453 | 447 | def fromrepo(repo): |
|
454 | 448 | # Mercurial 4.0 changed changelogs to not use delta chains. Search for |
|
455 | 449 | # changelogs with deltas. |
|
456 | 450 | cl = repo.changelog |
|
457 | 451 | chainbase = cl.chainbase |
|
458 | 452 | return all(rev == chainbase(rev) for rev in cl) |
|
459 | 453 | |
|
460 | 454 | @staticmethod |
|
461 | 455 | def fromconfig(repo): |
|
462 | 456 | return True |
|
463 | 457 | |
|
464 | 458 | |
|
465 | 459 | @registerformatvariant |
|
466 | 460 | class compressionengine(formatvariant): |
|
467 | 461 | name = b'compression' |
|
468 | 462 | default = b'zlib' |
|
469 | 463 | |
|
470 | 464 | description = _( |
|
471 | 465 | b'Compression algorithm used to compress data. ' |
|
472 | 466 | b'Some engines are faster than others' |
|
473 | 467 | ) |
|
474 | 468 | |
|
475 | 469 | upgrademessage = _( |
|
476 | 470 | b'revlog content will be recompressed with the new algorithm.' |
|
477 | 471 | ) |
|
478 | 472 | |
|
479 | 473 | @classmethod |
|
480 | 474 | def fromrepo(cls, repo): |
|
481 | 475 | # we allow multiple compression engine requirements to co-exist because, |
|
482 | 476 | # strictly speaking, revlogs seem to support mixed compression styles. |
|
483 | 477 | # |
|
484 | 478 | # The compression used for new entries will be "the last one" |
|
485 | 479 | compression = b'zlib' |
|
486 | 480 | for req in repo.requirements: |
|
487 | 481 | prefix = req.startswith |
|
488 | 482 | if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): |
|
489 | 483 | compression = req.split(b'-', 2)[2] |
|
490 | 484 | return compression |
|
491 | 485 | |
|
492 | 486 | @classmethod |
|
493 | 487 | def fromconfig(cls, repo): |
|
494 | 488 | compengines = repo.ui.configlist(b'format', b'revlog-compression') |
|
495 | 489 | # return the first valid value as the selection code would do |
|
496 | 490 | for comp in compengines: |
|
497 | 491 | if comp in util.compengines: |
|
498 | 492 | return comp |
|
499 | 493 | |
|
500 | 494 | # no valid compression found; let's display them all for clarity |
|
501 | 495 | return b','.join(compengines) |
|
502 | 496 | |
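Requirement names encode the engine after the second dash, which is why the code above uses split(b'-', 2)[2]. A quick standalone illustration:

    for req in (b'revlog-compression-zstd', b'exp-compression-zlib'):
        assert req.split(b'-', 2)[2] in (b'zstd', b'zlib')

    # maxsplit=2 keeps the remainder intact, so a hypothetical engine name that
    # itself contains a dash still comes out whole:
    assert b'exp-compression-foo-bar'.split(b'-', 2)[2] == b'foo-bar'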
|
503 | 497 | |
|
504 | 498 | @registerformatvariant |
|
505 | 499 | class compressionlevel(formatvariant): |
|
506 | 500 | name = b'compression-level' |
|
507 | 501 | default = b'default' |
|
508 | 502 | |
|
509 | 503 | description = _(b'compression level') |
|
510 | 504 | |
|
511 | 505 | upgrademessage = _(b'revlog content will be recompressed') |
|
512 | 506 | |
|
513 | 507 | @classmethod |
|
514 | 508 | def fromrepo(cls, repo): |
|
515 | 509 | comp = compressionengine.fromrepo(repo) |
|
516 | 510 | level = None |
|
517 | 511 | if comp == b'zlib': |
|
518 | 512 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') |
|
519 | 513 | elif comp == b'zstd': |
|
520 | 514 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') |
|
521 | 515 | if level is None: |
|
522 | 516 | return b'default' |
|
523 | 517 | return bytes(level) |
|
524 | 518 | |
|
525 | 519 | @classmethod |
|
526 | 520 | def fromconfig(cls, repo): |
|
527 | 521 | comp = compressionengine.fromconfig(repo) |
|
528 | 522 | level = None |
|
529 | 523 | if comp == b'zlib': |
|
530 | 524 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') |
|
531 | 525 | elif comp == b'zstd': |
|
532 | 526 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') |
|
533 | 527 | if level is None: |
|
534 | 528 | return b'default' |
|
535 | 529 | return bytes(level) |
|
536 | 530 | |
|
537 | 531 | |
|
538 | 532 | def finddeficiencies(repo): |
|
539 | 533 | """returns a list of deficiencies that the repo suffer from""" |
|
540 | 534 | deficiencies = [] |
|
541 | 535 | |
|
542 | 536 | # We could detect lack of revlogv1 and store here, but they were added |
|
543 | 537 | # in 0.9.2 and we don't support upgrading repos without these |
|
544 | 538 | # requirements, so let's not bother. |
|
545 | 539 | |
|
546 | 540 | for fv in allformatvariant: |
|
547 | 541 | if not fv.fromrepo(repo): |
|
548 | 542 | deficiencies.append(fv) |
|
549 | 543 | |
|
550 | 544 | return deficiencies |
|
551 | 545 | |
|
552 | 546 | |
|
553 | 547 | # search without '-' to support older form on newer client. |
|
554 | 548 | # |
|
555 | 549 | # We don't enforce backward compatibility for debug command so this |
|
556 | 550 | # might eventually be dropped. However, having to use two different |
|
557 | 551 | # forms in scripts when comparing results is annoying enough to add |
|
558 | 552 | # backward compatibility for a while. |
|
559 | 553 | legacy_opts_map = { |
|
560 | 554 | b'redeltaparent': b're-delta-parent', |
|
561 | 555 | b'redeltamultibase': b're-delta-multibase', |
|
562 | 556 | b'redeltaall': b're-delta-all', |
|
563 | 557 | b'redeltafulladd': b're-delta-fulladd', |
|
564 | 558 | } |
|
565 | 559 | |
|
566 | 560 | ALL_OPTIMISATIONS = [] |
|
567 | 561 | |
|
568 | 562 | |
|
569 | 563 | def register_optimization(obj): |
|
570 | 564 | ALL_OPTIMISATIONS.append(obj) |
|
571 | 565 | return obj |
|
572 | 566 | |
|
573 | 567 | |
|
574 | 568 | register_optimization( |
|
575 | 569 | improvement( |
|
576 | 570 | name=b're-delta-parent', |
|
577 | 571 | type=OPTIMISATION, |
|
578 | 572 | description=_( |
|
579 | 573 | b'deltas within internal storage will be recalculated to ' |
|
580 | 574 | b'choose an optimal base revision where this was not ' |
|
581 | 575 | b'already done; the size of the repository may shrink and ' |
|
582 | 576 | b'various operations may become faster; the first time ' |
|
583 | 577 | b'this optimization is performed could slow down upgrade ' |
|
584 | 578 | b'execution considerably; subsequent invocations should ' |
|
585 | 579 | b'not run noticeably slower' |
|
586 | 580 | ), |
|
587 | 581 | upgrademessage=_( |
|
588 | 582 | b'deltas within internal storage will choose a new ' |
|
589 | 583 | b'base revision if needed' |
|
590 | 584 | ), |
|
591 | 585 | ) |
|
592 | 586 | ) |
|
593 | 587 | |
|
594 | 588 | register_optimization( |
|
595 | 589 | improvement( |
|
596 | 590 | name=b're-delta-multibase', |
|
597 | 591 | type=OPTIMISATION, |
|
598 | 592 | description=_( |
|
599 | 593 | b'deltas within internal storage will be recalculated ' |
|
600 | 594 | b'against multiple base revision and the smallest ' |
|
601 | 595 | b'difference will be used; the size of the repository may ' |
|
602 | 596 | b'shrink significantly when there are many merges; this ' |
|
603 | 597 | b'optimization will slow down execution in proportion to ' |
|
604 | 598 | b'the number of merges in the repository and the amount ' |
|
605 | 599 | b'of files in the repository; this slow down should not ' |
|
606 | 600 | b'be significant unless there are tens of thousands of ' |
|
607 | 601 | b'files and thousands of merges' |
|
608 | 602 | ), |
|
609 | 603 | upgrademessage=_( |
|
610 | 604 | b'deltas within internal storage will choose an ' |
|
611 | 605 | b'optimal delta by computing deltas against multiple ' |
|
612 | 606 | b'parents; may slow down execution time ' |
|
613 | 607 | b'significantly' |
|
614 | 608 | ), |
|
615 | 609 | ) |
|
616 | 610 | ) |
|
617 | 611 | |
|
618 | 612 | register_optimization( |
|
619 | 613 | improvement( |
|
620 | 614 | name=b're-delta-all', |
|
621 | 615 | type=OPTIMISATION, |
|
622 | 616 | description=_( |
|
623 | 617 | b'deltas within internal storage will always be ' |
|
624 | 618 | b'recalculated without reusing prior deltas; this will ' |
|
625 | 619 | b'likely make execution run several times slower; this ' |
|
626 | 620 | b'optimization is typically not needed' |
|
627 | 621 | ), |
|
628 | 622 | upgrademessage=_( |
|
629 | 623 | b'deltas within internal storage will be fully ' |
|
630 | 624 | b'recomputed; this will likely drastically slow down ' |
|
631 | 625 | b'execution time' |
|
632 | 626 | ), |
|
633 | 627 | ) |
|
634 | 628 | ) |
|
635 | 629 | |
|
636 | 630 | register_optimization( |
|
637 | 631 | improvement( |
|
638 | 632 | name=b're-delta-fulladd', |
|
639 | 633 | type=OPTIMISATION, |
|
640 | 634 | description=_( |
|
641 | 635 | b'every revision will be re-added as if it was new ' |
|
642 | 636 | b'content. It will go through the full storage ' |
|
643 | 637 | b'mechanism giving extensions a chance to process it ' |
|
644 | 638 | b'(eg. lfs). This is similar to "re-delta-all" but even ' |
|
645 | 639 | b'slower since more logic is involved.' |
|
646 | 640 | ), |
|
647 | 641 | upgrademessage=_( |
|
648 | 642 | b'each revision will be added as new content to the ' |
|
649 | 643 | b'internal storage; this will likely drastically slow ' |
|
650 | 644 | b'down execution time, but some extensions might need ' |
|
651 | 645 | b'it' |
|
652 | 646 | ), |
|
653 | 647 | ) |
|
654 | 648 | ) |
|
655 | 649 | |
|
656 | 650 | |
|
657 | 651 | def findoptimizations(repo): |
|
658 | 652 | """Determine optimisation that could be used during upgrade""" |
|
659 | 653 | # These are unconditionally added. There is logic later that figures out |
|
660 | 654 | # which ones to apply. |
|
661 | 655 | return list(ALL_OPTIMISATIONS) |
|
662 | 656 | |
|
663 | 657 | |
|
664 | 658 | def determineactions(repo, deficiencies, sourcereqs, destreqs): |
|
665 | 659 | """Determine upgrade actions that will be performed. |
|
666 | 660 | |
|
667 | 661 | Given a list of improvements as returned by ``finddeficiencies`` and |
|
668 | 662 | ``findoptimizations``, determine the list of upgrade actions that |
|
669 | 663 | will be performed. |
|
670 | 664 | |
|
671 | 665 | The role of this function is to filter improvements if needed, apply |
|
672 | 666 | recommended optimizations from the improvements list that make sense, |
|
673 | 667 | etc. |
|
674 | 668 | |
|
675 | 669 | Returns a list of action names. |
|
676 | 670 | """ |
|
677 | 671 | newactions = [] |
|
678 | 672 | |
|
679 | 673 | for d in deficiencies: |
|
680 | 674 | name = d._requirement |
|
681 | 675 | |
|
682 | 676 | # If the action is a requirement that doesn't show up in the |
|
683 | 677 | # destination requirements, prune the action. |
|
684 | 678 | if name is not None and name not in destreqs: |
|
685 | 679 | continue |
|
686 | 680 | |
|
687 | 681 | newactions.append(d) |
|
688 | 682 | |
|
689 | 683 | # FUTURE consider adding some optimizations here for certain transitions. |
|
690 | 684 | # e.g. adding generaldelta could schedule parent redeltas. |
|
691 | 685 | |
|
692 | 686 | return newactions |
|
693 | 687 | |
|
694 | 688 | |
|
695 | def _revlogfrompath(repo, path): | |
|
696 | """Obtain a revlog from a repo path. | |
|
697 | ||
|
698 | An instance of the appropriate class is returned. | |
|
699 | """ | |
|
700 | if path == b'00changelog.i': | |
|
701 | return changelog.changelog(repo.svfs) | |
|
702 | elif path.endswith(b'00manifest.i'): | |
|
703 | mandir = path[: -len(b'00manifest.i')] | |
|
704 | return manifest.manifestrevlog(repo.svfs, tree=mandir) | |
|
705 | else: | |
|
706 | # reverse of "/".join(("data", path + ".i")) | |
|
707 | return filelog.filelog(repo.svfs, path[5:-2]) | |
|
708 | ||
|
709 | ||
|
710 | def _copyrevlog(tr, destrepo, oldrl, unencodedname): | |
|
711 | """copy all relevant files for `oldrl` into `destrepo` store | |
|
712 | ||
|
713 | Files are copied "as is" without any transformation. The copy is performed | |
|
714 | without extra checks. Callers are responsible for making sure the copied | |
|
715 | content is compatible with format of the destination repository. | |
|
716 | """ | |
|
717 | oldrl = getattr(oldrl, '_revlog', oldrl) | |
|
718 | newrl = _revlogfrompath(destrepo, unencodedname) | |
|
719 | newrl = getattr(newrl, '_revlog', newrl) | |
|
720 | ||
|
721 | oldvfs = oldrl.opener | |
|
722 | newvfs = newrl.opener | |
|
723 | oldindex = oldvfs.join(oldrl.indexfile) | |
|
724 | newindex = newvfs.join(newrl.indexfile) | |
|
725 | olddata = oldvfs.join(oldrl.datafile) | |
|
726 | newdata = newvfs.join(newrl.datafile) | |
|
727 | ||
|
728 | with newvfs(newrl.indexfile, b'w'): | |
|
729 | pass # create all the directories | |
|
730 | ||
|
731 | util.copyfile(oldindex, newindex) | |
|
732 | copydata = oldrl.opener.exists(oldrl.datafile) | |
|
733 | if copydata: | |
|
734 | util.copyfile(olddata, newdata) | |
|
735 | ||
|
736 | if not ( | |
|
737 | unencodedname.endswith(b'00changelog.i') | |
|
738 | or unencodedname.endswith(b'00manifest.i') | |
|
739 | ): | |
|
740 | destrepo.svfs.fncache.add(unencodedname) | |
|
741 | if copydata: | |
|
742 | destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d') | |
|
743 | ||
|
744 | ||
|
745 | UPGRADE_CHANGELOG = b"changelog" | |
|
746 | UPGRADE_MANIFEST = b"manifest" | |
|
747 | UPGRADE_FILELOGS = b"all-filelogs" | |
|
748 | ||
|
749 | UPGRADE_ALL_REVLOGS = frozenset( | |
|
750 | [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS] | |
|
751 | ) | |
|
752 | ||
|
753 | ||
|
754 | def getsidedatacompanion(srcrepo, dstrepo): | |
|
755 | sidedatacompanion = None | |
|
756 | removedreqs = srcrepo.requirements - dstrepo.requirements | |
|
757 | addedreqs = dstrepo.requirements - srcrepo.requirements | |
|
758 | if requirements.SIDEDATA_REQUIREMENT in removedreqs: | |
|
759 | ||
|
760 | def sidedatacompanion(rl, rev): | |
|
761 | rl = getattr(rl, '_revlog', rl) | |
|
762 | if rl.flags(rev) & revlog.REVIDX_SIDEDATA: | |
|
763 | return True, (), {}, 0, 0 | |
|
764 | return False, (), {}, 0, 0 | |
|
765 | ||
|
766 | elif requirements.COPIESSDC_REQUIREMENT in addedreqs: | |
|
767 | sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo) | |
|
768 | elif requirements.COPIESSDC_REQUIREMENT in removedreqs: | |
|
769 | sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo) | |
|
770 | return sidedatacompanion | |
|
771 | ||
|
772 | ||
|
773 | def matchrevlog(revlogfilter, entry): | |
|
774 | """check if a revlog is selected for cloning. | |
|
775 | ||
|
776 | In other words, are there any updates which need to be done on revlog | |
|
777 | or it can be blindly copied. | |
|
778 | ||
|
779 | The store entry is checked against the passed filter""" | |
|
780 | if entry.endswith(b'00changelog.i'): | |
|
781 | return UPGRADE_CHANGELOG in revlogfilter | |
|
782 | elif entry.endswith(b'00manifest.i'): | |
|
783 | return UPGRADE_MANIFEST in revlogfilter | |
|
784 | return UPGRADE_FILELOGS in revlogfilter | |
|
785 | ||
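A usage sketch for the filter above; the constants and the function body are copied here so the snippet runs standalone:

    UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS = (
        b'changelog', b'manifest', b'all-filelogs',
    )

    def matchrevlog(revlogfilter, entry):
        if entry.endswith(b'00changelog.i'):
            return UPGRADE_CHANGELOG in revlogfilter
        elif entry.endswith(b'00manifest.i'):
            return UPGRADE_MANIFEST in revlogfilter
        return UPGRADE_FILELOGS in revlogfilter

    assert matchrevlog({UPGRADE_FILELOGS}, b'data/src/main.c.i')
    assert not matchrevlog({UPGRADE_FILELOGS}, b'00manifest.i')
    assert matchrevlog({UPGRADE_CHANGELOG, UPGRADE_MANIFEST}, b'00changelog.i')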
|
786 | ||
|
787 | def _clonerevlogs( | |
|
788 | ui, | |
|
789 | srcrepo, | |
|
790 | dstrepo, | |
|
791 | tr, | |
|
792 | deltareuse, | |
|
793 | forcedeltabothparents, | |
|
794 | revlogs=UPGRADE_ALL_REVLOGS, | |
|
795 | ): | |
|
796 | """Copy revlogs between 2 repos.""" | |
|
797 | revcount = 0 | |
|
798 | srcsize = 0 | |
|
799 | srcrawsize = 0 | |
|
800 | dstsize = 0 | |
|
801 | fcount = 0 | |
|
802 | frevcount = 0 | |
|
803 | fsrcsize = 0 | |
|
804 | frawsize = 0 | |
|
805 | fdstsize = 0 | |
|
806 | mcount = 0 | |
|
807 | mrevcount = 0 | |
|
808 | msrcsize = 0 | |
|
809 | mrawsize = 0 | |
|
810 | mdstsize = 0 | |
|
811 | crevcount = 0 | |
|
812 | csrcsize = 0 | |
|
813 | crawsize = 0 | |
|
814 | cdstsize = 0 | |
|
815 | ||
|
816 | alldatafiles = list(srcrepo.store.walk()) | |
|
817 | ||
|
818 | # Perform a pass to collect metadata. This validates we can open all | |
|
819 | # source files and allows a unified progress bar to be displayed. | |
|
820 | for unencoded, encoded, size in alldatafiles: | |
|
821 | if unencoded.endswith(b'.d'): | |
|
822 | continue | |
|
823 | ||
|
824 | rl = _revlogfrompath(srcrepo, unencoded) | |
|
825 | ||
|
826 | info = rl.storageinfo( | |
|
827 | exclusivefiles=True, | |
|
828 | revisionscount=True, | |
|
829 | trackedsize=True, | |
|
830 | storedsize=True, | |
|
831 | ) | |
|
832 | ||
|
833 | revcount += info[b'revisionscount'] or 0 | |
|
834 | datasize = info[b'storedsize'] or 0 | |
|
835 | rawsize = info[b'trackedsize'] or 0 | |
|
836 | ||
|
837 | srcsize += datasize | |
|
838 | srcrawsize += rawsize | |
|
839 | ||
|
840 | # This is for the separate progress bars. | |
|
841 | if isinstance(rl, changelog.changelog): | |
|
842 | crevcount += len(rl) | |
|
843 | csrcsize += datasize | |
|
844 | crawsize += rawsize | |
|
845 | elif isinstance(rl, manifest.manifestrevlog): | |
|
846 | mcount += 1 | |
|
847 | mrevcount += len(rl) | |
|
848 | msrcsize += datasize | |
|
849 | mrawsize += rawsize | |
|
850 | elif isinstance(rl, filelog.filelog): | |
|
851 | fcount += 1 | |
|
852 | frevcount += len(rl) | |
|
853 | fsrcsize += datasize | |
|
854 | frawsize += rawsize | |
|
855 | else: | |
|
856 | raise error.ProgrammingError(b'unknown revlog type') | |
|
857 | ||
|
858 | if not revcount: | |
|
859 | return | |
|
860 | ||
|
861 | ui.status( | |
|
862 | _( | |
|
863 | b'migrating %d total revisions (%d in filelogs, %d in manifests, ' | |
|
864 | b'%d in changelog)\n' | |
|
865 | ) | |
|
866 | % (revcount, frevcount, mrevcount, crevcount) | |
|
867 | ) | |
|
868 | ui.status( | |
|
869 | _(b'migrating %s in store; %s tracked data\n') | |
|
870 | % ((util.bytecount(srcsize), util.bytecount(srcrawsize))) | |
|
871 | ) | |
|
872 | ||
|
873 | # Used to keep track of progress. | |
|
874 | progress = None | |
|
875 | ||
|
876 | def oncopiedrevision(rl, rev, node): | |
|
877 | progress.increment() | |
|
878 | ||
|
879 | sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo) | |
|
880 | ||
|
881 | # Do the actual copying. | |
|
882 | # FUTURE this operation can be farmed off to worker processes. | |
|
883 | seen = set() | |
|
884 | for unencoded, encoded, size in alldatafiles: | |
|
885 | if unencoded.endswith(b'.d'): | |
|
886 | continue | |
|
887 | ||
|
888 | oldrl = _revlogfrompath(srcrepo, unencoded) | |
|
889 | ||
|
890 | if isinstance(oldrl, changelog.changelog) and b'c' not in seen: | |
|
891 | ui.status( | |
|
892 | _( | |
|
893 | b'finished migrating %d manifest revisions across %d ' | |
|
894 | b'manifests; change in size: %s\n' | |
|
895 | ) | |
|
896 | % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) | |
|
897 | ) | |
|
898 | ||
|
899 | ui.status( | |
|
900 | _( | |
|
901 | b'migrating changelog containing %d revisions ' | |
|
902 | b'(%s in store; %s tracked data)\n' | |
|
903 | ) | |
|
904 | % ( | |
|
905 | crevcount, | |
|
906 | util.bytecount(csrcsize), | |
|
907 | util.bytecount(crawsize), | |
|
908 | ) | |
|
909 | ) | |
|
910 | seen.add(b'c') | |
|
911 | progress = srcrepo.ui.makeprogress( | |
|
912 | _(b'changelog revisions'), total=crevcount | |
|
913 | ) | |
|
914 | elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen: | |
|
915 | ui.status( | |
|
916 | _( | |
|
917 | b'finished migrating %d filelog revisions across %d ' | |
|
918 | b'filelogs; change in size: %s\n' | |
|
919 | ) | |
|
920 | % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) | |
|
921 | ) | |
|
922 | ||
|
923 | ui.status( | |
|
924 | _( | |
|
925 | b'migrating %d manifests containing %d revisions ' | |
|
926 | b'(%s in store; %s tracked data)\n' | |
|
927 | ) | |
|
928 | % ( | |
|
929 | mcount, | |
|
930 | mrevcount, | |
|
931 | util.bytecount(msrcsize), | |
|
932 | util.bytecount(mrawsize), | |
|
933 | ) | |
|
934 | ) | |
|
935 | seen.add(b'm') | |
|
936 | if progress: | |
|
937 | progress.complete() | |
|
938 | progress = srcrepo.ui.makeprogress( | |
|
939 | _(b'manifest revisions'), total=mrevcount | |
|
940 | ) | |
|
941 | elif b'f' not in seen: | |
|
942 | ui.status( | |
|
943 | _( | |
|
944 | b'migrating %d filelogs containing %d revisions ' | |
|
945 | b'(%s in store; %s tracked data)\n' | |
|
946 | ) | |
|
947 | % ( | |
|
948 | fcount, | |
|
949 | frevcount, | |
|
950 | util.bytecount(fsrcsize), | |
|
951 | util.bytecount(frawsize), | |
|
952 | ) | |
|
953 | ) | |
|
954 | seen.add(b'f') | |
|
955 | if progress: | |
|
956 | progress.complete() | |
|
957 | progress = srcrepo.ui.makeprogress( | |
|
958 | _(b'file revisions'), total=frevcount | |
|
959 | ) | |
|
960 | ||
|
961 | if matchrevlog(revlogs, unencoded): | |
|
962 | ui.note( | |
|
963 | _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded) | |
|
964 | ) | |
|
965 | newrl = _revlogfrompath(dstrepo, unencoded) | |
|
966 | oldrl.clone( | |
|
967 | tr, | |
|
968 | newrl, | |
|
969 | addrevisioncb=oncopiedrevision, | |
|
970 | deltareuse=deltareuse, | |
|
971 | forcedeltabothparents=forcedeltabothparents, | |
|
972 | sidedatacompanion=sidedatacompanion, | |
|
973 | ) | |
|
974 | else: | |
|
975 | msg = _(b'blindly copying %s containing %i revisions\n') | |
|
976 | ui.note(msg % (unencoded, len(oldrl))) | |
|
977 | _copyrevlog(tr, dstrepo, oldrl, unencoded) | |
|
978 | ||
|
979 | newrl = _revlogfrompath(dstrepo, unencoded) | |
|
980 | ||
|
981 | info = newrl.storageinfo(storedsize=True) | |
|
982 | datasize = info[b'storedsize'] or 0 | |
|
983 | ||
|
984 | dstsize += datasize | |
|
985 | ||
|
986 | if isinstance(newrl, changelog.changelog): | |
|
987 | cdstsize += datasize | |
|
988 | elif isinstance(newrl, manifest.manifestrevlog): | |
|
989 | mdstsize += datasize | |
|
990 | else: | |
|
991 | fdstsize += datasize | |
|
992 | ||
|
993 | progress.complete() | |
|
994 | ||
|
995 | ui.status( | |
|
996 | _( | |
|
997 | b'finished migrating %d changelog revisions; change in size: ' | |
|
998 | b'%s\n' | |
|
999 | ) | |
|
1000 | % (crevcount, util.bytecount(cdstsize - csrcsize)) | |
|
1001 | ) | |
|
1002 | ||
|
1003 | ui.status( | |
|
1004 | _( | |
|
1005 | b'finished migrating %d total revisions; total change in store ' | |
|
1006 | b'size: %s\n' | |
|
1007 | ) | |
|
1008 | % (revcount, util.bytecount(dstsize - srcsize)) | |
|
1009 | ) | |
|
1010 | ||
|
1011 | ||
|
1012 | def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st): | |
|
1013 | """Determine whether to copy a store file during upgrade. | |
|
1014 | ||
|
1015 | This function is called when migrating store files from ``srcrepo`` to | |
|
1016 | ``dstrepo`` as part of upgrading a repository. | |
|
1017 | ||
|
1018 | Args: | |
|
1019 | srcrepo: repo we are copying from | |
|
1020 | dstrepo: repo we are copying to | |
|
1021 | requirements: set of requirements for ``dstrepo`` | |
|
1022 | path: store file being examined | |
|
1023 | mode: the ``ST_MODE`` file type of ``path`` | |
|
1024 | st: ``stat`` data structure for ``path`` | |
|
1025 | ||
|
1026 | Function should return ``True`` if the file is to be copied. | |
|
1027 | """ | |
|
1028 | # Skip revlogs. | |
|
1029 | if path.endswith((b'.i', b'.d', b'.n', b'.nd')): | |
|
1030 | return False | |
|
1031 | # Skip transaction related files. | |
|
1032 | if path.startswith(b'undo'): | |
|
1033 | return False | |
|
1034 | # Only copy regular files. | |
|
1035 | if mode != stat.S_IFREG: | |
|
1036 | return False | |
|
1037 | # Skip other skipped files. | |
|
1038 | if path in (b'lock', b'fncache'): | |
|
1039 | return False | |
|
1040 | ||
|
1041 | return True | |
|
1042 | ||
|
1043 | ||
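A few illustrative calls for the rules above (a sketch, not from the patch; the srcrepo, dstrepo, requirements and st arguments are unused by these checks, and the store paths are hypothetical):

    import stat

    _filterstorefile(None, None, set(), b'data/foo.txt.i', stat.S_IFREG, None)    # False: revlog, handled by the clone step
    _filterstorefile(None, None, set(), b'undo.backupfiles', stat.S_IFREG, None)  # False: transaction leftover
    _filterstorefile(None, None, set(), b'fncache', stat.S_IFREG, None)           # False: explicitly skipped ('lock' and 'fncache')
    _filterstorefile(None, None, set(), b'phaseroots', stat.S_IFREG, None)        # True: regular store file, copied verbatim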
|
1044 | def _finishdatamigration(ui, srcrepo, dstrepo, requirements): | |
|
1045 | """Hook point for extensions to perform additional actions during upgrade. | |
|
1046 | ||
|
1047 | This function is called after revlogs and store files have been copied but | |
|
1048 | before the new store is swapped into the original location. | |
|
1049 | """ | |
|
1050 | ||
|
1051 | ||
|
1052 | def _upgraderepo( | |
|
1053 | ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS | |
|
1054 | ): | |
|
1055 | """Do the low-level work of upgrading a repository. | |
|
1056 | ||
|
1057 | The upgrade is effectively performed as a copy between a source | |
|
1058 | repository and a temporary destination repository. | |
|
1059 | ||
|
1060 | The source repository is unmodified for as long as possible so the | |
|
1061 | upgrade can abort at any time without causing loss of service for | |
|
1062 | readers and without corrupting the source repository. | |
|
1063 | """ | |
|
1064 | assert srcrepo.currentwlock() | |
|
1065 | assert dstrepo.currentwlock() | |
|
1066 | ||
|
1067 | ui.status( | |
|
1068 | _( | |
|
1069 | b'(it is safe to interrupt this process any time before ' | |
|
1070 | b'data migration completes)\n' | |
|
1071 | ) | |
|
1072 | ) | |
|
1073 | ||
|
1074 | if b're-delta-all' in actions: | |
|
1075 | deltareuse = revlog.revlog.DELTAREUSENEVER | |
|
1076 | elif b're-delta-parent' in actions: | |
|
1077 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS | |
|
1078 | elif b're-delta-multibase' in actions: | |
|
1079 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS | |
|
1080 | elif b're-delta-fulladd' in actions: | |
|
1081 | deltareuse = revlog.revlog.DELTAREUSEFULLADD | |
|
1082 | else: | |
|
1083 | deltareuse = revlog.revlog.DELTAREUSEALWAYS | |
|
1084 | ||
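For reference, the branch above maps optimization names onto revlog delta-reuse policies (re-delta-multibase additionally turns on forcedeltabothparents when _clonerevlogs is called below). The same mapping as a literal, a sketch in which the values merely name the revlog.revlog constants:

    DELTAREUSE_FOR_ACTION = {
        b're-delta-all': 'DELTAREUSENEVER',
        b're-delta-parent': 'DELTAREUSESAMEREVS',
        b're-delta-multibase': 'DELTAREUSESAMEREVS',
        b're-delta-fulladd': 'DELTAREUSEFULLADD',
    }
    # any other set of actions falls back to DELTAREUSEALWAYS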
|
1085 | with dstrepo.transaction(b'upgrade') as tr: | |
|
1086 | _clonerevlogs( | |
|
1087 | ui, | |
|
1088 | srcrepo, | |
|
1089 | dstrepo, | |
|
1090 | tr, | |
|
1091 | deltareuse, | |
|
1092 | b're-delta-multibase' in actions, | |
|
1093 | revlogs=revlogs, | |
|
1094 | ) | |
|
1095 | ||
|
1096 | # Now copy other files in the store directory. | |
|
1097 | # The sorted() makes execution deterministic. | |
|
1098 | for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)): | |
|
1099 | if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st): | |
|
1100 | continue | |
|
1101 | ||
|
1102 | srcrepo.ui.status(_(b'copying %s\n') % p) | |
|
1103 | src = srcrepo.store.rawvfs.join(p) | |
|
1104 | dst = dstrepo.store.rawvfs.join(p) | |
|
1105 | util.copyfile(src, dst, copystat=True) | |
|
1106 | ||
|
1107 | _finishdatamigration(ui, srcrepo, dstrepo, requirements) | |
|
1108 | ||
|
1109 | ui.status(_(b'data fully migrated to temporary repository\n')) | |
|
1110 | ||
|
1111 | backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path) | |
|
1112 | backupvfs = vfsmod.vfs(backuppath) | |
|
1113 | ||
|
1114 | # Make a backup of requires file first, as it is the first to be modified. | |
|
1115 | util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')) | |
|
1116 | ||
|
1117 | # We install an arbitrary requirement that clients must not support | |
|
1118 | # as a mechanism to lock out new clients during the data swap. This is | |
|
1119 | # better than allowing a client to continue while the repository is in | |
|
1120 | # an inconsistent state. | |
|
1121 | ui.status( | |
|
1122 | _( | |
|
1123 | b'marking source repository as being upgraded; clients will be ' | |
|
1124 | b'unable to read from repository\n' | |
|
1125 | ) | |
|
1126 | ) | |
|
1127 | scmutil.writereporequirements( | |
|
1128 | srcrepo, srcrepo.requirements | {b'upgradeinprogress'} | |
|
1129 | ) | |
|
1130 | ||
|
1131 | ui.status(_(b'starting in-place swap of repository data\n')) | |
|
1132 | ui.status(_(b'replaced files will be backed up at %s\n') % backuppath) | |
|
1133 | ||
|
1134 | # Now swap in the new store directory. Doing it as a rename should make | |
|
1135 | # the operation nearly instantaneous and atomic (at least in well-behaved | |
|
1136 | # environments). | |
|
1137 | ui.status(_(b'replacing store...\n')) | |
|
1138 | tstart = util.timer() | |
|
1139 | util.rename(srcrepo.spath, backupvfs.join(b'store')) | |
|
1140 | util.rename(dstrepo.spath, srcrepo.spath) | |
|
1141 | elapsed = util.timer() - tstart | |
|
1142 | ui.status( | |
|
1143 | _( | |
|
1144 | b'store replacement complete; repository was inconsistent for ' | |
|
1145 | b'%0.1fs\n' | |
|
1146 | ) | |
|
1147 | % elapsed | |
|
1148 | ) | |
|
1149 | ||
|
1150 | # We first write the requirements file. Any new requirements will lock | |
|
1151 | # out legacy clients. | |
|
1152 | ui.status( | |
|
1153 | _( | |
|
1154 | b'finalizing requirements file and making repository readable ' | |
|
1155 | b'again\n' | |
|
1156 | ) | |
|
1157 | ) | |
|
1158 | scmutil.writereporequirements(srcrepo, requirements) | |
|
1159 | ||
|
1160 | # The lock file from the old store won't be removed because nothing has a | |
|
1161 | # reference to its new location. So clean it up manually. Alternatively, we | |
|
1162 | # could update srcrepo.svfs and other variables to point to the new | |
|
1163 | # location. This is simpler. | |
|
1164 | backupvfs.unlink(b'store/lock') | |
|
1165 | ||
|
1166 | return backuppath | |
|
1167 | ||
|
1168 | ||
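The ordering in _upgraderepo above is what makes an interrupted upgrade recoverable: 'requires' is backed up first, a lock-out requirement is written before the store directories are swapped, and the final requirements are only written once the new store is in place. A condensed, stand-alone sketch of that sequence (my own illustration using plain os/shutil on hypothetical paths; the real code goes through Mercurial's util and vfs helpers):

    import os
    import shutil
    import tempfile

    def swap_store(repo_path, new_store_path, final_requirements):
        backup = tempfile.mkdtemp(prefix='upgradebackup.', dir=repo_path)
        requires = os.path.join(repo_path, 'requires')
        shutil.copyfile(requires, os.path.join(backup, 'requires'))  # back up 'requires' first
        with open(requires, 'a') as fp:
            fp.write('upgradeinprogress\n')  # lock out clients while the stores are swapped
        os.rename(os.path.join(repo_path, 'store'), os.path.join(backup, 'store'))
        os.rename(new_store_path, os.path.join(repo_path, 'store'))
        with open(requires, 'w') as fp:
            fp.write('\n'.join(sorted(final_requirements)) + '\n')  # repository readable again
        stale_lock = os.path.join(backup, 'store', 'lock')
        if os.path.exists(stale_lock):
            os.unlink(stale_lock)  # the old store's lock file is now orphaned
        return backup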
|
1169 | 689 | def upgraderepo( |
|
1170 | 690 | ui, |
|
1171 | 691 | repo, |
|
1172 | 692 | run=False, |
|
1173 | 693 | optimize=None, |
|
1174 | 694 | backup=True, |
|
1175 | 695 | manifest=None, |
|
1176 | 696 | changelog=None, |
|
1177 | 697 | filelogs=None, |
|
1178 | 698 | ): |
|
1179 | 699 | """Upgrade a repository in place.""" |
|
1180 | 700 | if optimize is None: |
|
1181 | 701 | optimize = [] |
|
1182 | 702 | optimize = {legacy_opts_map.get(o, o) for o in optimize} |
|
1183 | 703 | repo = repo.unfiltered() |
|
1184 | 704 | |
|
1185 | revlogs = set(UPGRADE_ALL_REVLOGS) | |
|
705 | revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS) | |
|
1186 | 706 | specentries = ( |
|
1187 | (UPGRADE_CHANGELOG, changelog), | |
|
1188 | (UPGRADE_MANIFEST, manifest), | |
|
1189 | (UPGRADE_FILELOGS, filelogs), | |
|
707 | (upgrade_engine.UPGRADE_CHANGELOG, changelog), | |
|
708 | (upgrade_engine.UPGRADE_MANIFEST, manifest), | |
|
709 | (upgrade_engine.UPGRADE_FILELOGS, filelogs), | |
|
1190 | 710 | ) |
|
1191 | 711 | specified = [(y, x) for (y, x) in specentries if x is not None] |
|
1192 | 712 | if specified: |
|
1193 | 713 | # we have some limitation on revlogs to be recloned |
|
1194 | 714 | if any(x for y, x in specified): |
|
1195 | 715 | revlogs = set() |
|
1196 | 716 | for upgrade, enabled in specified: |
|
1197 | 717 | if enabled: |
|
1198 | 718 | revlogs.add(upgrade) |
|
1199 | 719 | else: |
|
1200 | 720 | # none are enabled |
|
1201 | 721 | for upgrade, __ in specified: |
|
1202 | 722 | revlogs.discard(upgrade) |
|
1203 | 723 | |
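The changelog/manifest/filelogs flags are folded into the revlogs set by the block above; since the two branches are easy to misread in diff form, here is the same rule as a self-contained sketch (my own illustration; the byte strings mirror the UPGRADE_* names):

    def select_revlogs(changelog=None, manifest=None, filelogs=None):
        everything = {b'changelog', b'manifest', b'all-filelogs'}
        spec = [(b'changelog', changelog),
                (b'manifest', manifest),
                (b'all-filelogs', filelogs)]
        specified = [(name, value) for name, value in spec if value is not None]
        if not specified:
            return set(everything)
        if any(value for _name, value in specified):
            # at least one revlog explicitly requested: re-clone only those
            return {name for name, value in specified if value}
        # only explicit "no" flags: start from everything and drop them
        return everything - {name for name, _value in specified}

    select_revlogs(manifest=True)    # {b'manifest'}
    select_revlogs(changelog=False)  # {b'manifest', b'all-filelogs'}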
|
1204 | 724 | # Ensure the repository can be upgraded. |
|
1205 | 725 | missingreqs = requiredsourcerequirements(repo) - repo.requirements |
|
1206 | 726 | if missingreqs: |
|
1207 | 727 | raise error.Abort( |
|
1208 | 728 | _(b'cannot upgrade repository; requirement missing: %s') |
|
1209 | 729 | % _(b', ').join(sorted(missingreqs)) |
|
1210 | 730 | ) |
|
1211 | 731 | |
|
1212 | 732 | blockedreqs = blocksourcerequirements(repo) & repo.requirements |
|
1213 | 733 | if blockedreqs: |
|
1214 | 734 | raise error.Abort( |
|
1215 | 735 | _( |
|
1216 | 736 | b'cannot upgrade repository; unsupported source ' |
|
1217 | 737 | b'requirement: %s' |
|
1218 | 738 | ) |
|
1219 | 739 | % _(b', ').join(sorted(blockedreqs)) |
|
1220 | 740 | ) |
|
1221 | 741 | |
|
1222 | 742 | # FUTURE there is potentially a need to control the wanted requirements via |
|
1223 | 743 | # command arguments or via an extension hook point. |
|
1224 | 744 | newreqs = localrepo.newreporequirements( |
|
1225 | 745 | repo.ui, localrepo.defaultcreateopts(repo.ui) |
|
1226 | 746 | ) |
|
1227 | 747 | newreqs.update(preservedrequirements(repo)) |
|
1228 | 748 | |
|
1229 | 749 | noremovereqs = ( |
|
1230 | 750 | repo.requirements - newreqs - supportremovedrequirements(repo) |
|
1231 | 751 | ) |
|
1232 | 752 | if noremovereqs: |
|
1233 | 753 | raise error.Abort( |
|
1234 | 754 | _( |
|
1235 | 755 | b'cannot upgrade repository; requirement would be ' |
|
1236 | 756 | b'removed: %s' |
|
1237 | 757 | ) |
|
1238 | 758 | % _(b', ').join(sorted(noremovereqs)) |
|
1239 | 759 | ) |
|
1240 | 760 | |
|
1241 | 761 | noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo) |
|
1242 | 762 | if noaddreqs: |
|
1243 | 763 | raise error.Abort( |
|
1244 | 764 | _( |
|
1245 | 765 | b'cannot upgrade repository; do not support adding ' |
|
1246 | 766 | b'requirement: %s' |
|
1247 | 767 | ) |
|
1248 | 768 | % _(b', ').join(sorted(noaddreqs)) |
|
1249 | 769 | ) |
|
1250 | 770 | |
|
1251 | 771 | unsupportedreqs = newreqs - supporteddestrequirements(repo) |
|
1252 | 772 | if unsupportedreqs: |
|
1253 | 773 | raise error.Abort( |
|
1254 | 774 | _( |
|
1255 | 775 | b'cannot upgrade repository; do not support ' |
|
1256 | 776 | b'destination requirement: %s' |
|
1257 | 777 | ) |
|
1258 | 778 | % _(b', ').join(sorted(unsupportedreqs)) |
|
1259 | 779 | ) |
|
1260 | 780 | |
|
1261 | 781 | # Find and validate all improvements that can be made. |
|
1262 | 782 | alloptimizations = findoptimizations(repo) |
|
1263 | 783 | |
|
1264 | 784 | # Apply and Validate arguments. |
|
1265 | 785 | optimizations = [] |
|
1266 | 786 | for o in alloptimizations: |
|
1267 | 787 | if o.name in optimize: |
|
1268 | 788 | optimizations.append(o) |
|
1269 | 789 | optimize.discard(o.name) |
|
1270 | 790 | |
|
1271 | 791 | if optimize: # anything left is unknown |
|
1272 | 792 | raise error.Abort( |
|
1273 | 793 | _(b'unknown optimization action requested: %s') |
|
1274 | 794 | % b', '.join(sorted(optimize)), |
|
1275 | 795 | hint=_(b'run without arguments to see valid optimizations'), |
|
1276 | 796 | ) |
|
1277 | 797 | |
|
1278 | 798 | deficiencies = finddeficiencies(repo) |
|
1279 | 799 | actions = determineactions(repo, deficiencies, repo.requirements, newreqs) |
|
1280 | 800 | actions.extend( |
|
1281 | 801 | o |
|
1282 | 802 | for o in sorted(optimizations) |
|
1283 | 803 | # determineactions could have added optimisation |
|
1284 | 804 | if o not in actions |
|
1285 | 805 | ) |
|
1286 | 806 | |
|
1287 | 807 | removedreqs = repo.requirements - newreqs |
|
1288 | 808 | addedreqs = newreqs - repo.requirements |
|
1289 | 809 | |
|
1290 | if revlogs != UPGRADE_ALL_REVLOGS: | |
|
810 | if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS: | |
|
1291 | 811 | incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs) |
|
1292 | 812 | if incompatible: |
|
1293 | 813 | msg = _( |
|
1294 | 814 | b'ignoring revlogs selection flags, format requirements ' |
|
1295 | 815 | b'change: %s\n' |
|
1296 | 816 | ) |
|
1297 | 817 | ui.warn(msg % b', '.join(sorted(incompatible))) |
|
1298 | revlogs = UPGRADE_ALL_REVLOGS | |
|
818 | revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS | |
|
1299 | 819 | |
|
1300 | 820 | def write_labeled(l, label): |
|
1301 | 821 | first = True |
|
1302 | 822 | for r in sorted(l): |
|
1303 | 823 | if not first: |
|
1304 | 824 | ui.write(b', ') |
|
1305 | 825 | ui.write(r, label=label) |
|
1306 | 826 | first = False |
|
1307 | 827 | |
|
1308 | 828 | def printrequirements(): |
|
1309 | 829 | ui.write(_(b'requirements\n')) |
|
1310 | 830 | ui.write(_(b' preserved: ')) |
|
1311 | 831 | write_labeled( |
|
1312 | 832 | newreqs & repo.requirements, "upgrade-repo.requirement.preserved" |
|
1313 | 833 | ) |
|
1314 | 834 | ui.write((b'\n')) |
|
1315 | 835 | removed = repo.requirements - newreqs |
|
1316 | 836 | if repo.requirements - newreqs: |
|
1317 | 837 | ui.write(_(b' removed: ')) |
|
1318 | 838 | write_labeled(removed, "upgrade-repo.requirement.removed") |
|
1319 | 839 | ui.write((b'\n')) |
|
1320 | 840 | added = newreqs - repo.requirements |
|
1321 | 841 | if added: |
|
1322 | 842 | ui.write(_(b' added: ')) |
|
1323 | 843 | write_labeled(added, "upgrade-repo.requirement.added") |
|
1324 | 844 | ui.write((b'\n')) |
|
1325 | 845 | ui.write(b'\n') |
|
1326 | 846 | |
|
1327 | 847 | def printoptimisations(): |
|
1328 | 848 | optimisations = [a for a in actions if a.type == OPTIMISATION] |
|
1329 | 849 | optimisations.sort(key=lambda a: a.name) |
|
1330 | 850 | if optimisations: |
|
1331 | 851 | ui.write(_(b'optimisations: ')) |
|
1332 | 852 | write_labeled( |
|
1333 | 853 | [a.name for a in optimisations], |
|
1334 | 854 | "upgrade-repo.optimisation.performed", |
|
1335 | 855 | ) |
|
1336 | 856 | ui.write(b'\n\n') |
|
1337 | 857 | |
|
1338 | 858 | def printupgradeactions(): |
|
1339 | 859 | for a in actions: |
|
1340 | 860 | ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage)) |
|
1341 | 861 | |
|
1342 | 862 | def print_affected_revlogs(): |
|
1343 | 863 | if not revlogs: |
|
1344 | 864 | ui.write((b'no revlogs to process\n')) |
|
1345 | 865 | else: |
|
1346 | 866 | ui.write((b'processed revlogs:\n')) |
|
1347 | 867 | for r in sorted(revlogs): |
|
1348 | 868 | ui.write((b' - %s\n' % r)) |
|
1349 | 869 | ui.write((b'\n')) |
|
1350 | 870 | |
|
1351 | 871 | if not run: |
|
1352 | 872 | fromconfig = [] |
|
1353 | 873 | onlydefault = [] |
|
1354 | 874 | |
|
1355 | 875 | for d in deficiencies: |
|
1356 | 876 | if d.fromconfig(repo): |
|
1357 | 877 | fromconfig.append(d) |
|
1358 | 878 | elif d.default: |
|
1359 | 879 | onlydefault.append(d) |
|
1360 | 880 | |
|
1361 | 881 | if fromconfig or onlydefault: |
|
1362 | 882 | |
|
1363 | 883 | if fromconfig: |
|
1364 | 884 | ui.status( |
|
1365 | 885 | _( |
|
1366 | 886 | b'repository lacks features recommended by ' |
|
1367 | 887 | b'current config options:\n\n' |
|
1368 | 888 | ) |
|
1369 | 889 | ) |
|
1370 | 890 | for i in fromconfig: |
|
1371 | 891 | ui.status(b'%s\n %s\n\n' % (i.name, i.description)) |
|
1372 | 892 | |
|
1373 | 893 | if onlydefault: |
|
1374 | 894 | ui.status( |
|
1375 | 895 | _( |
|
1376 | 896 | b'repository lacks features used by the default ' |
|
1377 | 897 | b'config options:\n\n' |
|
1378 | 898 | ) |
|
1379 | 899 | ) |
|
1380 | 900 | for i in onlydefault: |
|
1381 | 901 | ui.status(b'%s\n %s\n\n' % (i.name, i.description)) |
|
1382 | 902 | |
|
1383 | 903 | ui.status(b'\n') |
|
1384 | 904 | else: |
|
1385 | 905 | ui.status( |
|
1386 | 906 | _( |
|
1387 | 907 | b'(no feature deficiencies found in existing ' |
|
1388 | 908 | b'repository)\n' |
|
1389 | 909 | ) |
|
1390 | 910 | ) |
|
1391 | 911 | |
|
1392 | 912 | ui.status( |
|
1393 | 913 | _( |
|
1394 | 914 | b'performing an upgrade with "--run" will make the following ' |
|
1395 | 915 | b'changes:\n\n' |
|
1396 | 916 | ) |
|
1397 | 917 | ) |
|
1398 | 918 | |
|
1399 | 919 | printrequirements() |
|
1400 | 920 | printoptimisations() |
|
1401 | 921 | printupgradeactions() |
|
1402 | 922 | print_affected_revlogs() |
|
1403 | 923 | |
|
1404 | 924 | unusedoptimize = [i for i in alloptimizations if i not in actions] |
|
1405 | 925 | |
|
1406 | 926 | if unusedoptimize: |
|
1407 | 927 | ui.status( |
|
1408 | 928 | _( |
|
1409 | 929 | b'additional optimizations are available by specifying ' |
|
1410 | 930 | b'"--optimize <name>":\n\n' |
|
1411 | 931 | ) |
|
1412 | 932 | ) |
|
1413 | 933 | for i in unusedoptimize: |
|
1414 | 934 | ui.status(_(b'%s\n %s\n\n') % (i.name, i.description)) |
|
1415 | 935 | return |
|
1416 | 936 | |
|
1417 | 937 | # Else we're in the run=true case. |
|
1418 | 938 | ui.write(_(b'upgrade will perform the following actions:\n\n')) |
|
1419 | 939 | printrequirements() |
|
1420 | 940 | printoptimisations() |
|
1421 | 941 | printupgradeactions() |
|
1422 | 942 | print_affected_revlogs() |
|
1423 | 943 | |
|
1424 | 944 | upgradeactions = [a.name for a in actions] |
|
1425 | 945 | |
|
1426 | 946 | ui.status(_(b'beginning upgrade...\n')) |
|
1427 | 947 | with repo.wlock(), repo.lock(): |
|
1428 | 948 | ui.status(_(b'repository locked and read-only\n')) |
|
1429 | 949 | # Our strategy for upgrading the repository is to create a new, |
|
1430 | 950 | # temporary repository, write data to it, then do a swap of the |
|
1431 | 951 | # data. There are less heavyweight ways to do this, but it is easier |
|
1432 | 952 | # to create a new repo object than to instantiate all the components |
|
1433 | 953 | # (like the store) separately. |
|
1434 | 954 | tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path) |
|
1435 | 955 | backuppath = None |
|
1436 | 956 | try: |
|
1437 | 957 | ui.status( |
|
1438 | 958 | _( |
|
1439 | 959 | b'creating temporary repository to stage migrated ' |
|
1440 | 960 | b'data: %s\n' |
|
1441 | 961 | ) |
|
1442 | 962 | % tmppath |
|
1443 | 963 | ) |
|
1444 | 964 | |
|
1445 | 965 | # clone ui without using ui.copy because repo.ui is protected |
|
1446 | 966 | repoui = repo.ui.__class__(repo.ui) |
|
1447 | 967 | dstrepo = hg.repository(repoui, path=tmppath, create=True) |
|
1448 | 968 | |
|
1449 | 969 | with dstrepo.wlock(), dstrepo.lock(): |
|
1450 | backuppath = _upgraderepo( | |

970 | backuppath = upgrade_engine.upgrade( | |
|
1451 | 971 | ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs |
|
1452 | 972 | ) |
|
1453 | 973 | if not (backup or backuppath is None): |
|
1454 | 974 | ui.status( |
|
1455 | 975 | _(b'removing old repository content%s\n') % backuppath |
|
1456 | 976 | ) |
|
1457 | 977 | repo.vfs.rmtree(backuppath, forcibly=True) |
|
1458 | 978 | backuppath = None |
|
1459 | 979 | |
|
1460 | 980 | finally: |
|
1461 | 981 | ui.status(_(b'removing temporary repository %s\n') % tmppath) |
|
1462 | 982 | repo.vfs.rmtree(tmppath, forcibly=True) |
|
1463 | 983 | |
|
1464 | 984 | if backuppath and not ui.quiet: |
|
1465 | 985 | ui.warn( |
|
1466 | 986 | _(b'copy of old repository backed up at %s\n') % backuppath |
|
1467 | 987 | ) |
|
1468 | 988 | ui.warn( |
|
1469 | 989 | _( |
|
1470 | 990 | b'the old repository will not be deleted; remove ' |
|
1471 | 991 | b'it to free up disk space once the upgraded ' |
|
1472 | 992 | b'repository is verified\n' |
|
1473 | 993 | ) |
|
1474 | 994 | ) |
|
1475 | 995 | |
|
1476 | 996 | if sharedsafe.name in addedreqs: |
|
1477 | 997 | ui.warn( |
|
1478 | 998 | _( |
|
1479 | 999 | b'repository upgraded to share safe mode, existing' |
|
1480 | 1000 | b' shares will still work in old non-safe mode. ' |
|
1481 | 1001 | b'Re-share existing shares to use them in safe mode' |
|
1482 | 1002 | b' New shares will be created in safe mode.\n' |
|
1483 | 1003 | ) |
|
1484 | 1004 | ) |
|
1485 | 1005 | if sharedsafe.name in removedreqs: |
|
1486 | 1006 | ui.warn( |
|
1487 | 1007 | _( |
|
1488 | 1008 | b'repository downgraded to not use share safe mode, ' |
|
1489 | 1009 | b'existing shares will not work and needs to' |
|
1490 | 1010 | b' be reshared.\n' |
|
1491 | 1011 | ) |
|
1492 | 1012 | ) |
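The net effect of the hunks above is that upgraderepo no longer owns the engine constants or the low-level copy/swap code: it now references upgrade_engine.UPGRADE_ALL_REVLOGS and friends and hands the two locked repositories to upgrade_engine.upgrade. Condensed, the delegation looks roughly like this (a sketch only; run_engine is a hypothetical wrapper, and inside mercurial/upgrade.py the import is relative):

    from mercurial.upgrade_utils import engine as upgrade_engine

    def run_engine(ui, repo, dstrepo, newreqs, upgradeactions):
        revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
        return upgrade_engine.upgrade(
            ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
        )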
This diff has been collapsed as it changes many lines (1,000 lines changed).
@@ -1,1492 +1,500 b'' | |||
|
1 | 1 | # upgrade.py - functions for in place upgrade of Mercurial repository |
|
2 | 2 | # |
|
3 | 3 | # Copyright (c) 2016-present, Gregory Szorc |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import stat |
|
11 | 11 | |
|
12 | from .i18n import _ | |
|
13 | from .pycompat import getattr | |
|
14 | from . import ( | |
|
12 | from ..i18n import _ | |
|
13 | from ..pycompat import getattr | |
|
14 | from .. import ( | |
|
15 | 15 | changelog, |
|
16 | 16 | error, |
|
17 | 17 | filelog, |
|
18 | hg, | |
|
19 | localrepo, | |
|
20 | 18 | manifest, |
|
21 | 19 | metadata, |
|
22 | 20 | pycompat, |
|
23 | 21 | requirements, |
|
24 | 22 | revlog, |
|
25 | 23 | scmutil, |
|
26 | 24 | util, |
|
27 | 25 | vfs as vfsmod, |
|
28 | 26 | ) |
|
29 | 27 | |
|
30 | from .utils import compression | |
|
31 | ||
|
32 | # list of requirements that request a clone of all revlog if added/removed | |
|
33 | RECLONES_REQUIREMENTS = { | |
|
34 | b'generaldelta', | |
|
35 | requirements.SPARSEREVLOG_REQUIREMENT, | |
|
36 | } | |
|
37 | ||
|
38 | ||
|
39 | def requiredsourcerequirements(repo): | |
|
40 | """Obtain requirements required to be present to upgrade a repo. | |
|
41 | ||
|
42 | An upgrade will not be allowed if the repository doesn't have the | |
|
43 | requirements returned by this function. | |
|
44 | """ | |
|
45 | return { | |
|
46 | # Introduced in Mercurial 0.9.2. | |
|
47 | b'revlogv1', | |
|
48 | # Introduced in Mercurial 0.9.2. | |
|
49 | b'store', | |
|
50 | } | |
|
51 | ||
|
52 | ||
|
53 | def blocksourcerequirements(repo): | |
|
54 | """Obtain requirements that will prevent an upgrade from occurring. | |
|
55 | ||
|
56 | An upgrade cannot be performed if the source repository contains a | |
|
57 | requirement in the returned set. | |
|
58 | """ | |
|
59 | return { | |
|
60 | # The upgrade code does not yet support these experimental features. | |
|
61 | # This is an artificial limitation. | |
|
62 | requirements.TREEMANIFEST_REQUIREMENT, | |
|
63 | # This was a precursor to generaldelta and was never enabled by default. | |
|
64 | # It should (hopefully) not exist in the wild. | |
|
65 | b'parentdelta', | |
|
66 | # Upgrade should operate on the actual store, not the shared link. | |
|
67 | requirements.SHARED_REQUIREMENT, | |
|
68 | } | |
|
69 | ||
|
70 | ||
|
71 | def supportremovedrequirements(repo): | |
|
72 | """Obtain requirements that can be removed during an upgrade. | |
|
73 | ||
|
74 | If an upgrade were to create a repository that dropped a requirement, | |
|
75 | the dropped requirement must appear in the returned set for the upgrade | |
|
76 | to be allowed. | |
|
77 | """ | |
|
78 | supported = { | |
|
79 | requirements.SPARSEREVLOG_REQUIREMENT, | |
|
80 | requirements.SIDEDATA_REQUIREMENT, | |
|
81 | requirements.COPIESSDC_REQUIREMENT, | |
|
82 | requirements.NODEMAP_REQUIREMENT, | |
|
83 | requirements.SHARESAFE_REQUIREMENT, | |
|
84 | } | |
|
85 | for name in compression.compengines: | |
|
86 | engine = compression.compengines[name] | |
|
87 | if engine.available() and engine.revlogheader(): | |
|
88 | supported.add(b'exp-compression-%s' % name) | |
|
89 | if engine.name() == b'zstd': | |
|
90 | supported.add(b'revlog-compression-zstd') | |
|
91 | return supported | |
|
92 | ||
|
93 | ||
|
94 | def supporteddestrequirements(repo): | |
|
95 | """Obtain requirements that upgrade supports in the destination. | |
|
96 | ||
|
97 | If the result of the upgrade would create requirements not in this set, | |
|
98 | the upgrade is disallowed. | |
|
99 | ||
|
100 | Extensions should monkeypatch this to add their custom requirements. | |
|
101 | """ | |
|
102 | supported = { | |
|
103 | b'dotencode', | |
|
104 | b'fncache', | |
|
105 | b'generaldelta', | |
|
106 | b'revlogv1', | |
|
107 | b'store', | |
|
108 | requirements.SPARSEREVLOG_REQUIREMENT, | |
|
109 | requirements.SIDEDATA_REQUIREMENT, | |
|
110 | requirements.COPIESSDC_REQUIREMENT, | |
|
111 | requirements.NODEMAP_REQUIREMENT, | |
|
112 | requirements.SHARESAFE_REQUIREMENT, | |
|
113 | } | |
|
114 | for name in compression.compengines: | |
|
115 | engine = compression.compengines[name] | |
|
116 | if engine.available() and engine.revlogheader(): | |
|
117 | supported.add(b'exp-compression-%s' % name) | |
|
118 | if engine.name() == b'zstd': | |
|
119 | supported.add(b'revlog-compression-zstd') | |
|
120 | return supported | |
|
121 | ||
|
122 | ||
|
123 | def allowednewrequirements(repo): | |
|
124 | """Obtain requirements that can be added to a repository during upgrade. | |
|
125 | ||
|
126 | This is used to disallow proposed requirements from being added when | |
|
127 | they weren't present before. | |
|
128 | ||
|
129 | We use a list of allowed requirement additions instead of a list of known | |
|
130 | bad additions because the whitelist approach is safer and will prevent | |
|
131 | future, unknown requirements from accidentally being added. | |
|
132 | """ | |
|
133 | supported = { | |
|
134 | b'dotencode', | |
|
135 | b'fncache', | |
|
136 | b'generaldelta', | |
|
137 | requirements.SPARSEREVLOG_REQUIREMENT, | |
|
138 | requirements.SIDEDATA_REQUIREMENT, | |
|
139 | requirements.COPIESSDC_REQUIREMENT, | |
|
140 | requirements.NODEMAP_REQUIREMENT, | |
|
141 | requirements.SHARESAFE_REQUIREMENT, | |
|
142 | } | |
|
143 | for name in compression.compengines: | |
|
144 | engine = compression.compengines[name] | |
|
145 | if engine.available() and engine.revlogheader(): | |
|
146 | supported.add(b'exp-compression-%s' % name) | |
|
147 | if engine.name() == b'zstd': | |
|
148 | supported.add(b'revlog-compression-zstd') | |
|
149 | return supported | |
|
150 | ||
|
151 | ||
|
152 | def preservedrequirements(repo): | |
|
153 | return set() | |
|
154 | ||
|
155 | ||
|
156 | DEFICIENCY = b'deficiency' | |
|
157 | OPTIMISATION = b'optimization' | |
|
158 | ||
|
159 | ||
|
160 | class improvement(object): | |
|
161 | """Represents an improvement that can be made as part of an upgrade. | |
|
162 | ||
|
163 | The following attributes are defined on each instance: | |
|
164 | ||
|
165 | name | |
|
166 | Machine-readable string uniquely identifying this improvement. It | |
|
167 | will be mapped to an action later in the upgrade process. | |
|
168 | ||
|
169 | type | |
|
170 | Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious | |
|
171 | problem. An optimization is an action (sometimes optional) that | |
|
172 | can be taken to further improve the state of the repository. | |
|
173 | ||
|
174 | description | |
|
175 | Message intended for humans explaining the improvement in more detail, | |
|
176 | including the implications of it. For ``DEFICIENCY`` types, should be | |
|
177 | worded in the present tense. For ``OPTIMISATION`` types, should be | |
|
178 | worded in the future tense. | |
|
179 | ||
|
180 | upgrademessage | |
|
181 | Message intended for humans explaining what an upgrade addressing this | |
|
182 | issue will do. Should be worded in the future tense. | |
|
183 | """ | |
|
184 | ||
|
185 | def __init__(self, name, type, description, upgrademessage): | |
|
186 | self.name = name | |
|
187 | self.type = type | |
|
188 | self.description = description | |
|
189 | self.upgrademessage = upgrademessage | |
|
190 | ||
|
191 | def __eq__(self, other): | |
|
192 | if not isinstance(other, improvement): | |
|
193 | # This is what Python tells us to do | |
|
194 | return NotImplemented | |
|
195 | return self.name == other.name | |
|
196 | ||
|
197 | def __ne__(self, other): | |
|
198 | return not (self == other) | |
|
199 | ||
|
200 | def __hash__(self): | |
|
201 | return hash(self.name) | |
|
202 | ||
|
203 | ||
|
204 | allformatvariant = [] | |
|
205 | ||
|
206 | ||
|
207 | def registerformatvariant(cls): | |
|
208 | allformatvariant.append(cls) | |
|
209 | return cls | |
|
210 | ||
|
211 | ||
|
212 | class formatvariant(improvement): | |
|
213 | """an improvement subclass dedicated to repository format""" | |
|
214 | ||
|
215 | type = DEFICIENCY | |
|
216 | ### The following attributes should be defined for each class: | |
|
217 | ||
|
218 | # machine-readable string uniquely identifying this improvement. it will be | |
|
219 | # mapped to an action later in the upgrade process. | |
|
220 | name = None | |
|
221 | ||
|
222 | # message intended for humans explaining the improvement in more detail, | |
|
223 | # including the implications of it. For ``DEFICIENCY`` types, should be worded | |
|
224 | # in the present tense. | |
|
225 | description = None | |
|
226 | ||
|
227 | # message intended for humans explaining what an upgrade addressing this | |
|
228 | # issue will do. should be worded in the future tense. | |
|
229 | upgrademessage = None | |
|
230 | ||
|
231 | # value of current Mercurial default for new repository | |
|
232 | default = None | |
|
233 | ||
|
234 | def __init__(self): | |
|
235 | raise NotImplementedError() | |
|
236 | ||
|
237 | @staticmethod | |
|
238 | def fromrepo(repo): | |
|
239 | """current value of the variant in the repository""" | |
|
240 | raise NotImplementedError() | |
|
241 | ||
|
242 | @staticmethod | |
|
243 | def fromconfig(repo): | |
|
244 | """current value of the variant in the configuration""" | |
|
245 | raise NotImplementedError() | |
|
246 | ||
|
247 | ||
|
248 | class requirementformatvariant(formatvariant): | |
|
249 | """formatvariant based on a 'requirement' name. | |
|
250 | ||
|
251 | Many format variants are controlled by a 'requirement'. We define a small | |
|
252 | subclass to factor the code. | |
|
253 | """ | |
|
254 | ||
|
255 | # the requirement that control this format variant | |
|
256 | _requirement = None | |
|
257 | ||
|
258 | @staticmethod | |
|
259 | def _newreporequirements(ui): | |
|
260 | return localrepo.newreporequirements( | |
|
261 | ui, localrepo.defaultcreateopts(ui) | |
|
262 | ) | |
|
263 | ||
|
264 | @classmethod | |
|
265 | def fromrepo(cls, repo): | |
|
266 | assert cls._requirement is not None | |
|
267 | return cls._requirement in repo.requirements | |
|
268 | ||
|
269 | @classmethod | |
|
270 | def fromconfig(cls, repo): | |
|
271 | assert cls._requirement is not None | |
|
272 | return cls._requirement in cls._newreporequirements(repo.ui) | |
|
273 | ||
|
274 | ||
|
275 | @registerformatvariant | |
|
276 | class fncache(requirementformatvariant): | |
|
277 | name = b'fncache' | |
|
278 | ||
|
279 | _requirement = b'fncache' | |
|
280 | ||
|
281 | default = True | |
|
282 | ||
|
283 | description = _( | |
|
284 | b'long and reserved filenames may not work correctly; ' | |
|
285 | b'repository performance is sub-optimal' | |
|
286 | ) | |
|
287 | ||
|
288 | upgrademessage = _( | |
|
289 | b'repository will be more resilient to storing ' | |
|
290 | b'certain paths and performance of certain ' | |
|
291 | b'operations should be improved' | |
|
292 | ) | |
|
293 | ||
|
294 | ||
|
295 | @registerformatvariant | |
|
296 | class dotencode(requirementformatvariant): | |
|
297 | name = b'dotencode' | |
|
298 | ||
|
299 | _requirement = b'dotencode' | |
|
300 | ||
|
301 | default = True | |
|
302 | ||
|
303 | description = _( | |
|
304 | b'storage of filenames beginning with a period or ' | |
|
305 | b'space may not work correctly' | |
|
306 | ) | |
|
307 | ||
|
308 | upgrademessage = _( | |
|
309 | b'repository will be better able to store files ' | |
|
310 | b'beginning with a space or period' | |
|
311 | ) | |
|
312 | ||
|
313 | ||
|
314 | @registerformatvariant | |
|
315 | class generaldelta(requirementformatvariant): | |
|
316 | name = b'generaldelta' | |
|
317 | ||
|
318 | _requirement = b'generaldelta' | |
|
319 | ||
|
320 | default = True | |
|
321 | ||
|
322 | description = _( | |
|
323 | b'deltas within internal storage are unable to ' | |
|
324 | b'choose optimal revisions; repository is larger and ' | |
|
325 | b'slower than it could be; interaction with other ' | |
|
326 | b'repositories may require extra network and CPU ' | |
|
327 | b'resources, making "hg push" and "hg pull" slower' | |
|
328 | ) | |
|
329 | ||
|
330 | upgrademessage = _( | |
|
331 | b'repository storage will be able to create ' | |
|
332 | b'optimal deltas; new repository data will be ' | |
|
333 | b'smaller and read times should decrease; ' | |
|
334 | b'interacting with other repositories using this ' | |
|
335 | b'storage model should require less network and ' | |
|
336 | b'CPU resources, making "hg push" and "hg pull" ' | |
|
337 | b'faster' | |
|
338 | ) | |
|
339 | ||
|
340 | ||
|
341 | @registerformatvariant | |
|
342 | class sharedsafe(requirementformatvariant): | |
|
343 | name = b'exp-sharesafe' | |
|
344 | _requirement = requirements.SHARESAFE_REQUIREMENT | |
|
345 | ||
|
346 | default = False | |
|
347 | ||
|
348 | description = _( | |
|
349 | b'old shared repositories do not share source repository ' | |
|
350 | b'requirements and config. This leads to various problems ' | |
|
351 | b'when the source repository format is upgraded or some new ' | |
|
352 | b'extensions are enabled.' | |
|
353 | ) | |
|
354 | ||
|
355 | upgrademessage = _( | |
|
356 | b'Upgrades a repository to share-safe format so that future ' | |
|
357 | b'shares of this repository share its requirements and configs.' | |
|
358 | ) | |
|
359 | ||
|
360 | ||
|
361 | @registerformatvariant | |
|
362 | class sparserevlog(requirementformatvariant): | |
|
363 | name = b'sparserevlog' | |
|
364 | ||
|
365 | _requirement = requirements.SPARSEREVLOG_REQUIREMENT | |
|
366 | ||
|
367 | default = True | |
|
368 | ||
|
369 | description = _( | |
|
370 | b'in order to limit disk reading and memory usage on older ' | |
|
371 | b'version, the span of a delta chain from its root to its ' | |
|
372 | b'end is limited, whatever the relevant data in this span. ' | |
|
373 | b'This can severly limit Mercurial ability to build good ' | |
|
374 | b'chain of delta resulting is much more storage space being ' | |
|
375 | b'taken and limit reusability of on disk delta during ' | |
|
376 | b'exchange.' | |
|
377 | ) | |
|
378 | ||
|
379 | upgrademessage = _( | |
|
380 | b'Revlog supports delta chain with more unused data ' | |
|
381 | b'between payload. These gaps will be skipped at read ' | |
|
382 | b'time. This allows for better delta chains, making a ' | |
|
383 | b'better compression and faster exchange with server.' | |
|
384 | ) | |
|
385 | ||
|
386 | ||
|
387 | @registerformatvariant | |
|
388 | class sidedata(requirementformatvariant): | |
|
389 | name = b'sidedata' | |
|
390 | ||
|
391 | _requirement = requirements.SIDEDATA_REQUIREMENT | |
|
392 | ||
|
393 | default = False | |
|
394 | ||
|
395 | description = _( | |
|
396 | b'Allows storage of extra data alongside a revision, ' | |
|
397 | b'unlocking various caching options.' | |
|
398 | ) | |
|
399 | ||
|
400 | upgrademessage = _(b'Allows storage of extra data alongside a revision.') | |
|
401 | ||
|
402 | ||
|
403 | @registerformatvariant | |
|
404 | class persistentnodemap(requirementformatvariant): | |
|
405 | name = b'persistent-nodemap' | |
|
406 | ||
|
407 | _requirement = requirements.NODEMAP_REQUIREMENT | |
|
408 | ||
|
409 | default = False | |
|
410 | ||
|
411 | description = _( | |
|
412 | b'persist the node -> rev mapping on disk to speedup lookup' | |
|
413 | ) | |
|
414 | ||
|
415 | upgrademessage = _(b'Speedup revision lookup by node id.') | |
|
416 | ||
|
417 | ||
|
418 | @registerformatvariant | |
|
419 | class copiessdc(requirementformatvariant): | |
|
420 | name = b'copies-sdc' | |
|
421 | ||
|
422 | _requirement = requirements.COPIESSDC_REQUIREMENT | |
|
423 | ||
|
424 | default = False | |
|
425 | ||
|
426 | description = _(b'Stores copies information alongside changesets.') | |
|
427 | ||
|
428 | upgrademessage = _( | |
|
429 | b'Allows to use more efficient algorithm to deal with ' b'copy tracing.' | |
|
430 | ) | |
|
431 | ||
|
432 | ||
|
433 | @registerformatvariant | |
|
434 | class removecldeltachain(formatvariant): | |
|
435 | name = b'plain-cl-delta' | |
|
436 | ||
|
437 | default = True | |
|
438 | ||
|
439 | description = _( | |
|
440 | b'changelog storage is using deltas instead of ' | |
|
441 | b'raw entries; changelog reading and any ' | |
|
442 | b'operation relying on changelog data are slower ' | |
|
443 | b'than they could be' | |
|
444 | ) | |
|
445 | ||
|
446 | upgrademessage = _( | |
|
447 | b'changelog storage will be reformated to ' | |
|
448 | b'store raw entries; changelog reading will be ' | |
|
449 | b'faster; changelog size may be reduced' | |
|
450 | ) | |
|
451 | ||
|
452 | @staticmethod | |
|
453 | def fromrepo(repo): | |
|
454 | # Mercurial 4.0 changed changelogs to not use delta chains. Search for | |
|
455 | # changelogs with deltas. | |
|
456 | cl = repo.changelog | |
|
457 | chainbase = cl.chainbase | |
|
458 | return all(rev == chainbase(rev) for rev in cl) | |
|
459 | ||
|
460 | @staticmethod | |
|
461 | def fromconfig(repo): | |
|
462 | return True | |
|
463 | ||
|
464 | ||
|
465 | @registerformatvariant | |
|
466 | class compressionengine(formatvariant): | |
|
467 | name = b'compression' | |
|
468 | default = b'zlib' | |
|
469 | ||
|
470 | description = _( | |
|
471 | b'Compresion algorithm used to compress data. ' | |
|
472 | b'Some engine are faster than other' | |
|
473 | ) | |
|
474 | ||
|
475 | upgrademessage = _( | |
|
476 | b'revlog content will be recompressed with the new algorithm.' | |
|
477 | ) | |
|
478 | ||
|
479 | @classmethod | |
|
480 | def fromrepo(cls, repo): | |
|
481 | # we allow multiple compression engine requirement to co-exist because | |
|
482 | # strickly speaking, revlog seems to support mixed compression style. | |
|
483 | # | |
|
484 | # The compression used for new entries will be "the last one" | |
|
485 | compression = b'zlib' | |
|
486 | for req in repo.requirements: | |
|
487 | prefix = req.startswith | |
|
488 | if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): | |
|
489 | compression = req.split(b'-', 2)[2] | |
|
490 | return compression | |
|
491 | ||
|
492 | @classmethod | |
|
493 | def fromconfig(cls, repo): | |
|
494 | compengines = repo.ui.configlist(b'format', b'revlog-compression') | |
|
495 | # return the first valid value as the selection code would do | |
|
496 | for comp in compengines: | |
|
497 | if comp in util.compengines: | |
|
498 | return comp | |
|
499 | ||
|
500 | # no valid compression found; display them all for clarity | |
|
501 | return b','.join(compengines) | |
|
502 | ||
|
503 | ||
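The split(b'-', 2)[2] above recovers the engine name from the requirement string; two worked examples (requirement names as produced by the supported-requirements helpers removed earlier in this diff):

    assert b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
    assert b'exp-compression-zstd'.split(b'-', 2)[2] == b'zstd'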
|
504 | @registerformatvariant | |
|
505 | class compressionlevel(formatvariant): | |
|
506 | name = b'compression-level' | |
|
507 | default = b'default' | |
|
508 | ||
|
509 | description = _(b'compression level') | |
|
510 | ||
|
511 | upgrademessage = _(b'revlog content will be recompressed') | |
|
512 | ||
|
513 | @classmethod | |
|
514 | def fromrepo(cls, repo): | |
|
515 | comp = compressionengine.fromrepo(repo) | |
|
516 | level = None | |
|
517 | if comp == b'zlib': | |
|
518 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') | |
|
519 | elif comp == b'zstd': | |
|
520 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') | |
|
521 | if level is None: | |
|
522 | return b'default' | |
|
523 | return bytes(level) | |
|
524 | ||
|
525 | @classmethod | |
|
526 | def fromconfig(cls, repo): | |
|
527 | comp = compressionengine.fromconfig(repo) | |
|
528 | level = None | |
|
529 | if comp == b'zlib': | |
|
530 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') | |
|
531 | elif comp == b'zstd': | |
|
532 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') | |
|
533 | if level is None: | |
|
534 | return b'default' | |
|
535 | return bytes(level) | |
|
536 | ||
|
537 | ||
|
538 | def finddeficiencies(repo): | |
|
539 | """returns a list of deficiencies that the repo suffer from""" | |
|
540 | deficiencies = [] | |
|
541 | ||
|
542 | # We could detect lack of revlogv1 and store here, but they were added | |
|
543 | # in 0.9.2 and we don't support upgrading repos without these | |
|
544 | # requirements, so let's not bother. | |
|
545 | ||
|
546 | for fv in allformatvariant: | |
|
547 | if not fv.fromrepo(repo): | |
|
548 | deficiencies.append(fv) | |
|
549 | ||
|
550 | return deficiencies | |
|
551 | ||
|
552 | ||
|
553 | # search without '-' to support older form on newer client. | |
|
554 | # | |
|
555 | # We don't enforce backward compatibility for debug command so this | |
|
556 | # might eventually be dropped. However, having to use two different | |
|
557 | # forms in scripts when comparing results is annoying enough to add | |
|
558 | # backward compatibility for a while. | |
|
559 | legacy_opts_map = { | |
|
560 | b'redeltaparent': b're-delta-parent', | |
|
561 | b'redeltamultibase': b're-delta-multibase', | |
|
562 | b'redeltaall': b're-delta-all', | |
|
563 | b'redeltafulladd': b're-delta-fulladd', | |
|
564 | } | |
|
565 | ||
|
566 | ALL_OPTIMISATIONS = [] | |
|
567 | ||
|
568 | ||
|
569 | def register_optimization(obj): | |
|
570 | ALL_OPTIMISATIONS.append(obj) | |
|
571 | return obj | |
|
572 | ||
|
573 | ||
|
574 | register_optimization( | |
|
575 | improvement( | |
|
576 | name=b're-delta-parent', | |
|
577 | type=OPTIMISATION, | |
|
578 | description=_( | |
|
579 | b'deltas within internal storage will be recalculated to ' | |
|
580 | b'choose an optimal base revision where this was not ' | |
|
581 | b'already done; the size of the repository may shrink and ' | |
|
582 | b'various operations may become faster; the first time ' | |
|
583 | b'this optimization is performed could slow down upgrade ' | |
|
584 | b'execution considerably; subsequent invocations should ' | |
|
585 | b'not run noticeably slower' | |
|
586 | ), | |
|
587 | upgrademessage=_( | |
|
588 | b'deltas within internal storage will choose a new ' | |
|
589 | b'base revision if needed' | |
|
590 | ), | |
|
591 | ) | |
|
592 | ) | |
|
593 | ||
|
594 | register_optimization( | |
|
595 | improvement( | |
|
596 | name=b're-delta-multibase', | |
|
597 | type=OPTIMISATION, | |
|
598 | description=_( | |
|
599 | b'deltas within internal storage will be recalculated ' | |
|
600 | b'against multiple base revision and the smallest ' | |
|
601 | b'difference will be used; the size of the repository may ' | |
|
602 | b'shrink significantly when there are many merges; this ' | |
|
603 | b'optimization will slow down execution in proportion to ' | |
|
604 | b'the number of merges in the repository and the amount ' | |
|
605 | b'of files in the repository; this slow down should not ' | |
|
606 | b'be significant unless there are tens of thousands of ' | |
|
607 | b'files and thousands of merges' | |
|
608 | ), | |
|
609 | upgrademessage=_( | |
|
610 | b'deltas within internal storage will choose an ' | |
|
611 | b'optimal delta by computing deltas against multiple ' | |
|
612 | b'parents; may slow down execution time ' | |
|
613 | b'significantly' | |
|
614 | ), | |
|
615 | ) | |
|
616 | ) | |
|
617 | ||
|
618 | register_optimization( | |
|
619 | improvement( | |
|
620 | name=b're-delta-all', | |
|
621 | type=OPTIMISATION, | |
|
622 | description=_( | |
|
623 | b'deltas within internal storage will always be ' | |
|
624 | b'recalculated without reusing prior deltas; this will ' | |
|
625 | b'likely make execution run several times slower; this ' | |
|
626 | b'optimization is typically not needed' | |
|
627 | ), | |
|
628 | upgrademessage=_( | |
|
629 | b'deltas within internal storage will be fully ' | |
|
630 | b'recomputed; this will likely drastically slow down ' | |
|
631 | b'execution time' | |
|
632 | ), | |
|
633 | ) | |
|
634 | ) | |
|
635 | ||
|
636 | register_optimization( | |
|
637 | improvement( | |
|
638 | name=b're-delta-fulladd', | |
|
639 | type=OPTIMISATION, | |
|
640 | description=_( | |
|
641 | b'every revision will be re-added as if it was new ' | |
|
642 | b'content. It will go through the full storage ' | |
|
643 | b'mechanism giving extensions a chance to process it ' | |
|
644 | b'(eg. lfs). This is similar to "re-delta-all" but even ' | |
|
645 | b'slower since more logic is involved.' | |
|
646 | ), | |
|
647 | upgrademessage=_( | |
|
648 | b'each revision will be added as new content to the ' | |
|
649 | b'internal storage; this will likely drastically slow ' | |
|
650 | b'down execution time, but some extensions might need ' | |
|
651 | b'it' | |
|
652 | ), | |
|
653 | ) | |
|
654 | ) | |
|
655 | ||
|
656 | ||
|
657 | def findoptimizations(repo): | |
|
658 | """Determine optimisation that could be used during upgrade""" | |
|
659 | # These are unconditionally added. There is logic later that figures out | |
|
660 | # which ones to apply. | |
|
661 | return list(ALL_OPTIMISATIONS) | |
|
662 | ||
|
663 | ||
|
664 | def determineactions(repo, deficiencies, sourcereqs, destreqs): | |
|
665 | """Determine upgrade actions that will be performed. | |
|
666 | ||
|
667 | Given a list of improvements as returned by ``finddeficiencies`` and | |
|
668 | ``findoptimizations``, determine the list of upgrade actions that | |
|
669 | will be performed. | |
|
670 | ||
|
671 | The role of this function is to filter improvements if needed, apply | |
|
672 | recommended optimizations from the improvements list that make sense, | |
|
673 | etc. | |
|
674 | ||
|
675 | Returns a list of action names. | |
|
676 | """ | |
|
677 | newactions = [] | |
|
678 | ||
|
679 | for d in deficiencies: | |
|
680 | name = d._requirement | |
|
681 | ||
|
682 | # If the action is a requirement that doesn't show up in the | |
|
683 | # destination requirements, prune the action. | |
|
684 | if name is not None and name not in destreqs: | |
|
685 | continue | |
|
686 | ||
|
687 | newactions.append(d) | |
|
688 | ||
|
689 | # FUTURE consider adding some optimizations here for certain transitions. | |
|
690 | # e.g. adding generaldelta could schedule parent redeltas. | |
|
691 | ||
|
692 | return newactions | |
|
693 | ||
|
694 | 28 | |
|
695 | 29 | def _revlogfrompath(repo, path): |
|
696 | 30 | """Obtain a revlog from a repo path. |
|
697 | 31 | |
|
698 | 32 | An instance of the appropriate class is returned. |
|
699 | 33 | """ |
|
700 | 34 | if path == b'00changelog.i': |
|
701 | 35 | return changelog.changelog(repo.svfs) |
|
702 | 36 | elif path.endswith(b'00manifest.i'): |
|
703 | 37 | mandir = path[: -len(b'00manifest.i')] |
|
704 | 38 | return manifest.manifestrevlog(repo.svfs, tree=mandir) |
|
705 | 39 | else: |
|
706 | 40 | # reverse of "/".join(("data", path + ".i")) |
|
707 | 41 | return filelog.filelog(repo.svfs, path[5:-2]) |
|
708 | 42 | |
|
709 | 43 | |
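The path[5:-2] slice in _revlogfrompath works because filelog store paths always have the form data/<tracked file>.i; for example (hypothetical path):

    path = b'data/subdir/readme.txt.i'
    assert path[5:-2] == b'subdir/readme.txt'  # strips the b'data/' prefix and the b'.i' suffix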
|
710 | 44 | def _copyrevlog(tr, destrepo, oldrl, unencodedname): |
|
711 | 45 | """copy all relevant files for `oldrl` into `destrepo` store |
|
712 | 46 | |
|
713 | 47 | Files are copied "as is" without any transformation. The copy is performed |
|
714 | 48 | without extra checks. Callers are responsible for making sure the copied |
|
715 | 49 | content is compatible with format of the destination repository. |
|
716 | 50 | """ |
|
717 | 51 | oldrl = getattr(oldrl, '_revlog', oldrl) |
|
718 | 52 | newrl = _revlogfrompath(destrepo, unencodedname) |
|
719 | 53 | newrl = getattr(newrl, '_revlog', newrl) |
|
720 | 54 | |
|
721 | 55 | oldvfs = oldrl.opener |
|
722 | 56 | newvfs = newrl.opener |
|
723 | 57 | oldindex = oldvfs.join(oldrl.indexfile) |
|
724 | 58 | newindex = newvfs.join(newrl.indexfile) |
|
725 | 59 | olddata = oldvfs.join(oldrl.datafile) |
|
726 | 60 | newdata = newvfs.join(newrl.datafile) |
|
727 | 61 | |
|
728 | 62 | with newvfs(newrl.indexfile, b'w'): |
|
729 | 63 | pass # create all the directories |
|
730 | 64 | |
|
731 | 65 | util.copyfile(oldindex, newindex) |
|
732 | 66 | copydata = oldrl.opener.exists(oldrl.datafile) |
|
733 | 67 | if copydata: |
|
734 | 68 | util.copyfile(olddata, newdata) |
|
735 | 69 | |
|
736 | 70 | if not ( |
|
737 | 71 | unencodedname.endswith(b'00changelog.i') |
|
738 | 72 | or unencodedname.endswith(b'00manifest.i') |
|
739 | 73 | ): |
|
740 | 74 | destrepo.svfs.fncache.add(unencodedname) |
|
741 | 75 | if copydata: |
|
742 | 76 | destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d') |
|
743 | 77 | |
|
744 | 78 | |
|
745 | 79 | UPGRADE_CHANGELOG = b"changelog" |
|
746 | 80 | UPGRADE_MANIFEST = b"manifest" |
|
747 | 81 | UPGRADE_FILELOGS = b"all-filelogs" |
|
748 | 82 | |
|
749 | 83 | UPGRADE_ALL_REVLOGS = frozenset( |
|
750 | 84 | [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS] |
|
751 | 85 | ) |
|
752 | 86 | |
|
753 | 87 | |
|
754 | 88 | def getsidedatacompanion(srcrepo, dstrepo): |
|
755 | 89 | sidedatacompanion = None |
|
756 | 90 | removedreqs = srcrepo.requirements - dstrepo.requirements |
|
757 | 91 | addedreqs = dstrepo.requirements - srcrepo.requirements |
|
758 | 92 | if requirements.SIDEDATA_REQUIREMENT in removedreqs: |
|
759 | 93 | |
|
760 | 94 | def sidedatacompanion(rl, rev): |
|
761 | 95 | rl = getattr(rl, '_revlog', rl) |
|
762 | 96 | if rl.flags(rev) & revlog.REVIDX_SIDEDATA: |
|
763 | 97 | return True, (), {}, 0, 0 |
|
764 | 98 | return False, (), {}, 0, 0 |
|
765 | 99 | |
|
766 | 100 | elif requirements.COPIESSDC_REQUIREMENT in addedreqs: |
|
767 | 101 | sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo) |
|
768 | 102 | elif requirements.COPIESSDC_REQUIREMENT in removedreqs: |
|
769 | 103 | sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo) |
|
770 | 104 | return sidedatacompanion |
|
771 | 105 | |
|
772 | 106 | |
|
773 | 107 | def matchrevlog(revlogfilter, entry): |
|
774 | 108 | """check if a revlog is selected for cloning. |
|
775 | 109 | |
|
776 | 110 | In other words, are there any updates which need to be done on revlog |
|
777 | 111 | or it can be blindly copied. |
|
778 | 112 | |
|
779 | 113 | The store entry is checked against the passed filter""" |
|
780 | 114 | if entry.endswith(b'00changelog.i'): |
|
781 | 115 | return UPGRADE_CHANGELOG in revlogfilter |
|
782 | 116 | elif entry.endswith(b'00manifest.i'): |
|
783 | 117 | return UPGRADE_MANIFEST in revlogfilter |
|
784 | 118 | return UPGRADE_FILELOGS in revlogfilter |
|
785 | 119 | |
|
786 | 120 | |
|
787 | 121 | def _clonerevlogs( |
|
788 | 122 | ui, |
|
789 | 123 | srcrepo, |
|
790 | 124 | dstrepo, |
|
791 | 125 | tr, |
|
792 | 126 | deltareuse, |
|
793 | 127 | forcedeltabothparents, |
|
794 | 128 | revlogs=UPGRADE_ALL_REVLOGS, |
|
795 | 129 | ): |
|
796 | 130 | """Copy revlogs between 2 repos.""" |
|
797 | 131 | revcount = 0 |
|
798 | 132 | srcsize = 0 |
|
799 | 133 | srcrawsize = 0 |
|
800 | 134 | dstsize = 0 |
|
801 | 135 | fcount = 0 |
|
802 | 136 | frevcount = 0 |
|
803 | 137 | fsrcsize = 0 |
|
804 | 138 | frawsize = 0 |
|
805 | 139 | fdstsize = 0 |
|
806 | 140 | mcount = 0 |
|
807 | 141 | mrevcount = 0 |
|
808 | 142 | msrcsize = 0 |
|
809 | 143 | mrawsize = 0 |
|
810 | 144 | mdstsize = 0 |
|
811 | 145 | crevcount = 0 |
|
812 | 146 | csrcsize = 0 |
|
813 | 147 | crawsize = 0 |
|
814 | 148 | cdstsize = 0 |
|
815 | 149 | |
|
816 | 150 | alldatafiles = list(srcrepo.store.walk()) |
|
817 | 151 | |
|
818 | 152 | # Perform a pass to collect metadata. This validates we can open all |
|
819 | 153 | # source files and allows a unified progress bar to be displayed. |
|
820 | 154 | for unencoded, encoded, size in alldatafiles: |
|
821 | 155 | if unencoded.endswith(b'.d'): |
|
822 | 156 | continue |
|
823 | 157 | |
|
824 | 158 | rl = _revlogfrompath(srcrepo, unencoded) |
|
825 | 159 | |
|
826 | 160 | info = rl.storageinfo( |
|
827 | 161 | exclusivefiles=True, |
|
828 | 162 | revisionscount=True, |
|
829 | 163 | trackedsize=True, |
|
830 | 164 | storedsize=True, |
|
831 | 165 | ) |
|
832 | 166 | |
|
833 | 167 | revcount += info[b'revisionscount'] or 0 |
|
834 | 168 | datasize = info[b'storedsize'] or 0 |
|
835 | 169 | rawsize = info[b'trackedsize'] or 0 |
|
836 | 170 | |
|
837 | 171 | srcsize += datasize |
|
838 | 172 | srcrawsize += rawsize |
|
839 | 173 | |
|
840 | 174 | # This is for the separate progress bars. |
|
841 | 175 | if isinstance(rl, changelog.changelog): |
|
842 | 176 | crevcount += len(rl) |
|
843 | 177 | csrcsize += datasize |
|
844 | 178 | crawsize += rawsize |
|
845 | 179 | elif isinstance(rl, manifest.manifestrevlog): |
|
846 | 180 | mcount += 1 |
|
847 | 181 | mrevcount += len(rl) |
|
848 | 182 | msrcsize += datasize |
|
849 | 183 | mrawsize += rawsize |
|
850 | 184 | elif isinstance(rl, filelog.filelog): |
|
851 | 185 | fcount += 1 |
|
852 | 186 | frevcount += len(rl) |
|
853 | 187 | fsrcsize += datasize |
|
854 | 188 | frawsize += rawsize |
|
855 | 189 | else: |
|
856 | 190 | raise error.ProgrammingError(b'unknown revlog type') |
|
857 | 191 | |
|
858 | 192 | if not revcount: |
|
859 | 193 | return |
|
860 | 194 | |
|
861 | 195 | ui.status( |
|
862 | 196 | _( |
|
863 | 197 | b'migrating %d total revisions (%d in filelogs, %d in manifests, ' |
|
864 | 198 | b'%d in changelog)\n' |
|
865 | 199 | ) |
|
866 | 200 | % (revcount, frevcount, mrevcount, crevcount) |
|
867 | 201 | ) |
|
868 | 202 | ui.status( |
|
869 | 203 | _(b'migrating %s in store; %s tracked data\n') |
|
870 | 204 | % ((util.bytecount(srcsize), util.bytecount(srcrawsize))) |
|
871 | 205 | ) |
|
872 | 206 | |
|
873 | 207 | # Used to keep track of progress. |
|
874 | 208 | progress = None |
|
875 | 209 | |
|
876 | 210 | def oncopiedrevision(rl, rev, node): |
|
877 | 211 | progress.increment() |
|
878 | 212 | |
|
879 | 213 | sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo) |
|
880 | 214 | |
|
881 | 215 | # Do the actual copying. |
|
882 | 216 | # FUTURE this operation can be farmed off to worker processes. |
|
883 | 217 | seen = set() |
|
884 | 218 | for unencoded, encoded, size in alldatafiles: |
|
885 | 219 | if unencoded.endswith(b'.d'): |
|
886 | 220 | continue |
|
887 | 221 | |
|
888 | 222 | oldrl = _revlogfrompath(srcrepo, unencoded) |
|
889 | 223 | |
|
890 | 224 | if isinstance(oldrl, changelog.changelog) and b'c' not in seen: |
|
891 | 225 | ui.status( |
|
892 | 226 | _( |
|
893 | 227 | b'finished migrating %d manifest revisions across %d ' |
|
894 | 228 | b'manifests; change in size: %s\n' |
|
895 | 229 | ) |
|
896 | 230 | % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) |
|
897 | 231 | ) |
|
898 | 232 | |
|
899 | 233 | ui.status( |
|
900 | 234 | _( |
|
901 | 235 | b'migrating changelog containing %d revisions ' |
|
902 | 236 | b'(%s in store; %s tracked data)\n' |
|
903 | 237 | ) |
|
904 | 238 | % ( |
|
905 | 239 | crevcount, |
|
906 | 240 | util.bytecount(csrcsize), |
|
907 | 241 | util.bytecount(crawsize), |
|
908 | 242 | ) |
|
909 | 243 | ) |
|
910 | 244 | seen.add(b'c') |
|
911 | 245 | progress = srcrepo.ui.makeprogress( |
|
912 | 246 | _(b'changelog revisions'), total=crevcount |
|
913 | 247 | ) |
|
914 | 248 | elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen: |
|
915 | 249 | ui.status( |
|
916 | 250 | _( |
|
917 | 251 | b'finished migrating %d filelog revisions across %d ' |
|
918 | 252 | b'filelogs; change in size: %s\n' |
|
919 | 253 | ) |
|
920 | 254 | % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) |
|
921 | 255 | ) |
|
922 | 256 | |
|
923 | 257 | ui.status( |
|
924 | 258 | _( |
|
925 | 259 | b'migrating %d manifests containing %d revisions ' |
|
926 | 260 | b'(%s in store; %s tracked data)\n' |
|
927 | 261 | ) |
|
928 | 262 | % ( |
|
929 | 263 | mcount, |
|
930 | 264 | mrevcount, |
|
931 | 265 | util.bytecount(msrcsize), |
|
932 | 266 | util.bytecount(mrawsize), |
|
933 | 267 | ) |
|
934 | 268 | ) |
|
935 | 269 | seen.add(b'm') |
|
936 | 270 | if progress: |
|
937 | 271 | progress.complete() |
|
938 | 272 | progress = srcrepo.ui.makeprogress( |
|
939 | 273 | _(b'manifest revisions'), total=mrevcount |
|
940 | 274 | ) |
|
941 | 275 | elif b'f' not in seen: |
|
942 | 276 | ui.status( |
|
943 | 277 | _( |
|
944 | 278 | b'migrating %d filelogs containing %d revisions ' |
|
945 | 279 | b'(%s in store; %s tracked data)\n' |
|
946 | 280 | ) |
|
947 | 281 | % ( |
|
948 | 282 | fcount, |
|
949 | 283 | frevcount, |
|
950 | 284 | util.bytecount(fsrcsize), |
|
951 | 285 | util.bytecount(frawsize), |
|
952 | 286 | ) |
|
953 | 287 | ) |
|
954 | 288 | seen.add(b'f') |
|
955 | 289 | if progress: |
|
956 | 290 | progress.complete() |
|
957 | 291 | progress = srcrepo.ui.makeprogress( |
|
958 | 292 | _(b'file revisions'), total=frevcount |
|
959 | 293 | ) |
|
960 | 294 | |
|
961 | 295 | if matchrevlog(revlogs, unencoded): |
|
962 | 296 | ui.note( |
|
963 | 297 | _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded) |
|
964 | 298 | ) |
|
965 | 299 | newrl = _revlogfrompath(dstrepo, unencoded) |
|
966 | 300 | oldrl.clone( |
|
967 | 301 | tr, |
|
968 | 302 | newrl, |
|
969 | 303 | addrevisioncb=oncopiedrevision, |
|
970 | 304 | deltareuse=deltareuse, |
|
971 | 305 | forcedeltabothparents=forcedeltabothparents, |
|
972 | 306 | sidedatacompanion=sidedatacompanion, |
|
973 | 307 | ) |
|
974 | 308 | else: |
|
975 | 309 | msg = _(b'blindly copying %s containing %i revisions\n') |
|
976 | 310 | ui.note(msg % (unencoded, len(oldrl))) |
|
977 | 311 | _copyrevlog(tr, dstrepo, oldrl, unencoded) |
|
978 | 312 | |
|
979 | 313 | newrl = _revlogfrompath(dstrepo, unencoded) |
|
980 | 314 | |
|
981 | 315 | info = newrl.storageinfo(storedsize=True) |
|
982 | 316 | datasize = info[b'storedsize'] or 0 |
|
983 | 317 | |
|
984 | 318 | dstsize += datasize |
|
985 | 319 | |
|
986 | 320 | if isinstance(newrl, changelog.changelog): |
|
987 | 321 | cdstsize += datasize |
|
988 | 322 | elif isinstance(newrl, manifest.manifestrevlog): |
|
989 | 323 | mdstsize += datasize |
|
990 | 324 | else: |
|
991 | 325 | fdstsize += datasize |
|
992 | 326 | |
|
993 | 327 | progress.complete() |
|
994 | 328 | |
|
995 | 329 | ui.status( |
|
996 | 330 | _( |
|
997 | 331 | b'finished migrating %d changelog revisions; change in size: ' |
|
998 | 332 | b'%s\n' |
|
999 | 333 | ) |
|
1000 | 334 | % (crevcount, util.bytecount(cdstsize - csrcsize)) |
|
1001 | 335 | ) |
|
1002 | 336 | |
|
1003 | 337 | ui.status( |
|
1004 | 338 | _( |
|
1005 | 339 | b'finished migrating %d total revisions; total change in store ' |
|
1006 | 340 | b'size: %s\n' |
|
1007 | 341 | ) |
|
1008 | 342 | % (revcount, util.bytecount(dstsize - srcsize)) |
|
1009 | 343 | ) |
|
1010 | 344 | |
|
1011 | 345 | |
|
1012 | 346 | def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st): |
|
1013 | 347 | """Determine whether to copy a store file during upgrade. |
|
1014 | 348 | |
|
1015 | 349 | This function is called when migrating store files from ``srcrepo`` to |
|
1016 | 350 | ``dstrepo`` as part of upgrading a repository. |
|
1017 | 351 | |
|
1018 | 352 | Args: |
|
1019 | 353 | srcrepo: repo we are copying from |
|
1020 | 354 | dstrepo: repo we are copying to |
|
1021 | 355 | requirements: set of requirements for ``dstrepo`` |
|
1022 | 356 | path: store file being examined |
|
1023 | 357 | mode: the ``ST_MODE`` file type of ``path`` |
|
1024 | 358 | st: ``stat`` data structure for ``path`` |
|
1025 | 359 | |
|
1026 | 360 | Function should return ``True`` if the file is to be copied. |
|
1027 | 361 | """ |
|
1028 | 362 | # Skip revlogs. |
|
1029 | 363 | if path.endswith((b'.i', b'.d', b'.n', b'.nd')): |
|
1030 | 364 | return False |
|
1031 | 365 | # Skip transaction related files. |
|
1032 | 366 | if path.startswith(b'undo'): |
|
1033 | 367 | return False |
|
1034 | 368 | # Only copy regular files. |
|
1035 | 369 | if mode != stat.S_IFREG: |
|
1036 | 370 | return False |
|
1037 | 371 | # Skip other skipped files. |
|
1038 | 372 | if path in (b'lock', b'fncache'): |
|
1039 | 373 | return False |
|
1040 | 374 | |
|
1041 | 375 | return True |
|
1042 | 376 | |
|
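Roughly, the filter behaves like this for a few typical store entries (a sketch; src, dst and reqs stand in for the real arguments, which this function does not actually consult, and the mode is assumed to be that of a regular file):

    import stat

    src = dst = reqs = None  # ignored by the current implementation
    mode = stat.S_IFREG
    assert not _filterstorefile(src, dst, reqs, b'00changelog.i', mode, None)     # revlogs are cloned separately
    assert not _filterstorefile(src, dst, reqs, b'undo.backupfiles', mode, None)  # transaction leftovers
    assert not _filterstorefile(src, dst, reqs, b'fncache', mode, None)           # maintained by the clone itself
    assert _filterstorefile(src, dst, reqs, b'phaseroots', mode, None)            # copied as-is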
1043 | 377 | |
|
1044 | 378 | def _finishdatamigration(ui, srcrepo, dstrepo, requirements): |
|
1045 | 379 | """Hook point for extensions to perform additional actions during upgrade. |
|
1046 | 380 | |
|
1047 | 381 | This function is called after revlogs and store files have been copied but |
|
1048 | 382 | before the new store is swapped into the original location. |
|
1049 | 383 | """ |
|
1050 | 384 | |
|
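Extensions can latch onto this hook with extensions.wrapfunction to migrate their own data across the upgrade (the lfs extension does something along these lines to carry its blob store over). A hedged sketch, with the registration left as a comment because the exact import target of the wrapped module depends on the Mercurial version:

    from mercurial import extensions

    def finishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
        orig(ui, srcrepo, dstrepo, requirements)
        ui.note(b'copying extension-specific data into the new store\n')

    # in the extension's extsetup(), something like:
    # extensions.wrapfunction(upgrade_engine, b'_finishdatamigration', finishdatamigration)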
1051 | 385 | |
|
1052 | def _upgraderepo( | |
|
386 | def upgrade( | |
|
1053 | 387 | ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS |
|
1054 | 388 | ): |
|
1055 | 389 | """Do the low-level work of upgrading a repository. |
|
1056 | 390 | |
|
1057 | 391 | The upgrade is effectively performed as a copy between a source |
|
1058 | 392 | repository and a temporary destination repository. |
|
1059 | 393 | |
|
1060 | 394 | The source repository is unmodified for as long as possible so the |
|
1061 | 395 | upgrade can abort at any time without causing loss of service for |
|
1062 | 396 | readers and without corrupting the source repository. |
|
1063 | 397 | """ |
|
1064 | 398 | assert srcrepo.currentwlock() |
|
1065 | 399 | assert dstrepo.currentwlock() |
|
1066 | 400 | |
|
1067 | 401 | ui.status( |
|
1068 | 402 | _( |
|
1069 | 403 | b'(it is safe to interrupt this process any time before ' |
|
1070 | 404 | b'data migration completes)\n' |
|
1071 | 405 | ) |
|
1072 | 406 | ) |
|
1073 | 407 | |
|
1074 | 408 | if b're-delta-all' in actions: |
|
1075 | 409 | deltareuse = revlog.revlog.DELTAREUSENEVER |
|
1076 | 410 | elif b're-delta-parent' in actions: |
|
1077 | 411 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS |
|
1078 | 412 | elif b're-delta-multibase' in actions: |
|
1079 | 413 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS |
|
1080 | 414 | elif b're-delta-fulladd' in actions: |
|
1081 | 415 | deltareuse = revlog.revlog.DELTAREUSEFULLADD |
|
1082 | 416 | else: |
|
1083 | 417 | deltareuse = revlog.revlog.DELTAREUSEALWAYS |
|
1084 | 418 | |
|
1085 | 419 | with dstrepo.transaction(b'upgrade') as tr: |
|
1086 | 420 | _clonerevlogs( |
|
1087 | 421 | ui, |
|
1088 | 422 | srcrepo, |
|
1089 | 423 | dstrepo, |
|
1090 | 424 | tr, |
|
1091 | 425 | deltareuse, |
|
1092 | 426 | b're-delta-multibase' in actions, |
|
1093 | 427 | revlogs=revlogs, |
|
1094 | 428 | ) |
|
1095 | 429 | |
|
1096 | 430 | # Now copy other files in the store directory. |
|
1097 | 431 | # The sorted() makes execution deterministic. |
|
1098 | 432 | for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)): |
|
1099 | 433 | if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st): |
|
1100 | 434 | continue |
|
1101 | 435 | |
|
1102 | 436 | srcrepo.ui.status(_(b'copying %s\n') % p) |
|
1103 | 437 | src = srcrepo.store.rawvfs.join(p) |
|
1104 | 438 | dst = dstrepo.store.rawvfs.join(p) |
|
1105 | 439 | util.copyfile(src, dst, copystat=True) |
|
1106 | 440 | |
|
1107 | 441 | _finishdatamigration(ui, srcrepo, dstrepo, requirements) |
|
1108 | 442 | |
|
1109 | 443 | ui.status(_(b'data fully migrated to temporary repository\n')) |
|
1110 | 444 | |
|
1111 | 445 | backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path) |
|
1112 | 446 | backupvfs = vfsmod.vfs(backuppath) |
|
1113 | 447 | |
|
1114 | 448 | # Make a backup of requires file first, as it is the first to be modified. |
|
1115 | 449 | util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')) |
|
1116 | 450 | |
|
1117 | 451 | # We install an arbitrary requirement that clients must not support |
|
1118 | 452 | # as a mechanism to lock out new clients during the data swap. This is |
|
1119 | 453 | # better than allowing a client to continue while the repository is in |
|
1120 | 454 | # an inconsistent state. |
|
1121 | 455 | ui.status( |
|
1122 | 456 | _( |
|
1123 | 457 | b'marking source repository as being upgraded; clients will be ' |
|
1124 | 458 | b'unable to read from repository\n' |
|
1125 | 459 | ) |
|
1126 | 460 | ) |
|
1127 | 461 | scmutil.writereporequirements( |
|
1128 | 462 | srcrepo, srcrepo.requirements | {b'upgradeinprogress'} |
|
1129 | 463 | ) |
|
1130 | 464 | |
|
1131 | 465 | ui.status(_(b'starting in-place swap of repository data\n')) |
|
1132 | 466 | ui.status(_(b'replaced files will be backed up at %s\n') % backuppath) |
|
1133 | 467 | |
|
1134 | 468 | # Now swap in the new store directory. Doing it as a rename should make |
|
1135 | 469 | # the operation nearly instantaneous and atomic (at least in well-behaved |
|
1136 | 470 | # environments). |
|
1137 | 471 | ui.status(_(b'replacing store...\n')) |
|
1138 | 472 | tstart = util.timer() |
|
1139 | 473 | util.rename(srcrepo.spath, backupvfs.join(b'store')) |
|
1140 | 474 | util.rename(dstrepo.spath, srcrepo.spath) |
|
1141 | 475 | elapsed = util.timer() - tstart |
|
1142 | 476 | ui.status( |
|
1143 | 477 | _( |
|
1144 | 478 | b'store replacement complete; repository was inconsistent for ' |
|
1145 | 479 | b'%0.1fs\n' |
|
1146 | 480 | ) |
|
1147 | 481 | % elapsed |
|
1148 | 482 | ) |
|
1149 | 483 | |
|
1150 | 484 | # We first write the requirements file. Any new requirements will lock |
|
1151 | 485 | # out legacy clients. |
|
1152 | 486 | ui.status( |
|
1153 | 487 | _( |
|
1154 | 488 | b'finalizing requirements file and making repository readable ' |
|
1155 | 489 | b'again\n' |
|
1156 | 490 | ) |
|
1157 | 491 | ) |
|
1158 | 492 | scmutil.writereporequirements(srcrepo, requirements) |
|
1159 | 493 | |
|
1160 | 494 | # The lock file from the old store won't be removed because nothing has a |
|
1161 | 495 | # reference to its new location. So clean it up manually. Alternatively, we |
|
1162 | 496 | # could update srcrepo.svfs and other variables to point to the new |
|
1163 | 497 | # location. This is simpler. |
|
1164 | 498 | backupvfs.unlink(b'store/lock') |
|
1165 | 499 | |
|
1166 | 500 | return backuppath |
|
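The action-to-delta-reuse selection near the top of this function can also be read as a small lookup, highest priority first (a sketch only; the constants come from revlog.revlog, and 're-delta-multibase' additionally turns on forcedeltabothparents):

    from mercurial import revlog

    _DELTA_REUSE_BY_ACTION = [
        (b're-delta-all', revlog.revlog.DELTAREUSENEVER),        # recompute every delta
        (b're-delta-parent', revlog.revlog.DELTAREUSESAMEREVS),
        (b're-delta-multibase', revlog.revlog.DELTAREUSESAMEREVS),
        (b're-delta-fulladd', revlog.revlog.DELTAREUSEFULLADD),
    ]

    def pickdeltareuse(actions):
        for action, policy in _DELTA_REUSE_BY_ACTION:
            if action in actions:
                return policy
        return revlog.revlog.DELTAREUSEALWAYS  # default: reuse stored deltas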
1167 | ||
|
1168 | ||
|
1169 | def upgraderepo( | |
|
1170 | ui, | |
|
1171 | repo, | |
|
1172 | run=False, | |
|
1173 | optimize=None, | |
|
1174 | backup=True, | |
|
1175 | manifest=None, | |
|
1176 | changelog=None, | |
|
1177 | filelogs=None, | |
|
1178 | ): | |
|
1179 | """Upgrade a repository in place.""" | |
|
1180 | if optimize is None: | |
|
1181 | optimize = [] | |
|
1182 | optimize = {legacy_opts_map.get(o, o) for o in optimize} | |
|
1183 | repo = repo.unfiltered() | |
|
1184 | ||
|
1185 | revlogs = set(UPGRADE_ALL_REVLOGS) | |
|
1186 | specentries = ( | |
|
1187 | (UPGRADE_CHANGELOG, changelog), | |
|
1188 | (UPGRADE_MANIFEST, manifest), | |
|
1189 | (UPGRADE_FILELOGS, filelogs), | |
|
1190 | ) | |
|
1191 | specified = [(y, x) for (y, x) in specentries if x is not None] | |
|
1192 | if specified: | |
|
1193 | # we have some limitation on revlogs to be recloned | |
|
1194 | if any(x for y, x in specified): | |
|
1195 | revlogs = set() | |
|
1196 | for upgrade, enabled in specified: | |
|
1197 | if enabled: | |
|
1198 | revlogs.add(upgrade) | |
|
1199 | else: | |
|
1200 | # none are enabled | |
|
1201 | for upgrade, __ in specified: | |
|
1202 | revlogs.discard(upgrade) | |
|
1203 | ||
|
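A hedged illustration of how the changelog/manifest/filelogs keyword flags above translate into the revlogs set (the flags mirror command-line selectors; None means a flag was not passed at all):

    # nothing specified                     -> revlogs == UPGRADE_ALL_REVLOGS
    # changelog=True                        -> revlogs == {UPGRADE_CHANGELOG}
    # changelog=False (others unspecified)  -> revlogs == {UPGRADE_MANIFEST, UPGRADE_FILELOGS}
    # changelog=True, manifest=False        -> revlogs == {UPGRADE_CHANGELOG}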
1204 | # Ensure the repository can be upgraded. | |
|
1205 | missingreqs = requiredsourcerequirements(repo) - repo.requirements | |
|
1206 | if missingreqs: | |
|
1207 | raise error.Abort( | |
|
1208 | _(b'cannot upgrade repository; requirement missing: %s') | |
|
1209 | % _(b', ').join(sorted(missingreqs)) | |
|
1210 | ) | |
|
1211 | ||
|
1212 | blockedreqs = blocksourcerequirements(repo) & repo.requirements | |
|
1213 | if blockedreqs: | |
|
1214 | raise error.Abort( | |
|
1215 | _( | |
|
1216 | b'cannot upgrade repository; unsupported source ' | |
|
1217 | b'requirement: %s' | |
|
1218 | ) | |
|
1219 | % _(b', ').join(sorted(blockedreqs)) | |
|
1220 | ) | |
|
1221 | ||
|
1222 | # FUTURE there is potentially a need to control the wanted requirements via | |
|
1223 | # command arguments or via an extension hook point. | |
|
1224 | newreqs = localrepo.newreporequirements( | |
|
1225 | repo.ui, localrepo.defaultcreateopts(repo.ui) | |
|
1226 | ) | |
|
1227 | newreqs.update(preservedrequirements(repo)) | |
|
1228 | ||
|
1229 | noremovereqs = ( | |
|
1230 | repo.requirements - newreqs - supportremovedrequirements(repo) | |
|
1231 | ) | |
|
1232 | if noremovereqs: | |
|
1233 | raise error.Abort( | |
|
1234 | _( | |
|
1235 | b'cannot upgrade repository; requirement would be ' | |
|
1236 | b'removed: %s' | |
|
1237 | ) | |
|
1238 | % _(b', ').join(sorted(noremovereqs)) | |
|
1239 | ) | |
|
1240 | ||
|
1241 | noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo) | |
|
1242 | if noaddreqs: | |
|
1243 | raise error.Abort( | |
|
1244 | _( | |
|
1245 | b'cannot upgrade repository; do not support adding ' | |
|
1246 | b'requirement: %s' | |
|
1247 | ) | |
|
1248 | % _(b', ').join(sorted(noaddreqs)) | |
|
1249 | ) | |
|
1250 | ||
|
1251 | unsupportedreqs = newreqs - supporteddestrequirements(repo) | |
|
1252 | if unsupportedreqs: | |
|
1253 | raise error.Abort( | |
|
1254 | _( | |
|
1255 | b'cannot upgrade repository; do not support ' | |
|
1256 | b'destination requirement: %s' | |
|
1257 | ) | |
|
1258 | % _(b', ').join(sorted(unsupportedreqs)) | |
|
1259 | ) | |
|
1260 | ||
|
1261 | # Find and validate all improvements that can be made. | |
|
1262 | alloptimizations = findoptimizations(repo) | |
|
1263 | ||
|
1264 | # Apply and Validate arguments. | |
|
1265 | optimizations = [] | |
|
1266 | for o in alloptimizations: | |
|
1267 | if o.name in optimize: | |
|
1268 | optimizations.append(o) | |
|
1269 | optimize.discard(o.name) | |
|
1270 | ||
|
1271 | if optimize: # anything left is unknown | |
|
1272 | raise error.Abort( | |
|
1273 | _(b'unknown optimization action requested: %s') | |
|
1274 | % b', '.join(sorted(optimize)), | |
|
1275 | hint=_(b'run without arguments to see valid optimizations'), | |
|
1276 | ) | |
|
1277 | ||
|
1278 | deficiencies = finddeficiencies(repo) | |
|
1279 | actions = determineactions(repo, deficiencies, repo.requirements, newreqs) | |
|
1280 | actions.extend( | |
|
1281 | o | |
|
1282 | for o in sorted(optimizations) | |
|
1283 | # determineactions could have added optimisation | |
|
1284 | if o not in actions | |
|
1285 | ) | |
|
1286 | ||
|
1287 | removedreqs = repo.requirements - newreqs | |
|
1288 | addedreqs = newreqs - repo.requirements | |
|
1289 | ||
|
1290 | if revlogs != UPGRADE_ALL_REVLOGS: | |
|
1291 | incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs) | |
|
1292 | if incompatible: | |
|
1293 | msg = _( | |
|
1294 | b'ignoring revlogs selection flags, format requirements ' | |
|
1295 | b'change: %s\n' | |
|
1296 | ) | |
|
1297 | ui.warn(msg % b', '.join(sorted(incompatible))) | |
|
1298 | revlogs = UPGRADE_ALL_REVLOGS | |
|
1299 | ||
|
1300 | def write_labeled(l, label): | |
|
1301 | first = True | |
|
1302 | for r in sorted(l): | |
|
1303 | if not first: | |
|
1304 | ui.write(b', ') | |
|
1305 | ui.write(r, label=label) | |
|
1306 | first = False | |
|
1307 | ||
|
1308 | def printrequirements(): | |
|
1309 | ui.write(_(b'requirements\n')) | |
|
1310 | ui.write(_(b' preserved: ')) | |
|
1311 | write_labeled( | |
|
1312 | newreqs & repo.requirements, "upgrade-repo.requirement.preserved" | |
|
1313 | ) | |
|
1314 | ui.write((b'\n')) | |
|
1315 | removed = repo.requirements - newreqs | |
|
1316 | if repo.requirements - newreqs: | |
|
1317 | ui.write(_(b' removed: ')) | |
|
1318 | write_labeled(removed, "upgrade-repo.requirement.removed") | |
|
1319 | ui.write((b'\n')) | |
|
1320 | added = newreqs - repo.requirements | |
|
1321 | if added: | |
|
1322 | ui.write(_(b' added: ')) | |
|
1323 | write_labeled(added, "upgrade-repo.requirement.added") | |
|
1324 | ui.write((b'\n')) | |
|
1325 | ui.write(b'\n') | |
|
1326 | ||
|
1327 | def printoptimisations(): | |
|
1328 | optimisations = [a for a in actions if a.type == OPTIMISATION] | |
|
1329 | optimisations.sort(key=lambda a: a.name) | |
|
1330 | if optimisations: | |
|
1331 | ui.write(_(b'optimisations: ')) | |
|
1332 | write_labeled( | |
|
1333 | [a.name for a in optimisations], | |
|
1334 | "upgrade-repo.optimisation.performed", | |
|
1335 | ) | |
|
1336 | ui.write(b'\n\n') | |
|
1337 | ||
|
1338 | def printupgradeactions(): | |
|
1339 | for a in actions: | |
|
1340 | ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage)) | |
|
1341 | ||
|
1342 | def print_affected_revlogs(): | |
|
1343 | if not revlogs: | |
|
1344 | ui.write((b'no revlogs to process\n')) | |
|
1345 | else: | |
|
1346 | ui.write((b'processed revlogs:\n')) | |
|
1347 | for r in sorted(revlogs): | |
|
1348 | ui.write((b' - %s\n' % r)) | |
|
1349 | ui.write((b'\n')) | |
|
1350 | ||
|
1351 | if not run: | |
|
1352 | fromconfig = [] | |
|
1353 | onlydefault = [] | |
|
1354 | ||
|
1355 | for d in deficiencies: | |
|
1356 | if d.fromconfig(repo): | |
|
1357 | fromconfig.append(d) | |
|
1358 | elif d.default: | |
|
1359 | onlydefault.append(d) | |
|
1360 | ||
|
1361 | if fromconfig or onlydefault: | |
|
1362 | ||
|
1363 | if fromconfig: | |
|
1364 | ui.status( | |
|
1365 | _( | |
|
1366 | b'repository lacks features recommended by ' | |
|
1367 | b'current config options:\n\n' | |
|
1368 | ) | |
|
1369 | ) | |
|
1370 | for i in fromconfig: | |
|
1371 | ui.status(b'%s\n %s\n\n' % (i.name, i.description)) | |
|
1372 | ||
|
1373 | if onlydefault: | |
|
1374 | ui.status( | |
|
1375 | _( | |
|
1376 | b'repository lacks features used by the default ' | |
|
1377 | b'config options:\n\n' | |
|
1378 | ) | |
|
1379 | ) | |
|
1380 | for i in onlydefault: | |
|
1381 | ui.status(b'%s\n %s\n\n' % (i.name, i.description)) | |
|
1382 | ||
|
1383 | ui.status(b'\n') | |
|
1384 | else: | |
|
1385 | ui.status( | |
|
1386 | _( | |
|
1387 | b'(no feature deficiencies found in existing ' | |
|
1388 | b'repository)\n' | |
|
1389 | ) | |
|
1390 | ) | |
|
1391 | ||
|
1392 | ui.status( | |
|
1393 | _( | |
|
1394 | b'performing an upgrade with "--run" will make the following ' | |
|
1395 | b'changes:\n\n' | |
|
1396 | ) | |
|
1397 | ) | |
|
1398 | ||
|
1399 | printrequirements() | |
|
1400 | printoptimisations() | |
|
1401 | printupgradeactions() | |
|
1402 | print_affected_revlogs() | |
|
1403 | ||
|
1404 | unusedoptimize = [i for i in alloptimizations if i not in actions] | |
|
1405 | ||
|
1406 | if unusedoptimize: | |
|
1407 | ui.status( | |
|
1408 | _( | |
|
1409 | b'additional optimizations are available by specifying ' | |
|
1410 | b'"--optimize <name>":\n\n' | |
|
1411 | ) | |
|
1412 | ) | |
|
1413 | for i in unusedoptimize: | |
|
1414 | ui.status(_(b'%s\n %s\n\n') % (i.name, i.description)) | |
|
1415 | return | |
|
1416 | ||
|
1417 | # Else we're in the run=true case. | |
|
1418 | ui.write(_(b'upgrade will perform the following actions:\n\n')) | |
|
1419 | printrequirements() | |
|
1420 | printoptimisations() | |
|
1421 | printupgradeactions() | |
|
1422 | print_affected_revlogs() | |
|
1423 | ||
|
1424 | upgradeactions = [a.name for a in actions] | |
|
1425 | ||
|
1426 | ui.status(_(b'beginning upgrade...\n')) | |
|
1427 | with repo.wlock(), repo.lock(): | |
|
1428 | ui.status(_(b'repository locked and read-only\n')) | |
|
1429 | # Our strategy for upgrading the repository is to create a new, | |
|
1430 | # temporary repository, write data to it, then do a swap of the | |
|
1431 | # data. There are less heavyweight ways to do this, but it is easier | |
|
1432 | # to create a new repo object than to instantiate all the components | |
|
1433 | # (like the store) separately. | |
|
1434 | tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path) | |
|
1435 | backuppath = None | |
|
1436 | try: | |
|
1437 | ui.status( | |
|
1438 | _( | |
|
1439 | b'creating temporary repository to stage migrated ' | |
|
1440 | b'data: %s\n' | |
|
1441 | ) | |
|
1442 | % tmppath | |
|
1443 | ) | |
|
1444 | ||
|
1445 | # clone ui without using ui.copy because repo.ui is protected | |
|
1446 | repoui = repo.ui.__class__(repo.ui) | |
|
1447 | dstrepo = hg.repository(repoui, path=tmppath, create=True) | |
|
1448 | ||
|
1449 | with dstrepo.wlock(), dstrepo.lock(): | |
|
1450 | backuppath = _upgraderepo( | |
|
1451 | ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs | |
|
1452 | ) | |
|
1453 | if not (backup or backuppath is None): | |
|
1454 | ui.status( | |
|
1455 | _(b'removing old repository content%s\n') % backuppath | |
|
1456 | ) | |
|
1457 | repo.vfs.rmtree(backuppath, forcibly=True) | |
|
1458 | backuppath = None | |
|
1459 | ||
|
1460 | finally: | |
|
1461 | ui.status(_(b'removing temporary repository %s\n') % tmppath) | |
|
1462 | repo.vfs.rmtree(tmppath, forcibly=True) | |
|
1463 | ||
|
1464 | if backuppath and not ui.quiet: | |
|
1465 | ui.warn( | |
|
1466 | _(b'copy of old repository backed up at %s\n') % backuppath | |
|
1467 | ) | |
|
1468 | ui.warn( | |
|
1469 | _( | |
|
1470 | b'the old repository will not be deleted; remove ' | |
|
1471 | b'it to free up disk space once the upgraded ' | |
|
1472 | b'repository is verified\n' | |
|
1473 | ) | |
|
1474 | ) | |
|
1475 | ||
|
1476 | if sharedsafe.name in addedreqs: | |
|
1477 | ui.warn( | |
|
1478 | _( | |
|
1479 | b'repository upgraded to share safe mode, existing' | |
|
1480 | b' shares will still work in old non-safe mode. ' | |
|
1481 | b'Re-share existing shares to use them in safe mode.' | |
|
1482 | b' New shares will be created in safe mode.\n' | |
|
1483 | ) | |
|
1484 | ) | |
|
1485 | if sharedsafe.name in removedreqs: | |
|
1486 | ui.warn( | |
|
1487 | _( | |
|
1488 | b'repository downgraded to not use share safe mode, ' | |
|
1489 | b'existing shares will not work and need to' | |
|
1490 | b' be reshared.\n' | |
|
1491 | ) | |
|
1492 | ) |
@@ -1,1827 +1,1828 b'' | |||
|
1 | 1 | # |
|
2 | 2 | # This is the mercurial setup script. |
|
3 | 3 | # |
|
4 | 4 | # 'python setup.py install', or |
|
5 | 5 | # 'python setup.py --help' for more options |
|
6 | 6 | import os |
|
7 | 7 | |
|
8 | 8 | # Mercurial will never work on Python 3 before 3.5 due to a lack |
|
9 | 9 | # of % formatting on bytestrings, and can't work on 3.6.0 or 3.6.1 |
|
10 | 10 | # due to a bug in % formatting in bytestrings. |
|
11 | 11 | # We cannot support Python 3.5.0, 3.5.1, 3.5.2 because of bug in |
|
12 | 12 | # codecs.escape_encode() where it raises SystemError on empty bytestring |
|
13 | 13 | # bug link: https://bugs.python.org/issue25270 |
|
14 | 14 | supportedpy = ','.join( |
|
15 | 15 | [ |
|
16 | 16 | '>=2.7.4', |
|
17 | 17 | '!=3.0.*', |
|
18 | 18 | '!=3.1.*', |
|
19 | 19 | '!=3.2.*', |
|
20 | 20 | '!=3.3.*', |
|
21 | 21 | '!=3.4.*', |
|
22 | 22 | '!=3.5.0', |
|
23 | 23 | '!=3.5.1', |
|
24 | 24 | '!=3.5.2', |
|
25 | 25 | '!=3.6.0', |
|
26 | 26 | '!=3.6.1', |
|
27 | 27 | ] |
|
28 | 28 | ) |
|
29 | 29 | |
|
30 | 30 | import sys, platform |
|
31 | 31 | import sysconfig |
|
32 | 32 | |
|
33 | 33 | if sys.version_info[0] >= 3: |
|
34 | 34 | printf = eval('print') |
|
35 | 35 | libdir_escape = 'unicode_escape' |
|
36 | 36 | |
|
37 | 37 | def sysstr(s): |
|
38 | 38 | return s.decode('latin-1') |
|
39 | 39 | |
|
40 | 40 | |
|
41 | 41 | else: |
|
42 | 42 | libdir_escape = 'string_escape' |
|
43 | 43 | |
|
44 | 44 | def printf(*args, **kwargs): |
|
45 | 45 | f = kwargs.get('file', sys.stdout) |
|
46 | 46 | end = kwargs.get('end', '\n') |
|
47 | 47 | f.write(b' '.join(args) + end) |
|
48 | 48 | |
|
49 | 49 | def sysstr(s): |
|
50 | 50 | return s |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | # Attempt to guide users to a modern pip - this means that 2.6 users |
|
54 | 54 | # should have a chance of getting a 4.2 release, and when we ratchet |
|
55 | 55 | # the version requirement forward again hopefully everyone will get |
|
56 | 56 | # something that works for them. |
|
57 | 57 | if sys.version_info < (2, 7, 4, 'final'): |
|
58 | 58 | pip_message = ( |
|
59 | 59 | 'This may be due to an out of date pip. ' |
|
60 | 60 | 'Make sure you have pip >= 9.0.1.' |
|
61 | 61 | ) |
|
62 | 62 | try: |
|
63 | 63 | import pip |
|
64 | 64 | |
|
65 | 65 | pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]]) |
|
66 | 66 | if pip_version < (9, 0, 1): |
|
67 | 67 | pip_message = ( |
|
68 | 68 | 'Your pip version is out of date, please install ' |
|
69 | 69 | 'pip >= 9.0.1. pip {} detected.'.format(pip.__version__) |
|
70 | 70 | ) |
|
71 | 71 | else: |
|
72 | 72 | # pip is new enough - it must be something else |
|
73 | 73 | pip_message = '' |
|
74 | 74 | except Exception: |
|
75 | 75 | pass |
|
76 | 76 | error = """ |
|
77 | 77 | Mercurial does not support Python older than 2.7.4. |
|
78 | 78 | Python {py} detected. |
|
79 | 79 | {pip} |
|
80 | 80 | """.format( |
|
81 | 81 | py=sys.version_info, pip=pip_message |
|
82 | 82 | ) |
|
83 | 83 | printf(error, file=sys.stderr) |
|
84 | 84 | sys.exit(1) |
|
85 | 85 | |
|
86 | 86 | import ssl |
|
87 | 87 | |
|
88 | 88 | try: |
|
89 | 89 | ssl.SSLContext |
|
90 | 90 | except AttributeError: |
|
91 | 91 | error = """ |
|
92 | 92 | The `ssl` module does not have the `SSLContext` class. This indicates an old |
|
93 | 93 | Python version which does not support modern security features (which were |
|
94 | 94 | added to Python 2.7 as part of "PEP 466"). Please make sure you have installed |
|
95 | 95 | at least Python 2.7.9 or a Python version with backports of these security |
|
96 | 96 | features. |
|
97 | 97 | """ |
|
98 | 98 | printf(error, file=sys.stderr) |
|
99 | 99 | sys.exit(1) |
|
100 | 100 | |
|
101 | 101 | # ssl.HAS_TLSv1* are preferred to check support but they were added in Python |
|
102 | 102 | # 3.7. Prior to CPython commit 6e8cda91d92da72800d891b2fc2073ecbc134d98 |
|
103 | 103 | # (backported to the 3.7 branch), ssl.PROTOCOL_TLSv1_1 / ssl.PROTOCOL_TLSv1_2 |
|
104 | 104 | # were defined only if compiled against a OpenSSL version with TLS 1.1 / 1.2 |
|
105 | 105 | # support. At the mentioned commit, they were unconditionally defined. |
|
106 | 106 | _notset = object() |
|
107 | 107 | has_tlsv1_1 = getattr(ssl, 'HAS_TLSv1_1', _notset) |
|
108 | 108 | if has_tlsv1_1 is _notset: |
|
109 | 109 | has_tlsv1_1 = getattr(ssl, 'PROTOCOL_TLSv1_1', _notset) is not _notset |
|
110 | 110 | has_tlsv1_2 = getattr(ssl, 'HAS_TLSv1_2', _notset) |
|
111 | 111 | if has_tlsv1_2 is _notset: |
|
112 | 112 | has_tlsv1_2 = getattr(ssl, 'PROTOCOL_TLSv1_2', _notset) is not _notset |
|
113 | 113 | if not (has_tlsv1_1 or has_tlsv1_2): |
|
114 | 114 | error = """ |
|
115 | 115 | The `ssl` module does not advertise support for TLS 1.1 or TLS 1.2. |
|
116 | 116 | Please make sure that your Python installation was compiled against an OpenSSL |
|
117 | 117 | version enabling these features (likely this requires the OpenSSL version to |
|
118 | 118 | be at least 1.0.1). |
|
119 | 119 | """ |
|
120 | 120 | printf(error, file=sys.stderr) |
|
121 | 121 | sys.exit(1) |
|
122 | 122 | |
|
123 | 123 | if sys.version_info[0] >= 3: |
|
124 | 124 | DYLIB_SUFFIX = sysconfig.get_config_vars()['EXT_SUFFIX'] |
|
125 | 125 | else: |
|
126 | 126 | # deprecated in Python 3 |
|
127 | 127 | DYLIB_SUFFIX = sysconfig.get_config_vars()['SO'] |
|
128 | 128 | |
|
129 | 129 | # Solaris Python packaging brain damage |
|
130 | 130 | try: |
|
131 | 131 | import hashlib |
|
132 | 132 | |
|
133 | 133 | sha = hashlib.sha1() |
|
134 | 134 | except ImportError: |
|
135 | 135 | try: |
|
136 | 136 | import sha |
|
137 | 137 | |
|
138 | 138 | sha.sha # silence unused import warning |
|
139 | 139 | except ImportError: |
|
140 | 140 | raise SystemExit( |
|
141 | 141 | "Couldn't import standard hashlib (incomplete Python install)." |
|
142 | 142 | ) |
|
143 | 143 | |
|
144 | 144 | try: |
|
145 | 145 | import zlib |
|
146 | 146 | |
|
147 | 147 | zlib.compressobj # silence unused import warning |
|
148 | 148 | except ImportError: |
|
149 | 149 | raise SystemExit( |
|
150 | 150 | "Couldn't import standard zlib (incomplete Python install)." |
|
151 | 151 | ) |
|
152 | 152 | |
|
153 | 153 | # The base IronPython distribution (as of 2.7.1) doesn't support bz2 |
|
154 | 154 | isironpython = False |
|
155 | 155 | try: |
|
156 | 156 | isironpython = ( |
|
157 | 157 | platform.python_implementation().lower().find("ironpython") != -1 |
|
158 | 158 | ) |
|
159 | 159 | except AttributeError: |
|
160 | 160 | pass |
|
161 | 161 | |
|
162 | 162 | if isironpython: |
|
163 | 163 | sys.stderr.write("warning: IronPython detected (no bz2 support)\n") |
|
164 | 164 | else: |
|
165 | 165 | try: |
|
166 | 166 | import bz2 |
|
167 | 167 | |
|
168 | 168 | bz2.BZ2Compressor # silence unused import warning |
|
169 | 169 | except ImportError: |
|
170 | 170 | raise SystemExit( |
|
171 | 171 | "Couldn't import standard bz2 (incomplete Python install)." |
|
172 | 172 | ) |
|
173 | 173 | |
|
174 | 174 | ispypy = "PyPy" in sys.version |
|
175 | 175 | |
|
176 | 176 | import ctypes |
|
177 | 177 | import errno |
|
178 | 178 | import stat, subprocess, time |
|
179 | 179 | import re |
|
180 | 180 | import shutil |
|
181 | 181 | import tempfile |
|
182 | 182 | from distutils import log |
|
183 | 183 | |
|
184 | 184 | # We have issues with setuptools on some platforms and builders. Until |
|
185 | 185 | # those are resolved, setuptools is opt-in except for platforms where |
|
186 | 186 | # we don't have issues. |
|
187 | 187 | issetuptools = os.name == 'nt' or 'FORCE_SETUPTOOLS' in os.environ |
|
188 | 188 | if issetuptools: |
|
189 | 189 | from setuptools import setup |
|
190 | 190 | else: |
|
191 | 191 | from distutils.core import setup |
|
192 | 192 | from distutils.ccompiler import new_compiler |
|
193 | 193 | from distutils.core import Command, Extension |
|
194 | 194 | from distutils.dist import Distribution |
|
195 | 195 | from distutils.command.build import build |
|
196 | 196 | from distutils.command.build_ext import build_ext |
|
197 | 197 | from distutils.command.build_py import build_py |
|
198 | 198 | from distutils.command.build_scripts import build_scripts |
|
199 | 199 | from distutils.command.install import install |
|
200 | 200 | from distutils.command.install_lib import install_lib |
|
201 | 201 | from distutils.command.install_scripts import install_scripts |
|
202 | 202 | from distutils.spawn import spawn, find_executable |
|
203 | 203 | from distutils import file_util |
|
204 | 204 | from distutils.errors import ( |
|
205 | 205 | CCompilerError, |
|
206 | 206 | DistutilsError, |
|
207 | 207 | DistutilsExecError, |
|
208 | 208 | ) |
|
209 | 209 | from distutils.sysconfig import get_python_inc, get_config_var |
|
210 | 210 | from distutils.version import StrictVersion |
|
211 | 211 | |
|
212 | 212 | # Explain to distutils.StrictVersion how our release candidates are versionned |
|
213 | 213 | StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$') |
|
214 | 214 | |
|
215 | 215 | |
|
216 | 216 | def write_if_changed(path, content): |
|
217 | 217 | """Write content to a file iff the content hasn't changed.""" |
|
218 | 218 | if os.path.exists(path): |
|
219 | 219 | with open(path, 'rb') as fh: |
|
220 | 220 | current = fh.read() |
|
221 | 221 | else: |
|
222 | 222 | current = b'' |
|
223 | 223 | |
|
224 | 224 | if current != content: |
|
225 | 225 | with open(path, 'wb') as fh: |
|
226 | 226 | fh.write(content) |
|
227 | 227 | |
|
228 | 228 | |
|
229 | 229 | scripts = ['hg'] |
|
230 | 230 | if os.name == 'nt': |
|
231 | 231 | # We remove hg.bat if we are able to build hg.exe. |
|
232 | 232 | scripts.append('contrib/win32/hg.bat') |
|
233 | 233 | |
|
234 | 234 | |
|
235 | 235 | def cancompile(cc, code): |
|
236 | 236 | tmpdir = tempfile.mkdtemp(prefix='hg-install-') |
|
237 | 237 | devnull = oldstderr = None |
|
238 | 238 | try: |
|
239 | 239 | fname = os.path.join(tmpdir, 'testcomp.c') |
|
240 | 240 | f = open(fname, 'w') |
|
241 | 241 | f.write(code) |
|
242 | 242 | f.close() |
|
243 | 243 | # Redirect stderr to /dev/null to hide any error messages |
|
244 | 244 | # from the compiler. |
|
245 | 245 | # This will have to be changed if we ever have to check |
|
246 | 246 | # for a function on Windows. |
|
247 | 247 | devnull = open('/dev/null', 'w') |
|
248 | 248 | oldstderr = os.dup(sys.stderr.fileno()) |
|
249 | 249 | os.dup2(devnull.fileno(), sys.stderr.fileno()) |
|
250 | 250 | objects = cc.compile([fname], output_dir=tmpdir) |
|
251 | 251 | cc.link_executable(objects, os.path.join(tmpdir, "a.out")) |
|
252 | 252 | return True |
|
253 | 253 | except Exception: |
|
254 | 254 | return False |
|
255 | 255 | finally: |
|
256 | 256 | if oldstderr is not None: |
|
257 | 257 | os.dup2(oldstderr, sys.stderr.fileno()) |
|
258 | 258 | if devnull is not None: |
|
259 | 259 | devnull.close() |
|
260 | 260 | shutil.rmtree(tmpdir) |
|
261 | 261 | |
|
262 | 262 | |
|
263 | 263 | # simplified version of distutils.ccompiler.CCompiler.has_function |
|
264 | 264 | # that actually removes its temporary files. |
|
265 | 265 | def hasfunction(cc, funcname): |
|
266 | 266 | code = 'int main(void) { %s(); }\n' % funcname |
|
267 | 267 | return cancompile(cc, code) |
|
268 | 268 | |
|
269 | 269 | |
|
270 | 270 | def hasheader(cc, headername): |
|
271 | 271 | code = '#include <%s>\nint main(void) { return 0; }\n' % headername |
|
272 | 272 | return cancompile(cc, code) |
|
273 | 273 | |
|
274 | 274 | |
|
275 | 275 | # py2exe needs to be installed to work |
|
276 | 276 | try: |
|
277 | 277 | import py2exe |
|
278 | 278 | |
|
279 | 279 | py2exe.Distribution # silence unused import warning |
|
280 | 280 | py2exeloaded = True |
|
281 | 281 | # import py2exe's patched Distribution class |
|
282 | 282 | from distutils.core import Distribution |
|
283 | 283 | except ImportError: |
|
284 | 284 | py2exeloaded = False |
|
285 | 285 | |
|
286 | 286 | |
|
287 | 287 | def runcmd(cmd, env, cwd=None): |
|
288 | 288 | p = subprocess.Popen( |
|
289 | 289 | cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=cwd |
|
290 | 290 | ) |
|
291 | 291 | out, err = p.communicate() |
|
292 | 292 | return p.returncode, out, err |
|
293 | 293 | |
|
294 | 294 | |
|
295 | 295 | class hgcommand(object): |
|
296 | 296 | def __init__(self, cmd, env): |
|
297 | 297 | self.cmd = cmd |
|
298 | 298 | self.env = env |
|
299 | 299 | |
|
300 | 300 | def run(self, args): |
|
301 | 301 | cmd = self.cmd + args |
|
302 | 302 | returncode, out, err = runcmd(cmd, self.env) |
|
303 | 303 | err = filterhgerr(err) |
|
304 | 304 | if err or returncode != 0: |
|
305 | 305 | printf("stderr from '%s':" % (' '.join(cmd)), file=sys.stderr) |
|
306 | 306 | printf(err, file=sys.stderr) |
|
307 | 307 | return b'' |
|
308 | 308 | return out |
|
309 | 309 | |
|
310 | 310 | |
|
311 | 311 | def filterhgerr(err): |
|
312 | 312 | # If root is executing setup.py, but the repository is owned by |
|
313 | 313 | # another user (as in "sudo python setup.py install") we will get |
|
314 | 314 | # trust warnings since the .hg/hgrc file is untrusted. That is |
|
315 | 315 | # fine, we don't want to load it anyway. Python may warn about |
|
316 | 316 | # a missing __init__.py in mercurial/locale, we also ignore that. |
|
317 | 317 | err = [ |
|
318 | 318 | e |
|
319 | 319 | for e in err.splitlines() |
|
320 | 320 | if ( |
|
321 | 321 | not e.startswith(b'not trusting file') |
|
322 | 322 | and not e.startswith(b'warning: Not importing') |
|
323 | 323 | and not e.startswith(b'obsolete feature not enabled') |
|
324 | 324 | and not e.startswith(b'*** failed to import extension') |
|
325 | 325 | and not e.startswith(b'devel-warn:') |
|
326 | 326 | and not ( |
|
327 | 327 | e.startswith(b'(third party extension') |
|
328 | 328 | and e.endswith(b'or newer of Mercurial; disabling)') |
|
329 | 329 | ) |
|
330 | 330 | ) |
|
331 | 331 | ] |
|
332 | 332 | return b'\n'.join(b' ' + e for e in err) |
|
333 | 333 | |
|
334 | 334 | |
|
335 | 335 | def findhg(): |
|
336 | 336 | """Try to figure out how we should invoke hg for examining the local |
|
337 | 337 | repository contents. |
|
338 | 338 | |
|
339 | 339 | Returns an hgcommand object.""" |
|
340 | 340 | # By default, prefer the "hg" command in the user's path. This was |
|
341 | 341 | # presumably the hg command that the user used to create this repository. |
|
342 | 342 | # |
|
343 | 343 | # This repository may require extensions or other settings that would not |
|
344 | 344 | # be enabled by running the hg script directly from this local repository. |
|
345 | 345 | hgenv = os.environ.copy() |
|
346 | 346 | # Use HGPLAIN to disable hgrc settings that would change output formatting, |
|
347 | 347 | # and disable localization for the same reasons. |
|
348 | 348 | hgenv['HGPLAIN'] = '1' |
|
349 | 349 | hgenv['LANGUAGE'] = 'C' |
|
350 | 350 | hgcmd = ['hg'] |
|
351 | 351 | # Run a simple "hg log" command just to see if using hg from the user's |
|
352 | 352 | # path works and can successfully interact with this repository. Windows |
|
353 | 353 | # gives precedence to hg.exe in the current directory, so fall back to the |
|
354 | 354 | # python invocation of local hg, where pythonXY.dll can always be found. |
|
355 | 355 | check_cmd = ['log', '-r.', '-Ttest'] |
|
356 | 356 | if os.name != 'nt' or not os.path.exists("hg.exe"): |
|
357 | 357 | try: |
|
358 | 358 | retcode, out, err = runcmd(hgcmd + check_cmd, hgenv) |
|
359 | 359 | except EnvironmentError: |
|
360 | 360 | retcode = -1 |
|
361 | 361 | if retcode == 0 and not filterhgerr(err): |
|
362 | 362 | return hgcommand(hgcmd, hgenv) |
|
363 | 363 | |
|
364 | 364 | # Fall back to trying the local hg installation. |
|
365 | 365 | hgenv = localhgenv() |
|
366 | 366 | hgcmd = [sys.executable, 'hg'] |
|
367 | 367 | try: |
|
368 | 368 | retcode, out, err = runcmd(hgcmd + check_cmd, hgenv) |
|
369 | 369 | except EnvironmentError: |
|
370 | 370 | retcode = -1 |
|
371 | 371 | if retcode == 0 and not filterhgerr(err): |
|
372 | 372 | return hgcommand(hgcmd, hgenv) |
|
373 | 373 | |
|
374 | 374 | raise SystemExit( |
|
375 | 375 | 'Unable to find a working hg binary to extract the ' |
|
376 | 376 | 'version from the repository tags' |
|
377 | 377 | ) |
|
378 | 378 | |
|
379 | 379 | |
|
380 | 380 | def localhgenv(): |
|
381 | 381 | """Get an environment dictionary to use for invoking or importing |
|
382 | 382 | mercurial from the local repository.""" |
|
383 | 383 | # Execute hg out of this directory with a custom environment which takes |
|
384 | 384 | # care to not use any hgrc files and do no localization. |
|
385 | 385 | env = { |
|
386 | 386 | 'HGMODULEPOLICY': 'py', |
|
387 | 387 | 'HGRCPATH': '', |
|
388 | 388 | 'LANGUAGE': 'C', |
|
389 | 389 | 'PATH': '', |
|
390 | 390 | } # make pypi modules that use os.environ['PATH'] happy |
|
391 | 391 | if 'LD_LIBRARY_PATH' in os.environ: |
|
392 | 392 | env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH'] |
|
393 | 393 | if 'SystemRoot' in os.environ: |
|
394 | 394 | # SystemRoot is required by Windows to load various DLLs. See: |
|
395 | 395 | # https://bugs.python.org/issue13524#msg148850 |
|
396 | 396 | env['SystemRoot'] = os.environ['SystemRoot'] |
|
397 | 397 | return env |
|
398 | 398 | |
|
399 | 399 | |
|
400 | 400 | version = '' |
|
401 | 401 | |
|
402 | 402 | if os.path.isdir('.hg'): |
|
403 | 403 | hg = findhg() |
|
404 | 404 | cmd = ['log', '-r', '.', '--template', '{tags}\n'] |
|
405 | 405 | numerictags = [t for t in sysstr(hg.run(cmd)).split() if t[0:1].isdigit()] |
|
406 | 406 | hgid = sysstr(hg.run(['id', '-i'])).strip() |
|
407 | 407 | if not hgid: |
|
408 | 408 | # Bail out if hg is having problems interacting with this repository, |
|
409 | 409 | # rather than falling through and producing a bogus version number. |
|
410 | 410 | # Continuing with an invalid version number will break extensions |
|
411 | 411 | # that define minimumhgversion. |
|
412 | 412 | raise SystemExit('Unable to determine hg version from local repository') |
|
413 | 413 | if numerictags: # tag(s) found |
|
414 | 414 | version = numerictags[-1] |
|
415 | 415 | if hgid.endswith('+'): # propagate the dirty status to the tag |
|
416 | 416 | version += '+' |
|
417 | 417 | else: # no tag found |
|
418 | 418 | ltagcmd = ['parents', '--template', '{latesttag}'] |
|
419 | 419 | ltag = sysstr(hg.run(ltagcmd)) |
|
420 | 420 | changessincecmd = ['log', '-T', 'x\n', '-r', "only(.,'%s')" % ltag] |
|
421 | 421 | changessince = len(hg.run(changessincecmd).splitlines()) |
|
422 | 422 | version = '%s+%s-%s' % (ltag, changessince, hgid) |
|
423 | 423 | if version.endswith('+'): |
|
424 | 424 | version += time.strftime('%Y%m%d') |
|
425 | 425 | elif os.path.exists('.hg_archival.txt'): |
|
426 | 426 | kw = dict( |
|
427 | 427 | [[t.strip() for t in l.split(':', 1)] for l in open('.hg_archival.txt')] |
|
428 | 428 | ) |
|
429 | 429 | if 'tag' in kw: |
|
430 | 430 | version = kw['tag'] |
|
431 | 431 | elif 'latesttag' in kw: |
|
432 | 432 | if 'changessincelatesttag' in kw: |
|
433 | 433 | version = '%(latesttag)s+%(changessincelatesttag)s-%(node).12s' % kw |
|
434 | 434 | else: |
|
435 | 435 | version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw |
|
436 | 436 | else: |
|
437 | 437 | version = kw.get('node', '')[:12] |
|
438 | 438 | |
|
439 | 439 | if version: |
|
440 | 440 | versionb = version |
|
441 | 441 | if not isinstance(versionb, bytes): |
|
442 | 442 | versionb = versionb.encode('ascii') |
|
443 | 443 | |
|
444 | 444 | write_if_changed( |
|
445 | 445 | 'mercurial/__version__.py', |
|
446 | 446 | b''.join( |
|
447 | 447 | [ |
|
448 | 448 | b'# this file is autogenerated by setup.py\n' |
|
449 | 449 | b'version = b"%s"\n' % versionb, |
|
450 | 450 | ] |
|
451 | 451 | ), |
|
452 | 452 | ) |
|
453 | 453 | |
|
454 | 454 | try: |
|
455 | 455 | oldpolicy = os.environ.get('HGMODULEPOLICY', None) |
|
456 | 456 | os.environ['HGMODULEPOLICY'] = 'py' |
|
457 | 457 | from mercurial import __version__ |
|
458 | 458 | |
|
459 | 459 | version = __version__.version |
|
460 | 460 | except ImportError: |
|
461 | 461 | version = b'unknown' |
|
462 | 462 | finally: |
|
463 | 463 | if oldpolicy is None: |
|
464 | 464 | del os.environ['HGMODULEPOLICY'] |
|
465 | 465 | else: |
|
466 | 466 | os.environ['HGMODULEPOLICY'] = oldpolicy |
|
467 | 467 | |
|
468 | 468 | |
|
469 | 469 | class hgbuild(build): |
|
470 | 470 | # Insert hgbuildmo first so that files in mercurial/locale/ are found |
|
471 | 471 | # when build_py is run next. |
|
472 | 472 | sub_commands = [('build_mo', None)] + build.sub_commands |
|
473 | 473 | |
|
474 | 474 | |
|
475 | 475 | class hgbuildmo(build): |
|
476 | 476 | |
|
477 | 477 | description = "build translations (.mo files)" |
|
478 | 478 | |
|
479 | 479 | def run(self): |
|
480 | 480 | if not find_executable('msgfmt'): |
|
481 | 481 | self.warn( |
|
482 | 482 | "could not find msgfmt executable, no translations " |
|
483 | 483 | "will be built" |
|
484 | 484 | ) |
|
485 | 485 | return |
|
486 | 486 | |
|
487 | 487 | podir = 'i18n' |
|
488 | 488 | if not os.path.isdir(podir): |
|
489 | 489 | self.warn("could not find %s/ directory" % podir) |
|
490 | 490 | return |
|
491 | 491 | |
|
492 | 492 | join = os.path.join |
|
493 | 493 | for po in os.listdir(podir): |
|
494 | 494 | if not po.endswith('.po'): |
|
495 | 495 | continue |
|
496 | 496 | pofile = join(podir, po) |
|
497 | 497 | modir = join('locale', po[:-3], 'LC_MESSAGES') |
|
498 | 498 | mofile = join(modir, 'hg.mo') |
|
499 | 499 | mobuildfile = join('mercurial', mofile) |
|
500 | 500 | cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile] |
|
501 | 501 | if sys.platform != 'sunos5': |
|
502 | 502 | # msgfmt on Solaris does not know about -c |
|
503 | 503 | cmd.append('-c') |
|
504 | 504 | self.mkpath(join('mercurial', modir)) |
|
505 | 505 | self.make_file([pofile], mobuildfile, spawn, (cmd,)) |
|
506 | 506 | |
|
507 | 507 | |
|
508 | 508 | class hgdist(Distribution): |
|
509 | 509 | pure = False |
|
510 | 510 | rust = False |
|
511 | 511 | no_rust = False |
|
512 | 512 | cffi = ispypy |
|
513 | 513 | |
|
514 | 514 | global_options = Distribution.global_options + [ |
|
515 | 515 | ('pure', None, "use pure (slow) Python code instead of C extensions"), |
|
516 | 516 | ('rust', None, "use Rust extensions additionally to C extensions"), |
|
517 | 517 | ( |
|
518 | 518 | 'no-rust', |
|
519 | 519 | None, |
|
520 | 520 | "do not use Rust extensions additionally to C extensions", |
|
521 | 521 | ), |
|
522 | 522 | ] |
|
523 | 523 | |
|
524 | 524 | negative_opt = Distribution.negative_opt.copy() |
|
525 | 525 | boolean_options = ['pure', 'rust', 'no-rust'] |
|
526 | 526 | negative_opt['no-rust'] = 'rust' |
|
527 | 527 | |
|
528 | 528 | def _set_command_options(self, command_obj, option_dict=None): |
|
529 | 529 | # Not all distutils versions in the wild have boolean_options. |
|
530 | 530 | # This should be cleaned up when we're Python 3 only. |
|
531 | 531 | command_obj.boolean_options = ( |
|
532 | 532 | getattr(command_obj, 'boolean_options', []) + self.boolean_options |
|
533 | 533 | ) |
|
534 | 534 | return Distribution._set_command_options( |
|
535 | 535 | self, command_obj, option_dict=option_dict |
|
536 | 536 | ) |
|
537 | 537 | |
|
538 | 538 | def parse_command_line(self): |
|
539 | 539 | ret = Distribution.parse_command_line(self) |
|
540 | 540 | if not (self.rust or self.no_rust): |
|
541 | 541 | hgrustext = os.environ.get('HGWITHRUSTEXT') |
|
542 | 542 | # TODO record it for proper rebuild upon changes |
|
543 | 543 | # (see mercurial/__modulepolicy__.py) |
|
544 | 544 | if hgrustext != 'cpython' and hgrustext is not None: |
|
545 | 545 | if hgrustext: |
|
546 | 546 | msg = 'unkown HGWITHRUSTEXT value: %s' % hgrustext |
|
547 | 547 | printf(msg, file=sys.stderr) |
|
548 | 548 | hgrustext = None |
|
549 | 549 | self.rust = hgrustext is not None |
|
550 | 550 | self.no_rust = not self.rust |
|
551 | 551 | return ret |
|
552 | 552 | |
|
553 | 553 | def has_ext_modules(self): |
|
554 | 554 | # self.ext_modules is emptied in hgbuildpy.finalize_options which is |
|
555 | 555 | # too late for some cases |
|
556 | 556 | return not self.pure and Distribution.has_ext_modules(self) |
|
557 | 557 | |
|
558 | 558 | |
|
559 | 559 | # This is ugly as a one-liner. So use a variable. |
|
560 | 560 | buildextnegops = dict(getattr(build_ext, 'negative_options', {})) |
|
561 | 561 | buildextnegops['no-zstd'] = 'zstd' |
|
562 | 562 | buildextnegops['no-rust'] = 'rust' |
|
563 | 563 | |
|
564 | 564 | |
|
565 | 565 | class hgbuildext(build_ext): |
|
566 | 566 | user_options = build_ext.user_options + [ |
|
567 | 567 | ('zstd', None, 'compile zstd bindings [default]'), |
|
568 | 568 | ('no-zstd', None, 'do not compile zstd bindings'), |
|
569 | 569 | ( |
|
570 | 570 | 'rust', |
|
571 | 571 | None, |
|
572 | 572 | 'compile Rust extensions if they are in use ' |
|
573 | 573 | '(requires Cargo) [default]', |
|
574 | 574 | ), |
|
575 | 575 | ('no-rust', None, 'do not compile Rust extensions'), |
|
576 | 576 | ] |
|
577 | 577 | |
|
578 | 578 | boolean_options = build_ext.boolean_options + ['zstd', 'rust'] |
|
579 | 579 | negative_opt = buildextnegops |
|
580 | 580 | |
|
581 | 581 | def initialize_options(self): |
|
582 | 582 | self.zstd = True |
|
583 | 583 | self.rust = True |
|
584 | 584 | |
|
585 | 585 | return build_ext.initialize_options(self) |
|
586 | 586 | |
|
587 | 587 | def finalize_options(self): |
|
588 | 588 | # Unless overridden by the end user, build extensions in parallel. |
|
589 | 589 | # Only influences behavior on Python 3.5+. |
|
590 | 590 | if getattr(self, 'parallel', None) is None: |
|
591 | 591 | self.parallel = True |
|
592 | 592 | |
|
593 | 593 | return build_ext.finalize_options(self) |
|
594 | 594 | |
|
595 | 595 | def build_extensions(self): |
|
596 | 596 | ruststandalones = [ |
|
597 | 597 | e for e in self.extensions if isinstance(e, RustStandaloneExtension) |
|
598 | 598 | ] |
|
599 | 599 | self.extensions = [ |
|
600 | 600 | e for e in self.extensions if e not in ruststandalones |
|
601 | 601 | ] |
|
602 | 602 | # Filter out zstd if disabled via argument. |
|
603 | 603 | if not self.zstd: |
|
604 | 604 | self.extensions = [ |
|
605 | 605 | e for e in self.extensions if e.name != 'mercurial.zstd' |
|
606 | 606 | ] |
|
607 | 607 | |
|
608 | 608 | # Build Rust standalon extensions if it'll be used |
|
609 | 609 | # and its build is not explictely disabled (for external build |
|
610 | 610 | # as Linux distributions would do) |
|
611 | 611 | if self.distribution.rust and self.rust: |
|
612 | 612 | for rustext in ruststandalones: |
|
613 | 613 | rustext.build('' if self.inplace else self.build_lib) |
|
614 | 614 | |
|
615 | 615 | return build_ext.build_extensions(self) |
|
616 | 616 | |
|
617 | 617 | def build_extension(self, ext): |
|
618 | 618 | if ( |
|
619 | 619 | self.distribution.rust |
|
620 | 620 | and self.rust |
|
621 | 621 | and isinstance(ext, RustExtension) |
|
622 | 622 | ): |
|
623 | 623 | ext.rustbuild() |
|
624 | 624 | try: |
|
625 | 625 | build_ext.build_extension(self, ext) |
|
626 | 626 | except CCompilerError: |
|
627 | 627 | if not getattr(ext, 'optional', False): |
|
628 | 628 | raise |
|
629 | 629 | log.warn( |
|
630 | 630 | "Failed to build optional extension '%s' (skipping)", ext.name |
|
631 | 631 | ) |
|
632 | 632 | |
|
633 | 633 | |
|
634 | 634 | class hgbuildscripts(build_scripts): |
|
635 | 635 | def run(self): |
|
636 | 636 | if os.name != 'nt' or self.distribution.pure: |
|
637 | 637 | return build_scripts.run(self) |
|
638 | 638 | |
|
639 | 639 | exebuilt = False |
|
640 | 640 | try: |
|
641 | 641 | self.run_command('build_hgexe') |
|
642 | 642 | exebuilt = True |
|
643 | 643 | except (DistutilsError, CCompilerError): |
|
644 | 644 | log.warn('failed to build optional hg.exe') |
|
645 | 645 | |
|
646 | 646 | if exebuilt: |
|
647 | 647 | # Copying hg.exe to the scripts build directory ensures it is |
|
648 | 648 | # installed by the install_scripts command. |
|
649 | 649 | hgexecommand = self.get_finalized_command('build_hgexe') |
|
650 | 650 | dest = os.path.join(self.build_dir, 'hg.exe') |
|
651 | 651 | self.mkpath(self.build_dir) |
|
652 | 652 | self.copy_file(hgexecommand.hgexepath, dest) |
|
653 | 653 | |
|
654 | 654 | # Remove hg.bat because it is redundant with hg.exe. |
|
655 | 655 | self.scripts.remove('contrib/win32/hg.bat') |
|
656 | 656 | |
|
657 | 657 | return build_scripts.run(self) |
|
658 | 658 | |
|
659 | 659 | |
|
660 | 660 | class hgbuildpy(build_py): |
|
661 | 661 | def finalize_options(self): |
|
662 | 662 | build_py.finalize_options(self) |
|
663 | 663 | |
|
664 | 664 | if self.distribution.pure: |
|
665 | 665 | self.distribution.ext_modules = [] |
|
666 | 666 | elif self.distribution.cffi: |
|
667 | 667 | from mercurial.cffi import ( |
|
668 | 668 | bdiffbuild, |
|
669 | 669 | mpatchbuild, |
|
670 | 670 | ) |
|
671 | 671 | |
|
672 | 672 | exts = [ |
|
673 | 673 | mpatchbuild.ffi.distutils_extension(), |
|
674 | 674 | bdiffbuild.ffi.distutils_extension(), |
|
675 | 675 | ] |
|
676 | 676 | # cffi modules go here |
|
677 | 677 | if sys.platform == 'darwin': |
|
678 | 678 | from mercurial.cffi import osutilbuild |
|
679 | 679 | |
|
680 | 680 | exts.append(osutilbuild.ffi.distutils_extension()) |
|
681 | 681 | self.distribution.ext_modules = exts |
|
682 | 682 | else: |
|
683 | 683 | h = os.path.join(get_python_inc(), 'Python.h') |
|
684 | 684 | if not os.path.exists(h): |
|
685 | 685 | raise SystemExit( |
|
686 | 686 | 'Python headers are required to build ' |
|
687 | 687 | 'Mercurial but weren\'t found in %s' % h |
|
688 | 688 | ) |
|
689 | 689 | |
|
690 | 690 | def run(self): |
|
691 | 691 | basepath = os.path.join(self.build_lib, 'mercurial') |
|
692 | 692 | self.mkpath(basepath) |
|
693 | 693 | |
|
694 | 694 | rust = self.distribution.rust |
|
695 | 695 | if self.distribution.pure: |
|
696 | 696 | modulepolicy = 'py' |
|
697 | 697 | elif self.build_lib == '.': |
|
698 | 698 | # in-place build should run without rebuilding and Rust extensions |
|
699 | 699 | modulepolicy = 'rust+c-allow' if rust else 'allow' |
|
700 | 700 | else: |
|
701 | 701 | modulepolicy = 'rust+c' if rust else 'c' |
|
702 | 702 | |
|
703 | 703 | content = b''.join( |
|
704 | 704 | [ |
|
705 | 705 | b'# this file is autogenerated by setup.py\n', |
|
706 | 706 | b'modulepolicy = b"%s"\n' % modulepolicy.encode('ascii'), |
|
707 | 707 | ] |
|
708 | 708 | ) |
|
709 | 709 | write_if_changed(os.path.join(basepath, '__modulepolicy__.py'), content) |
|
710 | 710 | |
|
711 | 711 | build_py.run(self) |
|
712 | 712 | |
|
713 | 713 | |
|
714 | 714 | class buildhgextindex(Command): |
|
715 | 715 | description = 'generate prebuilt index of hgext (for frozen package)' |
|
716 | 716 | user_options = [] |
|
717 | 717 | _indexfilename = 'hgext/__index__.py' |
|
718 | 718 | |
|
719 | 719 | def initialize_options(self): |
|
720 | 720 | pass |
|
721 | 721 | |
|
722 | 722 | def finalize_options(self): |
|
723 | 723 | pass |
|
724 | 724 | |
|
725 | 725 | def run(self): |
|
726 | 726 | if os.path.exists(self._indexfilename): |
|
727 | 727 | with open(self._indexfilename, 'w') as f: |
|
728 | 728 | f.write('# empty\n') |
|
729 | 729 | |
|
730 | 730 | # no extensions are enabled here, so disabled() lists everything
|
731 | 731 | code = ( |
|
732 | 732 | 'import pprint; from mercurial import extensions; ' |
|
733 | 733 | 'ext = extensions.disabled();' |
|
734 | 734 | 'ext.pop("__index__", None);' |
|
735 | 735 | 'pprint.pprint(ext)' |
|
736 | 736 | ) |
|
737 | 737 | returncode, out, err = runcmd( |
|
738 | 738 | [sys.executable, '-c', code], localhgenv() |
|
739 | 739 | ) |
|
740 | 740 | if err or returncode != 0: |
|
741 | 741 | raise DistutilsExecError(err) |
|
742 | 742 | |
|
743 | 743 | with open(self._indexfilename, 'wb') as f: |
|
744 | 744 | f.write(b'# this file is autogenerated by setup.py\n') |
|
745 | 745 | f.write(b'docs = ') |
|
746 | 746 | f.write(out) |
|
747 | 747 | |
|
748 | 748 | |
|
749 | 749 | class buildhgexe(build_ext): |
|
750 | 750 | description = 'compile hg.exe from mercurial/exewrapper.c' |
|
751 | 751 | user_options = build_ext.user_options + [ |
|
752 | 752 | ( |
|
753 | 753 | 'long-paths-support', |
|
754 | 754 | None, |
|
755 | 755 | 'enable support for long paths on ' |
|
756 | 756 | 'Windows (off by default and ' |
|
757 | 757 | 'experimental)', |
|
758 | 758 | ), |
|
759 | 759 | ] |
|
760 | 760 | |
|
761 | 761 | LONG_PATHS_MANIFEST = """ |
|
762 | 762 | <?xml version="1.0" encoding="UTF-8" standalone="yes"?> |
|
763 | 763 | <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> |
|
764 | 764 | <application> |
|
765 | 765 | <windowsSettings |
|
766 | 766 | xmlns:ws2="http://schemas.microsoft.com/SMI/2016/WindowsSettings"> |
|
767 | 767 | <ws2:longPathAware>true</ws2:longPathAware> |
|
768 | 768 | </windowsSettings> |
|
769 | 769 | </application> |
|
770 | 770 | </assembly>""" |
|
771 | 771 | |
|
772 | 772 | def initialize_options(self): |
|
773 | 773 | build_ext.initialize_options(self) |
|
774 | 774 | self.long_paths_support = False |
|
775 | 775 | |
|
776 | 776 | def build_extensions(self): |
|
777 | 777 | if os.name != 'nt': |
|
778 | 778 | return |
|
779 | 779 | if isinstance(self.compiler, HackedMingw32CCompiler): |
|
780 | 780 | self.compiler.compiler_so = self.compiler.compiler # no -mdll |
|
781 | 781 | self.compiler.dll_libraries = [] # no -lmsrvc90 |
|
782 | 782 | |
|
783 | 783 | pythonlib = None |
|
784 | 784 | |
|
785 | 785 | dir = os.path.dirname(self.get_ext_fullpath('dummy')) |
|
786 | 786 | self.hgtarget = os.path.join(dir, 'hg') |
|
787 | 787 | |
|
788 | 788 | if getattr(sys, 'dllhandle', None): |
|
789 | 789 | # Different Python installs can have different Python library |
|
790 | 790 | # names. e.g. the official CPython distribution uses pythonXY.dll |
|
791 | 791 | # and MinGW uses libpythonX.Y.dll. |
|
792 | 792 | _kernel32 = ctypes.windll.kernel32 |
|
793 | 793 | _kernel32.GetModuleFileNameA.argtypes = [ |
|
794 | 794 | ctypes.c_void_p, |
|
795 | 795 | ctypes.c_void_p, |
|
796 | 796 | ctypes.c_ulong, |
|
797 | 797 | ] |
|
798 | 798 | _kernel32.GetModuleFileNameA.restype = ctypes.c_ulong |
|
799 | 799 | size = 1000 |
|
800 | 800 | buf = ctypes.create_string_buffer(size + 1) |
|
801 | 801 | filelen = _kernel32.GetModuleFileNameA( |
|
802 | 802 | sys.dllhandle, ctypes.byref(buf), size |
|
803 | 803 | ) |
|
804 | 804 | |
|
805 | 805 | if filelen > 0 and filelen != size: |
|
806 | 806 | dllbasename = os.path.basename(buf.value) |
|
807 | 807 | if not dllbasename.lower().endswith(b'.dll'): |
|
808 | 808 | raise SystemExit( |
|
809 | 809 | 'Python DLL does not end with .dll: %s' % dllbasename |
|
810 | 810 | ) |
|
811 | 811 | pythonlib = dllbasename[:-4] |
|
812 | 812 | |
|
813 | 813 | # Copy the pythonXY.dll next to the binary so that it runs |
|
814 | 814 | # without tampering with PATH. |
|
815 | 815 | fsdecode = lambda x: x |
|
816 | 816 | if sys.version_info[0] >= 3: |
|
817 | 817 | fsdecode = os.fsdecode |
|
818 | 818 | dest = os.path.join( |
|
819 | 819 | os.path.dirname(self.hgtarget), |
|
820 | 820 | fsdecode(dllbasename), |
|
821 | 821 | ) |
|
822 | 822 | |
|
823 | 823 | if not os.path.exists(dest): |
|
824 | 824 | shutil.copy(buf.value, dest) |
|
825 | 825 | |
|
826 | 826 | if not pythonlib: |
|
827 | 827 | log.warn( |
|
828 | 828 | 'could not determine Python DLL filename; assuming pythonXY' |
|
829 | 829 | ) |
|
830 | 830 | |
|
831 | 831 | hv = sys.hexversion |
|
832 | 832 | pythonlib = b'python%d%d' % (hv >> 24, (hv >> 16) & 0xFF) |
|
833 | 833 | |
|
834 | 834 | log.info('using %s as Python library name' % pythonlib) |
|
835 | 835 | with open('mercurial/hgpythonlib.h', 'wb') as f: |
|
836 | 836 | f.write(b'/* this file is autogenerated by setup.py */\n') |
|
837 | 837 | f.write(b'#define HGPYTHONLIB "%s"\n' % pythonlib) |
|
838 | 838 | |
|
839 | 839 | macros = None |
|
840 | 840 | if sys.version_info[0] >= 3: |
|
841 | 841 | macros = [('_UNICODE', None), ('UNICODE', None)] |
|
842 | 842 | |
|
843 | 843 | objects = self.compiler.compile( |
|
844 | 844 | ['mercurial/exewrapper.c'], |
|
845 | 845 | output_dir=self.build_temp, |
|
846 | 846 | macros=macros, |
|
847 | 847 | ) |
|
848 | 848 | self.compiler.link_executable( |
|
849 | 849 | objects, self.hgtarget, libraries=[], output_dir=self.build_temp |
|
850 | 850 | ) |
|
851 | 851 | if self.long_paths_support: |
|
852 | 852 | self.addlongpathsmanifest() |
|
853 | 853 | |
|
854 | 854 | def addlongpathsmanifest(self): |
|
855 | 855 | r"""Add manifest pieces so that hg.exe understands long paths |
|
856 | 856 | |
|
857 | 857 | This is an EXPERIMENTAL feature, use with care. |
|
858 | 858 | To enable long paths support, one needs to do two things: |
|
859 | 859 | - build Mercurial with --long-paths-support option |
|
860 | 860 | - change HKLM\SYSTEM\CurrentControlSet\Control\FileSystem\ |
|
861 | 861 | LongPathsEnabled to have value 1. |
|
862 | 862 | |
|
863 | 863 | Please ignore 'warning 81010002: Unrecognized Element "longPathAware"'; |
|
864 | 864 | it happens because Mercurial uses mt.exe circa 2008, which is not |
|
865 | 865 | yet aware of long paths support in the manifest (I think so at least). |
|
866 | 866 | This does not stop mt.exe from embedding/merging the XML properly. |
|
867 | 867 | |
|
868 | 868 | Why should resource #1 be used for .exe manifests? I don't know, and I
|
869 | 869 | wasn't able to find an explanation for mortals. But it seems to work. |
|
870 | 870 | """ |
|
871 | 871 | exefname = self.compiler.executable_filename(self.hgtarget) |
|
872 | 872 | fdauto, manfname = tempfile.mkstemp(suffix='.hg.exe.manifest') |
|
873 | 873 | os.close(fdauto) |
|
874 | 874 | with open(manfname, 'w') as f: |
|
875 | 875 | f.write(self.LONG_PATHS_MANIFEST) |
|
876 | 876 | log.info("long paths manifest is written to '%s'" % manfname) |
|
877 | 877 | inputresource = '-inputresource:%s;#1' % exefname |
|
878 | 878 | outputresource = '-outputresource:%s;#1' % exefname |
|
879 | 879 | log.info("running mt.exe to update hg.exe's manifest in-place") |
|
880 | 880 | # supplying both -manifest and -inputresource to mt.exe makes |
|
881 | 881 | # it merge the embedded and supplied manifests in the -outputresource |
|
882 | 882 | self.spawn( |
|
883 | 883 | [ |
|
884 | 884 | 'mt.exe', |
|
885 | 885 | '-nologo', |
|
886 | 886 | '-manifest', |
|
887 | 887 | manfname, |
|
888 | 888 | inputresource, |
|
889 | 889 | outputresource, |
|
890 | 890 | ] |
|
891 | 891 | ) |
|
892 | 892 | log.info("done updating hg.exe's manifest") |
|
893 | 893 | os.remove(manfname) |
|
894 | 894 | |
|
895 | 895 | @property |
|
896 | 896 | def hgexepath(self): |
|
897 | 897 | dir = os.path.dirname(self.get_ext_fullpath('dummy')) |
|
898 | 898 | return os.path.join(self.build_temp, dir, 'hg.exe') |
|
899 | 899 | |
|
900 | 900 | |
|
901 | 901 | class hgbuilddoc(Command): |
|
902 | 902 | description = 'build documentation' |
|
903 | 903 | user_options = [ |
|
904 | 904 | ('man', None, 'generate man pages'), |
|
905 | 905 | ('html', None, 'generate html pages'), |
|
906 | 906 | ] |
|
907 | 907 | |
|
908 | 908 | def initialize_options(self): |
|
909 | 909 | self.man = None |
|
910 | 910 | self.html = None |
|
911 | 911 | |
|
912 | 912 | def finalize_options(self): |
|
913 | 913 | # If --man or --html are set, only generate what we're told to. |
|
914 | 914 | # Otherwise generate everything. |
|
915 | 915 | have_subset = self.man is not None or self.html is not None |
|
916 | 916 | |
|
917 | 917 | if have_subset: |
|
918 | 918 | self.man = True if self.man else False |
|
919 | 919 | self.html = True if self.html else False |
|
920 | 920 | else: |
|
921 | 921 | self.man = True |
|
922 | 922 | self.html = True |
|
923 | 923 | |
|
924 | 924 | def run(self): |
|
925 | 925 | def normalizecrlf(p): |
|
926 | 926 | with open(p, 'rb') as fh: |
|
927 | 927 | orig = fh.read() |
|
928 | 928 | |
|
929 | 929 | if b'\r\n' not in orig: |
|
930 | 930 | return |
|
931 | 931 | |
|
932 | 932 | log.info('normalizing %s to LF line endings' % p) |
|
933 | 933 | with open(p, 'wb') as fh: |
|
934 | 934 | fh.write(orig.replace(b'\r\n', b'\n')) |
|
935 | 935 | |
|
936 | 936 | def gentxt(root): |
|
937 | 937 | txt = 'doc/%s.txt' % root |
|
938 | 938 | log.info('generating %s' % txt) |
|
939 | 939 | res, out, err = runcmd( |
|
940 | 940 | [sys.executable, 'gendoc.py', root], os.environ, cwd='doc' |
|
941 | 941 | ) |
|
942 | 942 | if res: |
|
943 | 943 | raise SystemExit( |
|
944 | 944 | 'error running gendoc.py: %s' |
|
945 | 945 | % '\n'.join([sysstr(out), sysstr(err)]) |
|
946 | 946 | ) |
|
947 | 947 | |
|
948 | 948 | with open(txt, 'wb') as fh: |
|
949 | 949 | fh.write(out) |
|
950 | 950 | |
|
951 | 951 | def gengendoc(root): |
|
952 | 952 | gendoc = 'doc/%s.gendoc.txt' % root |
|
953 | 953 | |
|
954 | 954 | log.info('generating %s' % gendoc) |
|
955 | 955 | res, out, err = runcmd( |
|
956 | 956 | [sys.executable, 'gendoc.py', '%s.gendoc' % root], |
|
957 | 957 | os.environ, |
|
958 | 958 | cwd='doc', |
|
959 | 959 | ) |
|
960 | 960 | if res: |
|
961 | 961 | raise SystemExit( |
|
962 | 962 | 'error running gendoc: %s' |
|
963 | 963 | % '\n'.join([sysstr(out), sysstr(err)]) |
|
964 | 964 | ) |
|
965 | 965 | |
|
966 | 966 | with open(gendoc, 'wb') as fh: |
|
967 | 967 | fh.write(out) |
|
968 | 968 | |
|
969 | 969 | def genman(root): |
|
970 | 970 | log.info('generating doc/%s' % root) |
|
971 | 971 | res, out, err = runcmd( |
|
972 | 972 | [ |
|
973 | 973 | sys.executable, |
|
974 | 974 | 'runrst', |
|
975 | 975 | 'hgmanpage', |
|
976 | 976 | '--halt', |
|
977 | 977 | 'warning', |
|
978 | 978 | '--strip-elements-with-class', |
|
979 | 979 | 'htmlonly', |
|
980 | 980 | '%s.txt' % root, |
|
981 | 981 | root, |
|
982 | 982 | ], |
|
983 | 983 | os.environ, |
|
984 | 984 | cwd='doc', |
|
985 | 985 | ) |
|
986 | 986 | if res: |
|
987 | 987 | raise SystemExit( |
|
988 | 988 | 'error running runrst: %s' |
|
989 | 989 | % '\n'.join([sysstr(out), sysstr(err)]) |
|
990 | 990 | ) |
|
991 | 991 | |
|
992 | 992 | normalizecrlf('doc/%s' % root) |
|
993 | 993 | |
|
994 | 994 | def genhtml(root): |
|
995 | 995 | log.info('generating doc/%s.html' % root) |
|
996 | 996 | res, out, err = runcmd( |
|
997 | 997 | [ |
|
998 | 998 | sys.executable, |
|
999 | 999 | 'runrst', |
|
1000 | 1000 | 'html', |
|
1001 | 1001 | '--halt', |
|
1002 | 1002 | 'warning', |
|
1003 | 1003 | '--link-stylesheet', |
|
1004 | 1004 | '--stylesheet-path', |
|
1005 | 1005 | 'style.css', |
|
1006 | 1006 | '%s.txt' % root, |
|
1007 | 1007 | '%s.html' % root, |
|
1008 | 1008 | ], |
|
1009 | 1009 | os.environ, |
|
1010 | 1010 | cwd='doc', |
|
1011 | 1011 | ) |
|
1012 | 1012 | if res: |
|
1013 | 1013 | raise SystemExit( |
|
1014 | 1014 | 'error running runrst: %s' |
|
1015 | 1015 | % '\n'.join([sysstr(out), sysstr(err)]) |
|
1016 | 1016 | ) |
|
1017 | 1017 | |
|
1018 | 1018 | normalizecrlf('doc/%s.html' % root) |
|
1019 | 1019 | |
|
1020 | 1020 | # This logic is duplicated in doc/Makefile. |
|
1021 | 1021 | sources = { |
|
1022 | 1022 | f |
|
1023 | 1023 | for f in os.listdir('mercurial/helptext') |
|
1024 | 1024 | if re.search(r'[0-9]\.txt$', f) |
|
1025 | 1025 | } |
|
1026 | 1026 | |
|
1027 | 1027 | # common.txt is a one-off. |
|
1028 | 1028 | gentxt('common') |
|
1029 | 1029 | |
|
1030 | 1030 | for source in sorted(sources): |
|
1031 | 1031 | assert source[-4:] == '.txt' |
|
1032 | 1032 | root = source[:-4] |
|
1033 | 1033 | |
|
1034 | 1034 | gentxt(root) |
|
1035 | 1035 | gengendoc(root) |
|
1036 | 1036 | |
|
1037 | 1037 | if self.man: |
|
1038 | 1038 | genman(root) |
|
1039 | 1039 | if self.html: |
|
1040 | 1040 | genhtml(root) |
|
1041 | 1041 | |
|
1042 | 1042 | |
|
1043 | 1043 | class hginstall(install): |
|
1044 | 1044 | |
|
1045 | 1045 | user_options = install.user_options + [ |
|
1046 | 1046 | ( |
|
1047 | 1047 | 'old-and-unmanageable', |
|
1048 | 1048 | None, |
|
1049 | 1049 | 'noop, present for eggless setuptools compat', |
|
1050 | 1050 | ), |
|
1051 | 1051 | ( |
|
1052 | 1052 | 'single-version-externally-managed', |
|
1053 | 1053 | None, |
|
1054 | 1054 | 'noop, present for eggless setuptools compat', |
|
1055 | 1055 | ), |
|
1056 | 1056 | ] |
|
1057 | 1057 | |
|
1058 | 1058 | # Also helps setuptools not be sad while we refuse to create eggs. |
|
1059 | 1059 | single_version_externally_managed = True |
|
1060 | 1060 | |
|
1061 | 1061 | def get_sub_commands(self): |
|
1062 | 1062 | # Screen out egg related commands to prevent egg generation. But allow |
|
1063 | 1063 | # mercurial.egg-info generation, since that is part of modern |
|
1064 | 1064 | # packaging. |
|
1065 | 1065 | excl = {'bdist_egg'} |
|
1066 | 1066 | return filter(lambda x: x not in excl, install.get_sub_commands(self)) |
|
1067 | 1067 | |
|
1068 | 1068 | |
|
1069 | 1069 | class hginstalllib(install_lib): |
|
1070 | 1070 | """ |
|
1071 | 1071 | This is a specialization of install_lib that replaces the copy_file used |
|
1072 | 1072 | there so that it supports setting the mode of files after copying them, |
|
1073 | 1073 | instead of just preserving the mode that the files originally had. If your |
|
1074 | 1074 | system has a umask of something like 027, preserving the permissions when |
|
1075 | 1075 | copying will lead to a broken install. |
|
1076 | 1076 | |
|
1077 | 1077 | Note that just passing keep_permissions=False to copy_file would be |
|
1078 | 1078 | insufficient, as it might still be applying a umask. |
|
1079 | 1079 | """ |
|
1080 | 1080 | |
|
1081 | 1081 | def run(self): |
|
1082 | 1082 | realcopyfile = file_util.copy_file |
|
1083 | 1083 | |
|
1084 | 1084 | def copyfileandsetmode(*args, **kwargs): |
|
1085 | 1085 | src, dst = args[0], args[1] |
|
1086 | 1086 | dst, copied = realcopyfile(*args, **kwargs) |
|
1087 | 1087 | if copied: |
|
1088 | 1088 | st = os.stat(src) |
|
1089 | 1089 | # Persist executable bit (apply it to group and other if user |
|
1090 | 1090 | # has it) |
|
1091 | 1091 | if st[stat.ST_MODE] & stat.S_IXUSR: |
|
1092 | 1092 | setmode = int('0755', 8) |
|
1093 | 1093 | else: |
|
1094 | 1094 | setmode = int('0644', 8) |
|
1095 | 1095 | m = stat.S_IMODE(st[stat.ST_MODE]) |
|
1096 | 1096 | m = (m & ~int('0777', 8)) | setmode |
|
1097 | 1097 | os.chmod(dst, m) |
|
1098 | 1098 | |
|
1099 | 1099 | file_util.copy_file = copyfileandsetmode |
|
1100 | 1100 | try: |
|
1101 | 1101 | install_lib.run(self) |
|
1102 | 1102 | finally: |
|
1103 | 1103 | file_util.copy_file = realcopyfile |
|
1104 | 1104 | |
|
1105 | 1105 | |
|
1106 | 1106 | class hginstallscripts(install_scripts): |
|
1107 | 1107 | """ |
|
1108 | 1108 | This is a specialization of install_scripts that replaces the @LIBDIR@ with |
|
1109 | 1109 | the configured directory for modules. If possible, the path is made relative |
|
1110 | 1110 | to the directory for scripts. |
|
1111 | 1111 | """ |
|
1112 | 1112 | |
|
1113 | 1113 | def initialize_options(self): |
|
1114 | 1114 | install_scripts.initialize_options(self) |
|
1115 | 1115 | |
|
1116 | 1116 | self.install_lib = None |
|
1117 | 1117 | |
|
1118 | 1118 | def finalize_options(self): |
|
1119 | 1119 | install_scripts.finalize_options(self) |
|
1120 | 1120 | self.set_undefined_options('install', ('install_lib', 'install_lib')) |
|
1121 | 1121 | |
|
1122 | 1122 | def run(self): |
|
1123 | 1123 | install_scripts.run(self) |
|
1124 | 1124 | |
|
1125 | 1125 | # It only makes sense to replace @LIBDIR@ with the install path if |
|
1126 | 1126 | # the install path is known. For wheels, the logic below calculates |
|
1127 | 1127 | # the libdir to be "../..". This is because the internal layout of a |
|
1128 | 1128 | # wheel archive looks like: |
|
1129 | 1129 | # |
|
1130 | 1130 | # mercurial-3.6.1.data/scripts/hg |
|
1131 | 1131 | # mercurial/__init__.py |
|
1132 | 1132 | # |
|
1133 | 1133 | # When installing wheels, the subdirectories of the "<pkg>.data" |
|
1134 | 1134 | # directory are translated to system local paths and files therein |
|
1135 | 1135 | # are copied in place. The mercurial/* files are installed into the |
|
1136 | 1136 | # site-packages directory. However, the site-packages directory |
|
1137 | 1137 | # isn't known until wheel install time. This means we have no clue |
|
1138 | 1138 | # at wheel generation time what the installed site-packages directory |
|
1139 | 1139 | # will be. And, wheels don't appear to provide the ability to register |
|
1140 | 1140 | # custom code to run during wheel installation. This all means that |
|
1141 | 1141 | # we can't reliably set the libdir in wheels: the default behavior |
|
1142 | 1142 | # of looking in sys.path must do. |
|
1143 | 1143 | |
|
1144 | 1144 | if ( |
|
1145 | 1145 | os.path.splitdrive(self.install_dir)[0] |
|
1146 | 1146 | != os.path.splitdrive(self.install_lib)[0] |
|
1147 | 1147 | ): |
|
1148 | 1148 | # can't make relative paths from one drive to another, so use an |
|
1149 | 1149 | # absolute path instead |
|
1150 | 1150 | libdir = self.install_lib |
|
1151 | 1151 | else: |
|
1152 | 1152 | libdir = os.path.relpath(self.install_lib, self.install_dir) |
|
1153 | 1153 | |
|
1154 | 1154 | for outfile in self.outfiles: |
|
1155 | 1155 | with open(outfile, 'rb') as fp: |
|
1156 | 1156 | data = fp.read() |
|
1157 | 1157 | |
|
1158 | 1158 | # skip binary files |
|
1159 | 1159 | if b'\0' in data: |
|
1160 | 1160 | continue |
|
1161 | 1161 | |
|
1162 | 1162 | # During local installs, the shebang will be rewritten to the final |
|
1163 | 1163 | # install path. During wheel packaging, the shebang has a special |
|
1164 | 1164 | # value. |
|
1165 | 1165 | if data.startswith(b'#!python'): |
|
1166 | 1166 | log.info( |
|
1167 | 1167 | 'not rewriting @LIBDIR@ in %s because install path ' |
|
1168 | 1168 | 'not known' % outfile |
|
1169 | 1169 | ) |
|
1170 | 1170 | continue |
|
1171 | 1171 | |
|
1172 | 1172 | data = data.replace(b'@LIBDIR@', libdir.encode(libdir_escape)) |
|
1173 | 1173 | with open(outfile, 'wb') as fp: |
|
1174 | 1174 | fp.write(data) |
|
1175 | 1175 | |
|
1176 | 1176 | |
|
1177 | 1177 | # virtualenv installs custom distutils/__init__.py and |
|
1178 | 1178 | # distutils/distutils.cfg files which essentially proxy back to the |
|
1179 | 1179 | # "real" distutils in the main Python install. The presence of this |
|
1180 | 1180 | # directory causes py2exe to pick up the "hacked" distutils package |
|
1181 | 1181 | # from the virtualenv and "import distutils" will fail from the py2exe |
|
1182 | 1182 | # build because the "real" distutils files can't be located. |
|
1183 | 1183 | # |
|
1184 | 1184 | # We work around this by monkeypatching the py2exe code finding Python |
|
1185 | 1185 | # modules to replace the found virtualenv distutils modules with the |
|
1186 | 1186 | # original versions via filesystem scanning. This is a bit hacky. But |
|
1187 | 1187 | # it allows us to use virtualenvs for py2exe packaging, which is more |
|
1188 | 1188 | # deterministic and reproducible. |
|
1189 | 1189 | # |
|
1190 | 1190 | # It's worth noting that the common StackOverflow suggestions for this |
|
1191 | 1191 | # problem involve copying the original distutils files into the |
|
1192 | 1192 | # virtualenv or into the staging directory after setup() is invoked. |
|
1193 | 1193 | # The former is very brittle and can easily break setup(). Our hacking |
|
1194 | 1194 | # of the found modules routine has a similar result as copying the files |
|
1195 | 1195 | # manually. But it makes fewer assumptions about how py2exe works and |
|
1196 | 1196 | # is less brittle. |
|
1197 | 1197 | |
|
1198 | 1198 | # This only catches virtualenvs made with virtualenv (as opposed to |
|
1199 | 1199 | # venv, which is likely what Python 3 uses). |
|
1200 | 1200 | py2exehacked = py2exeloaded and getattr(sys, 'real_prefix', None) is not None |
|
1201 | 1201 | |
|
1202 | 1202 | if py2exehacked: |
|
1203 | 1203 | from distutils.command.py2exe import py2exe as buildpy2exe |
|
1204 | 1204 | from py2exe.mf import Module as py2exemodule |
|
1205 | 1205 | |
|
1206 | 1206 | class hgbuildpy2exe(buildpy2exe): |
|
1207 | 1207 | def find_needed_modules(self, mf, files, modules): |
|
1208 | 1208 | res = buildpy2exe.find_needed_modules(self, mf, files, modules) |
|
1209 | 1209 | |
|
1210 | 1210 | # Replace virtualenv's distutils modules with the real ones. |
|
1211 | 1211 | modules = {} |
|
1212 | 1212 | for k, v in res.modules.items(): |
|
1213 | 1213 | if k != 'distutils' and not k.startswith('distutils.'): |
|
1214 | 1214 | modules[k] = v |
|
1215 | 1215 | |
|
1216 | 1216 | res.modules = modules |
|
1217 | 1217 | |
|
1218 | 1218 | import opcode |
|
1219 | 1219 | |
|
1220 | 1220 | distutilsreal = os.path.join( |
|
1221 | 1221 | os.path.dirname(opcode.__file__), 'distutils' |
|
1222 | 1222 | ) |
|
1223 | 1223 | |
|
1224 | 1224 | for root, dirs, files in os.walk(distutilsreal): |
|
1225 | 1225 | for f in sorted(files): |
|
1226 | 1226 | if not f.endswith('.py'): |
|
1227 | 1227 | continue |
|
1228 | 1228 | |
|
1229 | 1229 | full = os.path.join(root, f) |
|
1230 | 1230 | |
|
1231 | 1231 | parents = ['distutils'] |
|
1232 | 1232 | |
|
1233 | 1233 | if root != distutilsreal: |
|
1234 | 1234 | rel = os.path.relpath(root, distutilsreal) |
|
1235 | 1235 | parents.extend(p for p in rel.split(os.sep)) |
|
1236 | 1236 | |
|
1237 | 1237 | modname = '%s.%s' % ('.'.join(parents), f[:-3]) |
|
1238 | 1238 | |
|
1239 | 1239 | if modname.startswith('distutils.tests.'): |
|
1240 | 1240 | continue |
|
1241 | 1241 | |
|
1242 | 1242 | if modname.endswith('.__init__'): |
|
1243 | 1243 | modname = modname[: -len('.__init__')] |
|
1244 | 1244 | path = os.path.dirname(full) |
|
1245 | 1245 | else: |
|
1246 | 1246 | path = None |
|
1247 | 1247 | |
|
1248 | 1248 | res.modules[modname] = py2exemodule( |
|
1249 | 1249 | modname, full, path=path |
|
1250 | 1250 | ) |
|
1251 | 1251 | |
|
1252 | 1252 | if 'distutils' not in res.modules: |
|
1253 | 1253 | raise SystemExit('could not find distutils modules') |
|
1254 | 1254 | |
|
1255 | 1255 | return res |
|
1256 | 1256 | |
|
1257 | 1257 | |
|
1258 | 1258 | cmdclass = { |
|
1259 | 1259 | 'build': hgbuild, |
|
1260 | 1260 | 'build_doc': hgbuilddoc, |
|
1261 | 1261 | 'build_mo': hgbuildmo, |
|
1262 | 1262 | 'build_ext': hgbuildext, |
|
1263 | 1263 | 'build_py': hgbuildpy, |
|
1264 | 1264 | 'build_scripts': hgbuildscripts, |
|
1265 | 1265 | 'build_hgextindex': buildhgextindex, |
|
1266 | 1266 | 'install': hginstall, |
|
1267 | 1267 | 'install_lib': hginstalllib, |
|
1268 | 1268 | 'install_scripts': hginstallscripts, |
|
1269 | 1269 | 'build_hgexe': buildhgexe, |
|
1270 | 1270 | } |
|
1271 | 1271 | |
|
1272 | 1272 | if py2exehacked: |
|
1273 | 1273 | cmdclass['py2exe'] = hgbuildpy2exe |
|
1274 | 1274 | |
|
1275 | 1275 | packages = [ |
|
1276 | 1276 | 'mercurial', |
|
1277 | 1277 | 'mercurial.cext', |
|
1278 | 1278 | 'mercurial.cffi', |
|
1279 | 1279 | 'mercurial.defaultrc', |
|
1280 | 1280 | 'mercurial.helptext', |
|
1281 | 1281 | 'mercurial.helptext.internals', |
|
1282 | 1282 | 'mercurial.hgweb', |
|
1283 | 1283 | 'mercurial.interfaces', |
|
1284 | 1284 | 'mercurial.pure', |
|
1285 | 1285 | 'mercurial.templates', |
|
1286 | 1286 | 'mercurial.thirdparty', |
|
1287 | 1287 | 'mercurial.thirdparty.attr', |
|
1288 | 1288 | 'mercurial.thirdparty.zope', |
|
1289 | 1289 | 'mercurial.thirdparty.zope.interface', |
|
1290 | 'mercurial.upgrade_utils', | |
|
1290 | 1291 | 'mercurial.utils', |
|
1291 | 1292 | 'mercurial.revlogutils', |
|
1292 | 1293 | 'mercurial.testing', |
|
1293 | 1294 | 'hgext', |
|
1294 | 1295 | 'hgext.convert', |
|
1295 | 1296 | 'hgext.fsmonitor', |
|
1296 | 1297 | 'hgext.fastannotate', |
|
1297 | 1298 | 'hgext.fsmonitor.pywatchman', |
|
1298 | 1299 | 'hgext.git', |
|
1299 | 1300 | 'hgext.highlight', |
|
1300 | 1301 | 'hgext.hooklib', |
|
1301 | 1302 | 'hgext.infinitepush', |
|
1302 | 1303 | 'hgext.largefiles', |
|
1303 | 1304 | 'hgext.lfs', |
|
1304 | 1305 | 'hgext.narrow', |
|
1305 | 1306 | 'hgext.remotefilelog', |
|
1306 | 1307 | 'hgext.zeroconf', |
|
1307 | 1308 | 'hgext3rd', |
|
1308 | 1309 | 'hgdemandimport', |
|
1309 | 1310 | ] |
|
1310 | 1311 | |
|
1311 | 1312 | for name in os.listdir(os.path.join('mercurial', 'templates')): |
|
1312 | 1313 | if name != '__pycache__' and os.path.isdir( |
|
1313 | 1314 | os.path.join('mercurial', 'templates', name) |
|
1314 | 1315 | ): |
|
1315 | 1316 | packages.append('mercurial.templates.%s' % name) |
|
1316 | 1317 | |
|
1317 | 1318 | if sys.version_info[0] == 2: |
|
1318 | 1319 | packages.extend( |
|
1319 | 1320 | [ |
|
1320 | 1321 | 'mercurial.thirdparty.concurrent', |
|
1321 | 1322 | 'mercurial.thirdparty.concurrent.futures', |
|
1322 | 1323 | ] |
|
1323 | 1324 | ) |
|
1324 | 1325 | |
|
1325 | 1326 | if 'HG_PY2EXE_EXTRA_INSTALL_PACKAGES' in os.environ: |
|
1326 | 1327 | # py2exe can't cope with namespace packages very well, so we have to |
|
1327 | 1328 | # install any hgext3rd.* extensions that we want in the final py2exe |
|
1328 | 1329 | # image here. This is gross, but you gotta do what you gotta do. |
|
1329 | 1330 | packages.extend(os.environ['HG_PY2EXE_EXTRA_INSTALL_PACKAGES'].split(' ')) |
|
1330 | 1331 | |
|
1331 | 1332 | common_depends = [ |
|
1332 | 1333 | 'mercurial/bitmanipulation.h', |
|
1333 | 1334 | 'mercurial/compat.h', |
|
1334 | 1335 | 'mercurial/cext/util.h', |
|
1335 | 1336 | ] |
|
1336 | 1337 | common_include_dirs = ['mercurial'] |
|
1337 | 1338 | |
|
1338 | 1339 | common_cflags = [] |
|
1339 | 1340 | |
|
1340 | 1341 | # MSVC 2008 still needs declarations at the top of the scope, but Python 3.9 |
|
1341 | 1342 | # makes declarations not at the top of a scope in the headers. |
|
1342 | 1343 | if os.name != 'nt' and sys.version_info[1] < 9: |
|
1343 | 1344 | common_cflags = ['-Werror=declaration-after-statement'] |
|
1344 | 1345 | |
|
1345 | 1346 | osutil_cflags = [] |
|
1346 | 1347 | osutil_ldflags = [] |
|
1347 | 1348 | |
|
1348 | 1349 | # platform specific macros |
|
1349 | 1350 | for plat, func in [('bsd', 'setproctitle')]: |
|
1350 | 1351 | if re.search(plat, sys.platform) and hasfunction(new_compiler(), func): |
|
1351 | 1352 | osutil_cflags.append('-DHAVE_%s' % func.upper()) |
|
1352 | 1353 | |
|
1353 | 1354 | for plat, macro, code in [ |
|
1354 | 1355 | ( |
|
1355 | 1356 | 'bsd|darwin', |
|
1356 | 1357 | 'BSD_STATFS', |
|
1357 | 1358 | ''' |
|
1358 | 1359 | #include <sys/param.h> |
|
1359 | 1360 | #include <sys/mount.h> |
|
1360 | 1361 | int main() { struct statfs s; return sizeof(s.f_fstypename); } |
|
1361 | 1362 | ''', |
|
1362 | 1363 | ), |
|
1363 | 1364 | ( |
|
1364 | 1365 | 'linux', |
|
1365 | 1366 | 'LINUX_STATFS', |
|
1366 | 1367 | ''' |
|
1367 | 1368 | #include <linux/magic.h> |
|
1368 | 1369 | #include <sys/vfs.h> |
|
1369 | 1370 | int main() { struct statfs s; return sizeof(s.f_type); } |
|
1370 | 1371 | ''', |
|
1371 | 1372 | ), |
|
1372 | 1373 | ]: |
|
1373 | 1374 | if re.search(plat, sys.platform) and cancompile(new_compiler(), code): |
|
1374 | 1375 | osutil_cflags.append('-DHAVE_%s' % macro) |
|
1375 | 1376 | |
|
1376 | 1377 | if sys.platform == 'darwin': |
|
1377 | 1378 | osutil_ldflags += ['-framework', 'ApplicationServices'] |
|
1378 | 1379 | |
|
1379 | 1380 | if sys.platform == 'sunos5': |
|
1380 | 1381 | osutil_ldflags += ['-lsocket'] |
|
1381 | 1382 | |
|
1382 | 1383 | xdiff_srcs = [ |
|
1383 | 1384 | 'mercurial/thirdparty/xdiff/xdiffi.c', |
|
1384 | 1385 | 'mercurial/thirdparty/xdiff/xprepare.c', |
|
1385 | 1386 | 'mercurial/thirdparty/xdiff/xutils.c', |
|
1386 | 1387 | ] |
|
1387 | 1388 | |
|
1388 | 1389 | xdiff_headers = [ |
|
1389 | 1390 | 'mercurial/thirdparty/xdiff/xdiff.h', |
|
1390 | 1391 | 'mercurial/thirdparty/xdiff/xdiffi.h', |
|
1391 | 1392 | 'mercurial/thirdparty/xdiff/xinclude.h', |
|
1392 | 1393 | 'mercurial/thirdparty/xdiff/xmacros.h', |
|
1393 | 1394 | 'mercurial/thirdparty/xdiff/xprepare.h', |
|
1394 | 1395 | 'mercurial/thirdparty/xdiff/xtypes.h', |
|
1395 | 1396 | 'mercurial/thirdparty/xdiff/xutils.h', |
|
1396 | 1397 | ] |
|
1397 | 1398 | |
|
1398 | 1399 | |
|
1399 | 1400 | class RustCompilationError(CCompilerError): |
|
1400 | 1401 | """Exception class for Rust compilation errors.""" |
|
1401 | 1402 | |
|
1402 | 1403 | |
|
1403 | 1404 | class RustExtension(Extension): |
|
1404 | 1405 | """Base classes for concrete Rust Extension classes.""" |
|
1405 | 1406 | |
|
1406 | 1407 | rusttargetdir = os.path.join('rust', 'target', 'release') |
|
1407 | 1408 | |
|
1408 | 1409 | def __init__( |
|
1409 | 1410 | self, mpath, sources, rustlibname, subcrate, py3_features=None, **kw |
|
1410 | 1411 | ): |
|
1411 | 1412 | Extension.__init__(self, mpath, sources, **kw) |
|
1412 | 1413 | srcdir = self.rustsrcdir = os.path.join('rust', subcrate) |
|
1413 | 1414 | self.py3_features = py3_features |
|
1414 | 1415 | |
|
1415 | 1416 | # adding Rust source and control files to depends so that the extension |
|
1416 | 1417 | # gets rebuilt if they've changed |
|
1417 | 1418 | self.depends.append(os.path.join(srcdir, 'Cargo.toml')) |
|
1418 | 1419 | cargo_lock = os.path.join(srcdir, 'Cargo.lock') |
|
1419 | 1420 | if os.path.exists(cargo_lock): |
|
1420 | 1421 | self.depends.append(cargo_lock) |
|
1421 | 1422 | for dirpath, subdir, fnames in os.walk(os.path.join(srcdir, 'src')): |
|
1422 | 1423 | self.depends.extend( |
|
1423 | 1424 | os.path.join(dirpath, fname) |
|
1424 | 1425 | for fname in fnames |
|
1425 | 1426 | if os.path.splitext(fname)[1] == '.rs' |
|
1426 | 1427 | ) |
|
1427 | 1428 | |
|
1428 | 1429 | @staticmethod |
|
1429 | 1430 | def rustdylibsuffix(): |
|
1430 | 1431 | """Return the suffix for shared libraries produced by rustc. |
|
1431 | 1432 | |
|
1432 | 1433 | See also: https://doc.rust-lang.org/reference/linkage.html |
|
1433 | 1434 | """ |
|
1434 | 1435 | if sys.platform == 'darwin': |
|
1435 | 1436 | return '.dylib' |
|
1436 | 1437 | elif os.name == 'nt': |
|
1437 | 1438 | return '.dll' |
|
1438 | 1439 | else: |
|
1439 | 1440 | return '.so' |
|
1440 | 1441 | |
|
1441 | 1442 | def rustbuild(self): |
|
1442 | 1443 | env = os.environ.copy() |
|
1443 | 1444 | if 'HGTEST_RESTOREENV' in env: |
|
1444 | 1445 | # Mercurial tests change HOME to a temporary directory, |
|
1445 | 1446 | # but, if installed with rustup, the Rust toolchain needs |
|
1446 | 1447 | # HOME to be correct (otherwise the 'no default toolchain' |
|
1447 | 1448 | # error message is issued and the build fails). |
|
1448 | 1449 | # This happens currently with test-hghave.t, which does |
|
1449 | 1450 | # invoke this build. |
|
1450 | 1451 | |
|
1451 | 1452 | # Unix only fix (os.path.expanduser not really reliable if |
|
1452 | 1453 | # HOME is shadowed like this) |
|
1453 | 1454 | import pwd |
|
1454 | 1455 | |
|
1455 | 1456 | env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir |
|
1456 | 1457 | |
|
1457 | 1458 | cargocmd = ['cargo', 'rustc', '--release'] |
|
1458 | 1459 | |
|
1459 | 1460 | feature_flags = [] |
|
1460 | 1461 | |
|
1461 | 1462 | if sys.version_info[0] == 3 and self.py3_features is not None: |
|
1462 | 1463 | feature_flags.append(self.py3_features) |
|
1463 | 1464 | cargocmd.append('--no-default-features') |
|
1464 | 1465 | |
|
1465 | 1466 | rust_features = env.get("HG_RUST_FEATURES") |
|
1466 | 1467 | if rust_features: |
|
1467 | 1468 | feature_flags.append(rust_features) |
|
1468 | 1469 | |
|
1469 | 1470 | cargocmd.extend(('--features', " ".join(feature_flags))) |
|
1470 | 1471 | |
|
1471 | 1472 | cargocmd.append('--') |
|
1472 | 1473 | if sys.platform == 'darwin': |
|
1473 | 1474 | cargocmd.extend( |
|
1474 | 1475 | ("-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup") |
|
1475 | 1476 | ) |
|
1476 | 1477 | try: |
|
1477 | 1478 | subprocess.check_call(cargocmd, env=env, cwd=self.rustsrcdir) |
|
1478 | 1479 | except OSError as exc: |
|
1479 | 1480 | if exc.errno == errno.ENOENT: |
|
1480 | 1481 | raise RustCompilationError("Cargo not found") |
|
1481 | 1482 | elif exc.errno == errno.EACCES: |
|
1482 | 1483 | raise RustCompilationError( |
|
1483 | 1484 | "Cargo found, but permission to execute it is denied"
|
1484 | 1485 | ) |
|
1485 | 1486 | else: |
|
1486 | 1487 | raise |
|
1487 | 1488 | except subprocess.CalledProcessError: |
|
1488 | 1489 | raise RustCompilationError( |
|
1489 | 1490 | "Cargo failed. Working directory: %r, " |
|
1490 | 1491 | "command: %r, environment: %r" |
|
1491 | 1492 | % (self.rustsrcdir, cargocmd, env) |
|
1492 | 1493 | ) |
|
1493 | 1494 | |
|
1494 | 1495 | |
|
1495 | 1496 | class RustStandaloneExtension(RustExtension): |
|
1496 | 1497 | def __init__(self, pydottedname, rustcrate, dylibname, **kw): |
|
1497 | 1498 | RustExtension.__init__( |
|
1498 | 1499 | self, pydottedname, [], dylibname, rustcrate, **kw |
|
1499 | 1500 | ) |
|
1500 | 1501 | self.dylibname = dylibname |
|
1501 | 1502 | |
|
1502 | 1503 | def build(self, target_dir): |
|
1503 | 1504 | self.rustbuild() |
|
1504 | 1505 | target = [target_dir] |
|
1505 | 1506 | target.extend(self.name.split('.')) |
|
1506 | 1507 | target[-1] += DYLIB_SUFFIX |
|
1507 | 1508 | shutil.copy2( |
|
1508 | 1509 | os.path.join( |
|
1509 | 1510 | self.rusttargetdir, self.dylibname + self.rustdylibsuffix() |
|
1510 | 1511 | ), |
|
1511 | 1512 | os.path.join(*target), |
|
1512 | 1513 | ) |
|
1513 | 1514 | |
|
1514 | 1515 | |
|
1515 | 1516 | extmodules = [ |
|
1516 | 1517 | Extension( |
|
1517 | 1518 | 'mercurial.cext.base85', |
|
1518 | 1519 | ['mercurial/cext/base85.c'], |
|
1519 | 1520 | include_dirs=common_include_dirs, |
|
1520 | 1521 | extra_compile_args=common_cflags, |
|
1521 | 1522 | depends=common_depends, |
|
1522 | 1523 | ), |
|
1523 | 1524 | Extension( |
|
1524 | 1525 | 'mercurial.cext.bdiff', |
|
1525 | 1526 | ['mercurial/bdiff.c', 'mercurial/cext/bdiff.c'] + xdiff_srcs, |
|
1526 | 1527 | include_dirs=common_include_dirs, |
|
1527 | 1528 | extra_compile_args=common_cflags, |
|
1528 | 1529 | depends=common_depends + ['mercurial/bdiff.h'] + xdiff_headers, |
|
1529 | 1530 | ), |
|
1530 | 1531 | Extension( |
|
1531 | 1532 | 'mercurial.cext.mpatch', |
|
1532 | 1533 | ['mercurial/mpatch.c', 'mercurial/cext/mpatch.c'], |
|
1533 | 1534 | include_dirs=common_include_dirs, |
|
1534 | 1535 | extra_compile_args=common_cflags, |
|
1535 | 1536 | depends=common_depends, |
|
1536 | 1537 | ), |
|
1537 | 1538 | Extension( |
|
1538 | 1539 | 'mercurial.cext.parsers', |
|
1539 | 1540 | [ |
|
1540 | 1541 | 'mercurial/cext/charencode.c', |
|
1541 | 1542 | 'mercurial/cext/dirs.c', |
|
1542 | 1543 | 'mercurial/cext/manifest.c', |
|
1543 | 1544 | 'mercurial/cext/parsers.c', |
|
1544 | 1545 | 'mercurial/cext/pathencode.c', |
|
1545 | 1546 | 'mercurial/cext/revlog.c', |
|
1546 | 1547 | ], |
|
1547 | 1548 | include_dirs=common_include_dirs, |
|
1548 | 1549 | extra_compile_args=common_cflags, |
|
1549 | 1550 | depends=common_depends |
|
1550 | 1551 | + [ |
|
1551 | 1552 | 'mercurial/cext/charencode.h', |
|
1552 | 1553 | 'mercurial/cext/revlog.h', |
|
1553 | 1554 | ], |
|
1554 | 1555 | ), |
|
1555 | 1556 | Extension( |
|
1556 | 1557 | 'mercurial.cext.osutil', |
|
1557 | 1558 | ['mercurial/cext/osutil.c'], |
|
1558 | 1559 | include_dirs=common_include_dirs, |
|
1559 | 1560 | extra_compile_args=common_cflags + osutil_cflags, |
|
1560 | 1561 | extra_link_args=osutil_ldflags, |
|
1561 | 1562 | depends=common_depends, |
|
1562 | 1563 | ), |
|
1563 | 1564 | Extension( |
|
1564 | 1565 | 'mercurial.thirdparty.zope.interface._zope_interface_coptimizations', |
|
1565 | 1566 | [ |
|
1566 | 1567 | 'mercurial/thirdparty/zope/interface/_zope_interface_coptimizations.c', |
|
1567 | 1568 | ], |
|
1568 | 1569 | extra_compile_args=common_cflags, |
|
1569 | 1570 | ), |
|
1570 | 1571 | Extension( |
|
1571 | 1572 | 'mercurial.thirdparty.sha1dc', |
|
1572 | 1573 | [ |
|
1573 | 1574 | 'mercurial/thirdparty/sha1dc/cext.c', |
|
1574 | 1575 | 'mercurial/thirdparty/sha1dc/lib/sha1.c', |
|
1575 | 1576 | 'mercurial/thirdparty/sha1dc/lib/ubc_check.c', |
|
1576 | 1577 | ], |
|
1577 | 1578 | extra_compile_args=common_cflags, |
|
1578 | 1579 | ), |
|
1579 | 1580 | Extension( |
|
1580 | 1581 | 'hgext.fsmonitor.pywatchman.bser', |
|
1581 | 1582 | ['hgext/fsmonitor/pywatchman/bser.c'], |
|
1582 | 1583 | extra_compile_args=common_cflags, |
|
1583 | 1584 | ), |
|
1584 | 1585 | RustStandaloneExtension( |
|
1585 | 1586 | 'mercurial.rustext', 'hg-cpython', 'librusthg', py3_features='python3' |
|
1586 | 1587 | ), |
|
1587 | 1588 | ] |
|
1588 | 1589 | |
|
1589 | 1590 | |
|
1590 | 1591 | sys.path.insert(0, 'contrib/python-zstandard') |
|
1591 | 1592 | import setup_zstd |
|
1592 | 1593 | |
|
1593 | 1594 | zstd = setup_zstd.get_c_extension( |
|
1594 | 1595 | name='mercurial.zstd', root=os.path.abspath(os.path.dirname(__file__)) |
|
1595 | 1596 | ) |
|
1596 | 1597 | zstd.extra_compile_args += common_cflags |
|
1597 | 1598 | extmodules.append(zstd) |
|
1598 | 1599 | |
|
1599 | 1600 | try: |
|
1600 | 1601 | from distutils import cygwinccompiler |
|
1601 | 1602 | |
|
1602 | 1603 | # the -mno-cygwin option has been deprecated for years |
|
1603 | 1604 | mingw32compilerclass = cygwinccompiler.Mingw32CCompiler |
|
1604 | 1605 | |
|
1605 | 1606 | class HackedMingw32CCompiler(cygwinccompiler.Mingw32CCompiler): |
|
1606 | 1607 | def __init__(self, *args, **kwargs): |
|
1607 | 1608 | mingw32compilerclass.__init__(self, *args, **kwargs) |
|
1608 | 1609 | for i in 'compiler compiler_so linker_exe linker_so'.split(): |
|
1609 | 1610 | try: |
|
1610 | 1611 | getattr(self, i).remove('-mno-cygwin') |
|
1611 | 1612 | except ValueError: |
|
1612 | 1613 | pass |
|
1613 | 1614 | |
|
1614 | 1615 | cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler |
|
1615 | 1616 | except ImportError: |
|
1616 | 1617 | # the cygwinccompiler package is not available on some Python |
|
1617 | 1618 | # distributions like the ones from the optware project for Synology |
|
1618 | 1619 | # DiskStation boxes |
|
1619 | 1620 | class HackedMingw32CCompiler(object): |
|
1620 | 1621 | pass |
|
1621 | 1622 | |
|
1622 | 1623 | |
|
1623 | 1624 | if os.name == 'nt': |
|
1624 | 1625 | # Allow compiler/linker flags to be added to Visual Studio builds. Passing |
|
1625 | 1626 | # extra_link_args to distutils.extensions.Extension() doesn't have any |
|
1626 | 1627 | # effect. |
|
1627 | 1628 | from distutils import msvccompiler |
|
1628 | 1629 | |
|
1629 | 1630 | msvccompilerclass = msvccompiler.MSVCCompiler |
|
1630 | 1631 | |
|
1631 | 1632 | class HackedMSVCCompiler(msvccompiler.MSVCCompiler): |
|
1632 | 1633 | def initialize(self): |
|
1633 | 1634 | msvccompilerclass.initialize(self) |
|
1634 | 1635 | # "warning LNK4197: export 'func' specified multiple times" |
|
1635 | 1636 | self.ldflags_shared.append('/ignore:4197') |
|
1636 | 1637 | self.ldflags_shared_debug.append('/ignore:4197') |
|
1637 | 1638 | |
|
1638 | 1639 | msvccompiler.MSVCCompiler = HackedMSVCCompiler |
|
1639 | 1640 | |
|
1640 | 1641 | packagedata = { |
|
1641 | 1642 | 'mercurial': [ |
|
1642 | 1643 | 'locale/*/LC_MESSAGES/hg.mo', |
|
1643 | 1644 | 'dummycert.pem', |
|
1644 | 1645 | ], |
|
1645 | 1646 | 'mercurial.defaultrc': [ |
|
1646 | 1647 | '*.rc', |
|
1647 | 1648 | ], |
|
1648 | 1649 | 'mercurial.helptext': [ |
|
1649 | 1650 | '*.txt', |
|
1650 | 1651 | ], |
|
1651 | 1652 | 'mercurial.helptext.internals': [ |
|
1652 | 1653 | '*.txt', |
|
1653 | 1654 | ], |
|
1654 | 1655 | } |
|
1655 | 1656 | |
|
1656 | 1657 | |
|
1657 | 1658 | def ordinarypath(p): |
|
1658 | 1659 | return p and p[0] != '.' and p[-1] != '~' |
|
1659 | 1660 | |
|
1660 | 1661 | |
|
1661 | 1662 | for root in ('templates',): |
|
1662 | 1663 | for curdir, dirs, files in os.walk(os.path.join('mercurial', root)): |
|
1663 | 1664 | packagename = curdir.replace(os.sep, '.') |
|
1664 | 1665 | packagedata[packagename] = list(filter(ordinarypath, files)) |
|
1665 | 1666 | |
|
1666 | 1667 | datafiles = [] |
|
1667 | 1668 | |
|
1668 | 1669 | # distutils expects version to be str/unicode. Converting it to |
|
1669 | 1670 | # unicode on Python 2 still works because it won't contain any |
|
1670 | 1671 | # non-ascii bytes and will be implicitly converted back to bytes |
|
1671 | 1672 | # when operated on. |
|
1672 | 1673 | assert isinstance(version, bytes) |
|
1673 | 1674 | setupversion = version.decode('ascii') |
|
1674 | 1675 | |
|
1675 | 1676 | extra = {} |
|
1676 | 1677 | |
|
1677 | 1678 | py2exepackages = [ |
|
1678 | 1679 | 'hgdemandimport', |
|
1679 | 1680 | 'hgext3rd', |
|
1680 | 1681 | 'hgext', |
|
1681 | 1682 | 'email', |
|
1682 | 1683 | # implicitly imported per module policy |
|
1683 | 1684 | # (cffi wouldn't be used as a frozen exe) |
|
1684 | 1685 | 'mercurial.cext', |
|
1685 | 1686 | #'mercurial.cffi', |
|
1686 | 1687 | 'mercurial.pure', |
|
1687 | 1688 | ] |
|
1688 | 1689 | |
|
1689 | 1690 | py2exeexcludes = [] |
|
1690 | 1691 | py2exedllexcludes = ['crypt32.dll'] |
|
1691 | 1692 | |
|
1692 | 1693 | if issetuptools: |
|
1693 | 1694 | extra['python_requires'] = supportedpy |
|
1694 | 1695 | |
|
1695 | 1696 | if py2exeloaded: |
|
1696 | 1697 | extra['console'] = [ |
|
1697 | 1698 | { |
|
1698 | 1699 | 'script': 'hg', |
|
1699 | 1700 | 'copyright': 'Copyright (C) 2005-2020 Matt Mackall and others', |
|
1700 | 1701 | 'product_version': version, |
|
1701 | 1702 | } |
|
1702 | 1703 | ] |
|
1703 | 1704 | # Sub command of 'build' because 'py2exe' does not handle sub_commands. |
|
1704 | 1705 | # Need to override hgbuild because it has a private copy of |
|
1705 | 1706 | # build.sub_commands. |
|
1706 | 1707 | hgbuild.sub_commands.insert(0, ('build_hgextindex', None)) |
|
1707 | 1708 | # put dlls in sub directory so that they won't pollute PATH |
|
1708 | 1709 | extra['zipfile'] = 'lib/library.zip' |
|
1709 | 1710 | |
|
1710 | 1711 | # We allow some configuration to be supplemented via environment |
|
1711 | 1712 | # variables. This is better than setup.cfg files because it allows |
|
1712 | 1713 | # supplementing configs instead of replacing them. |
|
1713 | 1714 | extrapackages = os.environ.get('HG_PY2EXE_EXTRA_PACKAGES') |
|
1714 | 1715 | if extrapackages: |
|
1715 | 1716 | py2exepackages.extend(extrapackages.split(' ')) |
|
1716 | 1717 | |
|
1717 | 1718 | excludes = os.environ.get('HG_PY2EXE_EXTRA_EXCLUDES') |
|
1718 | 1719 | if excludes: |
|
1719 | 1720 | py2exeexcludes.extend(excludes.split(' ')) |
|
1720 | 1721 | |
|
1721 | 1722 | dllexcludes = os.environ.get('HG_PY2EXE_EXTRA_DLL_EXCLUDES') |
|
1722 | 1723 | if dllexcludes: |
|
1723 | 1724 | py2exedllexcludes.extend(dllexcludes.split(' ')) |
|
1724 | 1725 | |
|
1725 | 1726 | if os.environ.get('PYOXIDIZER'): |
|
1726 | 1727 | hgbuild.sub_commands.insert(0, ('build_hgextindex', None)) |
|
1727 | 1728 | |
|
1728 | 1729 | if os.name == 'nt': |
|
1729 | 1730 | # Windows binary file versions for exe/dll files must have the |
|
1730 | 1731 | # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535 |
|
1731 | 1732 | setupversion = setupversion.split(r'+', 1)[0] |
|
1732 | 1733 | |
|
1733 | 1734 | if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'): |
|
1734 | 1735 | version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[1].splitlines() |
|
1735 | 1736 | if version: |
|
1736 | 1737 | version = version[0] |
|
1737 | 1738 | if sys.version_info[0] == 3: |
|
1738 | 1739 | version = version.decode('utf-8') |
|
1739 | 1740 | xcode4 = version.startswith('Xcode') and StrictVersion( |
|
1740 | 1741 | version.split()[1] |
|
1741 | 1742 | ) >= StrictVersion('4.0') |
|
1742 | 1743 | xcode51 = re.match(r'^Xcode\s+5\.1', version) is not None |
|
1743 | 1744 | else: |
|
1744 | 1745 | # xcodebuild returns empty on OS X Lion with XCode 4.3 not |
|
1745 | 1746 | # installed, but instead with only command-line tools. Assume |
|
1746 | 1747 | # that only happens on >= Lion, thus no PPC support. |
|
1747 | 1748 | xcode4 = True |
|
1748 | 1749 | xcode51 = False |
|
1749 | 1750 | |
|
1750 | 1751 | # XCode 4.0 dropped support for ppc architecture, which is hardcoded in |
|
1751 | 1752 | # distutils.sysconfig |
|
1752 | 1753 | if xcode4: |
|
1753 | 1754 | os.environ['ARCHFLAGS'] = '' |
|
1754 | 1755 | |
|
1755 | 1756 | # XCode 5.1 changes clang such that it now fails to compile if the |
|
1756 | 1757 | # -mno-fused-madd flag is passed, but the version of Python shipped with |
|
1757 | 1758 | # OS X 10.9 Mavericks includes this flag. This causes problems in all |
|
1758 | 1759 | # C extension modules, and a bug has been filed upstream at |
|
1759 | 1760 | # http://bugs.python.org/issue21244. We also need to patch this here |
|
1760 | 1761 | # so Mercurial can continue to compile in the meantime. |
|
1761 | 1762 | if xcode51: |
|
1762 | 1763 | cflags = get_config_var('CFLAGS') |
|
1763 | 1764 | if cflags and re.search(r'-mno-fused-madd\b', cflags) is not None: |
|
1764 | 1765 | os.environ['CFLAGS'] = ( |
|
1765 | 1766 | os.environ.get('CFLAGS', '') + ' -Qunused-arguments' |
|
1766 | 1767 | ) |
|
1767 | 1768 | |
|
1768 | 1769 | setup( |
|
1769 | 1770 | name='mercurial', |
|
1770 | 1771 | version=setupversion, |
|
1771 | 1772 | author='Matt Mackall and many others', |
|
1772 | 1773 | author_email='mercurial@mercurial-scm.org', |
|
1773 | 1774 | url='https://mercurial-scm.org/', |
|
1774 | 1775 | download_url='https://mercurial-scm.org/release/', |
|
1775 | 1776 | description=( |
|
1776 | 1777 | 'Fast scalable distributed SCM (revision control, version ' |
|
1777 | 1778 | 'control) system' |
|
1778 | 1779 | ), |
|
1779 | 1780 | long_description=( |
|
1780 | 1781 | 'Mercurial is a distributed SCM tool written in Python.' |
|
1781 | 1782 | ' It is used by a number of large projects that require' |
|
1782 | 1783 | ' fast, reliable distributed revision control, such as ' |
|
1783 | 1784 | 'Mozilla.' |
|
1784 | 1785 | ), |
|
1785 | 1786 | license='GNU GPLv2 or any later version', |
|
1786 | 1787 | classifiers=[ |
|
1787 | 1788 | 'Development Status :: 6 - Mature', |
|
1788 | 1789 | 'Environment :: Console', |
|
1789 | 1790 | 'Intended Audience :: Developers', |
|
1790 | 1791 | 'Intended Audience :: System Administrators', |
|
1791 | 1792 | 'License :: OSI Approved :: GNU General Public License (GPL)', |
|
1792 | 1793 | 'Natural Language :: Danish', |
|
1793 | 1794 | 'Natural Language :: English', |
|
1794 | 1795 | 'Natural Language :: German', |
|
1795 | 1796 | 'Natural Language :: Italian', |
|
1796 | 1797 | 'Natural Language :: Japanese', |
|
1797 | 1798 | 'Natural Language :: Portuguese (Brazilian)', |
|
1798 | 1799 | 'Operating System :: Microsoft :: Windows', |
|
1799 | 1800 | 'Operating System :: OS Independent', |
|
1800 | 1801 | 'Operating System :: POSIX', |
|
1801 | 1802 | 'Programming Language :: C', |
|
1802 | 1803 | 'Programming Language :: Python', |
|
1803 | 1804 | 'Topic :: Software Development :: Version Control', |
|
1804 | 1805 | ], |
|
1805 | 1806 | scripts=scripts, |
|
1806 | 1807 | packages=packages, |
|
1807 | 1808 | ext_modules=extmodules, |
|
1808 | 1809 | data_files=datafiles, |
|
1809 | 1810 | package_data=packagedata, |
|
1810 | 1811 | cmdclass=cmdclass, |
|
1811 | 1812 | distclass=hgdist, |
|
1812 | 1813 | options={ |
|
1813 | 1814 | 'py2exe': { |
|
1814 | 1815 | 'bundle_files': 3, |
|
1815 | 1816 | 'dll_excludes': py2exedllexcludes, |
|
1816 | 1817 | 'excludes': py2exeexcludes, |
|
1817 | 1818 | 'packages': py2exepackages, |
|
1818 | 1819 | }, |
|
1819 | 1820 | 'bdist_mpkg': { |
|
1820 | 1821 | 'zipdist': False, |
|
1821 | 1822 | 'license': 'COPYING', |
|
1822 | 1823 | 'readme': 'contrib/packaging/macosx/Readme.html', |
|
1823 | 1824 | 'welcome': 'contrib/packaging/macosx/Welcome.html', |
|
1824 | 1825 | }, |
|
1825 | 1826 | }, |
|
1826 | 1827 | **extra |
|
1827 | 1828 | ) |
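
The setup.py hunk above wires a large family of custom distutils command classes (hgbuildpy, hgbuildscripts, hginstallscripts, buildhgexe, ...) into setup() through the cmdclass mapping. As a minimal sketch of that pattern only, not Mercurial's actual classes, the following standalone setup.py subclasses build_py, writes an autogenerated module before the normal build step, and registers itself via cmdclass; the package name mypkg and the file __generated__.py are illustrative assumptions.

    import os

    from distutils.command.build_py import build_py
    from distutils.core import setup


    class custombuildpy(build_py):
        def run(self):
            # Create the package output directory and drop an autogenerated
            # module into it, mirroring what hgbuildpy does with
            # __modulepolicy__.py, then hand off to the stock build_py.
            basepath = os.path.join(self.build_lib, 'mypkg')
            self.mkpath(basepath)
            with open(os.path.join(basepath, '__generated__.py'), 'w') as fh:
                fh.write('# this file is autogenerated at build time\n')
            build_py.run(self)


    setup(
        name='mypkg',
        version='0.0.1',
        packages=['mypkg'],
        cmdclass={'build_py': custombuildpy},
    )

Running "python setup.py build" against such a file picks up custombuildpy in place of the stock command, which is how the cmdclass dictionary above substitutes hgbuild, hgbuildpy and the other hg* commands.
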
@@ -1,83 +1,84 b'' | |||
|
1 | 1 | # ext-sidedata.py - small extension to test the sidedata logic |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import hashlib |
|
11 | 11 | import struct |
|
12 | 12 | |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | extensions, |
|
15 | 15 | node, |
|
16 | 16 | requirements, |
|
17 | 17 | revlog, |
|
18 | upgrade, | |
|
19 | 18 | ) |
|
20 | 19 | |
|
20 | from mercurial.upgrade_utils import engine as upgrade_engine | |
|
21 | ||
|
21 | 22 | from mercurial.revlogutils import sidedata |
|
22 | 23 | |
|
23 | 24 | |
|
24 | 25 | def wrapaddrevision( |
|
25 | 26 | orig, self, text, transaction, link, p1, p2, *args, **kwargs |
|
26 | 27 | ): |
|
27 | 28 | if kwargs.get('sidedata') is None: |
|
28 | 29 | kwargs['sidedata'] = {} |
|
29 | 30 | sd = kwargs['sidedata'] |
|
30 | 31 | ## let's store some arbitrary data just for testing |
|
31 | 32 | # text length |
|
32 | 33 | sd[sidedata.SD_TEST1] = struct.pack('>I', len(text)) |
|
33 | 34 | # and sha2 hashes |
|
34 | 35 | sha256 = hashlib.sha256(text).digest() |
|
35 | 36 | sd[sidedata.SD_TEST2] = struct.pack('>32s', sha256) |
|
36 | 37 | return orig(self, text, transaction, link, p1, p2, *args, **kwargs) |
|
37 | 38 | |
|
38 | 39 | |
|
39 | 40 | def wraprevision(orig, self, nodeorrev, *args, **kwargs): |
|
40 | 41 | text = orig(self, nodeorrev, *args, **kwargs) |
|
41 | 42 | if getattr(self, 'sidedatanocheck', False): |
|
42 | 43 | return text |
|
43 | 44 | if nodeorrev != node.nullrev and nodeorrev != node.nullid: |
|
44 | 45 | sd = self.sidedata(nodeorrev) |
|
45 | 46 | if len(text) != struct.unpack('>I', sd[sidedata.SD_TEST1])[0]: |
|
46 | 47 | raise RuntimeError('text size mismatch') |
|
47 | 48 | expected = sd[sidedata.SD_TEST2] |
|
48 | 49 | got = hashlib.sha256(text).digest() |
|
49 | 50 | if got != expected: |
|
50 | 51 | raise RuntimeError('sha256 mismatch') |
|
51 | 52 | return text |
|
52 | 53 | |
|
53 | 54 | |
|
54 | 55 | def wrapgetsidedatacompanion(orig, srcrepo, dstrepo): |
|
55 | 56 | sidedatacompanion = orig(srcrepo, dstrepo) |
|
56 | 57 | addedreqs = dstrepo.requirements - srcrepo.requirements |
|
57 | 58 | if requirements.SIDEDATA_REQUIREMENT in addedreqs: |
|
58 | 59 | assert sidedatacompanion is None # deal with composition later |
|
59 | 60 | |
|
60 | 61 | def sidedatacompanion(revlog, rev): |
|
61 | 62 | update = {} |
|
62 | 63 | revlog.sidedatanocheck = True |
|
63 | 64 | try: |
|
64 | 65 | text = revlog.revision(rev) |
|
65 | 66 | finally: |
|
66 | 67 | del revlog.sidedatanocheck |
|
67 | 68 | ## let's store some arbitrary data just for testing |
|
68 | 69 | # text length |
|
69 | 70 | update[sidedata.SD_TEST1] = struct.pack('>I', len(text)) |
|
70 | 71 | # and sha2 hashes |
|
71 | 72 | sha256 = hashlib.sha256(text).digest() |
|
72 | 73 | update[sidedata.SD_TEST2] = struct.pack('>32s', sha256) |
|
73 | 74 | return False, (), update, 0, 0 |
|
74 | 75 | |
|
75 | 76 | return sidedatacompanion |
|
76 | 77 | |
|
77 | 78 | |
|
78 | 79 | def extsetup(ui): |
|
79 | 80 | extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision) |
|
80 | 81 | extensions.wrapfunction(revlog.revlog, 'revision', wraprevision) |
|
81 | 82 | extensions.wrapfunction( |
|
82 | upgrade, 'getsidedatacompanion', wrapgetsidedatacompanion | |
|
83 | upgrade_engine, 'getsidedatacompanion', wrapgetsidedatacompanion | |
|
83 | 84 | ) |
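
For reference, the two sidedata payloads that wrapaddrevision builds above are a big-endian 4-byte text length ('>I') and a 32-byte SHA-256 digest ('>32s'), and wraprevision verifies both when the revision text is read back. A standalone sketch of that round trip, with no Mercurial imports and an arbitrary example text:

    import hashlib
    import struct

    text = b'some revision text'

    # pack the two test payloads the way wrapaddrevision does
    length_blob = struct.pack('>I', len(text))
    sha256_blob = struct.pack('>32s', hashlib.sha256(text).digest())

    # verify them the way wraprevision does when reading the revision back
    assert struct.unpack('>I', length_blob)[0] == len(text)
    assert sha256_blob == hashlib.sha256(text).digest()
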