engine: make hook point for extension a public function...
Pulkit Goyal
r46836:481d9aed default
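This changeset renames the private `_finishdatamigration` hook in mercurial/upgrade_utils/engine.py to the public `finishdatamigration`, since the function exists only as a hook point for extensions, and updates the lfs extension's wrapper in hgext/lfs/wrapper.py to target the new name. A minimal sketch of how an extension can wrap the now-public hook point follows; the auxiliary store file and the copy step are illustrative assumptions, but the wrapping pattern mirrors the lfs code in the diff below:

    from mercurial import exthelper
    from mercurial.upgrade_utils import engine as upgrade_engine

    eh = exthelper.exthelper()


    @eh.wrapfunction(upgrade_engine, b'finishdatamigration')
    def finishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
        # run the wrapped hook (a no-op in core) before extension work
        orig(ui, srcrepo, dstrepo, requirements)
        # hypothetical extension-specific step: carry an auxiliary store
        # file over to the upgraded repository, if present
        if srcrepo.svfs.exists(b'myext-cache'):
            dstrepo.svfs.write(b'myext-cache', srcrepo.svfs.read(b'myext-cache'))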
@@ -1,550 +1,550 b''
1 1 # wrapper.py - methods wrapping core mercurial logic
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial.node import bin, hex, nullid, short
14 14 from mercurial.pycompat import (
15 15 getattr,
16 16 setattr,
17 17 )
18 18
19 19 from mercurial import (
20 20 bundle2,
21 21 changegroup,
22 22 cmdutil,
23 23 context,
24 24 error,
25 25 exchange,
26 26 exthelper,
27 27 localrepo,
28 28 pycompat,
29 29 revlog,
30 30 scmutil,
31 31 util,
32 32 vfs as vfsmod,
33 33 wireprotov1server,
34 34 )
35 35
36 36 from mercurial.upgrade_utils import (
37 37 actions as upgrade_actions,
38 38 engine as upgrade_engine,
39 39 )
40 40
41 41 from mercurial.interfaces import repository
42 42
43 43 from mercurial.utils import (
44 44 storageutil,
45 45 stringutil,
46 46 )
47 47
48 48 from ..largefiles import lfutil
49 49
50 50 from . import (
51 51 blobstore,
52 52 pointer,
53 53 )
54 54
55 55 eh = exthelper.exthelper()
56 56
57 57
58 58 @eh.wrapfunction(localrepo, b'makefilestorage')
59 59 def localrepomakefilestorage(orig, requirements, features, **kwargs):
60 60 if b'lfs' in requirements:
61 61 features.add(repository.REPO_FEATURE_LFS)
62 62
63 63 return orig(requirements=requirements, features=features, **kwargs)
64 64
65 65
66 66 @eh.wrapfunction(changegroup, b'allsupportedversions')
67 67 def allsupportedversions(orig, ui):
68 68 versions = orig(ui)
69 69 versions.add(b'03')
70 70 return versions
71 71
72 72
73 73 @eh.wrapfunction(wireprotov1server, b'_capabilities')
74 74 def _capabilities(orig, repo, proto):
75 75 '''Wrap server command to announce lfs server capability'''
76 76 caps = orig(repo, proto)
77 77 if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
78 78 # Advertise a slightly different capability when lfs is *required*, so
79 79 # that the client knows it MUST load the extension. If lfs is not
80 80 # required on the server, there's no reason to autoload the extension
81 81 # on the client.
82 82 if b'lfs' in repo.requirements:
83 83 caps.append(b'lfs-serve')
84 84
85 85 caps.append(b'lfs')
86 86 return caps
87 87
88 88
89 89 def bypasscheckhash(self, text):
90 90 return False
91 91
92 92
93 93 def readfromstore(self, text):
94 94 """Read filelog content from local blobstore transform for flagprocessor.
95 95
96 96 Default tranform for flagprocessor, returning contents from blobstore.
97 97 Returns a 2-typle (text, validatehash) where validatehash is True as the
98 98 contents of the blobstore should be checked using checkhash.
99 99 """
100 100 p = pointer.deserialize(text)
101 101 oid = p.oid()
102 102 store = self.opener.lfslocalblobstore
103 103 if not store.has(oid):
104 104 p.filename = self.filename
105 105 self.opener.lfsremoteblobstore.readbatch([p], store)
106 106
107 107 # The caller will validate the content
108 108 text = store.read(oid, verify=False)
109 109
110 110 # pack hg filelog metadata
111 111 hgmeta = {}
112 112 for k in p.keys():
113 113 if k.startswith(b'x-hg-'):
114 114 name = k[len(b'x-hg-') :]
115 115 hgmeta[name] = p[k]
116 116 if hgmeta or text.startswith(b'\1\n'):
117 117 text = storageutil.packmeta(hgmeta, text)
118 118
119 119 return (text, True, {})
120 120
121 121
122 122 def writetostore(self, text, sidedata):
123 123 # hg filelog metadata (includes rename, etc)
124 124 hgmeta, offset = storageutil.parsemeta(text)
125 125 if offset and offset > 0:
126 126 # lfs blob does not contain hg filelog metadata
127 127 text = text[offset:]
128 128
129 129 # git-lfs only supports sha256
130 130 oid = hex(hashlib.sha256(text).digest())
131 131 self.opener.lfslocalblobstore.write(oid, text)
132 132
133 133 # replace contents with metadata
134 134 longoid = b'sha256:%s' % oid
135 135 metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))
136 136
137 137 # by default, we expect the content to be binary. however, LFS could also
138 138 # be used for non-binary content. add a special entry for non-binary data.
139 139 # this will be used by filectx.isbinary().
140 140 if not stringutil.binary(text):
141 141 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
142 142 metadata[b'x-is-binary'] = b'0'
143 143
144 144 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
145 145 if hgmeta is not None:
146 146 for k, v in pycompat.iteritems(hgmeta):
147 147 metadata[b'x-hg-%s' % k] = v
148 148
149 149 rawtext = metadata.serialize()
150 150 return (rawtext, False)
151 151
152 152
153 153 def _islfs(rlog, node=None, rev=None):
154 154 if rev is None:
155 155 if node is None:
156 156 # both None - likely working copy content where node is not ready
157 157 return False
158 158 rev = rlog.rev(node)
159 159 else:
160 160 node = rlog.node(rev)
161 161 if node == nullid:
162 162 return False
163 163 flags = rlog.flags(rev)
164 164 return bool(flags & revlog.REVIDX_EXTSTORED)
165 165
166 166
167 167 # Wrapping may also be applied by remotefilelog
168 168 def filelogaddrevision(
169 169 orig,
170 170 self,
171 171 text,
172 172 transaction,
173 173 link,
174 174 p1,
175 175 p2,
176 176 cachedelta=None,
177 177 node=None,
178 178 flags=revlog.REVIDX_DEFAULT_FLAGS,
179 179 **kwds
180 180 ):
181 181 # The matcher isn't available if reposetup() wasn't called.
182 182 lfstrack = self._revlog.opener.options.get(b'lfstrack')
183 183
184 184 if lfstrack:
185 185 textlen = len(text)
186 186 # exclude hg rename meta from file size
187 187 meta, offset = storageutil.parsemeta(text)
188 188 if offset:
189 189 textlen -= offset
190 190
191 191 if lfstrack(self._revlog.filename, textlen):
192 192 flags |= revlog.REVIDX_EXTSTORED
193 193
194 194 return orig(
195 195 self,
196 196 text,
197 197 transaction,
198 198 link,
199 199 p1,
200 200 p2,
201 201 cachedelta=cachedelta,
202 202 node=node,
203 203 flags=flags,
204 204 **kwds
205 205 )
206 206
207 207
208 208 # Wrapping may also be applied by remotefilelog
209 209 def filelogrenamed(orig, self, node):
210 210 if _islfs(self._revlog, node):
211 211 rawtext = self._revlog.rawdata(node)
212 212 if not rawtext:
213 213 return False
214 214 metadata = pointer.deserialize(rawtext)
215 215 if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
216 216 return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
217 217 else:
218 218 return False
219 219 return orig(self, node)
220 220
221 221
222 222 # Wrapping may also be applied by remotefilelog
223 223 def filelogsize(orig, self, rev):
224 224 if _islfs(self._revlog, rev=rev):
225 225 # fast path: use lfs metadata to answer size
226 226 rawtext = self._revlog.rawdata(rev)
227 227 metadata = pointer.deserialize(rawtext)
228 228 return int(metadata[b'size'])
229 229 return orig(self, rev)
230 230
231 231
232 232 @eh.wrapfunction(revlog, b'_verify_revision')
233 233 def _verify_revision(orig, rl, skipflags, state, node):
234 234 if _islfs(rl, node=node):
235 235 rawtext = rl.rawdata(node)
236 236 metadata = pointer.deserialize(rawtext)
237 237
238 238 # Don't skip blobs that are stored locally, as local verification is
239 239 # relatively cheap and there's no other way to verify the raw data in
240 240 # the revlog.
241 241 if rl.opener.lfslocalblobstore.has(metadata.oid()):
242 242 skipflags &= ~revlog.REVIDX_EXTSTORED
243 243 elif skipflags & revlog.REVIDX_EXTSTORED:
244 244 # The wrapped method will set `skipread`, but there's enough local
245 245 # info to check renames.
246 246 state[b'safe_renamed'].add(node)
247 247
248 248 orig(rl, skipflags, state, node)
249 249
250 250
251 251 @eh.wrapfunction(context.basefilectx, b'cmp')
252 252 def filectxcmp(orig, self, fctx):
253 253 """returns True if text is different than fctx"""
254 254 # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
255 255 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
256 256 # fast path: check LFS oid
257 257 p1 = pointer.deserialize(self.rawdata())
258 258 p2 = pointer.deserialize(fctx.rawdata())
259 259 return p1.oid() != p2.oid()
260 260 return orig(self, fctx)
261 261
262 262
263 263 @eh.wrapfunction(context.basefilectx, b'isbinary')
264 264 def filectxisbinary(orig, self):
265 265 if self.islfs():
266 266 # fast path: use lfs metadata to answer isbinary
267 267 metadata = pointer.deserialize(self.rawdata())
268 268 # if lfs metadata says nothing, assume it's binary by default
269 269 return bool(int(metadata.get(b'x-is-binary', 1)))
270 270 return orig(self)
271 271
272 272
273 273 def filectxislfs(self):
274 274 return _islfs(self.filelog()._revlog, self.filenode())
275 275
276 276
277 277 @eh.wrapfunction(cmdutil, b'_updatecatformatter')
278 278 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
279 279 orig(fm, ctx, matcher, path, decode)
280 280 fm.data(rawdata=ctx[path].rawdata())
281 281
282 282
283 283 @eh.wrapfunction(scmutil, b'wrapconvertsink')
284 284 def convertsink(orig, sink):
285 285 sink = orig(sink)
286 286 if sink.repotype == b'hg':
287 287
288 288 class lfssink(sink.__class__):
289 289 def putcommit(
290 290 self,
291 291 files,
292 292 copies,
293 293 parents,
294 294 commit,
295 295 source,
296 296 revmap,
297 297 full,
298 298 cleanp2,
299 299 ):
300 300 pc = super(lfssink, self).putcommit
301 301 node = pc(
302 302 files,
303 303 copies,
304 304 parents,
305 305 commit,
306 306 source,
307 307 revmap,
308 308 full,
309 309 cleanp2,
310 310 )
311 311
312 312 if b'lfs' not in self.repo.requirements:
313 313 ctx = self.repo[node]
314 314
315 315 # The file list may contain removed files, so check for
316 316 # membership before assuming it is in the context.
317 317 if any(f in ctx and ctx[f].islfs() for f, n in files):
318 318 self.repo.requirements.add(b'lfs')
319 319 scmutil.writereporequirements(self.repo)
320 320
321 321 return node
322 322
323 323 sink.__class__ = lfssink
324 324
325 325 return sink
326 326
327 327
328 328 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
329 329 # options and blob stores are passed from othervfs to the new readonlyvfs.
330 330 @eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
331 331 def vfsinit(orig, self, othervfs):
332 332 orig(self, othervfs)
333 333 # copy lfs related options
334 334 for k, v in othervfs.options.items():
335 335 if k.startswith(b'lfs'):
336 336 self.options[k] = v
337 337 # also copy lfs blobstores. note: this can run before reposetup, so lfs
338 338 # blobstore attributes are not always ready at this time.
339 339 for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
340 340 if util.safehasattr(othervfs, name):
341 341 setattr(self, name, getattr(othervfs, name))
342 342
343 343
344 344 def _prefetchfiles(repo, revmatches):
345 345 """Ensure that required LFS blobs are present, fetching them as a group if
346 346 needed."""
347 347 if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
348 348 return
349 349
350 350 pointers = []
351 351 oids = set()
352 352 localstore = repo.svfs.lfslocalblobstore
353 353
354 354 for rev, match in revmatches:
355 355 ctx = repo[rev]
356 356 for f in ctx.walk(match):
357 357 p = pointerfromctx(ctx, f)
358 358 if p and p.oid() not in oids and not localstore.has(p.oid()):
359 359 p.filename = f
360 360 pointers.append(p)
361 361 oids.add(p.oid())
362 362
363 363 if pointers:
364 364 # Recalculating the repo store here allows 'paths.default' that is set
365 365 # on the repo by a clone command to be used for the update.
366 366 blobstore.remote(repo).readbatch(pointers, localstore)
367 367
368 368
369 369 def _canskipupload(repo):
370 370 # Skip if this hasn't been passed to reposetup()
371 371 if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
372 372 return True
373 373
374 374 # if remotestore is a null store, upload is a no-op and can be skipped
375 375 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
376 376
377 377
378 378 def candownload(repo):
379 379 # Skip if this hasn't been passed to reposetup()
380 380 if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
381 381 return False
382 382
383 383 # if remotestore is a null store, downloads will lead to nothing
384 384 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
385 385
386 386
387 387 def uploadblobsfromrevs(repo, revs):
388 388 """upload lfs blobs introduced by revs
389 389
390 390 Note: also used by other extensions, e.g. infinitepush. Avoid renaming.
391 391 """
392 392 if _canskipupload(repo):
393 393 return
394 394 pointers = extractpointers(repo, revs)
395 395 uploadblobs(repo, pointers)
396 396
397 397
398 398 def prepush(pushop):
399 399 """Prepush hook.
400 400
401 401 Read through the revisions to push, looking for filelog entries that can be
402 402 deserialized into metadata so that we can block the push on their upload to
403 403 the remote blobstore.
404 404 """
405 405 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
406 406
407 407
408 408 @eh.wrapfunction(exchange, b'push')
409 409 def push(orig, repo, remote, *args, **kwargs):
410 410 """bail on push if the extension isn't enabled on remote when needed, and
411 411 update the remote store based on the destination path."""
412 412 if b'lfs' in repo.requirements:
413 413 # If the remote peer is for a local repo, the requirement tests in the
414 414 # base class method enforce lfs support. Otherwise, some revisions in
415 415 # this repo use lfs, and the remote repo needs the extension loaded.
416 416 if not remote.local() and not remote.capable(b'lfs'):
417 417 # This is a copy of the message in exchange.push() when requirements
418 418 # are missing between local repos.
419 419 m = _(b"required features are not supported in the destination: %s")
420 420 raise error.Abort(
421 421 m % b'lfs', hint=_(b'enable the lfs extension on the server')
422 422 )
423 423
424 424 # Repositories where this extension is disabled won't have the field.
425 425 # But if there's a requirement, then the extension must be loaded AND
426 426 # there may be blobs to push.
427 427 remotestore = repo.svfs.lfsremoteblobstore
428 428 try:
429 429 repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
430 430 return orig(repo, remote, *args, **kwargs)
431 431 finally:
432 432 repo.svfs.lfsremoteblobstore = remotestore
433 433 else:
434 434 return orig(repo, remote, *args, **kwargs)
435 435
436 436
437 437 # when writing a bundle via "hg bundle" command, upload related LFS blobs
438 438 @eh.wrapfunction(bundle2, b'writenewbundle')
439 439 def writenewbundle(
440 440 orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
441 441 ):
442 442 """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
443 443 uploadblobsfromrevs(repo, outgoing.missing)
444 444 return orig(
445 445 ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
446 446 )
447 447
448 448
449 449 def extractpointers(repo, revs):
450 450 """return a list of lfs pointers added by given revs"""
451 451 repo.ui.debug(b'lfs: computing set of blobs to upload\n')
452 452 pointers = {}
453 453
454 454 makeprogress = repo.ui.makeprogress
455 455 with makeprogress(
456 456 _(b'lfs search'), _(b'changesets'), len(revs)
457 457 ) as progress:
458 458 for r in revs:
459 459 ctx = repo[r]
460 460 for p in pointersfromctx(ctx).values():
461 461 pointers[p.oid()] = p
462 462 progress.increment()
463 463 return sorted(pointers.values(), key=lambda p: p.oid())
464 464
465 465
466 466 def pointerfromctx(ctx, f, removed=False):
467 467 """return a pointer for the named file from the given changectx, or None if
468 468 the file isn't LFS.
469 469
470 470 Optionally, the pointer for a file deleted from the context can be returned.
471 471 Since no such pointer is actually stored, and to distinguish from a non-LFS
472 472 file, this pointer is represented by an empty dict.
473 473 """
474 474 _ctx = ctx
475 475 if f not in ctx:
476 476 if not removed:
477 477 return None
478 478 if f in ctx.p1():
479 479 _ctx = ctx.p1()
480 480 elif f in ctx.p2():
481 481 _ctx = ctx.p2()
482 482 else:
483 483 return None
484 484 fctx = _ctx[f]
485 485 if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
486 486 return None
487 487 try:
488 488 p = pointer.deserialize(fctx.rawdata())
489 489 if ctx == _ctx:
490 490 return p
491 491 return {}
492 492 except pointer.InvalidPointer as ex:
493 493 raise error.Abort(
494 494 _(b'lfs: corrupted pointer (%s@%s): %s\n')
495 495 % (f, short(_ctx.node()), ex)
496 496 )
497 497
498 498
499 499 def pointersfromctx(ctx, removed=False):
500 500 """return a dict {path: pointer} for given single changectx.
501 501
502 502 If ``removed`` == True and the LFS file was removed from ``ctx``, the value
503 503 stored for the path is an empty dict.
504 504 """
505 505 result = {}
506 506 m = ctx.repo().narrowmatch()
507 507
508 508 # TODO: consider manifest.fastread() instead
509 509 for f in ctx.files():
510 510 if not m(f):
511 511 continue
512 512 p = pointerfromctx(ctx, f, removed=removed)
513 513 if p is not None:
514 514 result[f] = p
515 515 return result
516 516
517 517
518 518 def uploadblobs(repo, pointers):
519 519 """upload given pointers from local blobstore"""
520 520 if not pointers:
521 521 return
522 522
523 523 remoteblob = repo.svfs.lfsremoteblobstore
524 524 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
525 525
526 526
527 @eh.wrapfunction(upgrade_engine, b'_finishdatamigration')
527 @eh.wrapfunction(upgrade_engine, b'finishdatamigration')
528 528 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
529 529 orig(ui, srcrepo, dstrepo, requirements)
530 530
531 531 # Skip if this hasn't been passed to reposetup()
532 532 if util.safehasattr(
533 533 srcrepo.svfs, b'lfslocalblobstore'
534 534 ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
535 535 srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
536 536 dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
537 537
538 538 for dirpath, dirs, files in srclfsvfs.walk():
539 539 for oid in files:
540 540 ui.write(_(b'copying lfs blob %s\n') % oid)
541 541 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
542 542
543 543
544 544 @eh.wrapfunction(upgrade_actions, b'preservedrequirements')
545 545 @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
546 546 def upgraderequirements(orig, repo):
547 547 reqs = orig(repo)
548 548 if b'lfs' in repo.requirements:
549 549 reqs.add(b'lfs')
550 550 return reqs
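The hunk above is hgext/lfs/wrapper.py; its only change is the `wrapfunction` target string, which must match the new public name defined in the engine module below. Conceptually, wrapping replaces the module attribute with a partial that receives the original callable first, roughly like this simplified sketch of what mercurial/extensions.py does (the real implementation adds validation and wrapper bookkeeping):

    import functools

    def wrapfunction(container, funcname, wrapper):
        # swap container.funcname for a wrapper that is handed the
        # original function as its first argument
        origfn = getattr(container, funcname)
        setattr(container, funcname, functools.partial(wrapper, origfn))
        return origfn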
@@ -1,520 +1,520 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from ..i18n import _
13 13 from ..pycompat import getattr
14 14 from .. import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 manifest,
19 19 metadata,
20 20 pycompat,
21 21 requirements,
22 22 revlog,
23 23 scmutil,
24 24 util,
25 25 vfs as vfsmod,
26 26 )
27 27
28 28
29 29 def _revlogfrompath(repo, path):
30 30 """Obtain a revlog from a repo path.
31 31
32 32 An instance of the appropriate class is returned.
33 33 """
34 34 if path == b'00changelog.i':
35 35 return changelog.changelog(repo.svfs)
36 36 elif path.endswith(b'00manifest.i'):
37 37 mandir = path[: -len(b'00manifest.i')]
38 38 return manifest.manifestrevlog(repo.svfs, tree=mandir)
39 39 else:
40 40 # reverse of "/".join(("data", path + ".i"))
41 41 return filelog.filelog(repo.svfs, path[5:-2])
42 42
43 43
44 44 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
45 45 """copy all relevant files for `oldrl` into `destrepo` store
46 46
47 47 Files are copied "as is" without any transformation. The copy is performed
48 48 without extra checks. Callers are responsible for making sure the copied
49 49 content is compatible with format of the destination repository.
50 50 """
51 51 oldrl = getattr(oldrl, '_revlog', oldrl)
52 52 newrl = _revlogfrompath(destrepo, unencodedname)
53 53 newrl = getattr(newrl, '_revlog', newrl)
54 54
55 55 oldvfs = oldrl.opener
56 56 newvfs = newrl.opener
57 57 oldindex = oldvfs.join(oldrl.indexfile)
58 58 newindex = newvfs.join(newrl.indexfile)
59 59 olddata = oldvfs.join(oldrl.datafile)
60 60 newdata = newvfs.join(newrl.datafile)
61 61
62 62 with newvfs(newrl.indexfile, b'w'):
63 63 pass # create all the directories
64 64
65 65 util.copyfile(oldindex, newindex)
66 66 copydata = oldrl.opener.exists(oldrl.datafile)
67 67 if copydata:
68 68 util.copyfile(olddata, newdata)
69 69
70 70 if not (
71 71 unencodedname.endswith(b'00changelog.i')
72 72 or unencodedname.endswith(b'00manifest.i')
73 73 ):
74 74 destrepo.svfs.fncache.add(unencodedname)
75 75 if copydata:
76 76 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77 77
78 78
79 79 UPGRADE_CHANGELOG = b"changelog"
80 80 UPGRADE_MANIFEST = b"manifest"
81 81 UPGRADE_FILELOGS = b"all-filelogs"
82 82
83 83 UPGRADE_ALL_REVLOGS = frozenset(
84 84 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
85 85 )
86 86
87 87
88 88 def getsidedatacompanion(srcrepo, dstrepo):
89 89 sidedatacompanion = None
90 90 removedreqs = srcrepo.requirements - dstrepo.requirements
91 91 addedreqs = dstrepo.requirements - srcrepo.requirements
92 92 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
93 93
94 94 def sidedatacompanion(rl, rev):
95 95 rl = getattr(rl, '_revlog', rl)
96 96 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
97 97 return True, (), {}, 0, 0
98 98 return False, (), {}, 0, 0
99 99
100 100 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
101 101 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
102 102 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
103 103 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
104 104 return sidedatacompanion
105 105
106 106
107 107 def matchrevlog(revlogfilter, entry):
108 108 """check if a revlog is selected for cloning.
109 109
110 110 In other words, are there any updates which need to be done on the
111 111 revlog, or can it be blindly copied?
112 112
113 113 The store entry is checked against the passed filter"""
114 114 if entry.endswith(b'00changelog.i'):
115 115 return UPGRADE_CHANGELOG in revlogfilter
116 116 elif entry.endswith(b'00manifest.i'):
117 117 return UPGRADE_MANIFEST in revlogfilter
118 118 return UPGRADE_FILELOGS in revlogfilter
119 119
120 120
121 121 def _perform_clone(
122 122 ui,
123 123 dstrepo,
124 124 tr,
125 125 old_revlog,
126 126 unencoded,
127 127 upgrade_op,
128 128 sidedatacompanion,
129 129 oncopiedrevision,
130 130 ):
131 131 """ returns the new revlog object created"""
132 132 newrl = None
133 133 if matchrevlog(upgrade_op.revlogs_to_process, unencoded):
134 134 ui.note(
135 135 _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
136 136 )
137 137 newrl = _revlogfrompath(dstrepo, unencoded)
138 138 old_revlog.clone(
139 139 tr,
140 140 newrl,
141 141 addrevisioncb=oncopiedrevision,
142 142 deltareuse=upgrade_op.delta_reuse_mode,
143 143 forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
144 144 sidedatacompanion=sidedatacompanion,
145 145 )
146 146 else:
147 147 msg = _(b'blindly copying %s containing %i revisions\n')
148 148 ui.note(msg % (unencoded, len(old_revlog)))
149 149 _copyrevlog(tr, dstrepo, old_revlog, unencoded)
150 150
151 151 newrl = _revlogfrompath(dstrepo, unencoded)
152 152 return newrl
153 153
154 154
155 155 def _clonerevlogs(
156 156 ui,
157 157 srcrepo,
158 158 dstrepo,
159 159 tr,
160 160 upgrade_op,
161 161 ):
162 162 """Copy revlogs between 2 repos."""
163 163 revcount = 0
164 164 srcsize = 0
165 165 srcrawsize = 0
166 166 dstsize = 0
167 167 fcount = 0
168 168 frevcount = 0
169 169 fsrcsize = 0
170 170 frawsize = 0
171 171 fdstsize = 0
172 172 mcount = 0
173 173 mrevcount = 0
174 174 msrcsize = 0
175 175 mrawsize = 0
176 176 mdstsize = 0
177 177 crevcount = 0
178 178 csrcsize = 0
179 179 crawsize = 0
180 180 cdstsize = 0
181 181
182 182 alldatafiles = list(srcrepo.store.walk())
183 183 # mapping of data files which need to be cloned
184 184 # key is unencoded filename
185 185 # value is revlog_object_from_srcrepo
186 186 manifests = {}
187 187 changelogs = {}
188 188 filelogs = {}
189 189
190 190 # Perform a pass to collect metadata. This validates we can open all
191 191 # source files and allows a unified progress bar to be displayed.
192 192 for unencoded, encoded, size in alldatafiles:
193 193 if unencoded.endswith(b'.d'):
194 194 continue
195 195
196 196 rl = _revlogfrompath(srcrepo, unencoded)
197 197
198 198 info = rl.storageinfo(
199 199 exclusivefiles=True,
200 200 revisionscount=True,
201 201 trackedsize=True,
202 202 storedsize=True,
203 203 )
204 204
205 205 revcount += info[b'revisionscount'] or 0
206 206 datasize = info[b'storedsize'] or 0
207 207 rawsize = info[b'trackedsize'] or 0
208 208
209 209 srcsize += datasize
210 210 srcrawsize += rawsize
211 211
212 212 # This is for the separate progress bars.
213 213 if isinstance(rl, changelog.changelog):
214 214 changelogs[unencoded] = rl
215 215 crevcount += len(rl)
216 216 csrcsize += datasize
217 217 crawsize += rawsize
218 218 elif isinstance(rl, manifest.manifestrevlog):
219 219 manifests[unencoded] = rl
220 220 mcount += 1
221 221 mrevcount += len(rl)
222 222 msrcsize += datasize
223 223 mrawsize += rawsize
224 224 elif isinstance(rl, filelog.filelog):
225 225 filelogs[unencoded] = rl
226 226 fcount += 1
227 227 frevcount += len(rl)
228 228 fsrcsize += datasize
229 229 frawsize += rawsize
230 230 else:
231 231 raise error.ProgrammingError(b'unknown revlog type')
232 232
233 233 if not revcount:
234 234 return
235 235
236 236 ui.status(
237 237 _(
238 238 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
239 239 b'%d in changelog)\n'
240 240 )
241 241 % (revcount, frevcount, mrevcount, crevcount)
242 242 )
243 243 ui.status(
244 244 _(b'migrating %s in store; %s tracked data\n')
245 245 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
246 246 )
247 247
248 248 # Used to keep track of progress.
249 249 progress = None
250 250
251 251 def oncopiedrevision(rl, rev, node):
252 252 progress.increment()
253 253
254 254 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
255 255
256 256 # Migrating filelogs
257 257 ui.status(
258 258 _(
259 259 b'migrating %d filelogs containing %d revisions '
260 260 b'(%s in store; %s tracked data)\n'
261 261 )
262 262 % (
263 263 fcount,
264 264 frevcount,
265 265 util.bytecount(fsrcsize),
266 266 util.bytecount(frawsize),
267 267 )
268 268 )
269 269 progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
270 270 for unencoded, oldrl in sorted(filelogs.items()):
271 271 newrl = _perform_clone(
272 272 ui,
273 273 dstrepo,
274 274 tr,
275 275 oldrl,
276 276 unencoded,
277 277 upgrade_op,
278 278 sidedatacompanion,
279 279 oncopiedrevision,
280 280 )
281 281 info = newrl.storageinfo(storedsize=True)
282 282 fdstsize += info[b'storedsize'] or 0
283 283 ui.status(
284 284 _(
285 285 b'finished migrating %d filelog revisions across %d '
286 286 b'filelogs; change in size: %s\n'
287 287 )
288 288 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
289 289 )
290 290
291 291 # Migrating manifests
292 292 ui.status(
293 293 _(
294 294 b'migrating %d manifests containing %d revisions '
295 295 b'(%s in store; %s tracked data)\n'
296 296 )
297 297 % (
298 298 mcount,
299 299 mrevcount,
300 300 util.bytecount(msrcsize),
301 301 util.bytecount(mrawsize),
302 302 )
303 303 )
304 304 if progress:
305 305 progress.complete()
306 306 progress = srcrepo.ui.makeprogress(
307 307 _(b'manifest revisions'), total=mrevcount
308 308 )
309 309 for unencoded, oldrl in sorted(manifests.items()):
310 310 newrl = _perform_clone(
311 311 ui,
312 312 dstrepo,
313 313 tr,
314 314 oldrl,
315 315 unencoded,
316 316 upgrade_op,
317 317 sidedatacompanion,
318 318 oncopiedrevision,
319 319 )
320 320 info = newrl.storageinfo(storedsize=True)
321 321 mdstsize += info[b'storedsize'] or 0
322 322 ui.status(
323 323 _(
324 324 b'finished migrating %d manifest revisions across %d '
325 325 b'manifests; change in size: %s\n'
326 326 )
327 327 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
328 328 )
329 329
330 330 # Migrating changelog
331 331 ui.status(
332 332 _(
333 333 b'migrating changelog containing %d revisions '
334 334 b'(%s in store; %s tracked data)\n'
335 335 )
336 336 % (
337 337 crevcount,
338 338 util.bytecount(csrcsize),
339 339 util.bytecount(crawsize),
340 340 )
341 341 )
342 342 if progress:
343 343 progress.complete()
344 344 progress = srcrepo.ui.makeprogress(
345 345 _(b'changelog revisions'), total=crevcount
346 346 )
347 347 for unencoded, oldrl in sorted(changelogs.items()):
348 348 newrl = _perform_clone(
349 349 ui,
350 350 dstrepo,
351 351 tr,
352 352 oldrl,
353 353 unencoded,
354 354 upgrade_op,
355 355 sidedatacompanion,
356 356 oncopiedrevision,
357 357 )
358 358 info = newrl.storageinfo(storedsize=True)
359 359 cdstsize += info[b'storedsize'] or 0
360 360 progress.complete()
361 361 ui.status(
362 362 _(
363 363 b'finished migrating %d changelog revisions; change in size: '
364 364 b'%s\n'
365 365 )
366 366 % (crevcount, util.bytecount(cdstsize - csrcsize))
367 367 )
368 368
369 369 dstsize = fdstsize + mdstsize + cdstsize
370 370 ui.status(
371 371 _(
372 372 b'finished migrating %d total revisions; total change in store '
373 373 b'size: %s\n'
374 374 )
375 375 % (revcount, util.bytecount(dstsize - srcsize))
376 376 )
377 377
378 378
379 379 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
380 380 """Determine whether to copy a store file during upgrade.
381 381
382 382 This function is called when migrating store files from ``srcrepo`` to
383 383 ``dstrepo`` as part of upgrading a repository.
384 384
385 385 Args:
386 386 srcrepo: repo we are copying from
387 387 dstrepo: repo we are copying to
388 388 requirements: set of requirements for ``dstrepo``
389 389 path: store file being examined
390 390 mode: the ``ST_MODE`` file type of ``path``
391 391 st: ``stat`` data structure for ``path``
392 392
393 393 Function should return ``True`` if the file is to be copied.
394 394 """
395 395 # Skip revlogs.
396 396 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
397 397 return False
398 398 # Skip transaction related files.
399 399 if path.startswith(b'undo'):
400 400 return False
401 401 # Only copy regular files.
402 402 if mode != stat.S_IFREG:
403 403 return False
404 404 # Skip other skipped files.
405 405 if path in (b'lock', b'fncache'):
406 406 return False
407 407
408 408 return True
409 409
410 410
411 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
411 def finishdatamigration(ui, srcrepo, dstrepo, requirements):
412 412 """Hook point for extensions to perform additional actions during upgrade.
413 413
414 414 This function is called after revlogs and store files have been copied but
415 415 before the new store is swapped into the original location.
416 416 """
417 417
418 418
419 419 def upgrade(ui, srcrepo, dstrepo, upgrade_op):
420 420 """Do the low-level work of upgrading a repository.
421 421
422 422 The upgrade is effectively performed as a copy between a source
423 423 repository and a temporary destination repository.
424 424
425 425 The source repository is unmodified for as long as possible so the
426 426 upgrade can abort at any time without causing loss of service for
427 427 readers and without corrupting the source repository.
428 428 """
429 429 assert srcrepo.currentwlock()
430 430 assert dstrepo.currentwlock()
431 431
432 432 ui.status(
433 433 _(
434 434 b'(it is safe to interrupt this process any time before '
435 435 b'data migration completes)\n'
436 436 )
437 437 )
438 438
439 439 with dstrepo.transaction(b'upgrade') as tr:
440 440 _clonerevlogs(
441 441 ui,
442 442 srcrepo,
443 443 dstrepo,
444 444 tr,
445 445 upgrade_op,
446 446 )
447 447
448 448 # Now copy other files in the store directory.
449 449 # The sorted() makes execution deterministic.
450 450 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
451 451 if not _filterstorefile(
452 452 srcrepo, dstrepo, upgrade_op.new_requirements, p, kind, st
453 453 ):
454 454 continue
455 455
456 456 srcrepo.ui.status(_(b'copying %s\n') % p)
457 457 src = srcrepo.store.rawvfs.join(p)
458 458 dst = dstrepo.store.rawvfs.join(p)
459 459 util.copyfile(src, dst, copystat=True)
460 460
461 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
461 finishdatamigration(ui, srcrepo, dstrepo, requirements)
462 462
463 463 ui.status(_(b'data fully migrated to temporary repository\n'))
464 464
465 465 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
466 466 backupvfs = vfsmod.vfs(backuppath)
467 467
468 468 # Make a backup of requires file first, as it is the first to be modified.
469 469 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
470 470
471 471 # We install an arbitrary requirement that clients must not support
472 472 # as a mechanism to lock out new clients during the data swap. This is
473 473 # better than allowing a client to continue while the repository is in
474 474 # an inconsistent state.
475 475 ui.status(
476 476 _(
477 477 b'marking source repository as being upgraded; clients will be '
478 478 b'unable to read from repository\n'
479 479 )
480 480 )
481 481 scmutil.writereporequirements(
482 482 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
483 483 )
484 484
485 485 ui.status(_(b'starting in-place swap of repository data\n'))
486 486 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
487 487
488 488 # Now swap in the new store directory. Doing it as a rename should make
489 489 # the operation nearly instantaneous and atomic (at least in well-behaved
490 490 # environments).
491 491 ui.status(_(b'replacing store...\n'))
492 492 tstart = util.timer()
493 493 util.rename(srcrepo.spath, backupvfs.join(b'store'))
494 494 util.rename(dstrepo.spath, srcrepo.spath)
495 495 elapsed = util.timer() - tstart
496 496 ui.status(
497 497 _(
498 498 b'store replacement complete; repository was inconsistent for '
499 499 b'%0.1fs\n'
500 500 )
501 501 % elapsed
502 502 )
503 503
504 504 # We first write the requirements file. Any new requirements will lock
505 505 # out legacy clients.
506 506 ui.status(
507 507 _(
508 508 b'finalizing requirements file and making repository readable '
509 509 b'again\n'
510 510 )
511 511 )
512 512 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
513 513
514 514 # The lock file from the old store won't be removed because nothing has a
515 515 # reference to its new location. So clean it up manually. Alternatively, we
516 516 # could update srcrepo.svfs and other variables to point to the new
517 517 # location. This is simpler.
518 518 backupvfs.unlink(b'store/lock')
519 519
520 520 return backuppath
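For reference, a short sketch of how `matchrevlog` in the engine code above routes store entries during `_clonerevlogs`: changelog and manifest indexes are matched by filename suffix, and everything else falls through to the filelog filter (assumes the names from the module are in scope):

    assert matchrevlog(UPGRADE_ALL_REVLOGS, b'00changelog.i')
    assert matchrevlog({UPGRADE_MANIFEST}, b'meta/foo/00manifest.i')
    assert matchrevlog({UPGRADE_FILELOGS}, b'data/foo.i')
    assert not matchrevlog({UPGRADE_CHANGELOG}, b'data/foo.i')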