changelog: also monitor `00changelog.n` when applicable (issue6554)...
marmoute
r48853:c094e829 stable
@@ -1,3851 +1,3866 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 sha1nodeconstants,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 62 revlog,
63 63 revset,
64 64 revsetlang,
65 65 scmutil,
66 66 sparse,
67 67 store as storemod,
68 68 subrepoutil,
69 69 tags as tagsmod,
70 70 transaction,
71 71 txnutil,
72 72 util,
73 73 vfs as vfsmod,
74 74 wireprototypes,
75 75 )
76 76
77 77 from .interfaces import (
78 78 repository,
79 79 util as interfaceutil,
80 80 )
81 81
82 82 from .utils import (
83 83 hashutil,
84 84 procutil,
85 85 stringutil,
86 86 urlutil,
87 87 )
88 88
89 89 from .revlogutils import (
90 90 concurrency_checker as revlogchecker,
91 91 constants as revlogconst,
92 92 sidedata as sidedatamod,
93 93 )
94 94
95 95 release = lockmod.release
96 96 urlerr = util.urlerr
97 97 urlreq = util.urlreq
98 98
99 99 # set of (path, vfs-location) tuples. vfs-location is:
100 100 # - 'plain' for vfs relative paths
101 101 # - '' for svfs relative paths
102 102 _cachedfiles = set()
103 103
104 104
105 105 class _basefilecache(scmutil.filecache):
106 106 """All filecache usage on repo are done for logic that should be unfiltered"""
107 107
108 108 def __get__(self, repo, type=None):
109 109 if repo is None:
110 110 return self
111 111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 112 unfi = repo.unfiltered()
113 113 try:
114 114 return unfi.__dict__[self.sname]
115 115 except KeyError:
116 116 pass
117 117 return super(_basefilecache, self).__get__(unfi, type)
118 118
119 119 def set(self, repo, value):
120 120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121 121
122 122
123 123 class repofilecache(_basefilecache):
124 124 """filecache for files in .hg but outside of .hg/store"""
125 125
126 126 def __init__(self, *paths):
127 127 super(repofilecache, self).__init__(*paths)
128 128 for path in paths:
129 129 _cachedfiles.add((path, b'plain'))
130 130
131 131 def join(self, obj, fname):
132 132 return obj.vfs.join(fname)
133 133
134 134
135 135 class storecache(_basefilecache):
136 136 """filecache for files in the store"""
137 137
138 138 def __init__(self, *paths):
139 139 super(storecache, self).__init__(*paths)
140 140 for path in paths:
141 141 _cachedfiles.add((path, b''))
142 142
143 143 def join(self, obj, fname):
144 144 return obj.sjoin(fname)
145 145
146 146
147 class changelogcache(storecache):
148 """filecache for the changelog"""
149
150 def __init__(self):
151 super(changelogcache, self).__init__()
152 _cachedfiles.add((b'00changelog.i', b''))
153 _cachedfiles.add((b'00changelog.n', b''))
154
155 def tracked_paths(self, obj):
156 paths = [self.join(obj, b'00changelog.i')]
157 if obj.store.opener.options.get(b'persistent-nodemap', False):
158 paths.append(self.join(obj, b'00changelog.n'))
159 return paths
160
161
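This new cache class is the core of the issue6554 fix: with a persistent nodemap, the `00changelog.n` docket can change independently of `00changelog.i`, so both files must be monitored. A minimal sketch of the same decision logic, using a hypothetical `join` callable and options dict in place of the repo object:

    def tracked_changelog_paths(join, options):
        # the changelog index is always monitored
        paths = [join(b'00changelog.i')]
        # with a persistent nodemap, the docket file can change on its
        # own, so it must be monitored too (issue6554)
        if options.get(b'persistent-nodemap', False):
            paths.append(join(b'00changelog.n'))
        return paths
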
147 162 class mixedrepostorecache(_basefilecache):
148 163 """filecache for a mix files in .hg/store and outside"""
149 164
150 165 def __init__(self, *pathsandlocations):
151 166 # scmutil.filecache only uses the path for passing back into our
152 167 # join(), so we can safely pass a list of paths and locations
153 168 super(mixedrepostorecache, self).__init__(*pathsandlocations)
154 169 _cachedfiles.update(pathsandlocations)
155 170
156 171 def join(self, obj, fnameandlocation):
157 172 fname, location = fnameandlocation
158 173 if location == b'plain':
159 174 return obj.vfs.join(fname)
160 175 else:
161 176 if location != b'':
162 177 raise error.ProgrammingError(
163 178 b'unexpected location: %s' % location
164 179 )
165 180 return obj.sjoin(fname)
166 181
167 182
168 183 def isfilecached(repo, name):
169 184 """check if a repo has already cached "name" filecache-ed property
170 185
171 186 This returns (cachedobj-or-None, iscached) tuple.
172 187 """
173 188 cacheentry = repo.unfiltered()._filecache.get(name, None)
174 189 if not cacheentry:
175 190 return None, False
176 191 return cacheentry.obj, True
177 192
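A hedged usage sketch: callers typically use isfilecached() to act on a property only if it has already been loaded, avoiding the cost of triggering a load. Here `repo` is assumed to be an open localrepository and `_bookmarks` a filecache-backed property:

    obj, cached = isfilecached(repo, b'_bookmarks')
    if cached:
        # obj is the in-memory value; inspecting it triggers no file reads
        repo.ui.debug(b'%d bookmarks already cached\n' % len(obj))
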
178 193
179 194 class unfilteredpropertycache(util.propertycache):
180 195 """propertycache that apply to unfiltered repo only"""
181 196
182 197 def __get__(self, repo, type=None):
183 198 unfi = repo.unfiltered()
184 199 if unfi is repo:
185 200 return super(unfilteredpropertycache, self).__get__(unfi)
186 201 return getattr(unfi, self.name)
187 202
188 203
189 204 class filteredpropertycache(util.propertycache):
190 205 """propertycache that must take filtering in account"""
191 206
192 207 def cachevalue(self, obj, value):
193 208 object.__setattr__(obj, self.name, value)
194 209
195 210
196 211 def hasunfilteredcache(repo, name):
197 212 """check if a repo has an unfilteredpropertycache value for <name>"""
198 213 return name in vars(repo.unfiltered())
199 214
200 215
201 216 def unfilteredmethod(orig):
202 217 """decorate method that always need to be run on unfiltered version"""
203 218
204 219 @functools.wraps(orig)
205 220 def wrapper(repo, *args, **kwargs):
206 221 return orig(repo.unfiltered(), *args, **kwargs)
207 222
208 223 return wrapper
209 224
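For illustration, a minimal hypothetical use of the decorator; any method defined this way always runs against the repo returned by unfiltered(), even when invoked on a filtered repoview:

    class examplerepo(object):
        def unfiltered(self):
            return self  # a real repoview would return the underlying repo

        @unfilteredmethod
        def allheads(self):
            # hypothetical method: always sees the unfiltered repository,
            # so hidden revisions cannot be silently skipped
            pass
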
210 225
211 226 moderncaps = {
212 227 b'lookup',
213 228 b'branchmap',
214 229 b'pushkey',
215 230 b'known',
216 231 b'getbundle',
217 232 b'unbundle',
218 233 }
219 234 legacycaps = moderncaps.union({b'changegroupsubset'})
220 235
221 236
222 237 @interfaceutil.implementer(repository.ipeercommandexecutor)
223 238 class localcommandexecutor(object):
224 239 def __init__(self, peer):
225 240 self._peer = peer
226 241 self._sent = False
227 242 self._closed = False
228 243
229 244 def __enter__(self):
230 245 return self
231 246
232 247 def __exit__(self, exctype, excvalue, exctb):
233 248 self.close()
234 249
235 250 def callcommand(self, command, args):
236 251 if self._sent:
237 252 raise error.ProgrammingError(
238 253 b'callcommand() cannot be used after sendcommands()'
239 254 )
240 255
241 256 if self._closed:
242 257 raise error.ProgrammingError(
243 258 b'callcommand() cannot be used after close()'
244 259 )
245 260
246 261 # We don't need to support anything fancy. Just call the named
247 262 # method on the peer and return a resolved future.
248 263 fn = getattr(self._peer, pycompat.sysstr(command))
249 264
250 265 f = pycompat.futures.Future()
251 266
252 267 try:
253 268 result = fn(**pycompat.strkwargs(args))
254 269 except Exception:
255 270 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
256 271 else:
257 272 f.set_result(result)
258 273
259 274 return f
260 275
261 276 def sendcommands(self):
262 277 self._sent = True
263 278
264 279 def close(self):
265 280 self._closed = True
266 281
267 282
268 283 @interfaceutil.implementer(repository.ipeercommands)
269 284 class localpeer(repository.peer):
270 285 '''peer for a local repo; reflects only the most recent API'''
271 286
272 287 def __init__(self, repo, caps=None):
273 288 super(localpeer, self).__init__()
274 289
275 290 if caps is None:
276 291 caps = moderncaps.copy()
277 292 self._repo = repo.filtered(b'served')
278 293 self.ui = repo.ui
279 294
280 295 if repo._wanted_sidedata:
281 296 formatted = bundle2.format_remote_wanted_sidedata(repo)
282 297 caps.add(b'exp-wanted-sidedata=' + formatted)
283 298
284 299 self._caps = repo._restrictcapabilities(caps)
285 300
286 301 # Begin of _basepeer interface.
287 302
288 303 def url(self):
289 304 return self._repo.url()
290 305
291 306 def local(self):
292 307 return self._repo
293 308
294 309 def peer(self):
295 310 return self
296 311
297 312 def canpush(self):
298 313 return True
299 314
300 315 def close(self):
301 316 self._repo.close()
302 317
303 318 # End of _basepeer interface.
304 319
305 320 # Begin of _basewirecommands interface.
306 321
307 322 def branchmap(self):
308 323 return self._repo.branchmap()
309 324
310 325 def capabilities(self):
311 326 return self._caps
312 327
313 328 def clonebundles(self):
314 329 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
315 330
316 331 def debugwireargs(self, one, two, three=None, four=None, five=None):
317 332 """Used to test argument passing over the wire"""
318 333 return b"%s %s %s %s %s" % (
319 334 one,
320 335 two,
321 336 pycompat.bytestr(three),
322 337 pycompat.bytestr(four),
323 338 pycompat.bytestr(five),
324 339 )
325 340
326 341 def getbundle(
327 342 self,
328 343 source,
329 344 heads=None,
330 345 common=None,
331 346 bundlecaps=None,
332 347 remote_sidedata=None,
333 348 **kwargs
334 349 ):
335 350 chunks = exchange.getbundlechunks(
336 351 self._repo,
337 352 source,
338 353 heads=heads,
339 354 common=common,
340 355 bundlecaps=bundlecaps,
341 356 remote_sidedata=remote_sidedata,
342 357 **kwargs
343 358 )[1]
344 359 cb = util.chunkbuffer(chunks)
345 360
346 361 if exchange.bundle2requested(bundlecaps):
347 362 # When requesting a bundle2, getbundle returns a stream to make the
348 363 # wire level function happier. We need to build a proper object
349 364 # from it in local peer.
350 365 return bundle2.getunbundler(self.ui, cb)
351 366 else:
352 367 return changegroup.getunbundler(b'01', cb, None)
353 368
354 369 def heads(self):
355 370 return self._repo.heads()
356 371
357 372 def known(self, nodes):
358 373 return self._repo.known(nodes)
359 374
360 375 def listkeys(self, namespace):
361 376 return self._repo.listkeys(namespace)
362 377
363 378 def lookup(self, key):
364 379 return self._repo.lookup(key)
365 380
366 381 def pushkey(self, namespace, key, old, new):
367 382 return self._repo.pushkey(namespace, key, old, new)
368 383
369 384 def stream_out(self):
370 385 raise error.Abort(_(b'cannot perform stream clone against local peer'))
371 386
372 387 def unbundle(self, bundle, heads, url):
373 388 """apply a bundle on a repo
374 389
375 390 This function handles the repo locking itself."""
376 391 try:
377 392 try:
378 393 bundle = exchange.readbundle(self.ui, bundle, None)
379 394 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
380 395 if util.safehasattr(ret, b'getchunks'):
381 396 # This is a bundle20 object, turn it into an unbundler.
382 397 # This little dance should be dropped eventually when the
383 398 # API is finally improved.
384 399 stream = util.chunkbuffer(ret.getchunks())
385 400 ret = bundle2.getunbundler(self.ui, stream)
386 401 return ret
387 402 except Exception as exc:
388 403 # If the exception contains output salvaged from a bundle2
389 404 # reply, we need to make sure it is printed before continuing
390 405 # to fail. So we build a bundle2 with such output and consume
391 406 # it directly.
392 407 #
393 408 # This is not very elegant but allows a "simple" solution for
394 409 # issue4594
395 410 output = getattr(exc, '_bundle2salvagedoutput', ())
396 411 if output:
397 412 bundler = bundle2.bundle20(self._repo.ui)
398 413 for out in output:
399 414 bundler.addpart(out)
400 415 stream = util.chunkbuffer(bundler.getchunks())
401 416 b = bundle2.getunbundler(self.ui, stream)
402 417 bundle2.processbundle(self._repo, b)
403 418 raise
404 419 except error.PushRaced as exc:
405 420 raise error.ResponseError(
406 421 _(b'push failed:'), stringutil.forcebytestr(exc)
407 422 )
408 423
409 424 # End of _basewirecommands interface.
410 425
411 426 # Begin of peer interface.
412 427
413 428 def commandexecutor(self):
414 429 return localcommandexecutor(self)
415 430
416 431 # End of peer interface.
417 432
418 433
419 434 @interfaceutil.implementer(repository.ipeerlegacycommands)
420 435 class locallegacypeer(localpeer):
421 436 """peer extension which implements legacy methods too; used for tests with
422 437 restricted capabilities"""
423 438
424 439 def __init__(self, repo):
425 440 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
426 441
427 442 # Begin of baselegacywirecommands interface.
428 443
429 444 def between(self, pairs):
430 445 return self._repo.between(pairs)
431 446
432 447 def branches(self, nodes):
433 448 return self._repo.branches(nodes)
434 449
435 450 def changegroup(self, nodes, source):
436 451 outgoing = discovery.outgoing(
437 452 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
438 453 )
439 454 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
440 455
441 456 def changegroupsubset(self, bases, heads, source):
442 457 outgoing = discovery.outgoing(
443 458 self._repo, missingroots=bases, ancestorsof=heads
444 459 )
445 460 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
446 461
447 462 # End of baselegacywirecommands interface.
448 463
449 464
450 465 # Functions receiving (ui, features) that extensions can register to impact
451 466 # the ability to load repositories with custom requirements. Only
452 467 # functions defined in loaded extensions are called.
453 468 #
454 469 # The function receives a set of requirement strings that the repository
455 470 # is capable of opening. Functions will typically add elements to the
456 471 # set to reflect that the extension knows how to handle those requirements.
457 472 featuresetupfuncs = set()
458 473
459 474
460 475 def _getsharedvfs(hgvfs, requirements):
461 476 """returns the vfs object pointing to root of shared source
462 477 repo for a shared repository
463 478
464 479 hgvfs is the vfs pointing at .hg/ of the current repo (the shared one)
465 480 requirements is the set of requirements of the current repo (the shared one)
466 481 """
467 482 # The ``shared`` or ``relshared`` requirements indicate the
468 483 # store lives in the path contained in the ``.hg/sharedpath`` file.
469 484 # This is an absolute path for ``shared`` and relative to
470 485 # ``.hg/`` for ``relshared``.
471 486 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
472 487 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
473 488 sharedpath = util.normpath(hgvfs.join(sharedpath))
474 489
475 490 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
476 491
477 492 if not sharedvfs.exists():
478 493 raise error.RepoError(
479 494 _(b'.hg/sharedpath points to nonexistent directory %s')
480 495 % sharedvfs.base
481 496 )
482 497 return sharedvfs
483 498
484 499
485 500 def _readrequires(vfs, allowmissing):
486 501 """reads the require file present at root of this vfs
487 502 and returns a set of requirements
488 503
489 504 If allowmissing is True, we suppress ENOENT if raised"""
490 505 # requires file contains a newline-delimited list of
491 506 # features/capabilities the opener (us) must have in order to use
492 507 # the repository. This file was introduced in Mercurial 0.9.2,
493 508 # which means very old repositories may not have one. We assume
494 509 # a missing file translates to no requirements.
495 510 try:
496 511 requirements = set(vfs.read(b'requires').splitlines())
497 512 except IOError as e:
498 513 if not (allowmissing and e.errno == errno.ENOENT):
499 514 raise
500 515 requirements = set()
501 516 return requirements
502 517
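As the comment notes, the requires file is just a newline-delimited list of feature names. A small self-contained illustration of the parsing step, with sample content typical of a modern repository (the exact set varies by repo):

    sample = b'dotencode\nfncache\ngeneraldelta\nrevlogv1\nsparserevlog\nstore\n'
    requirements = set(sample.splitlines())
    assert b'store' in requirements and len(requirements) == 6
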
503 518
504 519 def makelocalrepository(baseui, path, intents=None):
505 520 """Create a local repository object.
506 521
507 522 Given arguments needed to construct a local repository, this function
508 523 performs various early repository loading functionality (such as
509 524 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
510 525 the repository can be opened, derives a type suitable for representing
511 526 that repository, and returns an instance of it.
512 527
513 528 The returned object conforms to the ``repository.completelocalrepository``
514 529 interface.
515 530
516 531 The repository type is derived by calling a series of factory functions
517 532 for each aspect/interface of the final repository. These are defined by
518 533 ``REPO_INTERFACES``.
519 534
520 535 Each factory function is called to produce a type implementing a specific
521 536 interface. The cumulative list of returned types will be combined into a
522 537 new type and that type will be instantiated to represent the local
523 538 repository.
524 539
525 540 The factory functions each receive various state that may be consulted
526 541 as part of deriving a type.
527 542
528 543 Extensions should wrap these factory functions to customize repository type
529 544 creation. Note that an extension's wrapped function may be called even if
530 545 that extension is not loaded for the repo being constructed. Extensions
531 546 should check if their ``__name__`` appears in the
532 547 ``extensionmodulenames`` set passed to the factory function and no-op if
533 548 not.
534 549 """
535 550 ui = baseui.copy()
536 551 # Prevent copying repo configuration.
537 552 ui.copy = baseui.copy
538 553
539 554 # Working directory VFS rooted at repository root.
540 555 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
541 556
542 557 # Main VFS for .hg/ directory.
543 558 hgpath = wdirvfs.join(b'.hg')
544 559 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
545 560 # Whether this repository is a shared one or not
546 561 shared = False
547 562 # If this repository is shared, vfs pointing to shared repo
548 563 sharedvfs = None
549 564
550 565 # The .hg/ path should exist and should be a directory. All other
551 566 # cases are errors.
552 567 if not hgvfs.isdir():
553 568 try:
554 569 hgvfs.stat()
555 570 except OSError as e:
556 571 if e.errno != errno.ENOENT:
557 572 raise
558 573 except ValueError as e:
559 574 # Can be raised on Python 3.8 when path is invalid.
560 575 raise error.Abort(
561 576 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
562 577 )
563 578
564 579 raise error.RepoError(_(b'repository %s not found') % path)
565 580
566 581 requirements = _readrequires(hgvfs, True)
567 582 shared = (
568 583 requirementsmod.SHARED_REQUIREMENT in requirements
569 584 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
570 585 )
571 586 storevfs = None
572 587 if shared:
573 588 # This is a shared repo
574 589 sharedvfs = _getsharedvfs(hgvfs, requirements)
575 590 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
576 591 else:
577 592 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
578 593
579 594 # if .hg/requires contains the sharesafe requirement, it means
580 595 # there exists a `.hg/store/requires` too and we should read it
581 596 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
582 597 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
583 598 # is not present; refer to checkrequirementscompat() for that
584 599 #
585 600 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
586 601 # repository was shared the old way. We check the share source .hg/requires
587 602 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
588 603 # to be reshared
589 604 hint = _(b"see `hg help config.format.use-share-safe` for more information")
590 605 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
591 606
592 607 if (
593 608 shared
594 609 and requirementsmod.SHARESAFE_REQUIREMENT
595 610 not in _readrequires(sharedvfs, True)
596 611 ):
597 612 mismatch_warn = ui.configbool(
598 613 b'share', b'safe-mismatch.source-not-safe.warn'
599 614 )
600 615 mismatch_config = ui.config(
601 616 b'share', b'safe-mismatch.source-not-safe'
602 617 )
603 618 if mismatch_config in (
604 619 b'downgrade-allow',
605 620 b'allow',
606 621 b'downgrade-abort',
607 622 ):
608 623 # prevent cyclic import localrepo -> upgrade -> localrepo
609 624 from . import upgrade
610 625
611 626 upgrade.downgrade_share_to_non_safe(
612 627 ui,
613 628 hgvfs,
614 629 sharedvfs,
615 630 requirements,
616 631 mismatch_config,
617 632 mismatch_warn,
618 633 )
619 634 elif mismatch_config == b'abort':
620 635 raise error.Abort(
621 636 _(b"share source does not support share-safe requirement"),
622 637 hint=hint,
623 638 )
624 639 else:
625 640 raise error.Abort(
626 641 _(
627 642 b"share-safe mismatch with source.\nUnrecognized"
628 643 b" value '%s' of `share.safe-mismatch.source-not-safe`"
629 644 b" set."
630 645 )
631 646 % mismatch_config,
632 647 hint=hint,
633 648 )
634 649 else:
635 650 requirements |= _readrequires(storevfs, False)
636 651 elif shared:
637 652 sourcerequires = _readrequires(sharedvfs, False)
638 653 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
639 654 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
640 655 mismatch_warn = ui.configbool(
641 656 b'share', b'safe-mismatch.source-safe.warn'
642 657 )
643 658 if mismatch_config in (
644 659 b'upgrade-allow',
645 660 b'allow',
646 661 b'upgrade-abort',
647 662 ):
648 663 # prevent cyclic import localrepo -> upgrade -> localrepo
649 664 from . import upgrade
650 665
651 666 upgrade.upgrade_share_to_safe(
652 667 ui,
653 668 hgvfs,
654 669 storevfs,
655 670 requirements,
656 671 mismatch_config,
657 672 mismatch_warn,
658 673 )
659 674 elif mismatch_config == b'abort':
660 675 raise error.Abort(
661 676 _(
662 677 b'version mismatch: source uses share-safe'
663 678 b' functionality while the current share does not'
664 679 ),
665 680 hint=hint,
666 681 )
667 682 else:
668 683 raise error.Abort(
669 684 _(
670 685 b"share-safe mismatch with source.\nUnrecognized"
671 686 b" value '%s' of `share.safe-mismatch.source-safe` set."
672 687 )
673 688 % mismatch_config,
674 689 hint=hint,
675 690 )
676 691
677 692 # The .hg/hgrc file may load extensions or contain config options
678 693 # that influence repository construction. Attempt to load it and
679 694 # process any new extensions that it may have pulled in.
680 695 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
681 696 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
682 697 extensions.loadall(ui)
683 698 extensions.populateui(ui)
684 699
685 700 # Set of module names of extensions loaded for this repository.
686 701 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
687 702
688 703 supportedrequirements = gathersupportedrequirements(ui)
689 704
690 705 # We first validate the requirements are known.
691 706 ensurerequirementsrecognized(requirements, supportedrequirements)
692 707
693 708 # Then we validate that the known set is reasonable to use together.
694 709 ensurerequirementscompatible(ui, requirements)
695 710
696 711 # TODO there are unhandled edge cases related to opening repositories with
697 712 # shared storage. If storage is shared, we should also test for requirements
698 713 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
699 714 # that repo, as that repo may load extensions needed to open it. This is a
700 715 # bit complicated because we don't want the other hgrc to overwrite settings
701 716 # in this hgrc.
702 717 #
703 718 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
704 719 # file when sharing repos. But if a requirement is added after the share is
705 720 # performed, thereby introducing a new requirement for the opener, we may
706 721 # not see that and could encounter a run-time error interacting with
707 722 # that shared store since it has an unknown-to-us requirement.
708 723
709 724 # At this point, we know we should be capable of opening the repository.
710 725 # Now get on with doing that.
711 726
712 727 features = set()
713 728
714 729 # The "store" part of the repository holds versioned data. How it is
715 730 # accessed is determined by various requirements. If `shared` or
716 731 # `relshared` requirements are present, this indicates current repository
717 732 # is a share and store exists in path mentioned in `.hg/sharedpath`
718 733 if shared:
719 734 storebasepath = sharedvfs.base
720 735 cachepath = sharedvfs.join(b'cache')
721 736 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
722 737 else:
723 738 storebasepath = hgvfs.base
724 739 cachepath = hgvfs.join(b'cache')
725 740 wcachepath = hgvfs.join(b'wcache')
726 741
727 742 # The store has changed over time and the exact layout is dictated by
728 743 # requirements. The store interface abstracts differences across all
729 744 # of them.
730 745 store = makestore(
731 746 requirements,
732 747 storebasepath,
733 748 lambda base: vfsmod.vfs(base, cacheaudited=True),
734 749 )
735 750 hgvfs.createmode = store.createmode
736 751
737 752 storevfs = store.vfs
738 753 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
739 754
740 755 if (
741 756 requirementsmod.REVLOGV2_REQUIREMENT in requirements
742 757 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
743 758 ):
744 759 features.add(repository.REPO_FEATURE_SIDE_DATA)
745 760 # the revlogv2 docket introduced race condition that we need to fix
746 761 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
747 762
748 763 # The cache vfs is used to manage cache files.
749 764 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
750 765 cachevfs.createmode = store.createmode
751 766 # The cache vfs is used to manage cache files related to the working copy
752 767 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
753 768 wcachevfs.createmode = store.createmode
754 769
755 770 # Now resolve the type for the repository object. We do this by repeatedly
756 771 # calling a factory function to produce types for specific aspects of the
757 772 # repo's operation. The aggregate returned types are used as base classes
758 773 # for a dynamically-derived type, which will represent our new repository.
759 774
760 775 bases = []
761 776 extrastate = {}
762 777
763 778 for iface, fn in REPO_INTERFACES:
764 779 # We pass all potentially useful state to give extensions tons of
765 780 # flexibility.
766 781 typ = fn()(
767 782 ui=ui,
768 783 intents=intents,
769 784 requirements=requirements,
770 785 features=features,
771 786 wdirvfs=wdirvfs,
772 787 hgvfs=hgvfs,
773 788 store=store,
774 789 storevfs=storevfs,
775 790 storeoptions=storevfs.options,
776 791 cachevfs=cachevfs,
777 792 wcachevfs=wcachevfs,
778 793 extensionmodulenames=extensionmodulenames,
779 794 extrastate=extrastate,
780 795 baseclasses=bases,
781 796 )
782 797
783 798 if not isinstance(typ, type):
784 799 raise error.ProgrammingError(
785 800 b'unable to construct type for %s' % iface
786 801 )
787 802
788 803 bases.append(typ)
789 804
790 805 # type() allows you to use characters in type names that wouldn't be
791 806 # recognized as Python symbols in source code. We abuse that to add
792 807 # rich information about our constructed repo.
793 808 name = pycompat.sysstr(
794 809 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
795 810 )
796 811
797 812 cls = type(name, tuple(bases), {})
798 813
799 814 return cls(
800 815 baseui=baseui,
801 816 ui=ui,
802 817 origroot=path,
803 818 wdirvfs=wdirvfs,
804 819 hgvfs=hgvfs,
805 820 requirements=requirements,
806 821 supportedrequirements=supportedrequirements,
807 822 sharedpath=storebasepath,
808 823 store=store,
809 824 cachevfs=cachevfs,
810 825 wcachevfs=wcachevfs,
811 826 features=features,
812 827 intents=intents,
813 828 )
814 829
815 830
816 831 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
817 832 """Load hgrc files/content into a ui instance.
818 833
819 834 This is called during repository opening to load any additional
820 835 config files or settings relevant to the current repository.
821 836
822 837 Returns a bool indicating whether any additional configs were loaded.
823 838
824 839 Extensions should monkeypatch this function to modify how per-repo
825 840 configs are loaded. For example, an extension may wish to pull in
826 841 configs from alternate files or sources.
827 842
828 843 sharedvfs is a vfs object pointing to the source repo if the current one is a
829 844 shared one
830 845 """
831 846 if not rcutil.use_repo_hgrc():
832 847 return False
833 848
834 849 ret = False
835 850 # first load config from the shared source if we have to
836 851 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
837 852 try:
838 853 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
839 854 ret = True
840 855 except IOError:
841 856 pass
842 857
843 858 try:
844 859 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
845 860 ret = True
846 861 except IOError:
847 862 pass
848 863
849 864 try:
850 865 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
851 866 ret = True
852 867 except IOError:
853 868 pass
854 869
855 870 return ret
856 871
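Since the docstring invites extensions to monkeypatch loadhgrc(), here is a hedged sketch of the usual wrapping pattern via extensions.wrapfunction; the extra `hgrc-extra` file name is purely hypothetical:

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
        ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
        try:
            # pull in one additional, hypothetical per-repo config file
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            ret = True
        except IOError:
            pass
        return ret

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
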
857 872
858 873 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
859 874 """Perform additional actions after .hg/hgrc is loaded.
860 875
861 876 This function is called during repository loading immediately after
862 877 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
863 878
864 879 The function can be used to validate configs, automatically add
865 880 options (including extensions) based on requirements, etc.
866 881 """
867 882
868 883 # Map of requirements to list of extensions to load automatically when
869 884 # requirement is present.
870 885 autoextensions = {
871 886 b'git': [b'git'],
872 887 b'largefiles': [b'largefiles'],
873 888 b'lfs': [b'lfs'],
874 889 }
875 890
876 891 for requirement, names in sorted(autoextensions.items()):
877 892 if requirement not in requirements:
878 893 continue
879 894
880 895 for name in names:
881 896 if not ui.hasconfig(b'extensions', name):
882 897 ui.setconfig(b'extensions', name, b'', source=b'autoload')
883 898
884 899
885 900 def gathersupportedrequirements(ui):
886 901 """Determine the complete set of recognized requirements."""
887 902 # Start with all requirements supported by this file.
888 903 supported = set(localrepository._basesupported)
889 904
890 905 if dirstate.SUPPORTS_DIRSTATE_V2:
891 906 supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
892 907
893 908 # Execute ``featuresetupfuncs`` entries if they belong to an extension
894 909 # relevant to this ui instance.
895 910 modules = {m.__name__ for n, m in extensions.extensions(ui)}
896 911
897 912 for fn in featuresetupfuncs:
898 913 if fn.__module__ in modules:
899 914 fn(ui, supported)
900 915
901 916 # Add derived requirements from registered compression engines.
902 917 for name in util.compengines:
903 918 engine = util.compengines[name]
904 919 if engine.available() and engine.revlogheader():
905 920 supported.add(b'exp-compression-%s' % name)
906 921 if engine.name() == b'zstd':
907 922 supported.add(b'revlog-compression-zstd')
908 923
909 924 return supported
910 925
911 926
912 927 def ensurerequirementsrecognized(requirements, supported):
913 928 """Validate that a set of local requirements is recognized.
914 929
915 930 Receives a set of requirements. Raises an ``error.RepoError`` if there
916 931 exists any requirement in that set that currently loaded code doesn't
917 932 recognize.
918 933
919 934 Returns a set of supported requirements.
920 935 """
921 936 missing = set()
922 937
923 938 for requirement in requirements:
924 939 if requirement in supported:
925 940 continue
926 941
927 942 if not requirement or not requirement[0:1].isalnum():
928 943 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
929 944
930 945 missing.add(requirement)
931 946
932 947 if missing:
933 948 raise error.RequirementError(
934 949 _(b'repository requires features unknown to this Mercurial: %s')
935 950 % b' '.join(sorted(missing)),
936 951 hint=_(
937 952 b'see https://mercurial-scm.org/wiki/MissingRequirement '
938 953 b'for more information'
939 954 ),
940 955 )
941 956
942 957
943 958 def ensurerequirementscompatible(ui, requirements):
944 959 """Validates that a set of recognized requirements is mutually compatible.
945 960
946 961 Some requirements may not be compatible with others or require
947 962 config options that aren't enabled. This function is called during
948 963 repository opening to ensure that the set of requirements needed
949 964 to open a repository is sane and compatible with config options.
950 965
951 966 Extensions can monkeypatch this function to perform additional
952 967 checking.
953 968
954 969 ``error.RepoError`` should be raised on failure.
955 970 """
956 971 if (
957 972 requirementsmod.SPARSE_REQUIREMENT in requirements
958 973 and not sparse.enabled
959 974 ):
960 975 raise error.RepoError(
961 976 _(
962 977 b'repository is using sparse feature but '
963 978 b'sparse is not enabled; enable the '
964 979 b'"sparse" extensions to access'
965 980 )
966 981 )
967 982
968 983
969 984 def makestore(requirements, path, vfstype):
970 985 """Construct a storage object for a repository."""
971 986 if requirementsmod.STORE_REQUIREMENT in requirements:
972 987 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
973 988 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
974 989 return storemod.fncachestore(path, vfstype, dotencode)
975 990
976 991 return storemod.encodedstore(path, vfstype)
977 992
978 993 return storemod.basicstore(path, vfstype)
979 994
980 995
981 996 def resolvestorevfsoptions(ui, requirements, features):
982 997 """Resolve the options to pass to the store vfs opener.
983 998
984 999 The returned dict is used to influence behavior of the storage layer.
985 1000 """
986 1001 options = {}
987 1002
988 1003 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
989 1004 options[b'treemanifest'] = True
990 1005
991 1006 # experimental config: format.manifestcachesize
992 1007 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
993 1008 if manifestcachesize is not None:
994 1009 options[b'manifestcachesize'] = manifestcachesize
995 1010
996 1011 # In the absence of another requirement superseding a revlog-related
997 1012 # requirement, we have to assume the repo is using revlog version 0.
998 1013 # This revlog format is super old and we don't bother trying to parse
999 1014 # opener options for it because those options wouldn't do anything
1000 1015 # meaningful on such old repos.
1001 1016 if (
1002 1017 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1003 1018 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1004 1019 ):
1005 1020 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1006 1021 else: # explicitly mark repo as using revlogv0
1007 1022 options[b'revlogv0'] = True
1008 1023
1009 1024 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1010 1025 options[b'copies-storage'] = b'changeset-sidedata'
1011 1026 else:
1012 1027 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1013 1028 copiesextramode = (b'changeset-only', b'compatibility')
1014 1029 if writecopiesto in copiesextramode:
1015 1030 options[b'copies-storage'] = b'extra'
1016 1031
1017 1032 return options
1018 1033
1019 1034
1020 1035 def resolverevlogstorevfsoptions(ui, requirements, features):
1021 1036 """Resolve opener options specific to revlogs."""
1022 1037
1023 1038 options = {}
1024 1039 options[b'flagprocessors'] = {}
1025 1040
1026 1041 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1027 1042 options[b'revlogv1'] = True
1028 1043 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1029 1044 options[b'revlogv2'] = True
1030 1045 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1031 1046 options[b'changelogv2'] = True
1032 1047
1033 1048 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1034 1049 options[b'generaldelta'] = True
1035 1050
1036 1051 # experimental config: format.chunkcachesize
1037 1052 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1038 1053 if chunkcachesize is not None:
1039 1054 options[b'chunkcachesize'] = chunkcachesize
1040 1055
1041 1056 deltabothparents = ui.configbool(
1042 1057 b'storage', b'revlog.optimize-delta-parent-choice'
1043 1058 )
1044 1059 options[b'deltabothparents'] = deltabothparents
1045 1060
1046 1061 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1047 1062 options[b'issue6528.fix-incoming'] = issue6528
1048 1063
1049 1064 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1050 1065 lazydeltabase = False
1051 1066 if lazydelta:
1052 1067 lazydeltabase = ui.configbool(
1053 1068 b'storage', b'revlog.reuse-external-delta-parent'
1054 1069 )
1055 1070 if lazydeltabase is None:
1056 1071 lazydeltabase = not scmutil.gddeltaconfig(ui)
1057 1072 options[b'lazydelta'] = lazydelta
1058 1073 options[b'lazydeltabase'] = lazydeltabase
1059 1074
1060 1075 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1061 1076 if 0 <= chainspan:
1062 1077 options[b'maxdeltachainspan'] = chainspan
1063 1078
1064 1079 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1065 1080 if mmapindexthreshold is not None:
1066 1081 options[b'mmapindexthreshold'] = mmapindexthreshold
1067 1082
1068 1083 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1069 1084 srdensitythres = float(
1070 1085 ui.config(b'experimental', b'sparse-read.density-threshold')
1071 1086 )
1072 1087 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1073 1088 options[b'with-sparse-read'] = withsparseread
1074 1089 options[b'sparse-read-density-threshold'] = srdensitythres
1075 1090 options[b'sparse-read-min-gap-size'] = srmingapsize
1076 1091
1077 1092 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1078 1093 options[b'sparse-revlog'] = sparserevlog
1079 1094 if sparserevlog:
1080 1095 options[b'generaldelta'] = True
1081 1096
1082 1097 maxchainlen = None
1083 1098 if sparserevlog:
1084 1099 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1085 1100 # experimental config: format.maxchainlen
1086 1101 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1087 1102 if maxchainlen is not None:
1088 1103 options[b'maxchainlen'] = maxchainlen
1089 1104
1090 1105 for r in requirements:
1091 1106 # we allow multiple compression engine requirements to co-exist because
1092 1107 # strictly speaking, revlog seems to support mixed compression styles.
1093 1108 #
1094 1109 # The compression used for new entries will be "the last one"
1095 1110 prefix = r.startswith
1096 1111 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1097 1112 options[b'compengine'] = r.split(b'-', 2)[2]
1098 1113
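    # For illustration, the split above keeps everything after the second
    # b'-', so engine names may themselves contain dashes:
    #   b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
    #   b'exp-compression-my-engine'.split(b'-', 2)[2] == b'my-engine'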
1099 1114 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1100 1115 if options[b'zlib.level'] is not None:
1101 1116 if not (0 <= options[b'zlib.level'] <= 9):
1102 1117 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1103 1118 raise error.Abort(msg % options[b'zlib.level'])
1104 1119 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1105 1120 if options[b'zstd.level'] is not None:
1106 1121 if not (0 <= options[b'zstd.level'] <= 22):
1107 1122 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1108 1123 raise error.Abort(msg % options[b'zstd.level'])
1109 1124
1110 1125 if requirementsmod.NARROW_REQUIREMENT in requirements:
1111 1126 options[b'enableellipsis'] = True
1112 1127
1113 1128 if ui.configbool(b'experimental', b'rust.index'):
1114 1129 options[b'rust.index'] = True
1115 1130 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1116 1131 slow_path = ui.config(
1117 1132 b'storage', b'revlog.persistent-nodemap.slow-path'
1118 1133 )
1119 1134 if slow_path not in (b'allow', b'warn', b'abort'):
1120 1135 default = ui.config_default(
1121 1136 b'storage', b'revlog.persistent-nodemap.slow-path'
1122 1137 )
1123 1138 msg = _(
1124 1139 b'unknown value for config '
1125 1140 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1126 1141 )
1127 1142 ui.warn(msg % slow_path)
1128 1143 if not ui.quiet:
1129 1144 ui.warn(_(b'falling back to default value: %s\n') % default)
1130 1145 slow_path = default
1131 1146
1132 1147 msg = _(
1133 1148 b"accessing `persistent-nodemap` repository without associated "
1134 1149 b"fast implementation."
1135 1150 )
1136 1151 hint = _(
1137 1152 b"check `hg help config.format.use-persistent-nodemap` "
1138 1153 b"for details"
1139 1154 )
1140 1155 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1141 1156 if slow_path == b'warn':
1142 1157 msg = b"warning: " + msg + b'\n'
1143 1158 ui.warn(msg)
1144 1159 if not ui.quiet:
1145 1160 hint = b'(' + hint + b')\n'
1146 1161 ui.warn(hint)
1147 1162 if slow_path == b'abort':
1148 1163 raise error.Abort(msg, hint=hint)
1149 1164 options[b'persistent-nodemap'] = True
1150 1165 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1151 1166 options[b'persistent-nodemap.mmap'] = True
1152 1167 if ui.configbool(b'devel', b'persistent-nodemap'):
1153 1168 options[b'devel-force-nodemap'] = True
1154 1169
1155 1170 return options
1156 1171
1157 1172
1158 1173 def makemain(**kwargs):
1159 1174 """Produce a type conforming to ``ilocalrepositorymain``."""
1160 1175 return localrepository
1161 1176
1162 1177
1163 1178 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1164 1179 class revlogfilestorage(object):
1165 1180 """File storage when using revlogs."""
1166 1181
1167 1182 def file(self, path):
1168 1183 if path.startswith(b'/'):
1169 1184 path = path[1:]
1170 1185
1171 1186 return filelog.filelog(self.svfs, path)
1172 1187
1173 1188
1174 1189 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1175 1190 class revlognarrowfilestorage(object):
1176 1191 """File storage when using revlogs and narrow files."""
1177 1192
1178 1193 def file(self, path):
1179 1194 if path.startswith(b'/'):
1180 1195 path = path[1:]
1181 1196
1182 1197 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1183 1198
1184 1199
1185 1200 def makefilestorage(requirements, features, **kwargs):
1186 1201 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1187 1202 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1188 1203 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1189 1204
1190 1205 if requirementsmod.NARROW_REQUIREMENT in requirements:
1191 1206 return revlognarrowfilestorage
1192 1207 else:
1193 1208 return revlogfilestorage
1194 1209
1195 1210
1196 1211 # List of repository interfaces and factory functions for them. Each
1197 1212 # will be called in order during ``makelocalrepository()`` to iteratively
1198 1213 # derive the final type for a local repository instance. We capture the
1199 1214 # function as a lambda so we don't hold a reference and the module-level
1200 1215 # functions can be wrapped.
1201 1216 REPO_INTERFACES = [
1202 1217 (repository.ilocalrepositorymain, lambda: makemain),
1203 1218 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1204 1219 ]
1205 1220
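Because each entry stores a lambda that re-resolves the module-level factory at call time, wrapping a factory in an extension takes effect even though REPO_INTERFACES was built at import time. A hedged sketch of wrapping makemain (the extra method is hypothetical):

    from mercurial import extensions, localrepo

    def _makemain(orig, **kwargs):
        base = orig(**kwargs)

        class extendedrepo(base):
            # hypothetical extra behavior mixed into the derived repo type
            def hello(self):
                self.ui.status(b'hello from a derived repo type\n')

        return extendedrepo

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makemain', _makemain)
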
1206 1221
1207 1222 @interfaceutil.implementer(repository.ilocalrepositorymain)
1208 1223 class localrepository(object):
1209 1224 """Main class for representing local repositories.
1210 1225
1211 1226 All local repositories are instances of this class.
1212 1227
1213 1228 Constructed on its own, instances of this class are not usable as
1214 1229 repository objects. To obtain a usable repository object, call
1215 1230 ``hg.repository()``, ``localrepo.instance()``, or
1216 1231 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1217 1232 ``instance()`` adds support for creating new repositories.
1218 1233 ``hg.repository()`` adds more extension integration, including calling
1219 1234 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1220 1235 used.
1221 1236 """
1222 1237
1223 1238 # obsolete experimental requirements:
1224 1239 # - manifestv2: An experimental new manifest format that allowed
1225 1240 # for stem compression of long paths. Experiment ended up not
1226 1241 # being successful (repository sizes went up due to worse delta
1227 1242 # chains), and the code was deleted in 4.6.
1228 1243 supportedformats = {
1229 1244 requirementsmod.REVLOGV1_REQUIREMENT,
1230 1245 requirementsmod.GENERALDELTA_REQUIREMENT,
1231 1246 requirementsmod.TREEMANIFEST_REQUIREMENT,
1232 1247 requirementsmod.COPIESSDC_REQUIREMENT,
1233 1248 requirementsmod.REVLOGV2_REQUIREMENT,
1234 1249 requirementsmod.CHANGELOGV2_REQUIREMENT,
1235 1250 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1236 1251 requirementsmod.NODEMAP_REQUIREMENT,
1237 1252 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1238 1253 requirementsmod.SHARESAFE_REQUIREMENT,
1239 1254 }
1240 1255 _basesupported = supportedformats | {
1241 1256 requirementsmod.STORE_REQUIREMENT,
1242 1257 requirementsmod.FNCACHE_REQUIREMENT,
1243 1258 requirementsmod.SHARED_REQUIREMENT,
1244 1259 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1245 1260 requirementsmod.DOTENCODE_REQUIREMENT,
1246 1261 requirementsmod.SPARSE_REQUIREMENT,
1247 1262 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1248 1263 }
1249 1264
1250 1265 # list of prefix for file which can be written without 'wlock'
1251 1266 # Extensions should extend this list when needed
1252 1267 _wlockfreeprefix = {
1253 1268 # We might consider requiring 'wlock' for the next
1254 1269 # two, but pretty much all the existing code assumes
1255 1270 # wlock is not needed so we keep them excluded for
1256 1271 # now.
1257 1272 b'hgrc',
1258 1273 b'requires',
1259 1274 # XXX cache is a complicated business; someone
1260 1275 # should investigate this in depth at some point
1261 1276 b'cache/',
1262 1277 # XXX shouldn't dirstate be covered by the wlock?
1263 1278 b'dirstate',
1264 1279 # XXX bisect was still a bit too messy at the time
1265 1280 # this changeset was introduced. Someone should fix
1266 1281 # the remaining bit and drop this line
1267 1282 b'bisect.state',
1268 1283 }
1269 1284
1270 1285 def __init__(
1271 1286 self,
1272 1287 baseui,
1273 1288 ui,
1274 1289 origroot,
1275 1290 wdirvfs,
1276 1291 hgvfs,
1277 1292 requirements,
1278 1293 supportedrequirements,
1279 1294 sharedpath,
1280 1295 store,
1281 1296 cachevfs,
1282 1297 wcachevfs,
1283 1298 features,
1284 1299 intents=None,
1285 1300 ):
1286 1301 """Create a new local repository instance.
1287 1302
1288 1303 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1289 1304 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1290 1305 object.
1291 1306
1292 1307 Arguments:
1293 1308
1294 1309 baseui
1295 1310 ``ui.ui`` instance that ``ui`` argument was based off of.
1296 1311
1297 1312 ui
1298 1313 ``ui.ui`` instance for use by the repository.
1299 1314
1300 1315 origroot
1301 1316 ``bytes`` path to working directory root of this repository.
1302 1317
1303 1318 wdirvfs
1304 1319 ``vfs.vfs`` rooted at the working directory.
1305 1320
1306 1321 hgvfs
1307 1322 ``vfs.vfs`` rooted at .hg/
1308 1323
1309 1324 requirements
1310 1325 ``set`` of bytestrings representing repository opening requirements.
1311 1326
1312 1327 supportedrequirements
1313 1328 ``set`` of bytestrings representing repository requirements that we
1314 1329 know how to open. May be a supetset of ``requirements``.
1315 1330
1316 1331 sharedpath
1317 1332 ``bytes`` Defining path to storage base directory. Points to a
1318 1333 ``.hg/`` directory somewhere.
1319 1334
1320 1335 store
1321 1336 ``store.basicstore`` (or derived) instance providing access to
1322 1337 versioned storage.
1323 1338
1324 1339 cachevfs
1325 1340 ``vfs.vfs`` used for cache files.
1326 1341
1327 1342 wcachevfs
1328 1343 ``vfs.vfs`` used for cache files related to the working copy.
1329 1344
1330 1345 features
1331 1346 ``set`` of bytestrings defining features/capabilities of this
1332 1347 instance.
1333 1348
1334 1349 intents
1335 1350 ``set`` of system strings indicating what this repo will be used
1336 1351 for.
1337 1352 """
1338 1353 self.baseui = baseui
1339 1354 self.ui = ui
1340 1355 self.origroot = origroot
1341 1356 # vfs rooted at working directory.
1342 1357 self.wvfs = wdirvfs
1343 1358 self.root = wdirvfs.base
1344 1359 # vfs rooted at .hg/. Used to access most non-store paths.
1345 1360 self.vfs = hgvfs
1346 1361 self.path = hgvfs.base
1347 1362 self.requirements = requirements
1348 1363 self.nodeconstants = sha1nodeconstants
1349 1364 self.nullid = self.nodeconstants.nullid
1350 1365 self.supported = supportedrequirements
1351 1366 self.sharedpath = sharedpath
1352 1367 self.store = store
1353 1368 self.cachevfs = cachevfs
1354 1369 self.wcachevfs = wcachevfs
1355 1370 self.features = features
1356 1371
1357 1372 self.filtername = None
1358 1373
1359 1374 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1360 1375 b'devel', b'check-locks'
1361 1376 ):
1362 1377 self.vfs.audit = self._getvfsward(self.vfs.audit)
1363 1378 # A list of callbacks to shape the phase if no data were found.
1364 1379 # Callbacks are in the form: func(repo, roots) --> processed root.
1365 1380 # This list is to be filled by extensions during repo setup
1366 1381 self._phasedefaults = []
1367 1382
1368 1383 color.setup(self.ui)
1369 1384
1370 1385 self.spath = self.store.path
1371 1386 self.svfs = self.store.vfs
1372 1387 self.sjoin = self.store.join
1373 1388 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1374 1389 b'devel', b'check-locks'
1375 1390 ):
1376 1391 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1377 1392 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1378 1393 else: # standard vfs
1379 1394 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1380 1395
1381 1396 self._dirstatevalidatewarned = False
1382 1397
1383 1398 self._branchcaches = branchmap.BranchMapCache()
1384 1399 self._revbranchcache = None
1385 1400 self._filterpats = {}
1386 1401 self._datafilters = {}
1387 1402 self._transref = self._lockref = self._wlockref = None
1388 1403
1389 1404 # A cache for various files under .hg/ that tracks file changes,
1390 1405 # (used by the filecache decorator)
1391 1406 #
1392 1407 # Maps a property name to its util.filecacheentry
1393 1408 self._filecache = {}
1394 1409
1395 1410 # hold sets of revisions to be filtered
1396 1411 # should be cleared when something might have changed the filter value:
1397 1412 # - new changesets,
1398 1413 # - phase change,
1399 1414 # - new obsolescence marker,
1400 1415 # - working directory parent change,
1401 1416 # - bookmark changes
1402 1417 self.filteredrevcache = {}
1403 1418
1404 1419 # post-dirstate-status hooks
1405 1420 self._postdsstatus = []
1406 1421
1407 1422 # generic mapping between names and nodes
1408 1423 self.names = namespaces.namespaces()
1409 1424
1410 1425 # Key to signature value.
1411 1426 self._sparsesignaturecache = {}
1412 1427 # Signature to cached matcher instance.
1413 1428 self._sparsematchercache = {}
1414 1429
1415 1430 self._extrafilterid = repoview.extrafilter(ui)
1416 1431
1417 1432 self.filecopiesmode = None
1418 1433 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1419 1434 self.filecopiesmode = b'changeset-sidedata'
1420 1435
1421 1436 self._wanted_sidedata = set()
1422 1437 self._sidedata_computers = {}
1423 1438 sidedatamod.set_sidedata_spec_for_repo(self)
1424 1439
1425 1440 def _getvfsward(self, origfunc):
1426 1441 """build a ward for self.vfs"""
1427 1442 rref = weakref.ref(self)
1428 1443
1429 1444 def checkvfs(path, mode=None):
1430 1445 ret = origfunc(path, mode=mode)
1431 1446 repo = rref()
1432 1447 if (
1433 1448 repo is None
1434 1449 or not util.safehasattr(repo, b'_wlockref')
1435 1450 or not util.safehasattr(repo, b'_lockref')
1436 1451 ):
1437 1452 return
1438 1453 if mode in (None, b'r', b'rb'):
1439 1454 return
1440 1455 if path.startswith(repo.path):
1441 1456 # truncate name relative to the repository (.hg)
1442 1457 path = path[len(repo.path) + 1 :]
1443 1458 if path.startswith(b'cache/'):
1444 1459 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1445 1460 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1446 1461 # path prefixes covered by 'lock'
1447 1462 vfs_path_prefixes = (
1448 1463 b'journal.',
1449 1464 b'undo.',
1450 1465 b'strip-backup/',
1451 1466 b'cache/',
1452 1467 )
1453 1468 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1454 1469 if repo._currentlock(repo._lockref) is None:
1455 1470 repo.ui.develwarn(
1456 1471 b'write with no lock: "%s"' % path,
1457 1472 stacklevel=3,
1458 1473 config=b'check-locks',
1459 1474 )
1460 1475 elif repo._currentlock(repo._wlockref) is None:
1461 1476 # rest of vfs files are covered by 'wlock'
1462 1477 #
1463 1478 # exclude special files
1464 1479 for prefix in self._wlockfreeprefix:
1465 1480 if path.startswith(prefix):
1466 1481 return
1467 1482 repo.ui.develwarn(
1468 1483 b'write with no wlock: "%s"' % path,
1469 1484 stacklevel=3,
1470 1485 config=b'check-locks',
1471 1486 )
1472 1487 return ret
1473 1488
1474 1489 return checkvfs
1475 1490
1476 1491 def _getsvfsward(self, origfunc):
1477 1492 """build a ward for self.svfs"""
1478 1493 rref = weakref.ref(self)
1479 1494
1480 1495 def checksvfs(path, mode=None):
1481 1496 ret = origfunc(path, mode=mode)
1482 1497 repo = rref()
1483 1498 if repo is None or not util.safehasattr(repo, b'_lockref'):
1484 1499 return
1485 1500 if mode in (None, b'r', b'rb'):
1486 1501 return
1487 1502 if path.startswith(repo.sharedpath):
1488 1503 # truncate name relative to the repository (.hg)
1489 1504 path = path[len(repo.sharedpath) + 1 :]
1490 1505 if repo._currentlock(repo._lockref) is None:
1491 1506 repo.ui.develwarn(
1492 1507 b'write with no lock: "%s"' % path, stacklevel=4
1493 1508 )
1494 1509 return ret
1495 1510
1496 1511 return checksvfs
1497 1512
1498 1513 def close(self):
1499 1514 self._writecaches()
1500 1515
1501 1516 def _writecaches(self):
1502 1517 if self._revbranchcache:
1503 1518 self._revbranchcache.write()
1504 1519
1505 1520 def _restrictcapabilities(self, caps):
1506 1521 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1507 1522 caps = set(caps)
1508 1523 capsblob = bundle2.encodecaps(
1509 1524 bundle2.getrepocaps(self, role=b'client')
1510 1525 )
1511 1526 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1512 1527 if self.ui.configbool(b'experimental', b'narrow'):
1513 1528 caps.add(wireprototypes.NARROWCAP)
1514 1529 return caps
1515 1530
1516 1531 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1517 1532 # self -> auditor -> self._checknested -> self
1518 1533
1519 1534 @property
1520 1535 def auditor(self):
1521 1536 # This is only used by context.workingctx.match in order to
1522 1537 # detect files in subrepos.
1523 1538 return pathutil.pathauditor(self.root, callback=self._checknested)
1524 1539
1525 1540 @property
1526 1541 def nofsauditor(self):
1527 1542 # This is only used by context.basectx.match in order to detect
1528 1543 # files in subrepos.
1529 1544 return pathutil.pathauditor(
1530 1545 self.root, callback=self._checknested, realfs=False, cached=True
1531 1546 )
1532 1547
1533 1548 def _checknested(self, path):
1534 1549 """Determine if path is a legal nested repository."""
1535 1550 if not path.startswith(self.root):
1536 1551 return False
1537 1552 subpath = path[len(self.root) + 1 :]
1538 1553 normsubpath = util.pconvert(subpath)
1539 1554
1540 1555 # XXX: Checking against the current working copy is wrong in
1541 1556 # the sense that it can reject things like
1542 1557 #
1543 1558 # $ hg cat -r 10 sub/x.txt
1544 1559 #
1545 1560 # if sub/ is no longer a subrepository in the working copy
1546 1561 # parent revision.
1547 1562 #
1548 1563 # However, it can of course also allow things that would have
1549 1564 # been rejected before, such as the above cat command if sub/
1550 1565 # is a subrepository now, but was a normal directory before.
1551 1566 # The old path auditor would have rejected by mistake since it
1552 1567 # panics when it sees sub/.hg/.
1553 1568 #
1554 1569 # All in all, checking against the working copy seems sensible
1555 1570 # since we want to prevent access to nested repositories on
1556 1571 # the filesystem *now*.
1557 1572 ctx = self[None]
1558 1573 parts = util.splitpath(subpath)
1559 1574 while parts:
1560 1575 prefix = b'/'.join(parts)
1561 1576 if prefix in ctx.substate:
1562 1577 if prefix == normsubpath:
1563 1578 return True
1564 1579 else:
1565 1580 sub = ctx.sub(prefix)
1566 1581 return sub.checknested(subpath[len(prefix) + 1 :])
1567 1582 else:
1568 1583 parts.pop()
1569 1584 return False
1570 1585
1571 1586 def peer(self):
1572 1587 return localpeer(self) # not cached to avoid reference cycle
1573 1588
1574 1589 def unfiltered(self):
1575 1590 """Return unfiltered version of the repository
1576 1591
1577 1592 Intended to be overwritten by filtered repo."""
1578 1593 return self
1579 1594
1580 1595 def filtered(self, name, visibilityexceptions=None):
1581 1596 """Return a filtered version of a repository
1582 1597
1583 1598 The `name` parameter is the identifier of the requested view. This
1584 1599 will return a repoview object set "exactly" to the specified view.
1585 1600
1586 1601 This function does not apply recursive filtering to a repository. For
1587 1602 example calling `repo.filtered("served")` will return a repoview using
1588 1603 the "served" view, regardless of the initial view used by `repo`.
1589 1604
1590 1605 In other words, there is always only one level of `repoview` "filtering".
1591 1606 """
1592 1607 if self._extrafilterid is not None and b'%' not in name:
1593 1608 name = name + b'%' + self._extrafilterid
1594 1609
1595 1610 cls = repoview.newtype(self.unfiltered().__class__)
1596 1611 return cls(self, name, visibilityexceptions)
1597 1612
1598 1613 @mixedrepostorecache(
1599 1614 (b'bookmarks', b'plain'),
1600 1615 (b'bookmarks.current', b'plain'),
1601 1616 (b'bookmarks', b''),
1602 1617 (b'00changelog.i', b''),
1603 1618 )
1604 1619 def _bookmarks(self):
1605 1620 # Since the multiple files involved in the transaction cannot be
1606 1621 # written atomically (with current repository format), there is a race
1607 1622 # condition here.
1608 1623 #
1609 1624 # 1) changelog content A is read
1610 1625 # 2) outside transaction update changelog to content B
1611 1626 # 3) outside transaction update bookmark file referring to content B
1612 1627 # 4) bookmarks file content is read and filtered against changelog-A
1613 1628 #
1614 1629 # When this happens, bookmarks against nodes missing from A are dropped.
1615 1630 #
1616 1631 # Having this happen during a read is not great, but it becomes worse
1617 1632 # when it happens during a write, because bookmarks pointing to the
1618 1633 # "unknown" nodes are then dropped for good. However, writes happen within
1619 1634 # locks. This locking makes it possible to have a race-free consistent read.
1620 1635 # For this purpose, data read from disk before locking is
1621 1636 # "invalidated" right after the locks are taken. These invalidations are
1622 1637 # "light": the `filecache` mechanism keeps the data in memory and will
1623 1638 # reuse it if the underlying files did not change. Not parsing the
1624 1639 # same data multiple times helps performance.
1625 1640 #
1626 1641 # Unfortunately, in the case described above, the files tracked by the
1627 1642 # bookmarks file cache might not have changed, but the in-memory
1628 1643 # content is still "wrong" because we used an older changelog content
1629 1644 # to process the on-disk data. So after locking, the changelog would be
1630 1645 # refreshed but `_bookmarks` would be preserved.
1631 1646 # Adding `00changelog.i` to the list of tracked files is not
1632 1647 # enough, because at the time we build the content for `_bookmarks` in
1633 1648 # (4), the changelog file has already diverged from the content used
1634 1649 # for loading `changelog` in (1).
1635 1650 #
1636 1651 # To prevent the issue, we force the changelog to be explicitly
1637 1652 # reloaded while computing `_bookmarks`. The data race can still happen
1638 1653 # without the lock (with a narrower window), but it would no longer go
1639 1654 # undetected during the lock time refresh.
1640 1655 #
1641 1656 # The new schedule is as follows:
1642 1657 #
1643 1658 # 1) filecache logic detects that `_bookmarks` needs to be computed
1644 1659 # 2) cachestat for `bookmarks` and `changelog` are captured (for bookmarks)
1645 1660 # 3) we force the `changelog` filecache to be tested
1646 1661 # 4) cachestat for `changelog` are captured (for changelog)
1647 1662 # 5) `_bookmarks` is computed and cached
1648 1663 #
1649 1664 # The step in (3) ensures we have a changelog at least as recent as the
1650 1665 # cachestat computed in (1). As a result, at locking time:
1651 1666 # * if the changelog did not change since (1) -> we can reuse the data
1652 1667 # * otherwise -> the bookmarks get refreshed.
1653 1668 self._refreshchangelog()
1654 1669 return bookmarks.bmstore(self)
1655 1670
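# A minimal usage sketch of the schedule above (hypothetical caller code,
# not part of this module; it only assumes the locking API defined below):
#
#     def read_bookmarks_race_free(repo):
#         with repo.lock():
#             # taking the lock invalidates stale filecache entries,
#             # so `_bookmarks` is recomputed against a refreshed
#             # changelog if the underlying files changed on disk
#             return repo._bookmarks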
1656 1671 def _refreshchangelog(self):
1657 1672 """make sure the in memory changelog match the on-disk one"""
1658 1673 if 'changelog' in vars(self) and self.currenttransaction() is None:
1659 1674 del self.changelog
1660 1675
1661 1676 @property
1662 1677 def _activebookmark(self):
1663 1678 return self._bookmarks.active
1664 1679
1665 1680 # _phasesets depend on the changelog. What we need is to call
1666 1681 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1667 1682 # can't be easily expressed in the filecache mechanism.
1668 1683 @storecache(b'phaseroots', b'00changelog.i')
1669 1684 def _phasecache(self):
1670 1685 return phases.phasecache(self, self._phasedefaults)
1671 1686
1672 1687 @storecache(b'obsstore')
1673 1688 def obsstore(self):
1674 1689 return obsolete.makestore(self.ui, self)
1675 1690
1676 @storecache(b'00changelog.i')
1677 def changelog(self):
1691 @changelogcache()
1692 def changelog(repo):
1678 1693 # load dirstate before changelog to avoid a race; see issue6303
1679 self.dirstate.prefetch_parents()
1680 return self.store.changelog(
1681 txnutil.mayhavepending(self.root),
1682 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1694 repo.dirstate.prefetch_parents()
1695 return repo.store.changelog(
1696 txnutil.mayhavepending(repo.root),
1697 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1683 1698 )
1684 1699
1685 1700 @storecache(b'00manifest.i')
1686 1701 def manifestlog(self):
1687 1702 return self.store.manifestlog(self, self._storenarrowmatch)
1688 1703
1689 1704 @repofilecache(b'dirstate')
1690 1705 def dirstate(self):
1691 1706 return self._makedirstate()
1692 1707
1693 1708 def _makedirstate(self):
1694 1709 """Extension point for wrapping the dirstate per-repo."""
1695 1710 sparsematchfn = lambda: sparse.matcher(self)
1696 1711 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1697 1712 use_dirstate_v2 = v2_req in self.requirements
1698 1713
1699 1714 return dirstate.dirstate(
1700 1715 self.vfs,
1701 1716 self.ui,
1702 1717 self.root,
1703 1718 self._dirstatevalidate,
1704 1719 sparsematchfn,
1705 1720 self.nodeconstants,
1706 1721 use_dirstate_v2,
1707 1722 )
1708 1723
1709 1724 def _dirstatevalidate(self, node):
1710 1725 try:
1711 1726 self.changelog.rev(node)
1712 1727 return node
1713 1728 except error.LookupError:
1714 1729 if not self._dirstatevalidatewarned:
1715 1730 self._dirstatevalidatewarned = True
1716 1731 self.ui.warn(
1717 1732 _(b"warning: ignoring unknown working parent %s!\n")
1718 1733 % short(node)
1719 1734 )
1720 1735 return self.nullid
1721 1736
1722 1737 @storecache(narrowspec.FILENAME)
1723 1738 def narrowpats(self):
1724 1739 """matcher patterns for this repository's narrowspec
1725 1740
1726 1741 A tuple of (includes, excludes).
1727 1742 """
1728 1743 return narrowspec.load(self)
1729 1744
1730 1745 @storecache(narrowspec.FILENAME)
1731 1746 def _storenarrowmatch(self):
1732 1747 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1733 1748 return matchmod.always()
1734 1749 include, exclude = self.narrowpats
1735 1750 return narrowspec.match(self.root, include=include, exclude=exclude)
1736 1751
1737 1752 @storecache(narrowspec.FILENAME)
1738 1753 def _narrowmatch(self):
1739 1754 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1740 1755 return matchmod.always()
1741 1756 narrowspec.checkworkingcopynarrowspec(self)
1742 1757 include, exclude = self.narrowpats
1743 1758 return narrowspec.match(self.root, include=include, exclude=exclude)
1744 1759
1745 1760 def narrowmatch(self, match=None, includeexact=False):
1746 1761 """matcher corresponding the the repo's narrowspec
1747 1762
1748 1763 If `match` is given, then that will be intersected with the narrow
1749 1764 matcher.
1750 1765
1751 1766 If `includeexact` is True, then any exact matches from `match` will
1752 1767 be included even if they're outside the narrowspec.
1753 1768 """
1754 1769 if match:
1755 1770 if includeexact and not self._narrowmatch.always():
1756 1771 # do not exclude explicitly-specified paths so that they can
1757 1772 # be warned later on
1758 1773 em = matchmod.exact(match.files())
1759 1774 nm = matchmod.unionmatcher([self._narrowmatch, em])
1760 1775 return matchmod.intersectmatchers(match, nm)
1761 1776 return matchmod.intersectmatchers(match, self._narrowmatch)
1762 1777 return self._narrowmatch
1763 1778
1764 1779 def setnarrowpats(self, newincludes, newexcludes):
1765 1780 narrowspec.save(self, newincludes, newexcludes)
1766 1781 self.invalidate(clearfilecache=True)
1767 1782
1768 1783 @unfilteredpropertycache
1769 1784 def _quick_access_changeid_null(self):
1770 1785 return {
1771 1786 b'null': (nullrev, self.nodeconstants.nullid),
1772 1787 nullrev: (nullrev, self.nodeconstants.nullid),
1773 1788 self.nullid: (nullrev, self.nullid),
1774 1789 }
1775 1790
1776 1791 @unfilteredpropertycache
1777 1792 def _quick_access_changeid_wc(self):
1778 1793 # also fast path access to the working copy parents
1779 1794 # however, only do it for filters that ensure the wc is visible.
1780 1795 quick = self._quick_access_changeid_null.copy()
1781 1796 cl = self.unfiltered().changelog
1782 1797 for node in self.dirstate.parents():
1783 1798 if node == self.nullid:
1784 1799 continue
1785 1800 rev = cl.index.get_rev(node)
1786 1801 if rev is None:
1787 1802 # unknown working copy parent case:
1788 1803 #
1789 1804 # skip the fast path and let higher code deal with it
1790 1805 continue
1791 1806 pair = (rev, node)
1792 1807 quick[rev] = pair
1793 1808 quick[node] = pair
1794 1809 # also add the parents of the parents
1795 1810 for r in cl.parentrevs(rev):
1796 1811 if r == nullrev:
1797 1812 continue
1798 1813 n = cl.node(r)
1799 1814 pair = (r, n)
1800 1815 quick[r] = pair
1801 1816 quick[n] = pair
1802 1817 p1node = self.dirstate.p1()
1803 1818 if p1node != self.nullid:
1804 1819 quick[b'.'] = quick[p1node]
1805 1820 return quick
1806 1821
1807 1822 @unfilteredmethod
1808 1823 def _quick_access_changeid_invalidate(self):
1809 1824 if '_quick_access_changeid_wc' in vars(self):
1810 1825 del self.__dict__['_quick_access_changeid_wc']
1811 1826
1812 1827 @property
1813 1828 def _quick_access_changeid(self):
1814 1829 """an helper dictionnary for __getitem__ calls
1815 1830
1816 1831 This contains a list of symbol we can recognise right away without
1817 1832 further processing.
1818 1833 """
1819 1834 if self.filtername in repoview.filter_has_wc:
1820 1835 return self._quick_access_changeid_wc
1821 1836 return self._quick_access_changeid_null
1822 1837
1823 1838 def __getitem__(self, changeid):
1824 1839 # dealing with special cases
1825 1840 if changeid is None:
1826 1841 return context.workingctx(self)
1827 1842 if isinstance(changeid, context.basectx):
1828 1843 return changeid
1829 1844
1830 1845 # dealing with multiple revisions
1831 1846 if isinstance(changeid, slice):
1832 1847 # wdirrev isn't contiguous so the slice shouldn't include it
1833 1848 return [
1834 1849 self[i]
1835 1850 for i in pycompat.xrange(*changeid.indices(len(self)))
1836 1851 if i not in self.changelog.filteredrevs
1837 1852 ]
1838 1853
1839 1854 # dealing with some special values
1840 1855 quick_access = self._quick_access_changeid.get(changeid)
1841 1856 if quick_access is not None:
1842 1857 rev, node = quick_access
1843 1858 return context.changectx(self, rev, node, maybe_filtered=False)
1844 1859 if changeid == b'tip':
1845 1860 node = self.changelog.tip()
1846 1861 rev = self.changelog.rev(node)
1847 1862 return context.changectx(self, rev, node)
1848 1863
1849 1864 # dealing with arbitrary values
1850 1865 try:
1851 1866 if isinstance(changeid, int):
1852 1867 node = self.changelog.node(changeid)
1853 1868 rev = changeid
1854 1869 elif changeid == b'.':
1855 1870 # this is a hack to delay/avoid loading obsmarkers
1856 1871 # when we know that '.' won't be hidden
1857 1872 node = self.dirstate.p1()
1858 1873 rev = self.unfiltered().changelog.rev(node)
1859 1874 elif len(changeid) == self.nodeconstants.nodelen:
1860 1875 try:
1861 1876 node = changeid
1862 1877 rev = self.changelog.rev(changeid)
1863 1878 except error.FilteredLookupError:
1864 1879 changeid = hex(changeid) # for the error message
1865 1880 raise
1866 1881 except LookupError:
1867 1882 # check if it might have come from damaged dirstate
1868 1883 #
1869 1884 # XXX we could avoid the unfiltered if we had a recognizable
1870 1885 # exception for filtered changeset access
1871 1886 if (
1872 1887 self.local()
1873 1888 and changeid in self.unfiltered().dirstate.parents()
1874 1889 ):
1875 1890 msg = _(b"working directory has unknown parent '%s'!")
1876 1891 raise error.Abort(msg % short(changeid))
1877 1892 changeid = hex(changeid) # for the error message
1878 1893 raise
1879 1894
1880 1895 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1881 1896 node = bin(changeid)
1882 1897 rev = self.changelog.rev(node)
1883 1898 else:
1884 1899 raise error.ProgrammingError(
1885 1900 b"unsupported changeid '%s' of type %s"
1886 1901 % (changeid, pycompat.bytestr(type(changeid)))
1887 1902 )
1888 1903
1889 1904 return context.changectx(self, rev, node)
1890 1905
1891 1906 except (error.FilteredIndexError, error.FilteredLookupError):
1892 1907 raise error.FilteredRepoLookupError(
1893 1908 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1894 1909 )
1895 1910 except (IndexError, LookupError):
1896 1911 raise error.RepoLookupError(
1897 1912 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1898 1913 )
1899 1914 except error.WdirUnsupported:
1900 1915 return context.workingctx(self)
1901 1916
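# A few forms accepted by __getitem__ above (illustrative sketch; `node`
# stands for any binary changeset id known to the repository):
#
#     repo[None]        # workingctx for the working directory
#     repo[b'tip']      # changectx for the repository tip
#     repo[b'.']        # changectx for the working copy parent
#     repo[0]           # changectx for revision 0
#     repo[node]        # binary and hex nodeids are both accepted
#     repo[0:5]         # slice -> list of changectx, filtered revs skipped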
1902 1917 def __contains__(self, changeid):
1903 1918 """True if the given changeid exists"""
1904 1919 try:
1905 1920 self[changeid]
1906 1921 return True
1907 1922 except error.RepoLookupError:
1908 1923 return False
1909 1924
1910 1925 def __nonzero__(self):
1911 1926 return True
1912 1927
1913 1928 __bool__ = __nonzero__
1914 1929
1915 1930 def __len__(self):
1916 1931 # no need to pay the cost of repoview.changelog
1917 1932 unfi = self.unfiltered()
1918 1933 return len(unfi.changelog)
1919 1934
1920 1935 def __iter__(self):
1921 1936 return iter(self.changelog)
1922 1937
1923 1938 def revs(self, expr, *args):
1924 1939 """Find revisions matching a revset.
1925 1940
1926 1941 The revset is specified as a string ``expr`` that may contain
1927 1942 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1928 1943
1929 1944 Revset aliases from the configuration are not expanded. To expand
1930 1945 user aliases, consider calling ``scmutil.revrange()`` or
1931 1946 ``repo.anyrevs([expr], user=True)``.
1932 1947
1933 1948 Returns a smartset.abstractsmartset, which is a list-like interface
1934 1949 that contains integer revisions.
1935 1950 """
1936 1951 tree = revsetlang.spectree(expr, *args)
1937 1952 return revset.makematcher(tree)(self)
1938 1953
1939 1954 def set(self, expr, *args):
1940 1955 """Find revisions matching a revset and emit changectx instances.
1941 1956
1942 1957 This is a convenience wrapper around ``revs()`` that iterates the
1943 1958 result and is a generator of changectx instances.
1944 1959
1945 1960 Revset aliases from the configuration are not expanded. To expand
1946 1961 user aliases, consider calling ``scmutil.revrange()``.
1947 1962 """
1948 1963 for r in self.revs(expr, *args):
1949 1964 yield self[r]
1950 1965
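# Illustrative sketch of the %-formatting both methods accept (see
# revsetlang.formatspec for the authoritative list of escapes):
#
#     revs = repo.revs(b'heads(%ld)', [0, 1, 2])   # %ld: list of revisions
#     for ctx in repo.set(b'ancestors(%d)', 0):    # %d: a single revision
#         pass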
1951 1966 def anyrevs(self, specs, user=False, localalias=None):
1952 1967 """Find revisions matching one of the given revsets.
1953 1968
1954 1969 Revset aliases from the configuration are not expanded by default. To
1955 1970 expand user aliases, specify ``user=True``. To provide some local
1956 1971 definitions overriding user aliases, set ``localalias`` to
1957 1972 ``{name: definitionstring}``.
1958 1973 """
1959 1974 if specs == [b'null']:
1960 1975 return revset.baseset([nullrev])
1961 1976 if specs == [b'.']:
1962 1977 quick_data = self._quick_access_changeid.get(b'.')
1963 1978 if quick_data is not None:
1964 1979 return revset.baseset([quick_data[0]])
1965 1980 if user:
1966 1981 m = revset.matchany(
1967 1982 self.ui,
1968 1983 specs,
1969 1984 lookup=revset.lookupfn(self),
1970 1985 localalias=localalias,
1971 1986 )
1972 1987 else:
1973 1988 m = revset.matchany(None, specs, localalias=localalias)
1974 1989 return m(self)
1975 1990
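# Sketch of the `localalias` override described above (the alias name
# `mine` is an arbitrary example):
#
#     repo.anyrevs([b'mine'], user=True,
#                  localalias={b'mine': b'heads(all())'})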
1976 1991 def url(self):
1977 1992 return b'file:' + self.root
1978 1993
1979 1994 def hook(self, name, throw=False, **args):
1980 1995 """Call a hook, passing this repo instance.
1981 1996
1982 1997 This is a convenience method to aid invoking hooks. Extensions likely
1983 1998 won't call this unless they have registered a custom hook or are
1984 1999 replacing code that is expected to call a hook.
1985 2000 """
1986 2001 return hook.hook(self.ui, self, name, throw, **args)
1987 2002
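# Sketch for an extension firing its own hook (`myext-updated` is a made-up
# hook name; keyword arguments are exposed to shell hooks as HG_* variables):
#
#     repo.hook(b'myext-updated', throw=False, node=hex(somenode))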
1988 2003 @filteredpropertycache
1989 2004 def _tagscache(self):
1990 2005 """Returns a tagscache object that contains various tags related
1991 2006 caches."""
1992 2007
1993 2008 # This simplifies its cache management by having one decorated
1994 2009 # function (this one) and the rest simply fetch things from it.
1995 2010 class tagscache(object):
1996 2011 def __init__(self):
1997 2012 # These two define the set of tags for this repository. tags
1998 2013 # maps tag name to node; tagtypes maps tag name to 'global' or
1999 2014 # 'local'. (Global tags are defined by .hgtags across all
2000 2015 # heads, and local tags are defined in .hg/localtags.)
2001 2016 # They constitute the in-memory cache of tags.
2002 2017 self.tags = self.tagtypes = None
2003 2018
2004 2019 self.nodetagscache = self.tagslist = None
2005 2020
2006 2021 cache = tagscache()
2007 2022 cache.tags, cache.tagtypes = self._findtags()
2008 2023
2009 2024 return cache
2010 2025
2011 2026 def tags(self):
2012 2027 '''return a mapping of tag to node'''
2013 2028 t = {}
2014 2029 if self.changelog.filteredrevs:
2015 2030 tags, tt = self._findtags()
2016 2031 else:
2017 2032 tags = self._tagscache.tags
2018 2033 rev = self.changelog.rev
2019 2034 for k, v in pycompat.iteritems(tags):
2020 2035 try:
2021 2036 # ignore tags to unknown nodes
2022 2037 rev(v)
2023 2038 t[k] = v
2024 2039 except (error.LookupError, ValueError):
2025 2040 pass
2026 2041 return t
2027 2042
2028 2043 def _findtags(self):
2029 2044 """Do the hard work of finding tags. Return a pair of dicts
2030 2045 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2031 2046 maps tag name to a string like \'global\' or \'local\'.
2032 2047 Subclasses or extensions are free to add their own tags, but
2033 2048 should be aware that the returned dicts will be retained for the
2034 2049 duration of the localrepo object."""
2035 2050
2036 2051 # XXX what tagtype should subclasses/extensions use? Currently
2037 2052 # mq and bookmarks add tags, but do not set the tagtype at all.
2038 2053 # Should each extension invent its own tag type? Should there
2039 2054 # be one tagtype for all such "virtual" tags? Or is the status
2040 2055 # quo fine?
2041 2056
2042 2057 # map tag name to (node, hist)
2043 2058 alltags = tagsmod.findglobaltags(self.ui, self)
2044 2059 # map tag name to tag type
2045 2060 tagtypes = {tag: b'global' for tag in alltags}
2046 2061
2047 2062 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2048 2063
2049 2064 # Build the return dicts. Have to re-encode tag names because
2050 2065 # the tags module always uses UTF-8 (in order not to lose info
2051 2066 # writing to the cache), but the rest of Mercurial wants them in
2052 2067 # local encoding.
2053 2068 tags = {}
2054 2069 for (name, (node, hist)) in pycompat.iteritems(alltags):
2055 2070 if node != self.nullid:
2056 2071 tags[encoding.tolocal(name)] = node
2057 2072 tags[b'tip'] = self.changelog.tip()
2058 2073 tagtypes = {
2059 2074 encoding.tolocal(name): value
2060 2075 for (name, value) in pycompat.iteritems(tagtypes)
2061 2076 }
2062 2077 return (tags, tagtypes)
2063 2078
2064 2079 def tagtype(self, tagname):
2065 2080 """
2066 2081 return the type of the given tag. result can be:
2067 2082
2068 2083 'local' : a local tag
2069 2084 'global' : a global tag
2070 2085 None : tag does not exist
2071 2086 """
2072 2087
2073 2088 return self._tagscache.tagtypes.get(tagname)
2074 2089
2075 2090 def tagslist(self):
2076 2091 '''return a list of tags ordered by revision'''
2077 2092 if not self._tagscache.tagslist:
2078 2093 l = []
2079 2094 for t, n in pycompat.iteritems(self.tags()):
2080 2095 l.append((self.changelog.rev(n), t, n))
2081 2096 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2082 2097
2083 2098 return self._tagscache.tagslist
2084 2099
2085 2100 def nodetags(self, node):
2086 2101 '''return the tags associated with a node'''
2087 2102 if not self._tagscache.nodetagscache:
2088 2103 nodetagscache = {}
2089 2104 for t, n in pycompat.iteritems(self._tagscache.tags):
2090 2105 nodetagscache.setdefault(n, []).append(t)
2091 2106 for tags in pycompat.itervalues(nodetagscache):
2092 2107 tags.sort()
2093 2108 self._tagscache.nodetagscache = nodetagscache
2094 2109 return self._tagscache.nodetagscache.get(node, [])
2095 2110
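# Quick reference for the tag accessors above (sketch):
#
#     repo.tags()          # -> {tagname: node}, includes b'tip'
#     repo.tagtype(b'1.0') # -> b'global', b'local', or None
#     repo.tagslist()      # -> [(tagname, node)] ordered by revision
#     repo.nodetags(node)  # -> sorted list of tag names for `node`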
2096 2111 def nodebookmarks(self, node):
2097 2112 """return the list of bookmarks pointing to the specified node"""
2098 2113 return self._bookmarks.names(node)
2099 2114
2100 2115 def branchmap(self):
2101 2116 """returns a dictionary {branch: [branchheads]} with branchheads
2102 2117 ordered by increasing revision number"""
2103 2118 return self._branchcaches[self]
2104 2119
2105 2120 @unfilteredmethod
2106 2121 def revbranchcache(self):
2107 2122 if not self._revbranchcache:
2108 2123 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2109 2124 return self._revbranchcache
2110 2125
2111 2126 def register_changeset(self, rev, changelogrevision):
2112 2127 self.revbranchcache().setdata(rev, changelogrevision)
2113 2128
2114 2129 def branchtip(self, branch, ignoremissing=False):
2115 2130 """return the tip node for a given branch
2116 2131
2117 2132 If ignoremissing is True, then this method will not raise an error.
2118 2133 This is helpful for callers that only expect None for a missing branch
2119 2134 (e.g. namespace).
2120 2135
2121 2136 """
2122 2137 try:
2123 2138 return self.branchmap().branchtip(branch)
2124 2139 except KeyError:
2125 2140 if not ignoremissing:
2126 2141 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2127 2142 else:
2128 2143 pass
2129 2144
2130 2145 def lookup(self, key):
2131 2146 node = scmutil.revsymbol(self, key).node()
2132 2147 if node is None:
2133 2148 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2134 2149 return node
2135 2150
2136 2151 def lookupbranch(self, key):
2137 2152 if self.branchmap().hasbranch(key):
2138 2153 return key
2139 2154
2140 2155 return scmutil.revsymbol(self, key).branch()
2141 2156
2142 2157 def known(self, nodes):
2143 2158 cl = self.changelog
2144 2159 get_rev = cl.index.get_rev
2145 2160 filtered = cl.filteredrevs
2146 2161 result = []
2147 2162 for n in nodes:
2148 2163 r = get_rev(n)
2149 2164 resp = not (r is None or r in filtered)
2150 2165 result.append(resp)
2151 2166 return result
2152 2167
2153 2168 def local(self):
2154 2169 return self
2155 2170
2156 2171 def publishing(self):
2157 2172 # it's safe (and desirable) to trust the publish flag unconditionally
2158 2173 # so that we don't finalize changes shared between users via ssh or nfs
2159 2174 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2160 2175
2161 2176 def cancopy(self):
2162 2177 # so statichttprepo's override of local() works
2163 2178 if not self.local():
2164 2179 return False
2165 2180 if not self.publishing():
2166 2181 return True
2167 2182 # if publishing we can't copy if there is filtered content
2168 2183 return not self.filtered(b'visible').changelog.filteredrevs
2169 2184
2170 2185 def shared(self):
2171 2186 '''the type of shared repository (None if not shared)'''
2172 2187 if self.sharedpath != self.path:
2173 2188 return b'store'
2174 2189 return None
2175 2190
2176 2191 def wjoin(self, f, *insidef):
2177 2192 return self.vfs.reljoin(self.root, f, *insidef)
2178 2193
2179 2194 def setparents(self, p1, p2=None):
2180 2195 if p2 is None:
2181 2196 p2 = self.nullid
2182 2197 self[None].setparents(p1, p2)
2183 2198 self._quick_access_changeid_invalidate()
2184 2199
2185 2200 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2186 2201 """changeid must be a changeset revision, if specified.
2187 2202 fileid can be a file revision or node."""
2188 2203 return context.filectx(
2189 2204 self, path, changeid, fileid, changectx=changectx
2190 2205 )
2191 2206
2192 2207 def getcwd(self):
2193 2208 return self.dirstate.getcwd()
2194 2209
2195 2210 def pathto(self, f, cwd=None):
2196 2211 return self.dirstate.pathto(f, cwd)
2197 2212
2198 2213 def _loadfilter(self, filter):
2199 2214 if filter not in self._filterpats:
2200 2215 l = []
2201 2216 for pat, cmd in self.ui.configitems(filter):
2202 2217 if cmd == b'!':
2203 2218 continue
2204 2219 mf = matchmod.match(self.root, b'', [pat])
2205 2220 fn = None
2206 2221 params = cmd
2207 2222 for name, filterfn in pycompat.iteritems(self._datafilters):
2208 2223 if cmd.startswith(name):
2209 2224 fn = filterfn
2210 2225 params = cmd[len(name) :].lstrip()
2211 2226 break
2212 2227 if not fn:
2213 2228 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2214 2229 fn.__name__ = 'commandfilter'
2215 2230 # Wrap old filters not supporting keyword arguments
2216 2231 if not pycompat.getargspec(fn)[2]:
2217 2232 oldfn = fn
2218 2233 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2219 2234 fn.__name__ = 'compat-' + oldfn.__name__
2220 2235 l.append((mf, fn, params))
2221 2236 self._filterpats[filter] = l
2222 2237 return self._filterpats[filter]
2223 2238
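# The patterns above come from hgrc sections of the same name; a sketch
# (the commands are examples, any shell filter works, and a value of "!"
# disables filtering for that pattern):
#
#     [encode]
#     **.txt = dos2unix
#
#     [decode]
#     **.txt = unix2dos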
2224 2239 def _filter(self, filterpats, filename, data):
2225 2240 for mf, fn, cmd in filterpats:
2226 2241 if mf(filename):
2227 2242 self.ui.debug(
2228 2243 b"filtering %s through %s\n"
2229 2244 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2230 2245 )
2231 2246 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2232 2247 break
2233 2248
2234 2249 return data
2235 2250
2236 2251 @unfilteredpropertycache
2237 2252 def _encodefilterpats(self):
2238 2253 return self._loadfilter(b'encode')
2239 2254
2240 2255 @unfilteredpropertycache
2241 2256 def _decodefilterpats(self):
2242 2257 return self._loadfilter(b'decode')
2243 2258
2244 2259 def adddatafilter(self, name, filter):
2245 2260 self._datafilters[name] = filter
2246 2261
2247 2262 def wread(self, filename):
2248 2263 if self.wvfs.islink(filename):
2249 2264 data = self.wvfs.readlink(filename)
2250 2265 else:
2251 2266 data = self.wvfs.read(filename)
2252 2267 return self._filter(self._encodefilterpats, filename, data)
2253 2268
2254 2269 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2255 2270 """write ``data`` into ``filename`` in the working directory
2256 2271
2257 2272 This returns the length of the written (maybe decoded) data.
2258 2273 """
2259 2274 data = self._filter(self._decodefilterpats, filename, data)
2260 2275 if b'l' in flags:
2261 2276 self.wvfs.symlink(data, filename)
2262 2277 else:
2263 2278 self.wvfs.write(
2264 2279 filename, data, backgroundclose=backgroundclose, **kwargs
2265 2280 )
2266 2281 if b'x' in flags:
2267 2282 self.wvfs.setflags(filename, False, True)
2268 2283 else:
2269 2284 self.wvfs.setflags(filename, False, False)
2270 2285 return len(data)
2271 2286
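# Sketch of the flag handling above (filenames are examples):
#
#     repo.wwrite(b'a.txt', b'data', b'')   # regular file
#     repo.wwrite(b'bin/x', b'data', b'x')  # executable bit set
#     repo.wwrite(b'lnk', b'target', b'l')  # symlink pointing at 'target'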
2272 2287 def wwritedata(self, filename, data):
2273 2288 return self._filter(self._decodefilterpats, filename, data)
2274 2289
2275 2290 def currenttransaction(self):
2276 2291 """return the current transaction or None if non exists"""
2277 2292 if self._transref:
2278 2293 tr = self._transref()
2279 2294 else:
2280 2295 tr = None
2281 2296
2282 2297 if tr and tr.running():
2283 2298 return tr
2284 2299 return None
2285 2300
2286 2301 def transaction(self, desc, report=None):
2287 2302 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2288 2303 b'devel', b'check-locks'
2289 2304 ):
2290 2305 if self._currentlock(self._lockref) is None:
2291 2306 raise error.ProgrammingError(b'transaction requires locking')
2292 2307 tr = self.currenttransaction()
2293 2308 if tr is not None:
2294 2309 return tr.nest(name=desc)
2295 2310
2296 2311 # abort here if the journal already exists
2297 2312 if self.svfs.exists(b"journal"):
2298 2313 raise error.RepoError(
2299 2314 _(b"abandoned transaction found"),
2300 2315 hint=_(b"run 'hg recover' to clean up transaction"),
2301 2316 )
2302 2317
2303 2318 idbase = b"%.40f#%f" % (random.random(), time.time())
2304 2319 ha = hex(hashutil.sha1(idbase).digest())
2305 2320 txnid = b'TXN:' + ha
2306 2321 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2307 2322
2308 2323 self._writejournal(desc)
2309 2324 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2310 2325 if report:
2311 2326 rp = report
2312 2327 else:
2313 2328 rp = self.ui.warn
2314 2329 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2315 2330 # we must avoid a cyclic reference between repo and transaction.
2316 2331 reporef = weakref.ref(self)
2317 2332 # Code to track tag movement
2318 2333 #
2319 2334 # Since tags are all handled as file content, it is actually quite hard
2320 2335 # to track these movements from a code perspective. So we fall back to
2321 2336 # tracking at the repository level. One could envision tracking changes
2322 2337 # to the '.hgtags' file through changegroup application, but that fails to
2323 2338 # cope with cases where a transaction exposes new heads without a
2324 2339 # changegroup being involved (e.g. phase movement).
2325 2340 #
2326 2341 # For now, we gate the feature behind a flag since it likely comes
2327 2342 # with performance impacts. The current code runs more often than needed
2328 2343 # and does not use caches as much as it could. The current focus is on
2329 2344 # the behavior of the feature, so we disable it by default. The flag
2330 2345 # will be removed when we are happy with the performance impact.
2331 2346 #
2332 2347 # Once this feature is no longer experimental move the following
2333 2348 # documentation to the appropriate help section:
2334 2349 #
2335 2350 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2336 2351 # tags (new or changed or deleted tags). In addition the details of
2337 2352 # these changes are made available in a file at:
2338 2353 # ``REPOROOT/.hg/changes/tags.changes``.
2339 2354 # Make sure you check for HG_TAG_MOVED before reading that file, as it
2340 2355 # might exist from a previous transaction even if no tags were touched
2341 2356 # in this one. Changes are recorded in a line-based format::
2342 2357 #
2343 2358 # <action> <hex-node> <tag-name>\n
2344 2359 #
2345 2360 # Actions are defined as follows:
2346 2361 # "-R": tag is removed,
2347 2362 # "+A": tag is added,
2348 2363 # "-M": tag is moved (old value),
2349 2364 # "+M": tag is moved (new value),
2350 2365 tracktags = lambda x: None
2351 2366 # experimental config: experimental.hook-track-tags
2352 2367 shouldtracktags = self.ui.configbool(
2353 2368 b'experimental', b'hook-track-tags'
2354 2369 )
2355 2370 if desc != b'strip' and shouldtracktags:
2356 2371 oldheads = self.changelog.headrevs()
2357 2372
2358 2373 def tracktags(tr2):
2359 2374 repo = reporef()
2360 2375 assert repo is not None # help pytype
2361 2376 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2362 2377 newheads = repo.changelog.headrevs()
2363 2378 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2364 2379 # note: we compare lists here.
2365 2380 # As we do it only once, building a set would not be cheaper.
2366 2381 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2367 2382 if changes:
2368 2383 tr2.hookargs[b'tag_moved'] = b'1'
2369 2384 with repo.vfs(
2370 2385 b'changes/tags.changes', b'w', atomictemp=True
2371 2386 ) as changesfile:
2372 2387 # note: we do not register the file with the transaction
2373 2388 # because we need it to still exist once the transaction
2374 2389 # is closed (for txnclose hooks)
2375 2390 tagsmod.writediff(changesfile, changes)
2376 2391
2377 2392 def validate(tr2):
2378 2393 """will run pre-closing hooks"""
2379 2394 # XXX the transaction API is a bit lacking here so we take a hacky
2380 2395 # path for now
2381 2396 #
2382 2397 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2383 2398 # dict is copied before these run. In addition, we need the data
2384 2399 # available to in-memory hooks too.
2385 2400 #
2386 2401 # Moreover, we also need to make sure this runs before txnclose
2387 2402 # hooks and there is no "pending" mechanism that would execute
2388 2403 # logic only if hooks are about to run.
2389 2404 #
2390 2405 # Fixing this limitation of the transaction is also needed to track
2391 2406 # other families of changes (bookmarks, phases, obsolescence).
2392 2407 #
2393 2408 # This will have to be fixed before we remove the experimental
2394 2409 # gating.
2395 2410 tracktags(tr2)
2396 2411 repo = reporef()
2397 2412 assert repo is not None # help pytype
2398 2413
2399 2414 singleheadopt = (b'experimental', b'single-head-per-branch')
2400 2415 singlehead = repo.ui.configbool(*singleheadopt)
2401 2416 if singlehead:
2402 2417 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2403 2418 accountclosed = singleheadsub.get(
2404 2419 b"account-closed-heads", False
2405 2420 )
2406 2421 if singleheadsub.get(b"public-changes-only", False):
2407 2422 filtername = b"immutable"
2408 2423 else:
2409 2424 filtername = b"visible"
2410 2425 scmutil.enforcesinglehead(
2411 2426 repo, tr2, desc, accountclosed, filtername
2412 2427 )
2413 2428 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2414 2429 for name, (old, new) in sorted(
2415 2430 tr.changes[b'bookmarks'].items()
2416 2431 ):
2417 2432 args = tr.hookargs.copy()
2418 2433 args.update(bookmarks.preparehookargs(name, old, new))
2419 2434 repo.hook(
2420 2435 b'pretxnclose-bookmark',
2421 2436 throw=True,
2422 2437 **pycompat.strkwargs(args)
2423 2438 )
2424 2439 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2425 2440 cl = repo.unfiltered().changelog
2426 2441 for revs, (old, new) in tr.changes[b'phases']:
2427 2442 for rev in revs:
2428 2443 args = tr.hookargs.copy()
2429 2444 node = hex(cl.node(rev))
2430 2445 args.update(phases.preparehookargs(node, old, new))
2431 2446 repo.hook(
2432 2447 b'pretxnclose-phase',
2433 2448 throw=True,
2434 2449 **pycompat.strkwargs(args)
2435 2450 )
2436 2451
2437 2452 repo.hook(
2438 2453 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2439 2454 )
2440 2455
2441 2456 def releasefn(tr, success):
2442 2457 repo = reporef()
2443 2458 if repo is None:
2444 2459 # If the repo has been GC'd (and this release function is being
2445 2460 # called from transaction.__del__), there's not much we can do,
2446 2461 # so just leave the unfinished transaction there and let the
2447 2462 # user run `hg recover`.
2448 2463 return
2449 2464 if success:
2450 2465 # this should be explicitly invoked here, because
2451 2466 # in-memory changes aren't written out when closing
2452 2467 # the transaction if tr.addfilegenerator (via
2453 2468 # dirstate.write or so) wasn't invoked while the
2454 2469 # transaction was running
2455 2470 repo.dirstate.write(None)
2456 2471 else:
2457 2472 # discard all changes (including ones already written
2458 2473 # out) in this transaction
2459 2474 narrowspec.restorebackup(self, b'journal.narrowspec')
2460 2475 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2461 2476 repo.dirstate.restorebackup(None, b'journal.dirstate')
2462 2477
2463 2478 repo.invalidate(clearfilecache=True)
2464 2479
2465 2480 tr = transaction.transaction(
2466 2481 rp,
2467 2482 self.svfs,
2468 2483 vfsmap,
2469 2484 b"journal",
2470 2485 b"undo",
2471 2486 aftertrans(renames),
2472 2487 self.store.createmode,
2473 2488 validator=validate,
2474 2489 releasefn=releasefn,
2475 2490 checkambigfiles=_cachedfiles,
2476 2491 name=desc,
2477 2492 )
2478 2493 tr.changes[b'origrepolen'] = len(self)
2479 2494 tr.changes[b'obsmarkers'] = set()
2480 2495 tr.changes[b'phases'] = []
2481 2496 tr.changes[b'bookmarks'] = {}
2482 2497
2483 2498 tr.hookargs[b'txnid'] = txnid
2484 2499 tr.hookargs[b'txnname'] = desc
2485 2500 tr.hookargs[b'changes'] = tr.changes
2486 2501 # note: writing the fncache only during finalize means that the file is
2487 2502 # outdated when running hooks. As fncache is used for streaming clones,
2488 2503 # this is not expected to break anything that happens during the hooks.
2489 2504 tr.addfinalize(b'flush-fncache', self.store.write)
2490 2505
2491 2506 def txnclosehook(tr2):
2492 2507 """To be run if transaction is successful, will schedule a hook run"""
2493 2508 # Don't reference tr2 in hook() so we don't hold a reference.
2494 2509 # This reduces memory consumption when there are multiple
2495 2510 # transactions per lock. This can likely go away if issue5045
2496 2511 # fixes the function accumulation.
2497 2512 hookargs = tr2.hookargs
2498 2513
2499 2514 def hookfunc(unused_success):
2500 2515 repo = reporef()
2501 2516 assert repo is not None # help pytype
2502 2517
2503 2518 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2504 2519 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2505 2520 for name, (old, new) in bmchanges:
2506 2521 args = tr.hookargs.copy()
2507 2522 args.update(bookmarks.preparehookargs(name, old, new))
2508 2523 repo.hook(
2509 2524 b'txnclose-bookmark',
2510 2525 throw=False,
2511 2526 **pycompat.strkwargs(args)
2512 2527 )
2513 2528
2514 2529 if hook.hashook(repo.ui, b'txnclose-phase'):
2515 2530 cl = repo.unfiltered().changelog
2516 2531 phasemv = sorted(
2517 2532 tr.changes[b'phases'], key=lambda r: r[0][0]
2518 2533 )
2519 2534 for revs, (old, new) in phasemv:
2520 2535 for rev in revs:
2521 2536 args = tr.hookargs.copy()
2522 2537 node = hex(cl.node(rev))
2523 2538 args.update(phases.preparehookargs(node, old, new))
2524 2539 repo.hook(
2525 2540 b'txnclose-phase',
2526 2541 throw=False,
2527 2542 **pycompat.strkwargs(args)
2528 2543 )
2529 2544
2530 2545 repo.hook(
2531 2546 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2532 2547 )
2533 2548
2534 2549 repo = reporef()
2535 2550 assert repo is not None # help pytype
2536 2551 repo._afterlock(hookfunc)
2537 2552
2538 2553 tr.addfinalize(b'txnclose-hook', txnclosehook)
2539 2554 # Include a leading "-" to make it happen before the transaction summary
2540 2555 # reports registered via scmutil.registersummarycallback() whose names
2541 2556 # are 00-txnreport etc. That way, the caches will be warm when the
2542 2557 # callbacks run.
2543 2558 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2544 2559
2545 2560 def txnaborthook(tr2):
2546 2561 """To be run if transaction is aborted"""
2547 2562 repo = reporef()
2548 2563 assert repo is not None # help pytype
2549 2564 repo.hook(
2550 2565 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2551 2566 )
2552 2567
2553 2568 tr.addabort(b'txnabort-hook', txnaborthook)
2554 2569 # avoid eager cache invalidation. in-memory data should be identical
2555 2570 # to stored data if transaction has no error.
2556 2571 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2557 2572 self._transref = weakref.ref(tr)
2558 2573 scmutil.registersummarycallback(self, tr, desc)
2559 2574 return tr
2560 2575
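# Typical caller pattern for the method above (sketch; the transaction
# name b'my-operation' is an arbitrary example):
#
#     with repo.lock():
#         with repo.transaction(b'my-operation') as tr:
#             ...  # write to the store; rolled back on error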
2561 2576 def _journalfiles(self):
2562 2577 return (
2563 2578 (self.svfs, b'journal'),
2564 2579 (self.svfs, b'journal.narrowspec'),
2565 2580 (self.vfs, b'journal.narrowspec.dirstate'),
2566 2581 (self.vfs, b'journal.dirstate'),
2567 2582 (self.vfs, b'journal.branch'),
2568 2583 (self.vfs, b'journal.desc'),
2569 2584 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2570 2585 (self.svfs, b'journal.phaseroots'),
2571 2586 )
2572 2587
2573 2588 def undofiles(self):
2574 2589 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2575 2590
2576 2591 @unfilteredmethod
2577 2592 def _writejournal(self, desc):
2578 2593 self.dirstate.savebackup(None, b'journal.dirstate')
2579 2594 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2580 2595 narrowspec.savebackup(self, b'journal.narrowspec')
2581 2596 self.vfs.write(
2582 2597 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2583 2598 )
2584 2599 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2585 2600 bookmarksvfs = bookmarks.bookmarksvfs(self)
2586 2601 bookmarksvfs.write(
2587 2602 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2588 2603 )
2589 2604 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2590 2605
2591 2606 def recover(self):
2592 2607 with self.lock():
2593 2608 if self.svfs.exists(b"journal"):
2594 2609 self.ui.status(_(b"rolling back interrupted transaction\n"))
2595 2610 vfsmap = {
2596 2611 b'': self.svfs,
2597 2612 b'plain': self.vfs,
2598 2613 }
2599 2614 transaction.rollback(
2600 2615 self.svfs,
2601 2616 vfsmap,
2602 2617 b"journal",
2603 2618 self.ui.warn,
2604 2619 checkambigfiles=_cachedfiles,
2605 2620 )
2606 2621 self.invalidate()
2607 2622 return True
2608 2623 else:
2609 2624 self.ui.warn(_(b"no interrupted transaction available\n"))
2610 2625 return False
2611 2626
2612 2627 def rollback(self, dryrun=False, force=False):
2613 2628 wlock = lock = dsguard = None
2614 2629 try:
2615 2630 wlock = self.wlock()
2616 2631 lock = self.lock()
2617 2632 if self.svfs.exists(b"undo"):
2618 2633 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2619 2634
2620 2635 return self._rollback(dryrun, force, dsguard)
2621 2636 else:
2622 2637 self.ui.warn(_(b"no rollback information available\n"))
2623 2638 return 1
2624 2639 finally:
2625 2640 release(dsguard, lock, wlock)
2626 2641
2627 2642 @unfilteredmethod # Until we get smarter cache management
2628 2643 def _rollback(self, dryrun, force, dsguard):
2629 2644 ui = self.ui
2630 2645 try:
2631 2646 args = self.vfs.read(b'undo.desc').splitlines()
2632 2647 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2633 2648 if len(args) >= 3:
2634 2649 detail = args[2]
2635 2650 oldtip = oldlen - 1
2636 2651
2637 2652 if detail and ui.verbose:
2638 2653 msg = _(
2639 2654 b'repository tip rolled back to revision %d'
2640 2655 b' (undo %s: %s)\n'
2641 2656 ) % (oldtip, desc, detail)
2642 2657 else:
2643 2658 msg = _(
2644 2659 b'repository tip rolled back to revision %d (undo %s)\n'
2645 2660 ) % (oldtip, desc)
2646 2661 except IOError:
2647 2662 msg = _(b'rolling back unknown transaction\n')
2648 2663 desc = None
2649 2664
2650 2665 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2651 2666 raise error.Abort(
2652 2667 _(
2653 2668 b'rollback of last commit while not checked out '
2654 2669 b'may lose data'
2655 2670 ),
2656 2671 hint=_(b'use -f to force'),
2657 2672 )
2658 2673
2659 2674 ui.status(msg)
2660 2675 if dryrun:
2661 2676 return 0
2662 2677
2663 2678 parents = self.dirstate.parents()
2664 2679 self.destroying()
2665 2680 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2666 2681 transaction.rollback(
2667 2682 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2668 2683 )
2669 2684 bookmarksvfs = bookmarks.bookmarksvfs(self)
2670 2685 if bookmarksvfs.exists(b'undo.bookmarks'):
2671 2686 bookmarksvfs.rename(
2672 2687 b'undo.bookmarks', b'bookmarks', checkambig=True
2673 2688 )
2674 2689 if self.svfs.exists(b'undo.phaseroots'):
2675 2690 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2676 2691 self.invalidate()
2677 2692
2678 2693 has_node = self.changelog.index.has_node
2679 2694 parentgone = any(not has_node(p) for p in parents)
2680 2695 if parentgone:
2681 2696 # prevent dirstateguard from overwriting already restored one
2682 2697 dsguard.close()
2683 2698
2684 2699 narrowspec.restorebackup(self, b'undo.narrowspec')
2685 2700 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2686 2701 self.dirstate.restorebackup(None, b'undo.dirstate')
2687 2702 try:
2688 2703 branch = self.vfs.read(b'undo.branch')
2689 2704 self.dirstate.setbranch(encoding.tolocal(branch))
2690 2705 except IOError:
2691 2706 ui.warn(
2692 2707 _(
2693 2708 b'named branch could not be reset: '
2694 2709 b'current branch is still \'%s\'\n'
2695 2710 )
2696 2711 % self.dirstate.branch()
2697 2712 )
2698 2713
2699 2714 parents = tuple([p.rev() for p in self[None].parents()])
2700 2715 if len(parents) > 1:
2701 2716 ui.status(
2702 2717 _(
2703 2718 b'working directory now based on '
2704 2719 b'revisions %d and %d\n'
2705 2720 )
2706 2721 % parents
2707 2722 )
2708 2723 else:
2709 2724 ui.status(
2710 2725 _(b'working directory now based on revision %d\n') % parents
2711 2726 )
2712 2727 mergestatemod.mergestate.clean(self)
2713 2728
2714 2729 # TODO: if we know which new heads may result from this rollback, pass
2715 2730 # them to destroy(), which will prevent the branchhead cache from being
2716 2731 # invalidated.
2717 2732 self.destroyed()
2718 2733 return 0
2719 2734
2720 2735 def _buildcacheupdater(self, newtransaction):
2721 2736 """called during transaction to build the callback updating cache
2722 2737
2723 2738 Lives on the repository to help extension who might want to augment
2724 2739 this logic. For this purpose, the created transaction is passed to the
2725 2740 method.
2726 2741 """
2727 2742 # we must avoid a cyclic reference between repo and transaction.
2728 2743 reporef = weakref.ref(self)
2729 2744
2730 2745 def updater(tr):
2731 2746 repo = reporef()
2732 2747 assert repo is not None # help pytype
2733 2748 repo.updatecaches(tr)
2734 2749
2735 2750 return updater
2736 2751
2737 2752 @unfilteredmethod
2738 2753 def updatecaches(self, tr=None, full=False, caches=None):
2739 2754 """warm appropriate caches
2740 2755
2741 2756 If this function is called after a transaction closed, the transaction
2742 2757 will be available in the 'tr' argument. This can be used to selectively
2743 2758 update caches relevant to the changes in that transaction.
2744 2759
2745 2760 If 'full' is set, make sure all caches the function knows about have
2746 2761 up-to-date data, even the ones usually loaded more lazily.
2747 2762
2748 2763 The `full` argument can take a special "post-clone" value. In this case
2749 2764 the cache warming is done after a clone, and some of the slower caches
2750 2765 might be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2751 2766 as we plan for a cleaner way to deal with this in 5.9.
2752 2767 """
2753 2768 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2754 2769 # During strip, many caches are invalid but
2755 2770 # later call to `destroyed` will refresh them.
2756 2771 return
2757 2772
2758 2773 unfi = self.unfiltered()
2759 2774
2760 2775 if full:
2761 2776 msg = (
2762 2777 "`full` argument for `repo.updatecaches` is deprecated\n"
2763 2778 "(use `caches=repository.CACHE_ALL` instead)"
2764 2779 )
2765 2780 self.ui.deprecwarn(msg, b"5.9")
2766 2781 caches = repository.CACHES_ALL
2767 2782 if full == b"post-clone":
2768 2783 caches = repository.CACHES_POST_CLONE
2770 2785 elif caches is None:
2771 2786 caches = repository.CACHES_DEFAULT
2772 2787
2773 2788 if repository.CACHE_BRANCHMAP_SERVED in caches:
2774 2789 if tr is None or tr.changes[b'origrepolen'] < len(self):
2775 2790 # accessing the 'served' branchmap should refresh all the others
2776 2791 self.ui.debug(b'updating the branch cache\n')
2777 2792 self.filtered(b'served').branchmap()
2778 2793 self.filtered(b'served.hidden').branchmap()
2779 2794
2780 2795 if repository.CACHE_CHANGELOG_CACHE in caches:
2781 2796 self.changelog.update_caches(transaction=tr)
2782 2797
2783 2798 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2784 2799 self.manifestlog.update_caches(transaction=tr)
2785 2800
2786 2801 if repository.CACHE_REV_BRANCH in caches:
2787 2802 rbc = unfi.revbranchcache()
2788 2803 for r in unfi.changelog:
2789 2804 rbc.branchinfo(r)
2790 2805 rbc.write()
2791 2806
2792 2807 if repository.CACHE_FULL_MANIFEST in caches:
2793 2808 # ensure the working copy parents are in the manifestfulltextcache
2794 2809 for ctx in self[b'.'].parents():
2795 2810 ctx.manifest() # accessing the manifest is enough
2796 2811
2797 2812 if repository.CACHE_FILE_NODE_TAGS in caches:
2798 2813 # accessing the fnode cache warms the cache
2799 2814 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2800 2815
2801 2816 if repository.CACHE_TAGS_DEFAULT in caches:
2802 2817 # accessing tags warms the cache
2803 2818 self.tags()
2804 2819 if repository.CACHE_TAGS_SERVED in caches:
2805 2820 self.filtered(b'served').tags()
2806 2821
2807 2822 if repository.CACHE_BRANCHMAP_ALL in caches:
2808 2823 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2809 2824 # so we're forcing a write to cause these caches to be warmed up
2810 2825 # even if they haven't explicitly been requested yet (if they've
2811 2826 # never been used by hg, they won't ever have been written, even if
2812 2827 # they're a subset of another kind of cache that *has* been used).
2813 2828 for filt in repoview.filtertable.keys():
2814 2829 filtered = self.filtered(filt)
2815 2830 filtered.branchmap().write(filtered)
2816 2831
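# Sketch of the non-deprecated calling convention described above:
#
#     from mercurial.interfaces import repository
#     repo.updatecaches(caches=repository.CACHES_ALL)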
2817 2832 def invalidatecaches(self):
2818 2833
2819 2834 if '_tagscache' in vars(self):
2820 2835 # can't use delattr on proxy
2821 2836 del self.__dict__['_tagscache']
2822 2837
2823 2838 self._branchcaches.clear()
2824 2839 self.invalidatevolatilesets()
2825 2840 self._sparsesignaturecache.clear()
2826 2841
2827 2842 def invalidatevolatilesets(self):
2828 2843 self.filteredrevcache.clear()
2829 2844 obsolete.clearobscaches(self)
2830 2845 self._quick_access_changeid_invalidate()
2831 2846
2832 2847 def invalidatedirstate(self):
2833 2848 """Invalidates the dirstate, causing the next call to dirstate
2834 2849 to check if it was modified since the last time it was read,
2835 2850 rereading it if it has.
2836 2851
2837 2852 This is different to dirstate.invalidate() that it doesn't always
2838 2853 rereads the dirstate. Use dirstate.invalidate() if you want to
2839 2854 explicitly read the dirstate again (i.e. restoring it to a previous
2840 2855 known good state)."""
2841 2856 if hasunfilteredcache(self, 'dirstate'):
2842 2857 for k in self.dirstate._filecache:
2843 2858 try:
2844 2859 delattr(self.dirstate, k)
2845 2860 except AttributeError:
2846 2861 pass
2847 2862 delattr(self.unfiltered(), 'dirstate')
2848 2863
2849 2864 def invalidate(self, clearfilecache=False):
2850 2865 """Invalidates both store and non-store parts other than dirstate
2851 2866
2852 2867 If a transaction is running, invalidation of store is omitted,
2853 2868 because discarding in-memory changes might cause inconsistency
2854 2869 (e.g. incomplete fncache causes unintentional failure, but
2855 2870 redundant one doesn't).
2856 2871 """
2857 2872 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2858 2873 for k in list(self._filecache.keys()):
2859 2874 # dirstate is invalidated separately in invalidatedirstate()
2860 2875 if k == b'dirstate':
2861 2876 continue
2862 2877 if (
2863 2878 k == b'changelog'
2864 2879 and self.currenttransaction()
2865 2880 and self.changelog._delayed
2866 2881 ):
2867 2882 # The changelog object may store unwritten revisions. We don't
2868 2883 # want to lose them.
2869 2884 # TODO: Solve the problem instead of working around it.
2870 2885 continue
2871 2886
2872 2887 if clearfilecache:
2873 2888 del self._filecache[k]
2874 2889 try:
2875 2890 delattr(unfiltered, k)
2876 2891 except AttributeError:
2877 2892 pass
2878 2893 self.invalidatecaches()
2879 2894 if not self.currenttransaction():
2880 2895 # TODO: Changing contents of store outside transaction
2881 2896 # causes inconsistency. We should make in-memory store
2882 2897 # changes detectable, and abort if changed.
2883 2898 self.store.invalidatecaches()
2884 2899
2885 2900 def invalidateall(self):
2886 2901 """Fully invalidates both store and non-store parts, causing the
2887 2902 subsequent operation to reread any outside changes."""
2888 2903 # extensions should hook this to invalidate their caches
2889 2904 self.invalidate()
2890 2905 self.invalidatedirstate()
2891 2906
2892 2907 @unfilteredmethod
2893 2908 def _refreshfilecachestats(self, tr):
2894 2909 """Reload stats of cached files so that they are flagged as valid"""
2895 2910 for k, ce in self._filecache.items():
2896 2911 k = pycompat.sysstr(k)
2897 2912 if k == 'dirstate' or k not in self.__dict__:
2898 2913 continue
2899 2914 ce.refresh()
2900 2915
2901 2916 def _lock(
2902 2917 self,
2903 2918 vfs,
2904 2919 lockname,
2905 2920 wait,
2906 2921 releasefn,
2907 2922 acquirefn,
2908 2923 desc,
2909 2924 ):
2910 2925 timeout = 0
2911 2926 warntimeout = 0
2912 2927 if wait:
2913 2928 timeout = self.ui.configint(b"ui", b"timeout")
2914 2929 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2915 2930 # internal config: ui.signal-safe-lock
2916 2931 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2917 2932
2918 2933 l = lockmod.trylock(
2919 2934 self.ui,
2920 2935 vfs,
2921 2936 lockname,
2922 2937 timeout,
2923 2938 warntimeout,
2924 2939 releasefn=releasefn,
2925 2940 acquirefn=acquirefn,
2926 2941 desc=desc,
2927 2942 signalsafe=signalsafe,
2928 2943 )
2929 2944 return l
2930 2945
2931 2946 def _afterlock(self, callback):
2932 2947 """add a callback to be run when the repository is fully unlocked
2933 2948
2934 2949 The callback will be executed when the outermost lock is released
2935 2950 (with wlock being higher level than 'lock')."""
2936 2951 for ref in (self._wlockref, self._lockref):
2937 2952 l = ref and ref()
2938 2953 if l and l.held:
2939 2954 l.postrelease.append(callback)
2940 2955 break
2941 2956 else: # no lock has been found.
2942 2957 callback(True)
2943 2958
2944 2959 def lock(self, wait=True):
2945 2960 """Lock the repository store (.hg/store) and return a weak reference
2946 2961 to the lock. Use this before modifying the store (e.g. committing or
2947 2962 stripping). If you are opening a transaction, get a lock as well.
2948 2963
2949 2964 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2950 2965 'wlock' first to avoid a deadlock hazard."""
2951 2966 l = self._currentlock(self._lockref)
2952 2967 if l is not None:
2953 2968 l.lock()
2954 2969 return l
2955 2970
2956 2971 l = self._lock(
2957 2972 vfs=self.svfs,
2958 2973 lockname=b"lock",
2959 2974 wait=wait,
2960 2975 releasefn=None,
2961 2976 acquirefn=self.invalidate,
2962 2977 desc=_(b'repository %s') % self.origroot,
2963 2978 )
2964 2979 self._lockref = weakref.ref(l)
2965 2980 return l
2966 2981
2967 2982 def wlock(self, wait=True):
2968 2983 """Lock the non-store parts of the repository (everything under
2969 2984 .hg except .hg/store) and return a weak reference to the lock.
2970 2985
2971 2986 Use this before modifying files in .hg.
2972 2987
2973 2988 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2974 2989 'wlock' first to avoid a deadlock hazard."""
2975 2990 l = self._wlockref() if self._wlockref else None
2976 2991 if l is not None and l.held:
2977 2992 l.lock()
2978 2993 return l
2979 2994
2980 2995 # We do not need to check for non-waiting lock acquisition. Such
2981 2996 # an acquisition would not cause a deadlock, as it would just fail.
2982 2997 if wait and (
2983 2998 self.ui.configbool(b'devel', b'all-warnings')
2984 2999 or self.ui.configbool(b'devel', b'check-locks')
2985 3000 ):
2986 3001 if self._currentlock(self._lockref) is not None:
2987 3002 self.ui.develwarn(b'"wlock" acquired after "lock"')
2988 3003
2989 3004 def unlock():
2990 3005 if self.dirstate.pendingparentchange():
2991 3006 self.dirstate.invalidate()
2992 3007 else:
2993 3008 self.dirstate.write(None)
2994 3009
2995 3010 self._filecache[b'dirstate'].refresh()
2996 3011
2997 3012 l = self._lock(
2998 3013 self.vfs,
2999 3014 b"wlock",
3000 3015 wait,
3001 3016 unlock,
3002 3017 self.invalidatedirstate,
3003 3018 _(b'working directory of %s') % self.origroot,
3004 3019 )
3005 3020 self._wlockref = weakref.ref(l)
3006 3021 return l
3007 3022
3008 3023 def _currentlock(self, lockref):
3009 3024 """Returns the lock if it's held, or None if it's not."""
3010 3025 if lockref is None:
3011 3026 return None
3012 3027 l = lockref()
3013 3028 if l is None or not l.held:
3014 3029 return None
3015 3030 return l
3016 3031
3017 3032 def currentwlock(self):
3018 3033 """Returns the wlock if it's held, or None if it's not."""
3019 3034 return self._currentlock(self._wlockref)
3020 3035
3021 3036 def checkcommitpatterns(self, wctx, match, status, fail):
3022 3037 """check for commit arguments that aren't committable"""
3023 3038 if match.isexact() or match.prefix():
3024 3039 matched = set(status.modified + status.added + status.removed)
3025 3040
3026 3041 for f in match.files():
3027 3042 f = self.dirstate.normalize(f)
3028 3043 if f == b'.' or f in matched or f in wctx.substate:
3029 3044 continue
3030 3045 if f in status.deleted:
3031 3046 fail(f, _(b'file not found!'))
3032 3047 # Is it a directory that exists or used to exist?
3033 3048 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3034 3049 d = f + b'/'
3035 3050 for mf in matched:
3036 3051 if mf.startswith(d):
3037 3052 break
3038 3053 else:
3039 3054 fail(f, _(b"no match under directory!"))
3040 3055 elif f not in self.dirstate:
3041 3056 fail(f, _(b"file not tracked!"))
3042 3057
3043 3058 @unfilteredmethod
3044 3059 def commit(
3045 3060 self,
3046 3061 text=b"",
3047 3062 user=None,
3048 3063 date=None,
3049 3064 match=None,
3050 3065 force=False,
3051 3066 editor=None,
3052 3067 extra=None,
3053 3068 ):
3054 3069 """Add a new revision to current repository.
3055 3070
3056 3071 Revision information is gathered from the working directory,
3057 3072 match can be used to filter the committed files. If editor is
3058 3073 supplied, it is called to get a commit message.
3059 3074 """
3060 3075 if extra is None:
3061 3076 extra = {}
3062 3077
3063 3078 def fail(f, msg):
3064 3079 raise error.InputError(b'%s: %s' % (f, msg))
3065 3080
3066 3081 if not match:
3067 3082 match = matchmod.always()
3068 3083
3069 3084 if not force:
3070 3085 match.bad = fail
3071 3086
3072 3087 # lock() for recent changelog (see issue4368)
3073 3088 with self.wlock(), self.lock():
3074 3089 wctx = self[None]
3075 3090 merge = len(wctx.parents()) > 1
3076 3091
3077 3092 if not force and merge and not match.always():
3078 3093 raise error.Abort(
3079 3094 _(
3080 3095 b'cannot partially commit a merge '
3081 3096 b'(do not specify files or patterns)'
3082 3097 )
3083 3098 )
3084 3099
3085 3100 status = self.status(match=match, clean=force)
3086 3101 if force:
3087 3102 status.modified.extend(
3088 3103 status.clean
3089 3104 ) # mq may commit clean files
3090 3105
3091 3106 # check subrepos
3092 3107 subs, commitsubs, newstate = subrepoutil.precommit(
3093 3108 self.ui, wctx, status, match, force=force
3094 3109 )
3095 3110
3096 3111 # make sure all explicit patterns are matched
3097 3112 if not force:
3098 3113 self.checkcommitpatterns(wctx, match, status, fail)
3099 3114
3100 3115 cctx = context.workingcommitctx(
3101 3116 self, status, text, user, date, extra
3102 3117 )
3103 3118
3104 3119 ms = mergestatemod.mergestate.read(self)
3105 3120 mergeutil.checkunresolved(ms)
3106 3121
3107 3122 # internal config: ui.allowemptycommit
3108 3123 if cctx.isempty() and not self.ui.configbool(
3109 3124 b'ui', b'allowemptycommit'
3110 3125 ):
3111 3126 self.ui.debug(b'nothing to commit, clearing merge state\n')
3112 3127 ms.reset()
3113 3128 return None
3114 3129
3115 3130 if merge and cctx.deleted():
3116 3131 raise error.Abort(_(b"cannot commit merge with missing files"))
3117 3132
3118 3133 if editor:
3119 3134 cctx._text = editor(self, cctx, subs)
3120 3135 edited = text != cctx._text
3121 3136
3122 3137 # Save commit message in case this transaction gets rolled back
3123 3138 # (e.g. by a pretxncommit hook). Leave the content alone on
3124 3139 # the assumption that the user will use the same editor again.
3125 3140 msgfn = self.savecommitmessage(cctx._text)
3126 3141
3127 3142 # commit subs and write new state
3128 3143 if subs:
3129 3144 uipathfn = scmutil.getuipathfn(self)
3130 3145 for s in sorted(commitsubs):
3131 3146 sub = wctx.sub(s)
3132 3147 self.ui.status(
3133 3148 _(b'committing subrepository %s\n')
3134 3149 % uipathfn(subrepoutil.subrelpath(sub))
3135 3150 )
3136 3151 sr = sub.commit(cctx._text, user, date)
3137 3152 newstate[s] = (newstate[s][0], sr)
3138 3153 subrepoutil.writestate(self, newstate)
3139 3154
3140 3155 p1, p2 = self.dirstate.parents()
3141 3156 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3142 3157 try:
3143 3158 self.hook(
3144 3159 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3145 3160 )
3146 3161 with self.transaction(b'commit'):
3147 3162 ret = self.commitctx(cctx, True)
3148 3163 # update bookmarks, dirstate and mergestate
3149 3164 bookmarks.update(self, [p1, p2], ret)
3150 3165 cctx.markcommitted(ret)
3151 3166 ms.reset()
3152 3167 except: # re-raises
3153 3168 if edited:
3154 3169 self.ui.write(
3155 3170 _(b'note: commit message saved in %s\n') % msgfn
3156 3171 )
3157 3172 self.ui.write(
3158 3173 _(
3159 3174 b"note: use 'hg commit --logfile "
3160 3175 b".hg/last-message.txt --edit' to reuse it\n"
3161 3176 )
3162 3177 )
3163 3178 raise
3164 3179
3165 3180 def commithook(unused_success):
3166 3181 # hack for commands that use a temporary commit (e.g. histedit):
3167 3182 # the temporary commit may have been stripped before the hook runs
3168 3183 if self.changelog.hasnode(ret):
3169 3184 self.hook(
3170 3185 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3171 3186 )
3172 3187
3173 3188 self._afterlock(commithook)
3174 3189 return ret
3175 3190
3176 3191 @unfilteredmethod
3177 3192 def commitctx(self, ctx, error=False, origctx=None):
3178 3193 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3179 3194
3180 3195 @unfilteredmethod
3181 3196 def destroying(self):
3182 3197 """Inform the repository that nodes are about to be destroyed.
3183 3198 Intended for use by strip and rollback, so there's a common
3184 3199 place for anything that has to be done before destroying history.
3185 3200
3186 3201 This is mostly useful for saving state that is in memory and waiting
3187 3202 to be flushed when the current lock is released. Because a call to
3188 3203 destroyed is imminent, the repo will be invalidated causing those
3189 3204 changes to stay in memory (waiting for the next unlock), or vanish
3190 3205 completely.
3191 3206 """
3192 3207 # When using the same lock to commit and strip, the phasecache is left
3193 3208 # dirty after committing. Then when we strip, the repo is invalidated,
3194 3209 # causing those changes to disappear.
3195 3210 if '_phasecache' in vars(self):
3196 3211 self._phasecache.write()
3197 3212
3198 3213 @unfilteredmethod
3199 3214 def destroyed(self):
3200 3215 """Inform the repository that nodes have been destroyed.
3201 3216 Intended for use by strip and rollback, so there's a common
3202 3217 place for anything that has to be done after destroying history.
3203 3218 """
3204 3219 # When one tries to:
3205 3220 # 1) destroy nodes thus calling this method (e.g. strip)
3206 3221 # 2) use phasecache somewhere (e.g. commit)
3207 3222 #
3208 3223 # then 2) will fail because the phasecache contains nodes that were
3209 3224 # removed. We can either remove phasecache from the filecache,
3210 3225 # causing it to reload next time it is accessed, or simply filter
3211 3226 # the removed nodes now and write the updated cache.
3212 3227 self._phasecache.filterunknown(self)
3213 3228 self._phasecache.write()
3214 3229
3215 3230 # refresh all repository caches
3216 3231 self.updatecaches()
3217 3232
3218 3233 # Ensure the persistent tag cache is updated. Doing it now
3219 3234 # means that the tag cache only has to worry about destroyed
3220 3235 # heads immediately after a strip/rollback. That in turn
3221 3236 # guarantees that "cachetip == currenttip" (comparing both rev
3222 3237 # and node) always means no nodes have been added or destroyed.
3223 3238
3224 3239 # XXX this is suboptimal when qrefresh'ing: we strip the current
3225 3240 # head, refresh the tag cache, then immediately add a new head.
3226 3241 # But I think doing it this way is necessary for the "instant
3227 3242 # tag cache retrieval" case to work.
3228 3243 self.invalidate()
3229 3244
3230 3245 def status(
3231 3246 self,
3232 3247 node1=b'.',
3233 3248 node2=None,
3234 3249 match=None,
3235 3250 ignored=False,
3236 3251 clean=False,
3237 3252 unknown=False,
3238 3253 listsubrepos=False,
3239 3254 ):
3240 3255 '''a convenience method that calls node1.status(node2)'''
3241 3256 return self[node1].status(
3242 3257 node2, match, ignored, clean, unknown, listsubrepos
3243 3258 )
3244 3259
3245 3260 def addpostdsstatus(self, ps):
3246 3261 """Add a callback to run within the wlock, at the point at which status
3247 3262 fixups happen.
3248 3263
3249 3264 On status completion, callback(wctx, status) will be called with the
3250 3265 wlock held, unless the dirstate has changed from underneath or the wlock
3251 3266 couldn't be grabbed.
3252 3267
3253 3268 Callbacks should not capture and use a cached copy of the dirstate --
3254 3269 it might change in the meanwhile. Instead, they should access the
3255 3270 dirstate via wctx.repo().dirstate.
3256 3271
3257 3272 This list is emptied out after each status run -- extensions should
3258 3273 make sure they add to this list each time dirstate.status is called.
3259 3274 Extensions should also make sure they don't call this for statuses
3260 3275 that don't involve the dirstate.
3261 3276 """
3262 3277
3263 3278 # The list is located here for uniqueness reasons -- it is actually
3264 3279 # managed by the workingctx, but that isn't unique per-repo.
3265 3280 self._postdsstatus.append(ps)
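# Callback-shape sketch (illustrative only), matching the contract documented
# above:
#
#   def fixup(wctx, status):
#       # always reach the dirstate through wctx.repo().dirstate, never
#       # through a cached copy
#       pass
#
#   repo.addpostdsstatus(fixup)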
3266 3281
3267 3282 def postdsstatus(self):
3268 3283 """Used by workingctx to get the list of post-dirstate-status hooks."""
3269 3284 return self._postdsstatus
3270 3285
3271 3286 def clearpostdsstatus(self):
3272 3287 """Used by workingctx to clear post-dirstate-status hooks."""
3273 3288 del self._postdsstatus[:]
3274 3289
3275 3290 def heads(self, start=None):
3276 3291 if start is None:
3277 3292 cl = self.changelog
3278 3293 headrevs = reversed(cl.headrevs())
3279 3294 return [cl.node(rev) for rev in headrevs]
3280 3295
3281 3296 heads = self.changelog.heads(start)
3282 3297 # sort the output in rev descending order
3283 3298 return sorted(heads, key=self.changelog.rev, reverse=True)
3284 3299
3285 3300 def branchheads(self, branch=None, start=None, closed=False):
3286 3301 """return a (possibly filtered) list of heads for the given branch
3287 3302
3288 3303 Heads are returned in topological order, from newest to oldest.
3289 3304 If branch is None, use the dirstate branch.
3290 3305 If start is not None, return only heads reachable from start.
3291 3306 If closed is True, return heads that are marked as closed as well.
3292 3307 """
3293 3308 if branch is None:
3294 3309 branch = self[None].branch()
3295 3310 branches = self.branchmap()
3296 3311 if not branches.hasbranch(branch):
3297 3312 return []
3298 3313 # the cache returns heads ordered lowest to highest
3299 3314 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3300 3315 if start is not None:
3301 3316 # filter out the heads that cannot be reached from startrev
3302 3317 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3303 3318 bheads = [h for h in bheads if h in fbheads]
3304 3319 return bheads
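# Usage sketch (illustrative only): the heads of the 'default' branch,
# including closed ones, newest first.
#
#   heads = repo.branchheads(b'default', closed=True)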
3305 3320
3306 3321 def branches(self, nodes):
3307 3322 if not nodes:
3308 3323 nodes = [self.changelog.tip()]
3309 3324 b = []
3310 3325 for n in nodes:
3311 3326 t = n
3312 3327 while True:
3313 3328 p = self.changelog.parents(n)
3314 3329 if p[1] != self.nullid or p[0] == self.nullid:
3315 3330 b.append((t, n, p[0], p[1]))
3316 3331 break
3317 3332 n = p[0]
3318 3333 return b
3319 3334
3320 3335 def between(self, pairs):
3321 3336 r = []
3322 3337
3323 3338 for top, bottom in pairs:
3324 3339 n, l, i = top, [], 0
3325 3340 f = 1
3326 3341
3327 3342 while n != bottom and n != self.nullid:
3328 3343 p = self.changelog.parents(n)[0]
3329 3344 if i == f:
3330 3345 l.append(n)
3331 3346 f = f * 2
3332 3347 n = p
3333 3348 i += 1
3334 3349
3335 3350 r.append(l)
3336 3351
3337 3352 return r
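# Worked example for between() (illustrative only): walking first parents from
# ``top`` towards ``bottom``, a node is appended whenever the step counter
# ``i`` equals ``f`` (1, 2, 4, 8, ...), so each returned list samples the
# ancestors of ``top`` at exponentially growing distances.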
3338 3353
3339 3354 def checkpush(self, pushop):
3340 3355 """Extensions can override this function if additional checks have
3341 3356 to be performed before pushing, or call it if they override push
3342 3357 command.
3343 3358 """
3344 3359
3345 3360 @unfilteredpropertycache
3346 3361 def prepushoutgoinghooks(self):
3347 3362 """Return util.hooks consists of a pushop with repo, remote, outgoing
3348 3363 methods, which are called before pushing changesets.
3349 3364 """
3350 3365 return util.hooks()
3351 3366
3352 3367 def pushkey(self, namespace, key, old, new):
3353 3368 try:
3354 3369 tr = self.currenttransaction()
3355 3370 hookargs = {}
3356 3371 if tr is not None:
3357 3372 hookargs.update(tr.hookargs)
3358 3373 hookargs = pycompat.strkwargs(hookargs)
3359 3374 hookargs['namespace'] = namespace
3360 3375 hookargs['key'] = key
3361 3376 hookargs['old'] = old
3362 3377 hookargs['new'] = new
3363 3378 self.hook(b'prepushkey', throw=True, **hookargs)
3364 3379 except error.HookAbort as exc:
3365 3380 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3366 3381 if exc.hint:
3367 3382 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3368 3383 return False
3369 3384 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3370 3385 ret = pushkey.push(self, namespace, key, old, new)
3371 3386
3372 3387 def runhook(unused_success):
3373 3388 self.hook(
3374 3389 b'pushkey',
3375 3390 namespace=namespace,
3376 3391 key=key,
3377 3392 old=old,
3378 3393 new=new,
3379 3394 ret=ret,
3380 3395 )
3381 3396
3382 3397 self._afterlock(runhook)
3383 3398 return ret
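# Usage sketch (illustrative only; 'bookmarks' is one of the standard pushkey
# namespaces and bookmark values are hex node strings):
#
#   ok = repo.pushkey(b'bookmarks', b'my-book', b'', hex(newnode))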
3384 3399
3385 3400 def listkeys(self, namespace):
3386 3401 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3387 3402 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3388 3403 values = pushkey.list(self, namespace)
3389 3404 self.hook(b'listkeys', namespace=namespace, values=values)
3390 3405 return values
3391 3406
3392 3407 def debugwireargs(self, one, two, three=None, four=None, five=None):
3393 3408 '''used to test argument passing over the wire'''
3394 3409 return b"%s %s %s %s %s" % (
3395 3410 one,
3396 3411 two,
3397 3412 pycompat.bytestr(three),
3398 3413 pycompat.bytestr(four),
3399 3414 pycompat.bytestr(five),
3400 3415 )
3401 3416
3402 3417 def savecommitmessage(self, text):
3403 3418 fp = self.vfs(b'last-message.txt', b'wb')
3404 3419 try:
3405 3420 fp.write(text)
3406 3421 finally:
3407 3422 fp.close()
3408 3423 return self.pathto(fp.name[len(self.root) + 1 :])
3409 3424
3410 3425 def register_wanted_sidedata(self, category):
3411 3426 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3412 3427 # Only revlogv2 repos can want sidedata.
3413 3428 return
3414 3429 self._wanted_sidedata.add(pycompat.bytestr(category))
3415 3430
3416 3431 def register_sidedata_computer(
3417 3432 self, kind, category, keys, computer, flags, replace=False
3418 3433 ):
3419 3434 if kind not in revlogconst.ALL_KINDS:
3420 3435 msg = _(b"unexpected revlog kind '%s'.")
3421 3436 raise error.ProgrammingError(msg % kind)
3422 3437 category = pycompat.bytestr(category)
3423 3438 already_registered = category in self._sidedata_computers.get(kind, [])
3424 3439 if already_registered and not replace:
3425 3440 msg = _(
3426 3441 b"cannot register a sidedata computer twice for category '%s'."
3427 3442 )
3428 3443 raise error.ProgrammingError(msg % category)
3429 3444 if replace and not already_registered:
3430 3445 msg = _(
3431 3446 b"cannot replace a sidedata computer that isn't registered "
3432 3447 b"for category '%s'."
3433 3448 )
3434 3449 raise error.ProgrammingError(msg % category)
3435 3450 self._sidedata_computers.setdefault(kind, {})
3436 3451 self._sidedata_computers[kind][category] = (keys, computer, flags)
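# Registration sketch (illustrative only; the computer signature shown here is
# an assumption -- see the sidedata module for the authoritative contract):
#
#   def compute_example(repo, revlog, rev, sidedata):
#       ...  # return the updated sidedata payload for ``rev``
#
#   repo.register_sidedata_computer(
#       revlogconst.KIND_CHANGELOG,  # must be one of revlogconst.ALL_KINDS
#       b'example-category',
#       keys=(b'example-key',),  # hypothetical key set
#       computer=compute_example,
#       flags=0,  # assumed: no extra revlog flag needed
#   )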
3437 3452
3438 3453
3439 3454 # used to avoid circular references so destructors work
3440 3455 def aftertrans(files):
3441 3456 renamefiles = [tuple(t) for t in files]
3442 3457
3443 3458 def a():
3444 3459 for vfs, src, dest in renamefiles:
3445 3460 # if src and dest refer to the same file, vfs.rename is a no-op,
3446 3461 # leaving both src and dest on disk. delete dest to make sure
3447 3462 # the rename couldn't be such a no-op.
3448 3463 vfs.tryunlink(dest)
3449 3464 try:
3450 3465 vfs.rename(src, dest)
3451 3466 except OSError as exc: # journal file does not yet exist
3452 3467 if exc.errno != errno.ENOENT:
3453 3468 raise
3454 3469
3455 3470 return a
3456 3471
3457 3472
3458 3473 def undoname(fn):
3459 3474 base, name = os.path.split(fn)
3460 3475 assert name.startswith(b'journal')
3461 3476 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3462 3477
3463 3478
3464 3479 def instance(ui, path, create, intents=None, createopts=None):
3465 3480 localpath = urlutil.urllocalpath(path)
3466 3481 if create:
3467 3482 createrepository(ui, localpath, createopts=createopts)
3468 3483
3469 3484 return makelocalrepository(ui, localpath, intents=intents)
3470 3485
3471 3486
3472 3487 def islocal(path):
3473 3488 return True
3474 3489
3475 3490
3476 3491 def defaultcreateopts(ui, createopts=None):
3477 3492 """Populate the default creation options for a repository.
3478 3493
3479 3494 A dictionary of explicitly requested creation options can be passed
3480 3495 in. Missing keys will be populated.
3481 3496 """
3482 3497 createopts = dict(createopts or {})
3483 3498
3484 3499 if b'backend' not in createopts:
3485 3500 # experimental config: storage.new-repo-backend
3486 3501 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3487 3502
3488 3503 return createopts
3489 3504
3490 3505
3491 3506 def clone_requirements(ui, createopts, srcrepo):
3492 3507 """clone the requirements of a local repo for a local clone
3493 3508
3494 3509 The store requirements are unchanged while the working copy requirements
3495 3510 depend on the configuration.
3496 3511 """
3497 3512 target_requirements = set()
3498 3513 createopts = defaultcreateopts(ui, createopts=createopts)
3499 3514 for r in newreporequirements(ui, createopts):
3500 3515 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3501 3516 target_requirements.add(r)
3502 3517
3503 3518 for r in srcrepo.requirements:
3504 3519 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3505 3520 target_requirements.add(r)
3506 3521 return target_requirements
3507 3522
3508 3523
3509 3524 def newreporequirements(ui, createopts):
3510 3525 """Determine the set of requirements for a new local repository.
3511 3526
3512 3527 Extensions can wrap this function to specify custom requirements for
3513 3528 new repositories.
3514 3529 """
3515 3530 # If the repo is being created from a shared repository, we copy
3516 3531 # its requirements.
3517 3532 if b'sharedrepo' in createopts:
3518 3533 requirements = set(createopts[b'sharedrepo'].requirements)
3519 3534 if createopts.get(b'sharedrelative'):
3520 3535 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3521 3536 else:
3522 3537 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3523 3538
3524 3539 return requirements
3525 3540
3526 3541 if b'backend' not in createopts:
3527 3542 raise error.ProgrammingError(
3528 3543 b'backend key not present in createopts; '
3529 3544 b'was defaultcreateopts() called?'
3530 3545 )
3531 3546
3532 3547 if createopts[b'backend'] != b'revlogv1':
3533 3548 raise error.Abort(
3534 3549 _(
3535 3550 b'unable to determine repository requirements for '
3536 3551 b'storage backend: %s'
3537 3552 )
3538 3553 % createopts[b'backend']
3539 3554 )
3540 3555
3541 3556 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3542 3557 if ui.configbool(b'format', b'usestore'):
3543 3558 requirements.add(requirementsmod.STORE_REQUIREMENT)
3544 3559 if ui.configbool(b'format', b'usefncache'):
3545 3560 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3546 3561 if ui.configbool(b'format', b'dotencode'):
3547 3562 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3548 3563
3549 3564 compengines = ui.configlist(b'format', b'revlog-compression')
3550 3565 for compengine in compengines:
3551 3566 if compengine in util.compengines:
3552 3567 engine = util.compengines[compengine]
3553 3568 if engine.available() and engine.revlogheader():
3554 3569 break
3555 3570 else:
3556 3571 raise error.Abort(
3557 3572 _(
3558 3573 b'compression engines %s defined by '
3559 3574 b'format.revlog-compression not available'
3560 3575 )
3561 3576 % b', '.join(b'"%s"' % e for e in compengines),
3562 3577 hint=_(
3563 3578 b'run "hg debuginstall" to list available '
3564 3579 b'compression engines'
3565 3580 ),
3566 3581 )
3567 3582
3568 3583 # zlib is the historical default and doesn't need an explicit requirement.
3569 3584 if compengine == b'zstd':
3570 3585 requirements.add(b'revlog-compression-zstd')
3571 3586 elif compengine != b'zlib':
3572 3587 requirements.add(b'exp-compression-%s' % compengine)
3573 3588
3574 3589 if scmutil.gdinitconfig(ui):
3575 3590 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3576 3591 if ui.configbool(b'format', b'sparse-revlog'):
3577 3592 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3578 3593
3579 3594 # experimental config: format.exp-dirstate-v2
3580 3595 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3581 3596 if ui.configbool(b'format', b'exp-dirstate-v2'):
3582 3597 if dirstate.SUPPORTS_DIRSTATE_V2:
3583 3598 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3584 3599 else:
3585 3600 raise error.Abort(
3586 3601 _(
3587 3602 b"dirstate v2 format requested by config "
3588 3603 b"but not supported (requires Rust extensions)"
3589 3604 )
3590 3605 )
3591 3606
3592 3607 # experimental config: format.exp-use-copies-side-data-changeset
3593 3608 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3594 3609 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3595 3610 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3596 3611 if ui.configbool(b'experimental', b'treemanifest'):
3597 3612 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3598 3613
3599 3614 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3600 3615 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3601 3616 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3602 3617
3603 3618 revlogv2 = ui.config(b'experimental', b'revlogv2')
3604 3619 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3605 3620 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3606 3621 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3607 3622 # experimental config: format.internal-phase
3608 3623 if ui.configbool(b'format', b'internal-phase'):
3609 3624 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3610 3625
3611 3626 if createopts.get(b'narrowfiles'):
3612 3627 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3613 3628
3614 3629 if createopts.get(b'lfs'):
3615 3630 requirements.add(b'lfs')
3616 3631
3617 3632 if ui.configbool(b'format', b'bookmarks-in-store'):
3618 3633 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3619 3634
3620 3635 if ui.configbool(b'format', b'use-persistent-nodemap'):
3621 3636 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3622 3637
3623 3638 # if share-safe is enabled, let's create the new repository with the new
3624 3639 # requirement
3625 3640 if ui.configbool(b'format', b'use-share-safe'):
3626 3641 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3627 3642
3628 3643 return requirements
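# Example configuration (illustrative only) exercising the options read above;
# placing it in an hgrc before ``hg init`` changes the generated requirements:
#
#   [format]
#   usefncache = yes
#   dotencode = yes
#   revlog-compression = zstd, zlib
#   sparse-revlog = yes
#   use-persistent-nodemap = yes
#   use-share-safe = yes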
3629 3644
3630 3645
3631 3646 def checkrequirementscompat(ui, requirements):
3632 3647 """Checks compatibility of repository requirements enabled and disabled.
3633 3648
3634 3649 Returns a set of requirements which need to be dropped because dependent
3635 3650 requirements are not enabled. Also warns users about it."""
3636 3651
3637 3652 dropped = set()
3638 3653
3639 3654 if requirementsmod.STORE_REQUIREMENT not in requirements:
3640 3655 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3641 3656 ui.warn(
3642 3657 _(
3643 3658 b'ignoring enabled \'format.bookmarks-in-store\' config '
3644 3659 b'because it is incompatible with disabled '
3645 3660 b'\'format.usestore\' config\n'
3646 3661 )
3647 3662 )
3648 3663 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3649 3664
3650 3665 if (
3651 3666 requirementsmod.SHARED_REQUIREMENT in requirements
3652 3667 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3653 3668 ):
3654 3669 raise error.Abort(
3655 3670 _(
3656 3671 b"cannot create shared repository as source was created"
3657 3672 b" with 'format.usestore' config disabled"
3658 3673 )
3659 3674 )
3660 3675
3661 3676 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3662 3677 ui.warn(
3663 3678 _(
3664 3679 b"ignoring enabled 'format.use-share-safe' config because "
3665 3680 b"it is incompatible with disabled 'format.usestore'"
3666 3681 b" config\n"
3667 3682 )
3668 3683 )
3669 3684 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3670 3685
3671 3686 return dropped
3672 3687
3673 3688
3674 3689 def filterknowncreateopts(ui, createopts):
3675 3690 """Filters a dict of repo creation options against options that are known.
3676 3691
3677 3692 Receives a dict of repo creation options and returns a dict of those
3678 3693 options that we don't know how to handle.
3679 3694
3680 3695 This function is called as part of repository creation. If the
3681 3696 returned dict contains any items, repository creation will not
3682 3697 be allowed, as it means there was a request to create a repository
3683 3698 with options not recognized by loaded code.
3684 3699
3685 3700 Extensions can wrap this function to filter out creation options
3686 3701 they know how to handle.
3687 3702 """
3688 3703 known = {
3689 3704 b'backend',
3690 3705 b'lfs',
3691 3706 b'narrowfiles',
3692 3707 b'sharedrepo',
3693 3708 b'sharedrelative',
3694 3709 b'shareditems',
3695 3710 b'shallowfilestore',
3696 3711 }
3697 3712
3698 3713 return {k: v for k, v in createopts.items() if k not in known}
3699 3714
3700 3715
3701 3716 def createrepository(ui, path, createopts=None, requirements=None):
3702 3717 """Create a new repository in a vfs.
3703 3718
3704 3719 ``path`` path to the new repo's working directory.
3705 3720 ``createopts`` options for the new repository.
3706 3721 ``requirements`` predefined set of requirements.
3707 3722 (incompatible with ``createopts``)
3708 3723
3709 3724 The following keys for ``createopts`` are recognized:
3710 3725
3711 3726 backend
3712 3727 The storage backend to use.
3713 3728 lfs
3714 3729 Repository will be created with ``lfs`` requirement. The lfs extension
3715 3730 will automatically be loaded when the repository is accessed.
3716 3731 narrowfiles
3717 3732 Set up repository to support narrow file storage.
3718 3733 sharedrepo
3719 3734 Repository object from which storage should be shared.
3720 3735 sharedrelative
3721 3736 Boolean indicating if the path to the shared repo should be
3722 3737 stored as relative. By default, the pointer to the "parent" repo
3723 3738 is stored as an absolute path.
3724 3739 shareditems
3725 3740 Set of items to share to the new repository (in addition to storage).
3726 3741 shallowfilestore
3727 3742 Indicates that storage for files should be shallow (not all ancestor
3728 3743 revisions are known).
3729 3744 """
3730 3745
3731 3746 if requirements is not None:
3732 3747 if createopts is not None:
3733 3748 msg = b'cannot specify both createopts and requirements'
3734 3749 raise error.ProgrammingError(msg)
3735 3750 createopts = {}
3736 3751 else:
3737 3752 createopts = defaultcreateopts(ui, createopts=createopts)
3738 3753
3739 3754 unknownopts = filterknowncreateopts(ui, createopts)
3740 3755
3741 3756 if not isinstance(unknownopts, dict):
3742 3757 raise error.ProgrammingError(
3743 3758 b'filterknowncreateopts() did not return a dict'
3744 3759 )
3745 3760
3746 3761 if unknownopts:
3747 3762 raise error.Abort(
3748 3763 _(
3749 3764 b'unable to create repository because of unknown '
3750 3765 b'creation option: %s'
3751 3766 )
3752 3767 % b', '.join(sorted(unknownopts)),
3753 3768 hint=_(b'is a required extension not loaded?'),
3754 3769 )
3755 3770
3756 3771 requirements = newreporequirements(ui, createopts=createopts)
3757 3772 requirements -= checkrequirementscompat(ui, requirements)
3758 3773
3759 3774 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3760 3775
3761 3776 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3762 3777 if hgvfs.exists():
3763 3778 raise error.RepoError(_(b'repository %s already exists') % path)
3764 3779
3765 3780 if b'sharedrepo' in createopts:
3766 3781 sharedpath = createopts[b'sharedrepo'].sharedpath
3767 3782
3768 3783 if createopts.get(b'sharedrelative'):
3769 3784 try:
3770 3785 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3771 3786 sharedpath = util.pconvert(sharedpath)
3772 3787 except (IOError, ValueError) as e:
3773 3788 # ValueError is raised on Windows if the drive letters differ
3774 3789 # on each path.
3775 3790 raise error.Abort(
3776 3791 _(b'cannot calculate relative path'),
3777 3792 hint=stringutil.forcebytestr(e),
3778 3793 )
3779 3794
3780 3795 if not wdirvfs.exists():
3781 3796 wdirvfs.makedirs()
3782 3797
3783 3798 hgvfs.makedir(notindexed=True)
3784 3799 if b'sharedrepo' not in createopts:
3785 3800 hgvfs.mkdir(b'cache')
3786 3801 hgvfs.mkdir(b'wcache')
3787 3802
3788 3803 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3789 3804 if has_store and b'sharedrepo' not in createopts:
3790 3805 hgvfs.mkdir(b'store')
3791 3806
3792 3807 # We create an invalid changelog outside the store so very old
3793 3808 # Mercurial versions (which didn't know about the requirements
3794 3809 # file) encounter an error on reading the changelog. This
3795 3810 # effectively locks out old clients and prevents them from
3796 3811 # mucking with a repo in an unknown format.
3797 3812 #
3798 3813 # The revlog header has version 65535, which won't be recognized by
3799 3814 # such old clients.
3800 3815 hgvfs.append(
3801 3816 b'00changelog.i',
3802 3817 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3803 3818 b'layout',
3804 3819 )
3805 3820
3806 3821 # Filter the requirements into working copy and store ones
3807 3822 wcreq, storereq = scmutil.filterrequirements(requirements)
3808 3823 # write working copy ones
3809 3824 scmutil.writerequires(hgvfs, wcreq)
3810 3825 # If there are store requirements and the current repository
3811 3826 # is not a shared one, write stored requirements
3812 3827 # For new shared repository, we don't need to write the store
3813 3828 # requirements as they are already present in store requires
3814 3829 if storereq and b'sharedrepo' not in createopts:
3815 3830 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3816 3831 scmutil.writerequires(storevfs, storereq)
3817 3832
3818 3833 # Write out file telling readers where to find the shared store.
3819 3834 if b'sharedrepo' in createopts:
3820 3835 hgvfs.write(b'sharedpath', sharedpath)
3821 3836
3822 3837 if createopts.get(b'shareditems'):
3823 3838 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3824 3839 hgvfs.write(b'shared', shared)
3825 3840
3826 3841
3827 3842 def poisonrepository(repo):
3828 3843 """Poison a repository instance so it can no longer be used."""
3829 3844 # Perform any cleanup on the instance.
3830 3845 repo.close()
3831 3846
3832 3847 # Our strategy is to replace the type of the object with one that
3833 3848 # has all attribute lookups result in error.
3834 3849 #
3835 3850 # But we have to allow the close() method because some constructors
3836 3851 # of repos call close() on repo references.
3837 3852 class poisonedrepository(object):
3838 3853 def __getattribute__(self, item):
3839 3854 if item == 'close':
3840 3855 return object.__getattribute__(self, item)
3841 3856
3842 3857 raise error.ProgrammingError(
3843 3858 b'repo instances should not be used after unshare'
3844 3859 )
3845 3860
3846 3861 def close(self):
3847 3862 pass
3848 3863
3849 3864 # We may have a repoview, which intercepts __setattr__. So be sure
3850 3865 # we operate at the lowest level possible.
3851 3866 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,1297 +1,1295 b''
1 1 ===================================
2 2 Test the persistent on-disk nodemap
3 3 ===================================
4 4
5 5
6 6 $ cat << EOF >> $HGRCPATH
7 7 > [format]
8 8 > use-share-safe=yes
9 9 > [extensions]
10 10 > share=
11 11 > EOF
12 12
13 13 #if no-rust
14 14
15 15 $ cat << EOF >> $HGRCPATH
16 16 > [format]
17 17 > use-persistent-nodemap=yes
18 18 > [devel]
19 19 > persistent-nodemap=yes
20 20 > EOF
21 21
22 22 #endif
23 23
24 24 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
25 25 $ cd test-repo
26 26
27 27 Check handling of the default slow-path value
28 28
29 29 #if no-pure no-rust
30 30
31 31 $ hg id
32 32 abort: accessing `persistent-nodemap` repository without associated fast implementation.
33 33 (check `hg help config.format.use-persistent-nodemap` for details)
34 34 [255]
35 35
36 36 Unlock further checks (we are here to test the feature)
37 37
38 38 $ cat << EOF >> $HGRCPATH
39 39 > [storage]
40 40 > # to avoid spamming the test
41 41 > revlog.persistent-nodemap.slow-path=allow
42 42 > EOF
43 43
44 44 #endif
45 45
46 46 #if rust
47 47
48 48 Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule:
49 49 in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t`
50 50 (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs`
51 51 incorrectly used `libc::c_int` (32 bits).
52 52 As a result, -1 passed from Rust for the null revision became 4294967295 in C.
53 53
54 54 $ hg log -r 00000000
55 55 changeset: -1:000000000000
56 56 tag: tip
57 57 user:
58 58 date: Thu Jan 01 00:00:00 1970 +0000
59 59
60 60
61 61 #endif
62 62
63 63
64 64 $ hg debugformat
65 65 format-variant repo
66 66 fncache: yes
67 67 dirstate-v2: no
68 68 dotencode: yes
69 69 generaldelta: yes
70 70 share-safe: yes
71 71 sparserevlog: yes
72 72 persistent-nodemap: yes
73 73 copies-sdc: no
74 74 revlog-v2: no
75 75 changelog-v2: no
76 76 plain-cl-delta: yes
77 77 compression: zlib (no-zstd !)
78 78 compression: zstd (zstd !)
79 79 compression-level: default
80 80 $ hg debugbuilddag .+5000 --new-file
81 81
82 82 $ hg debugnodemap --metadata
83 83 uid: ???????? (glob)
84 84 tip-rev: 5000
85 85 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
86 86 data-length: 121088
87 87 data-unused: 0
88 88 data-unused: 0.000%
89 89 $ f --size .hg/store/00changelog.n
90 90 .hg/store/00changelog.n: size=62
91 91
92 92 Simple lookup works
93 93
94 94 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
95 95 $ hg log -r "$ANYNODE" --template '{rev}\n'
96 96 5000
97 97
98 98
99 99 #if rust
100 100
101 101 $ f --sha256 .hg/store/00changelog-*.nd
102 102 .hg/store/00changelog-????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
103 103
104 104 $ f --sha256 .hg/store/00manifest-*.nd
105 105 .hg/store/00manifest-????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
106 106 $ hg debugnodemap --dump-new | f --sha256 --size
107 107 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
108 108 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
109 109 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
110 110 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
111 111 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
112 112 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
113 113 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
114 114 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
115 115 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
116 116 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
117 117 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
118 118 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
119 119 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
120 120 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
121 121 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
122 122 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
123 123 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
124 124 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
125 125 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
126 126
127 127
128 128 #else
129 129
130 130 $ f --sha256 .hg/store/00changelog-*.nd
131 131 .hg/store/00changelog-????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
132 132 $ hg debugnodemap --dump-new | f --sha256 --size
133 133 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
134 134 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
135 135 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
136 136 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
137 137 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
138 138 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
139 139 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
140 140 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
141 141 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
142 142 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
143 143 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
144 144 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
145 145 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
146 146 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
147 147 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
148 148 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
149 149 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
150 150 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
151 151 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
152 152
153 153 #endif
154 154
155 155 $ hg debugnodemap --check
156 156 revision in index: 5001
157 157 revision in nodemap: 5001
158 158
159 159 add a new commit
160 160
161 161 $ hg up
162 162 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
163 163 $ echo foo > foo
164 164 $ hg add foo
165 165
166 166
167 167 Check slow-path config value handling
168 168 -------------------------------------
169 169
170 170 #if no-pure no-rust
171 171
172 172 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
173 173 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
174 174 falling back to default value: abort
175 175 abort: accessing `persistent-nodemap` repository without associated fast implementation.
176 176 (check `hg help config.format.use-persistent-nodemap` for details)
177 177 [255]
178 178
179 179 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
180 180 warning: accessing `persistent-nodemap` repository without associated fast implementation.
181 181 (check `hg help config.format.use-persistent-nodemap` for details)
182 182 changeset: 5000:6b02b8c7b966
183 183 tag: tip
184 184 user: debugbuilddag
185 185 date: Thu Jan 01 01:23:20 1970 +0000
186 186 summary: r5000
187 187
188 188 $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort"
189 189 abort: accessing `persistent-nodemap` repository without associated fast implementation.
190 190 (check `hg help config.format.use-persistent-nodemap` for details)
191 191 [255]
192 192
193 193 #else
194 194
195 195 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
196 196 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
197 197 falling back to default value: abort
198 198 6b02b8c7b966+ tip
199 199
200 200 #endif
201 201
202 202 $ hg ci -m 'foo'
203 203
204 204 #if no-pure no-rust
205 205 $ hg debugnodemap --metadata
206 206 uid: ???????? (glob)
207 207 tip-rev: 5001
208 208 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
209 209 data-length: 121088
210 210 data-unused: 0
211 211 data-unused: 0.000%
212 212 #else
213 213 $ hg debugnodemap --metadata
214 214 uid: ???????? (glob)
215 215 tip-rev: 5001
216 216 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
217 217 data-length: 121344
218 218 data-unused: 256
219 219 data-unused: 0.211%
220 220 #endif
221 221
222 222 $ f --size .hg/store/00changelog.n
223 223 .hg/store/00changelog.n: size=62
224 224
225 225 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
226 226
227 227 #if pure
228 228 $ f --sha256 .hg/store/00changelog-*.nd --size
229 229 .hg/store/00changelog-????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
230 230 #endif
231 231
232 232 #if rust
233 233 $ f --sha256 .hg/store/00changelog-*.nd --size
234 234 .hg/store/00changelog-????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
235 235 #endif
236 236
237 237 #if no-pure no-rust
238 238 $ f --sha256 .hg/store/00changelog-*.nd --size
239 239 .hg/store/00changelog-????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
240 240 #endif
241 241
242 242 $ hg debugnodemap --check
243 243 revision in index: 5002
244 244 revision in nodemap: 5002
245 245
246 246 Test code path without mmap
247 247 ---------------------------
248 248
249 249 $ echo bar > bar
250 250 $ hg add bar
251 251 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
252 252
253 253 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
254 254 revision in index: 5003
255 255 revision in nodemap: 5003
256 256 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
257 257 revision in index: 5003
258 258 revision in nodemap: 5003
259 259
260 260
261 261 #if pure
262 262 $ hg debugnodemap --metadata
263 263 uid: ???????? (glob)
264 264 tip-rev: 5002
265 265 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
266 266 data-length: 121600
267 267 data-unused: 512
268 268 data-unused: 0.421%
269 269 $ f --sha256 .hg/store/00changelog-*.nd --size
270 270 .hg/store/00changelog-????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
271 271 #endif
272 272 #if rust
273 273 $ hg debugnodemap --metadata
274 274 uid: ???????? (glob)
275 275 tip-rev: 5002
276 276 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
277 277 data-length: 121600
278 278 data-unused: 512
279 279 data-unused: 0.421%
280 280 $ f --sha256 .hg/store/00changelog-*.nd --size
281 281 .hg/store/00changelog-????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
282 282 #endif
283 283 #if no-pure no-rust
284 284 $ hg debugnodemap --metadata
285 285 uid: ???????? (glob)
286 286 tip-rev: 5002
287 287 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
288 288 data-length: 121088
289 289 data-unused: 0
290 290 data-unused: 0.000%
291 291 $ f --sha256 .hg/store/00changelog-*.nd --size
292 292 .hg/store/00changelog-????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
293 293 #endif
294 294
295 295 Test force warming the cache
296 296
297 297 $ rm .hg/store/00changelog.n
298 298 $ hg debugnodemap --metadata
299 299 $ hg debugupdatecache
300 300 #if pure
301 301 $ hg debugnodemap --metadata
302 302 uid: ???????? (glob)
303 303 tip-rev: 5002
304 304 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
305 305 data-length: 121088
306 306 data-unused: 0
307 307 data-unused: 0.000%
308 308 #else
309 309 $ hg debugnodemap --metadata
310 310 uid: ???????? (glob)
311 311 tip-rev: 5002
312 312 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
313 313 data-length: 121088
314 314 data-unused: 0
315 315 data-unused: 0.000%
316 316 #endif
317 317
318 318 Check out of sync nodemap
319 319 =========================
320 320
321 321 First copy old data on the side.
322 322
323 323 $ mkdir ../tmp-copies
324 324 $ cp .hg/store/00changelog-????????.nd .hg/store/00changelog.n ../tmp-copies
325 325
326 326 Nodemap lagging behind
327 327 ----------------------
328 328
329 329 make a new commit
330 330
331 331 $ echo bar2 > bar
332 332 $ hg ci -m 'bar2'
333 333 $ NODE=`hg log -r tip -T '{node}\n'`
334 334 $ hg log -r "$NODE" -T '{rev}\n'
335 335 5003
336 336
337 337 If the nodemap is lagging behind, it can catch up fine
338 338
339 339 $ hg debugnodemap --metadata
340 340 uid: ???????? (glob)
341 341 tip-rev: 5003
342 342 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
343 343 data-length: 121344 (pure !)
344 344 data-length: 121344 (rust !)
345 345 data-length: 121152 (no-rust no-pure !)
346 346 data-unused: 192 (pure !)
347 347 data-unused: 192 (rust !)
348 348 data-unused: 0 (no-rust no-pure !)
349 349 data-unused: 0.158% (pure !)
350 350 data-unused: 0.158% (rust !)
351 351 data-unused: 0.000% (no-rust no-pure !)
352 352 $ cp -f ../tmp-copies/* .hg/store/
353 353 $ hg debugnodemap --metadata
354 354 uid: ???????? (glob)
355 355 tip-rev: 5002
356 356 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
357 357 data-length: 121088
358 358 data-unused: 0
359 359 data-unused: 0.000%
360 360 $ hg log -r "$NODE" -T '{rev}\n'
361 361 5003
362 362
363 363 changelog altered
364 364 -----------------
365 365
366 366 If the nodemap is not gated behind a requirement, an unaware client can alter
367 367 the repository so that the revlog used to generate the nodemap is no longer
368 368 compatible with the persistent nodemap. We need to detect that.
369 369
370 370 $ hg up "$NODE~5"
371 371 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
372 372 $ echo bar > babar
373 373 $ hg add babar
374 374 $ hg ci -m 'babar'
375 375 created new head
376 376 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
377 377 $ hg log -r "$OTHERNODE" -T '{rev}\n'
378 378 5004
379 379
380 380 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
381 381
382 382 the nodemap should detect that the changelog has been tampered with and recover.
383 383
384 384 $ hg debugnodemap --metadata
385 385 uid: ???????? (glob)
386 386 tip-rev: 5002
387 387 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
388 388 data-length: 121536 (pure !)
389 389 data-length: 121088 (rust !)
390 390 data-length: 121088 (no-pure no-rust !)
391 391 data-unused: 448 (pure !)
392 392 data-unused: 0 (rust !)
393 393 data-unused: 0 (no-pure no-rust !)
394 394 data-unused: 0.000% (rust !)
395 395 data-unused: 0.369% (pure !)
396 396 data-unused: 0.000% (no-pure no-rust !)
397 397
398 398 $ cp -f ../tmp-copies/* .hg/store/
399 399 $ hg debugnodemap --metadata
400 400 uid: ???????? (glob)
401 401 tip-rev: 5002
402 402 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
403 403 data-length: 121088
404 404 data-unused: 0
405 405 data-unused: 0.000%
406 406 $ hg log -r "$OTHERNODE" -T '{rev}\n'
407 407 5002
408 408
409 409 missing data file
410 410 -----------------
411 411
412 412 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
413 413 > sed 's/uid: //'`
414 414 $ FILE=.hg/store/00changelog-"${UUID}".nd
415 415 $ mv $FILE ../tmp-data-file
416 416 $ cp .hg/store/00changelog.n ../tmp-docket
417 417
418 418 mercurial doesn't crash
419 419
420 420 $ hg log -r .
421 421 changeset: 5002:b355ef8adce0
422 422 tag: tip
423 423 parent: 4998:d918ad6d18d3
424 424 user: test
425 425 date: Thu Jan 01 00:00:00 1970 +0000
426 426 summary: babar
427 427
428 428 $ hg debugnodemap --metadata
429 429
430 430 $ hg debugupdatecache
431 431 $ hg debugnodemap --metadata
432 432 uid: * (glob)
433 433 tip-rev: 5002
434 434 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
435 435 data-length: 121088
436 436 data-unused: 0
437 437 data-unused: 0.000%
438 438
439 439 Sub-case: fallback for corrupted data file
440 440 ------------------------------------------
441 441
442 442 Sabotaging the data file so that nodemap resolutions fail, triggering a
443 443 fallback to the (non-persistent) C implementation.
444 444
445 445
446 446 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
447 447 > sed 's/uid: //'`
448 448 $ FILE=.hg/store/00changelog-"${UUID}".nd
449 449 $ python -c "fobj = open('$FILE', 'r+b'); fobj.write(b'\xff' * 121088); fobj.close()"
450 450
451 451 The nodemap data file is still considered in sync with the docket. This
452 452 would fail without the fallback to the (non-persistent) C implementation:
453 453
454 454 $ hg log -r b355ef8adce0949b8bdf6afc72ca853740d65944 -T '{rev}\n' --traceback
455 455 5002
456 456
457 457 The nodemap data file hasn't been fixed, so more tests can be inserted:
458 458
459 459 $ hg debugnodemap --dump-disk | f --bytes=256 --hexdump --size
460 460 size=121088
461 461 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
462 462 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
463 463 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
464 464 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
465 465 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
466 466 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
467 467 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
468 468 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
469 469 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
470 470 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
471 471 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
472 472 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
473 473 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
474 474 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
475 475 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
476 476 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
477 477
478 478 $ mv ../tmp-data-file $FILE
479 479 $ mv ../tmp-docket .hg/store/00changelog.n
480 480
481 481 Check transaction related property
482 482 ==================================
483 483
484 484 An up-to-date nodemap should be available to shell hooks.
485 485
486 486 $ echo dsljfl > a
487 487 $ hg add a
488 488 $ hg ci -m a
489 489 $ hg debugnodemap --metadata
490 490 uid: ???????? (glob)
491 491 tip-rev: 5003
492 492 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
493 493 data-length: 121088
494 494 data-unused: 0
495 495 data-unused: 0.000%
496 496 $ echo babar2 > babar
497 497 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
498 498 uid: ???????? (glob)
499 499 tip-rev: 5004
500 500 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
501 501 data-length: 121280 (pure !)
502 502 data-length: 121280 (rust !)
503 503 data-length: 121088 (no-pure no-rust !)
504 504 data-unused: 192 (pure !)
505 505 data-unused: 192 (rust !)
506 506 data-unused: 0 (no-pure no-rust !)
507 507 data-unused: 0.158% (pure !)
508 508 data-unused: 0.158% (rust !)
509 509 data-unused: 0.000% (no-pure no-rust !)
510 510 $ hg debugnodemap --metadata
511 511 uid: ???????? (glob)
512 512 tip-rev: 5004
513 513 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
514 514 data-length: 121280 (pure !)
515 515 data-length: 121280 (rust !)
516 516 data-length: 121088 (no-pure no-rust !)
517 517 data-unused: 192 (pure !)
518 518 data-unused: 192 (rust !)
519 519 data-unused: 0 (no-pure no-rust !)
520 520 data-unused: 0.158% (pure !)
521 521 data-unused: 0.158% (rust !)
522 522 data-unused: 0.000% (no-pure no-rust !)
523 523
524 524 Another process does not see the pending nodemap content during the run.
525 525
526 526 $ echo qpoasp > a
527 527 $ hg ci -m a2 \
528 528 > --config "hooks.pretxnclose=sh \"$RUNTESTDIR/testlib/wait-on-file\" 20 sync-repo-read sync-txn-pending" \
529 529 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
530 530
531 531 (read the repository while the commit transaction is pending)
532 532
533 533 $ sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-pending && \
534 534 > hg debugnodemap --metadata && \
535 535 > sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-close sync-repo-read
536 536 uid: ???????? (glob)
537 537 tip-rev: 5004
538 538 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
539 539 data-length: 121280 (pure !)
540 540 data-length: 121280 (rust !)
541 541 data-length: 121088 (no-pure no-rust !)
542 542 data-unused: 192 (pure !)
543 543 data-unused: 192 (rust !)
544 544 data-unused: 0 (no-pure no-rust !)
545 545 data-unused: 0.158% (pure !)
546 546 data-unused: 0.158% (rust !)
547 547 data-unused: 0.000% (no-pure no-rust !)
548 548 $ hg debugnodemap --metadata
549 549 uid: ???????? (glob)
550 550 tip-rev: 5005
551 551 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
552 552 data-length: 121536 (pure !)
553 553 data-length: 121536 (rust !)
554 554 data-length: 121088 (no-pure no-rust !)
555 555 data-unused: 448 (pure !)
556 556 data-unused: 448 (rust !)
557 557 data-unused: 0 (no-pure no-rust !)
558 558 data-unused: 0.369% (pure !)
559 559 data-unused: 0.369% (rust !)
560 560 data-unused: 0.000% (no-pure no-rust !)
561 561
562 562 $ cat output.txt
563 563
564 564 Check that a failing transaction will properly revert the data
565 565
566 566 $ echo plakfe > a
567 567 $ f --size --sha256 .hg/store/00changelog-*.nd
568 568 .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
569 569 .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
570 570 .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
571 571 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
572 572 transaction abort!
573 573 rollback completed
574 574 abort: This is a late abort
575 575 [255]
576 576 $ hg debugnodemap --metadata
577 577 uid: ???????? (glob)
578 578 tip-rev: 5005
579 579 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
580 580 data-length: 121536 (pure !)
581 581 data-length: 121536 (rust !)
582 582 data-length: 121088 (no-pure no-rust !)
583 583 data-unused: 448 (pure !)
584 584 data-unused: 448 (rust !)
585 585 data-unused: 0 (no-pure no-rust !)
586 586 data-unused: 0.369% (pure !)
587 587 data-unused: 0.369% (rust !)
588 588 data-unused: 0.000% (no-pure no-rust !)
589 589 $ f --size --sha256 .hg/store/00changelog-*.nd
590 590 .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
591 591 .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
592 592 .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
593 593
594 594 Check that removing content does not confuse the nodemap
595 595 --------------------------------------------------------
596 596
597 597 removing data with rollback
598 598
599 599 $ echo aso > a
600 600 $ hg ci -m a4
601 601 $ hg rollback
602 602 repository tip rolled back to revision 5005 (undo commit)
603 603 working directory now based on revision 5005
604 604 $ hg id -r .
605 605 90d5d3ba2fc4 tip
606 606
607 607 removing data with strip
608 608
609 609 $ echo aso > a
610 610 $ hg ci -m a4
611 611 $ hg --config extensions.strip= strip -r . --no-backup
612 612 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
613 613 $ hg id -r . --traceback
614 614 90d5d3ba2fc4 tip
615 615
616 616 (be a good citizen and regenerate the nodemap)
617 617 $ hg debugupdatecaches
618 618 $ hg debugnodemap --metadata
619 619 uid: * (glob)
620 620 tip-rev: 5005
621 621 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
622 622 data-length: 121088
623 623 data-unused: 0
624 624 data-unused: 0.000%
625 625
626 626 Check race condition when multiple processes write new data to the repository
627 627 -----------------------------------------------------------------------------
628 628
629 629 In this test, we check that two writers touching the repository will not
630 630 overwrite each other's data. This test is prompted by the existence of issue6554,
631 631 where a writer ended up using an outdated docket to update the repository. See
632 632 the dedicated extension for details on the race windows and read/write schedule
633 633 necessary to end up in this situation: testlib/persistent-nodemap-race-ext.py
634 634 
635 635 The issue was initially observed on a server with high push traffic, but it
636 636 can be reproduced more simply using a share and two committing processes.
637 637
638 638 The test is Rust-only, as the other implementations do not use the same
639 639 read/write patterns.
640 640
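The heart of the safe behaviour can be sketched as follows. The docket layout below (a bare 8-byte big-endian length) and the file handling are hypothetical simplifications, not Mercurial's actual format or API; the point is only the ordering: re-read the docket after taking the lock, append at the offset it records, then publish the new length.

  import struct

  DOCKET = '00changelog.n'      # hypothetical stand-ins for the real files
  DATAFILE = '00changelog.nd'

  def append_nodemap(new_blocks):
      # Re-read the docket *after* acquiring the store lock (lock elided
      # here): another writer may have appended data and moved the valid
      # length since this process last looked.
      with open(DOCKET, 'rb') as f:
          (data_length,) = struct.unpack('>Q', f.read(8))
      with open(DATAFILE, 'r+b') as f:
          f.seek(data_length)            # append at the fresh offset...
          f.write(new_blocks)            # ...never at a stale, cached one
      with open(DOCKET, 'wb') as f:      # publish the new valid length
          f.write(struct.pack('>Q', data_length + len(new_blocks)))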
641 641 $ cd ..
642 642
643 643 #if rust
644 644
645 645 $ cp -R test-repo race-repo
646 646 $ hg share race-repo ./other-wc --config format.use-share-safe=yes
647 647 updating working directory
648 648 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
649 649 $ hg debugformat -R ./race-repo | egrep 'share-safe|persistent-nodemap'
650 650 share-safe: yes
651 651 persistent-nodemap: yes
652 652 $ hg debugformat -R ./other-wc/ | egrep 'share-safe|persistent-nodemap'
653 653 share-safe: yes
654 654 persistent-nodemap: yes
655 655 $ hg -R ./other-wc update 'min(head())'
656 656 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
657 657 $ hg -R ./race-repo debugnodemap --metadata
658 658 uid: 43c37dde
659 659 tip-rev: 5005
660 660 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
661 661 data-length: 121088
662 662 data-unused: 0
663 663 data-unused: 0.000%
664 664 $ hg -R ./race-repo log -G -r 'head()'
665 665 @ changeset: 5005:90d5d3ba2fc4
666 666 | tag: tip
667 667 ~ user: test
668 668 date: Thu Jan 01 00:00:00 1970 +0000
669 669 summary: a2
670 670
671 671 o changeset: 5001:16395c3cf7e2
672 672 | user: test
673 673 ~ date: Thu Jan 01 00:00:00 1970 +0000
674 674 summary: foo
675 675
676 676 $ hg -R ./other-wc log -G -r 'head()'
677 677 o changeset: 5005:90d5d3ba2fc4
678 678 | tag: tip
679 679 ~ user: test
680 680 date: Thu Jan 01 00:00:00 1970 +0000
681 681 summary: a2
682 682
683 683 @ changeset: 5001:16395c3cf7e2
684 684 | user: test
685 685 ~ date: Thu Jan 01 00:00:00 1970 +0000
686 686 summary: foo
687 687
688 688 $ echo left-side-race > race-repo/left-side-race
689 689 $ hg -R ./race-repo/ add race-repo/left-side-race
690 690
691 691 $ echo right-side-race > ./other-wc/right-side-race
692 692 $ hg -R ./other-wc/ add ./other-wc/right-side-race
693 693
694 694 $ mkdir sync-files
695 695 $ mkdir outputs
696 696 $ (
697 697 > hg -R ./race-repo/ commit -m left-side-commit \
698 698 > --config "extensions.race=${RUNTESTDIR}/testlib/persistent-nodemap-race-ext.py" \
699 699 > --config 'devel.nodemap-race.role=left';
700 700 > touch sync-files/left-done
701 701 > ) > outputs/left.txt 2>&1 &
702 702 $ (
703 703 > hg -R ./other-wc/ commit -m right-side-commit \
704 704 > --config "extensions.race=${RUNTESTDIR}/testlib/persistent-nodemap-race-ext.py" \
705 705 > --config 'devel.nodemap-race.role=right';
706 706 > touch sync-files/right-done
707 707 > ) > outputs/right.txt 2>&1 &
708 708 $ (
709 709 > hg -R ./race-repo/ check-nodemap-race \
710 710 > --config "extensions.race=${RUNTESTDIR}/testlib/persistent-nodemap-race-ext.py" \
711 711 > --config 'devel.nodemap-race.role=reader';
712 712 > touch sync-files/reader-done
713 713 > ) > outputs/reader.txt 2>&1 &
714 714 $ sh "$RUNTESTDIR"/testlib/wait-on-file 10 sync-files/left-done
715 715 $ cat outputs/left.txt
716 716 docket-details:
717 717 uid: 43c37dde
718 718 actual-tip: 5005
719 719 tip-rev: 5005
720 720 data-length: 121088
721 721 nodemap-race: left side locked and ready to commit
722 722 docket-details:
723 723 uid: 43c37dde
724 724 actual-tip: 5005
725 725 tip-rev: 5005
726 726 data-length: 121088
727 727 finalized changelog write
728 728 persisting changelog nodemap
729 729 new data start at 121088
730 730 persisted changelog nodemap
731 731 docket-details:
732 732 uid: 43c37dde
733 733 actual-tip: 5006
734 734 tip-rev: 5006
735 735 data-length: 121280
736 736 $ sh "$RUNTESTDIR"/testlib/wait-on-file 10 sync-files/right-done
737 737 $ cat outputs/right.txt
738 738 nodemap-race: right side start of the locking sequence
739 739 nodemap-race: right side reading changelog
740 740 nodemap-race: right side reading of changelog is done
741 741 docket-details:
742 742 uid: 43c37dde
743 743 actual-tip: 5006
744 744 tip-rev: 5005
745 745 data-length: 121088
746 746 nodemap-race: right side ready to wait for the lock
747 747 nodemap-race: right side locked and ready to commit
748 748 docket-details:
749 749 uid: 43c37dde
750 750 actual-tip: 5006
751 tip-rev: 5005
752 data-length: 121088
751 tip-rev: 5006
752 data-length: 121280
753 753 right ready to write, waiting for reader
754 754 right proceeding with writing its changelog index and nodemap
755 755 finalized changelog write
756 756 persisting changelog nodemap
757 new data start at 121088
757 new data start at 121280
758 758 persisted changelog nodemap
759 759 docket-details:
760 760 uid: 43c37dde
761 761 actual-tip: 5007
762 762 tip-rev: 5007
763 data-length: 121472
763 data-length: 121536
764 764 $ sh "$RUNTESTDIR"/testlib/wait-on-file 10 sync-files/reader-done
765 765 $ cat outputs/reader.txt
766 766 reader: reading changelog
767 767 reader ready to read the changelog, waiting for right
768 768 reader: nodemap docket read
769 769 record-data-length: 121280
770 770 actual-data-length: 121280
771 file-actual-length: 121472
771 file-actual-length: 121536
772 772 reader: changelog read
773 773 docket-details:
774 774 uid: 43c37dde
775 775 actual-tip: 5006
776 776 tip-rev: 5006
777 777 data-length: 121280
778 778 tip-rev: 5006
779 779 tip-node: 492901161367
780 780 node-rev: 5006
781 error while checking revision: 18 (known-bad-output !)
782 Inconsistency: Revision 5007 found in nodemap is not in revlog index (known-bad-output !)
783 781
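(The two known-bad lines above are the issue6554 symptom: the nodemap data file on disk has already grown past the length recorded in the docket the reader loaded, 121536 bytes on disk versus the 121280 recorded, so the nodemap references revision 5007 while the changelog index the reader is holding stops at 5006.)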
784 782 $ hg -R ./race-repo log -G -r 'head()'
785 783 o changeset: 5007:ac4a2abde241
786 784 | tag: tip
787 785 ~ parent: 5001:16395c3cf7e2
788 786 user: test
789 787 date: Thu Jan 01 00:00:00 1970 +0000
790 788 summary: right-side-commit
791 789
792 790 @ changeset: 5006:492901161367
793 791 | user: test
794 792 ~ date: Thu Jan 01 00:00:00 1970 +0000
795 793 summary: left-side-commit
796 794
797 795 $ hg -R ./other-wc log -G -r 'head()'
798 796 @ changeset: 5007:ac4a2abde241
799 797 | tag: tip
800 798 ~ parent: 5001:16395c3cf7e2
801 799 user: test
802 800 date: Thu Jan 01 00:00:00 1970 +0000
803 801 summary: right-side-commit
804 802
805 803 o changeset: 5006:492901161367
806 804 | user: test
807 805 ~ date: Thu Jan 01 00:00:00 1970 +0000
808 806 summary: left-side-commit
809 807
810 808 #endif
811 809
812 810 Test upgrade / downgrade
813 811 ========================
814 812
815 813 $ cd ./test-repo/
816 814
817 815 downgrading
818 816
819 817 $ cat << EOF >> .hg/hgrc
820 818 > [format]
821 819 > use-persistent-nodemap=no
822 820 > EOF
823 821 $ hg debugformat -v
824 822 format-variant repo config default
825 823 fncache: yes yes yes
826 824 dirstate-v2: no no no
827 825 dotencode: yes yes yes
828 826 generaldelta: yes yes yes
829 827 share-safe: yes yes no
830 828 sparserevlog: yes yes yes
831 829 persistent-nodemap: yes no no
832 830 copies-sdc: no no no
833 831 revlog-v2: no no no
834 832 changelog-v2: no no no
835 833 plain-cl-delta: yes yes yes
836 834 compression: zlib zlib zlib (no-zstd !)
837 835 compression: zstd zstd zstd (zstd !)
838 836 compression-level: default default default
839 837 $ hg debugupgraderepo --run --no-backup --quiet
840 838 upgrade will perform the following actions:
841 839
842 840 requirements
843 841 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
844 842 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
845 843 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
846 844 removed: persistent-nodemap
847 845
848 846 processed revlogs:
849 847 - all-filelogs
850 848 - changelog
851 849 - manifest
852 850
853 851 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
854 852 00changelog-*.nd (glob)
855 853 00manifest-*.nd (glob)
856 854 undo.backup.00changelog.n
857 855 undo.backup.00manifest.n
858 856 $ hg debugnodemap --metadata
859 857
860 858
861 859 upgrading
862 860
863 861 $ cat << EOF >> .hg/hgrc
864 862 > [format]
865 863 > use-persistent-nodemap=yes
866 864 > EOF
867 865 $ hg debugformat -v
868 866 format-variant repo config default
869 867 fncache: yes yes yes
870 868 dirstate-v2: no no no
871 869 dotencode: yes yes yes
872 870 generaldelta: yes yes yes
873 871 share-safe: yes yes no
874 872 sparserevlog: yes yes yes
875 873 persistent-nodemap: no yes no
876 874 copies-sdc: no no no
877 875 revlog-v2: no no no
878 876 changelog-v2: no no no
879 877 plain-cl-delta: yes yes yes
880 878 compression: zlib zlib zlib (no-zstd !)
881 879 compression: zstd zstd zstd (zstd !)
882 880 compression-level: default default default
883 881 $ hg debugupgraderepo --run --no-backup --quiet
884 882 upgrade will perform the following actions:
885 883
886 884 requirements
887 885 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
888 886 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
889 887 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
890 888 added: persistent-nodemap
891 889
892 890 processed revlogs:
893 891 - all-filelogs
894 892 - changelog
895 893 - manifest
896 894
897 895 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
898 896 00changelog-*.nd (glob)
899 897 00changelog.n
900 898 00manifest-*.nd (glob)
901 899 00manifest.n
902 900 undo.backup.00changelog.n
903 901 undo.backup.00manifest.n
904 902
905 903 $ hg debugnodemap --metadata
906 904 uid: * (glob)
907 905 tip-rev: 5005
908 906 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
909 907 data-length: 121088
910 908 data-unused: 0
911 909 data-unused: 0.000%
912 910
913 911 Running unrelated upgrade
914 912
915 913 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
916 914 upgrade will perform the following actions:
917 915
918 916 requirements
919 917 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
920 918 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
921 919 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
922 920
923 921 optimisations: re-delta-all
924 922
925 923 processed revlogs:
926 924 - all-filelogs
927 925 - changelog
928 926 - manifest
929 927
930 928 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
931 929 00changelog-*.nd (glob)
932 930 00changelog.n
933 931 00manifest-*.nd (glob)
934 932 00manifest.n
935 933
936 934 $ hg debugnodemap --metadata
937 935 uid: * (glob)
938 936 tip-rev: 5005
939 937 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
940 938 data-length: 121088
941 939 data-unused: 0
942 940 data-unused: 0.000%
943 941
944 942 Persistent nodemap and local/streaming clone
945 943 ============================================
946 944
947 945 $ cd ..
948 946
949 947 standard clone
950 948 --------------
951 949
952 950 The persistent nodemap should exist after a standard clone
953 951
954 952 $ hg clone --pull --quiet -U test-repo standard-clone
955 953 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
956 954 00changelog-*.nd (glob)
957 955 00changelog.n
958 956 00manifest-*.nd (glob)
959 957 00manifest.n
960 958 $ hg -R standard-clone debugnodemap --metadata
961 959 uid: * (glob)
962 960 tip-rev: 5005
963 961 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
964 962 data-length: 121088
965 963 data-unused: 0
966 964 data-unused: 0.000%
967 965
968 966
969 967 local clone
970 968 ------------
971 969
972 970 The persistent nodemap should exist after a local clone
973 971
974 972 $ hg clone -U test-repo local-clone
975 973 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
976 974 00changelog-*.nd (glob)
977 975 00changelog.n
978 976 00manifest-*.nd (glob)
979 977 00manifest.n
980 978 $ hg -R local-clone debugnodemap --metadata
981 979 uid: * (glob)
982 980 tip-rev: 5005
983 981 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
984 982 data-length: 121088
985 983 data-unused: 0
986 984 data-unused: 0.000%
987 985
988 986 Test various corruption cases
989 987 =============================
990 988
991 989 Missing datafile
992 990 ----------------
993 991
994 992 Test behavior with a missing datafile
995 993
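What should happen here, sketched with illustrative names rather than Mercurial's actual API: when the data file referenced by the docket is missing, or shorter than the recorded length (the case exercised in the next section), the persistent nodemap is simply ignored and lookups fall back to scanning the revlog index.

  import os

  def load_nodemap(datafile_path, data_length):
      """Return usable nodemap bytes, or None to fall back to a linear scan.

      Both arguments would come from the docket; the names are made up for
      this sketch.
      """
      if not os.path.exists(datafile_path):
          return None                  # missing data file: ignore the nodemap
      with open(datafile_path, 'rb') as f:
          data = f.read(data_length)
      if len(data) < data_length:
          return None                  # truncated data file: ignore it too
      return data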
996 994 $ hg clone --quiet --pull test-repo corruption-test-repo
997 995 $ ls -1 corruption-test-repo/.hg/store/00changelog*
998 996 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
999 997 corruption-test-repo/.hg/store/00changelog.d
1000 998 corruption-test-repo/.hg/store/00changelog.i
1001 999 corruption-test-repo/.hg/store/00changelog.n
1002 1000 $ rm corruption-test-repo/.hg/store/00changelog*.nd
1003 1001 $ hg log -R corruption-test-repo -r .
1004 1002 changeset: 5005:90d5d3ba2fc4
1005 1003 tag: tip
1006 1004 user: test
1007 1005 date: Thu Jan 01 00:00:00 1970 +0000
1008 1006 summary: a2
1009 1007
1010 1008 $ ls -1 corruption-test-repo/.hg/store/00changelog*
1011 1009 corruption-test-repo/.hg/store/00changelog.d
1012 1010 corruption-test-repo/.hg/store/00changelog.i
1013 1011 corruption-test-repo/.hg/store/00changelog.n
1014 1012
1015 1013 Truncated data file
1016 1014 -------------------
1017 1015
1018 1016 Test behavior with a too short datafile
1019 1017
1020 1018 rebuild the missing data
1021 1019 $ hg -R corruption-test-repo debugupdatecache
1022 1020 $ ls -1 corruption-test-repo/.hg/store/00changelog*
1023 1021 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
1024 1022 corruption-test-repo/.hg/store/00changelog.d
1025 1023 corruption-test-repo/.hg/store/00changelog.i
1026 1024 corruption-test-repo/.hg/store/00changelog.n
1027 1025
1028 1026 truncate the file
1029 1027
1030 1028 $ datafilepath=`ls corruption-test-repo/.hg/store/00changelog*.nd`
1031 1029 $ f -s $datafilepath
1032 1030 corruption-test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
1033 1031 $ dd if=$datafilepath bs=1000 count=10 of=$datafilepath-tmp status=noxfer
1034 1032 10+0 records in
1035 1033 10+0 records out
1036 1034 $ mv $datafilepath-tmp $datafilepath
1037 1035 $ f -s $datafilepath
1038 1036 corruption-test-repo/.hg/store/00changelog-*.nd: size=10000 (glob)
1039 1037
1040 1038 Check Mercurial's reaction to this event
1041 1039
1042 1040 $ hg -R corruption-test-repo log -r . --traceback
1043 1041 changeset: 5005:90d5d3ba2fc4
1044 1042 tag: tip
1045 1043 user: test
1046 1044 date: Thu Jan 01 00:00:00 1970 +0000
1047 1045 summary: a2
1048 1046
1049 1047
1050 1048
1051 1049 stream clone
1052 1050 ============
1053 1051
1054 1052 The persistent nodemap should exist after a streaming clone
1055 1053
1056 1054 Simple case
1057 1055 -----------
1058 1056
1059 1057 No race condition
1060 1058
1061 1059 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
1062 1060 adding [s] 00manifest.n (62 bytes)
1063 1061 adding [s] 00manifest-*.nd (118 KB) (glob)
1064 1062 adding [s] 00changelog.n (62 bytes)
1065 1063 adding [s] 00changelog-*.nd (118 KB) (glob)
1066 1064 adding [s] 00manifest.d (452 KB) (no-zstd !)
1067 1065 adding [s] 00manifest.d (491 KB) (zstd !)
1068 1066 adding [s] 00changelog.d (360 KB) (no-zstd !)
1069 1067 adding [s] 00changelog.d (368 KB) (zstd !)
1070 1068 adding [s] 00manifest.i (313 KB)
1071 1069 adding [s] 00changelog.i (313 KB)
1072 1070 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
1073 1071 00changelog-*.nd (glob)
1074 1072 00changelog.n
1075 1073 00manifest-*.nd (glob)
1076 1074 00manifest.n
1077 1075 $ hg -R stream-clone debugnodemap --metadata
1078 1076 uid: * (glob)
1079 1077 tip-rev: 5005
1080 1078 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1081 1079 data-length: 121088
1082 1080 data-unused: 0
1083 1081 data-unused: 0.000%
1084 1082
1085 1083 new data appended
1086 1084 -----------------
1087 1085
1088 1086 Other commit happening on the server during the stream clone
1089 1087
1090 1088 setup the step-by-step stream cloning
1091 1089
1092 1090 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
1093 1091 $ export HG_TEST_STREAM_WALKED_FILE_1
1094 1092 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
1095 1093 $ export HG_TEST_STREAM_WALKED_FILE_2
1096 1094 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
1097 1095 $ export HG_TEST_STREAM_WALKED_FILE_3
1098 1096 $ cat << EOF >> test-repo/.hg/hgrc
1099 1097 > [extensions]
1100 1098 > steps=$RUNTESTDIR/testlib/ext-stream-clone-steps.py
1101 1099 > EOF
1102 1100
1103 1101 Check and record file state beforehand
1104 1102
1105 1103 $ f --size test-repo/.hg/store/00changelog*
1106 1104 test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
1107 1105 test-repo/.hg/store/00changelog.d: size=376891 (zstd !)
1108 1106 test-repo/.hg/store/00changelog.d: size=368890 (no-zstd !)
1109 1107 test-repo/.hg/store/00changelog.i: size=320384
1110 1108 test-repo/.hg/store/00changelog.n: size=62
1111 1109 $ hg -R test-repo debugnodemap --metadata | tee server-metadata.txt
1112 1110 uid: * (glob)
1113 1111 tip-rev: 5005
1114 1112 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1115 1113 data-length: 121088
1116 1114 data-unused: 0
1117 1115 data-unused: 0.000%
1118 1116
1119 1117 Prepare a commit
1120 1118
1121 1119 $ echo foo >> test-repo/foo
1122 1120 $ hg -R test-repo/ add test-repo/foo
1123 1121
1124 1122 Do a mix of clone and commit at the same time so that the files listed on disk differ at actual transfer time.
1125 1123
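(The sequencing assumed from the sync files, with the exact steps defined by the extension above: the cloning process touches $HG_TEST_STREAM_WALKED_FILE_1 once it has listed the files to send, then blocks until $HG_TEST_STREAM_WALKED_FILE_2 exists; the commit below happens in that window, and the subshell touches $HG_TEST_STREAM_WALKED_FILE_3 when the clone has finished.)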
1126 1124 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1127 1125 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1128 1126 $ hg -R test-repo/ commit -m foo
1129 1127 $ touch $HG_TEST_STREAM_WALKED_FILE_2
1130 1128 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
1131 1129 $ cat clone-output
1132 1130 adding [s] 00manifest.n (62 bytes)
1133 1131 adding [s] 00manifest-*.nd (118 KB) (glob)
1134 1132 adding [s] 00changelog.n (62 bytes)
1135 1133 adding [s] 00changelog-*.nd (118 KB) (glob)
1136 1134 adding [s] 00manifest.d (452 KB) (no-zstd !)
1137 1135 adding [s] 00manifest.d (491 KB) (zstd !)
1138 1136 adding [s] 00changelog.d (360 KB) (no-zstd !)
1139 1137 adding [s] 00changelog.d (368 KB) (zstd !)
1140 1138 adding [s] 00manifest.i (313 KB)
1141 1139 adding [s] 00changelog.i (313 KB)
1142 1140
1143 1141 Check the result state
1144 1142
1145 1143 $ f --size stream-clone-race-1/.hg/store/00changelog*
1146 1144 stream-clone-race-1/.hg/store/00changelog-*.nd: size=121088 (glob)
1147 1145 stream-clone-race-1/.hg/store/00changelog.d: size=368890 (no-zstd !)
1148 1146 stream-clone-race-1/.hg/store/00changelog.d: size=376891 (zstd !)
1149 1147 stream-clone-race-1/.hg/store/00changelog.i: size=320384
1150 1148 stream-clone-race-1/.hg/store/00changelog.n: size=62
1151 1149
1152 1150 $ hg -R stream-clone-race-1 debugnodemap --metadata | tee client-metadata.txt
1153 1151 uid: * (glob)
1154 1152 tip-rev: 5005
1155 1153 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1156 1154 data-length: 121088
1157 1155 data-unused: 0
1158 1156 data-unused: 0.000%
1159 1157
1160 1158 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
1161 1159 (i.e. the following diff should be empty).
1162 1160 
1163 1161 This isn't the case for the `no-rust` `no-pure` implementation, as it uses a very minimal nodemap implementation that unconditionally rewrites the nodemap "all the time"; the full rewrite creates a fresh data file, which is why only the `uid` line differs below.
1164 1162
1165 1163 #if no-rust no-pure
1166 1164 $ diff -u server-metadata.txt client-metadata.txt
1167 1165 --- server-metadata.txt * (glob)
1168 1166 +++ client-metadata.txt * (glob)
1169 1167 @@ -1,4 +1,4 @@
1170 1168 -uid: * (glob)
1171 1169 +uid: * (glob)
1172 1170 tip-rev: 5005
1173 1171 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1174 1172 data-length: 121088
1175 1173 [1]
1176 1174 #else
1177 1175 $ diff -u server-metadata.txt client-metadata.txt
1178 1176 #endif
1179 1177
1180 1178
1181 1179 Clean up after the test.
1182 1180
1183 1181 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_1"
1184 1182 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_2"
1185 1183 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_3"
1186 1184
1187 1185 full regeneration
1188 1186 -----------------
1189 1187
1190 1188 A full nodemap is generated
1191 1189
1192 1190 (ideally this test would append enough data to make sure the nodemap data file
1193 1191 gets changed; however, to make things simpler, we will force the regeneration
1194 1192 for this test.)
1195 1193
1196 1194 Check the initial state
1197 1195
1198 1196 $ f --size test-repo/.hg/store/00changelog*
1199 1197 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
1200 1198 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
1201 1199 test-repo/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
1202 1200 test-repo/.hg/store/00changelog.d: size=376950 (zstd !)
1203 1201 test-repo/.hg/store/00changelog.d: size=368949 (no-zstd !)
1204 1202 test-repo/.hg/store/00changelog.i: size=320448
1205 1203 test-repo/.hg/store/00changelog.n: size=62
1206 1204 $ hg -R test-repo debugnodemap --metadata | tee server-metadata-2.txt
1207 1205 uid: * (glob)
1208 1206 tip-rev: 5006
1209 1207 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1210 1208 data-length: 121344 (rust !)
1211 1209 data-length: 121344 (pure !)
1212 1210 data-length: 121152 (no-rust no-pure !)
1213 1211 data-unused: 192 (rust !)
1214 1212 data-unused: 192 (pure !)
1215 1213 data-unused: 0 (no-rust no-pure !)
1216 1214 data-unused: 0.158% (rust !)
1217 1215 data-unused: 0.158% (pure !)
1218 1216 data-unused: 0.000% (no-rust no-pure !)
1219 1217
1220 1218 Perform the mix of clone and full refresh of the nodemap, so that the files
1221 1219 (and filenames) are different between listing time and actual transfer time.
1222 1220
1223 1221 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1224 1222 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1225 1223 $ rm test-repo/.hg/store/00changelog.n
1226 1224 $ rm test-repo/.hg/store/00changelog-*.nd
1227 1225 $ hg -R test-repo/ debugupdatecache
1228 1226 $ touch $HG_TEST_STREAM_WALKED_FILE_2
1229 1227 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
1230 1228
1231 1229 (note: the stream clone code wrongly picks the `undo.` files)
1232 1230
1233 1231 $ cat clone-output-2
1234 1232 adding [s] undo.backup.00manifest.n (62 bytes) (known-bad-output !)
1235 1233 adding [s] undo.backup.00changelog.n (62 bytes) (known-bad-output !)
1236 1234 adding [s] 00manifest.n (62 bytes)
1237 1235 adding [s] 00manifest-*.nd (118 KB) (glob)
1238 1236 adding [s] 00changelog.n (62 bytes)
1239 1237 adding [s] 00changelog-*.nd (118 KB) (glob)
1240 1238 adding [s] 00manifest.d (492 KB) (zstd !)
1241 1239 adding [s] 00manifest.d (452 KB) (no-zstd !)
1242 1240 adding [s] 00changelog.d (360 KB) (no-zstd !)
1243 1241 adding [s] 00changelog.d (368 KB) (zstd !)
1244 1242 adding [s] 00manifest.i (313 KB)
1245 1243 adding [s] 00changelog.i (313 KB)
1246 1244
1247 1245 Check the result.
1248 1246
1249 1247 $ f --size stream-clone-race-2/.hg/store/00changelog*
1250 1248 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
1251 1249 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
1252 1250 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
1253 1251 stream-clone-race-2/.hg/store/00changelog.d: size=376950 (zstd !)
1254 1252 stream-clone-race-2/.hg/store/00changelog.d: size=368949 (no-zstd !)
1255 1253 stream-clone-race-2/.hg/store/00changelog.i: size=320448
1256 1254 stream-clone-race-2/.hg/store/00changelog.n: size=62
1257 1255
1258 1256 $ hg -R stream-clone-race-2 debugnodemap --metadata | tee client-metadata-2.txt
1259 1257 uid: * (glob)
1260 1258 tip-rev: 5006
1261 1259 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1262 1260 data-length: 121344 (rust !)
1263 1261 data-unused: 192 (rust !)
1264 1262 data-unused: 0.158% (rust !)
1265 1263 data-length: 121152 (no-rust no-pure !)
1266 1264 data-unused: 0 (no-rust no-pure !)
1267 1265 data-unused: 0.000% (no-rust no-pure !)
1268 1266 data-length: 121344 (pure !)
1269 1267 data-unused: 192 (pure !)
1270 1268 data-unused: 0.158% (pure !)
1271 1269
1272 1270 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
1273 1271 (i.e. the following diff should be empty).
1274 1272 
1275 1273 This isn't the case for the `no-rust` `no-pure` implementation, as it uses a very minimal nodemap implementation that unconditionally rewrites the nodemap "all the time".
1276 1274
1277 1275 #if no-rust no-pure
1278 1276 $ diff -u server-metadata-2.txt client-metadata-2.txt
1279 1277 --- server-metadata-2.txt * (glob)
1280 1278 +++ client-metadata-2.txt * (glob)
1281 1279 @@ -1,4 +1,4 @@
1282 1280 -uid: * (glob)
1283 1281 +uid: * (glob)
1284 1282 tip-rev: 5006
1285 1283 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1286 1284 data-length: 121152
1287 1285 [1]
1288 1286 #else
1289 1287 $ diff -u server-metadata-2.txt client-metadata-2.txt
1290 1288 #endif
1291 1289
1292 1290 Clean up after the test
1293 1291
1294 1292 $ rm -f $HG_TEST_STREAM_WALKED_FILE_1
1295 1293 $ rm -f $HG_TEST_STREAM_WALKED_FILE_2
1296 1294 $ rm -f $HG_TEST_STREAM_WALKED_FILE_3
1297 1295