bookmarks: actual fix for race condition deleting bookmark...
marmoute -
r42903:e0cf09bc stable
@@ -1,3247 +1,3296 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
79 79 release = lockmod.release
80 80 urlerr = util.urlerr
81 81 urlreq = util.urlreq
82 82
83 83 # set of (path, vfs-location) tuples. vfs-location is:
84 84 # - 'plain' for vfs relative paths
85 85 # - '' for svfs relative paths
86 86 _cachedfiles = set()
87 87
88 88 class _basefilecache(scmutil.filecache):
89 89 """All filecache usage on repo are done for logic that should be unfiltered
90 90 """
91 91 def __get__(self, repo, type=None):
92 92 if repo is None:
93 93 return self
94 94 # proxy to unfiltered __dict__ since filtered repo has no entry
95 95 unfi = repo.unfiltered()
96 96 try:
97 97 return unfi.__dict__[self.sname]
98 98 except KeyError:
99 99 pass
100 100 return super(_basefilecache, self).__get__(unfi, type)
101 101
102 102 def set(self, repo, value):
103 103 return super(_basefilecache, self).set(repo.unfiltered(), value)
104 104
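
The proxying above means every filtered view of a repository shares one cache entry with the unfiltered repository. A minimal, self-contained sketch of that idea (toy code, not the Mercurial API):

    class sharedcache(object):
        """Descriptor that caches its value on the unfiltered object only."""
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            if obj is None:
                return self
            # every filtered view delegates to the one unfiltered object
            unfi = obj.unfiltered()
            if self.name not in unfi.__dict__:
                unfi.__dict__[self.name] = self.func(unfi)
            return unfi.__dict__[self.name]
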
105 105 class repofilecache(_basefilecache):
106 106 """filecache for files in .hg but outside of .hg/store"""
107 107 def __init__(self, *paths):
108 108 super(repofilecache, self).__init__(*paths)
109 109 for path in paths:
110 110 _cachedfiles.add((path, 'plain'))
111 111
112 112 def join(self, obj, fname):
113 113 return obj.vfs.join(fname)
114 114
115 115 class storecache(_basefilecache):
116 116 """filecache for files in the store"""
117 117 def __init__(self, *paths):
118 118 super(storecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, ''))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.sjoin(fname)
124 124
125 125 class mixedrepostorecache(_basefilecache):
126 126 """filecache for a mix files in .hg/store and outside"""
127 127 def __init__(self, *pathsandlocations):
128 128 # scmutil.filecache only uses the path for passing back into our
129 129 # join(), so we can safely pass a list of paths and locations
130 130 super(mixedrepostorecache, self).__init__(*pathsandlocations)
131 131 _cachedfiles.update(pathsandlocations)
132 132
133 133 def join(self, obj, fnameandlocation):
134 134 fname, location = fnameandlocation
135 135 if location == 'plain':
136 136 return obj.vfs.join(fname)
137 137 else:
138 138 if location != '':
139 139 raise error.ProgrammingError('unexpected location: %s' %
140 140 location)
141 141 return obj.sjoin(fname)
142 142
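
As the join() above shows, each cached entry is a (fname, location) pair: a 'plain' location resolves under .hg/ via the repo vfs, while an empty location resolves inside the store via sjoin(). A toy illustration of that routing (paths hypothetical, not the real vfs layer):

    def joinpath(hgdir, fnameandlocation):
        fname, location = fnameandlocation
        if location == 'plain':
            return hgdir + '/' + fname        # plain: directly under .hg/
        assert location == ''
        return hgdir + '/store/' + fname      # '': inside the store

    joinpath('.hg', ('bookmarks', 'plain'))   # -> '.hg/bookmarks'
    joinpath('.hg', ('00changelog.i', ''))    # -> '.hg/store/00changelog.i'
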
143 143 def isfilecached(repo, name):
144 144 """check if a repo has already cached "name" filecache-ed property
145 145
146 146 This returns (cachedobj-or-None, iscached) tuple.
147 147 """
148 148 cacheentry = repo.unfiltered()._filecache.get(name, None)
149 149 if not cacheentry:
150 150 return None, False
151 151 return cacheentry.obj, True
152 152
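
A hedged usage sketch of isfilecached(): act on an already-loaded property without forcing a load (repo and the property name are assumptions for illustration):

    obj, cached = isfilecached(repo, 'changelog')
    if cached:
        pass  # safe to inspect obj; no file read was triggered
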
153 153 class unfilteredpropertycache(util.propertycache):
154 154 """propertycache that apply to unfiltered repo only"""
155 155
156 156 def __get__(self, repo, type=None):
157 157 unfi = repo.unfiltered()
158 158 if unfi is repo:
159 159 return super(unfilteredpropertycache, self).__get__(unfi)
160 160 return getattr(unfi, self.name)
161 161
162 162 class filteredpropertycache(util.propertycache):
163 163 """propertycache that must take filtering in account"""
164 164
165 165 def cachevalue(self, obj, value):
166 166 object.__setattr__(obj, self.name, value)
167 167
168 168
169 169 def hasunfilteredcache(repo, name):
170 170 """check if a repo has an unfilteredpropertycache value for <name>"""
171 171 return name in vars(repo.unfiltered())
172 172
173 173 def unfilteredmethod(orig):
174 174 """decorate method that always need to be run on unfiltered version"""
175 175 def wrapper(repo, *args, **kwargs):
176 176 return orig(repo.unfiltered(), *args, **kwargs)
177 177 return wrapper
178 178
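
A hypothetical application of the decorator above (the method name is invented for illustration):

    class somerepo(object):
        @unfilteredmethod
        def rebuildcaches(self):
            # always runs against the unfiltered repository,
            # even when invoked on a filtered view
            pass
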
179 179 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
180 180 'unbundle'}
181 181 legacycaps = moderncaps.union({'changegroupsubset'})
182 182
183 183 @interfaceutil.implementer(repository.ipeercommandexecutor)
184 184 class localcommandexecutor(object):
185 185 def __init__(self, peer):
186 186 self._peer = peer
187 187 self._sent = False
188 188 self._closed = False
189 189
190 190 def __enter__(self):
191 191 return self
192 192
193 193 def __exit__(self, exctype, excvalue, exctb):
194 194 self.close()
195 195
196 196 def callcommand(self, command, args):
197 197 if self._sent:
198 198 raise error.ProgrammingError('callcommand() cannot be used after '
199 199 'sendcommands()')
200 200
201 201 if self._closed:
202 202 raise error.ProgrammingError('callcommand() cannot be used after '
203 203 'close()')
204 204
205 205 # We don't need to support anything fancy. Just call the named
206 206 # method on the peer and return a resolved future.
207 207 fn = getattr(self._peer, pycompat.sysstr(command))
208 208
209 209 f = pycompat.futures.Future()
210 210
211 211 try:
212 212 result = fn(**pycompat.strkwargs(args))
213 213 except Exception:
214 214 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
215 215 else:
216 216 f.set_result(result)
217 217
218 218 return f
219 219
220 220 def sendcommands(self):
221 221 self._sent = True
222 222
223 223 def close(self):
224 224 self._closed = True
225 225
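
Because callcommand() returns an already-resolved future, local and remote peers share one calling convention. A minimal sketch of that convention (peer acquisition elided; the method names match the class above):

    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})
        e.sendcommands()      # merely a marker for the local peer
        heads = f.result()    # the future was resolved synchronously
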
226 226 @interfaceutil.implementer(repository.ipeercommands)
227 227 class localpeer(repository.peer):
228 228 '''peer for a local repo; reflects only the most recent API'''
229 229
230 230 def __init__(self, repo, caps=None):
231 231 super(localpeer, self).__init__()
232 232
233 233 if caps is None:
234 234 caps = moderncaps.copy()
235 235 self._repo = repo.filtered('served')
236 236 self.ui = repo.ui
237 237 self._caps = repo._restrictcapabilities(caps)
238 238
239 239 # Begin of _basepeer interface.
240 240
241 241 def url(self):
242 242 return self._repo.url()
243 243
244 244 def local(self):
245 245 return self._repo
246 246
247 247 def peer(self):
248 248 return self
249 249
250 250 def canpush(self):
251 251 return True
252 252
253 253 def close(self):
254 254 self._repo.close()
255 255
256 256 # End of _basepeer interface.
257 257
258 258 # Begin of _basewirecommands interface.
259 259
260 260 def branchmap(self):
261 261 return self._repo.branchmap()
262 262
263 263 def capabilities(self):
264 264 return self._caps
265 265
266 266 def clonebundles(self):
267 267 return self._repo.tryread('clonebundles.manifest')
268 268
269 269 def debugwireargs(self, one, two, three=None, four=None, five=None):
270 270 """Used to test argument passing over the wire"""
271 271 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
272 272 pycompat.bytestr(four),
273 273 pycompat.bytestr(five))
274 274
275 275 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
276 276 **kwargs):
277 277 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
278 278 common=common, bundlecaps=bundlecaps,
279 279 **kwargs)[1]
280 280 cb = util.chunkbuffer(chunks)
281 281
282 282 if exchange.bundle2requested(bundlecaps):
283 283 # When requesting a bundle2, getbundle returns a stream to make the
284 284 # wire level function happier. We need to build a proper object
285 285 # from it in local peer.
286 286 return bundle2.getunbundler(self.ui, cb)
287 287 else:
288 288 return changegroup.getunbundler('01', cb, None)
289 289
290 290 def heads(self):
291 291 return self._repo.heads()
292 292
293 293 def known(self, nodes):
294 294 return self._repo.known(nodes)
295 295
296 296 def listkeys(self, namespace):
297 297 return self._repo.listkeys(namespace)
298 298
299 299 def lookup(self, key):
300 300 return self._repo.lookup(key)
301 301
302 302 def pushkey(self, namespace, key, old, new):
303 303 return self._repo.pushkey(namespace, key, old, new)
304 304
305 305 def stream_out(self):
306 306 raise error.Abort(_('cannot perform stream clone against local '
307 307 'peer'))
308 308
309 309 def unbundle(self, bundle, heads, url):
310 310 """apply a bundle on a repo
311 311
312 312 This function handles the repo locking itself."""
313 313 try:
314 314 try:
315 315 bundle = exchange.readbundle(self.ui, bundle, None)
316 316 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
317 317 if util.safehasattr(ret, 'getchunks'):
318 318 # This is a bundle20 object, turn it into an unbundler.
319 319 # This little dance should be dropped eventually when the
320 320 # API is finally improved.
321 321 stream = util.chunkbuffer(ret.getchunks())
322 322 ret = bundle2.getunbundler(self.ui, stream)
323 323 return ret
324 324 except Exception as exc:
325 325 # If the exception contains output salvaged from a bundle2
326 326 # reply, we need to make sure it is printed before continuing
327 327 # to fail. So we build a bundle2 with such output and consume
328 328 # it directly.
329 329 #
330 330 # This is not very elegant but allows a "simple" solution for
331 331 # issue4594
332 332 output = getattr(exc, '_bundle2salvagedoutput', ())
333 333 if output:
334 334 bundler = bundle2.bundle20(self._repo.ui)
335 335 for out in output:
336 336 bundler.addpart(out)
337 337 stream = util.chunkbuffer(bundler.getchunks())
338 338 b = bundle2.getunbundler(self.ui, stream)
339 339 bundle2.processbundle(self._repo, b)
340 340 raise
341 341 except error.PushRaced as exc:
342 342 raise error.ResponseError(_('push failed:'),
343 343 stringutil.forcebytestr(exc))
344 344
345 345 # End of _basewirecommands interface.
346 346
347 347 # Begin of peer interface.
348 348
349 349 def commandexecutor(self):
350 350 return localcommandexecutor(self)
351 351
352 352 # End of peer interface.
353 353
354 354 @interfaceutil.implementer(repository.ipeerlegacycommands)
355 355 class locallegacypeer(localpeer):
356 356 '''peer extension which implements legacy methods too; used for tests with
357 357 restricted capabilities'''
358 358
359 359 def __init__(self, repo):
360 360 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
361 361
362 362 # Begin of baselegacywirecommands interface.
363 363
364 364 def between(self, pairs):
365 365 return self._repo.between(pairs)
366 366
367 367 def branches(self, nodes):
368 368 return self._repo.branches(nodes)
369 369
370 370 def changegroup(self, nodes, source):
371 371 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
372 372 missingheads=self._repo.heads())
373 373 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
374 374
375 375 def changegroupsubset(self, bases, heads, source):
376 376 outgoing = discovery.outgoing(self._repo, missingroots=bases,
377 377 missingheads=heads)
378 378 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
379 379
380 380 # End of baselegacywirecommands interface.
381 381
382 382 # Increment the sub-version when the revlog v2 format changes to lock out old
383 383 # clients.
384 384 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
385 385
386 386 # A repository with the sparserevlog feature will have delta chains that
387 387 # can spread over a larger span. Sparse reading cuts these large spans into
388 388 # pieces, so that each piece isn't too big.
389 389 # Without the sparserevlog capability, reading from the repository could use
390 390 # huge amounts of memory, because the whole span would be read at once,
391 391 # including all the intermediate revisions that aren't pertinent for the chain.
392 392 # This is why once a repository has enabled sparse-read, it becomes required.
393 393 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
394 394
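
A toy model of the span-cutting idea described above (thresholds and data invented): contiguous reads are split wherever the gap between needed chunks exceeds a limit, so irrelevant intermediate revisions are skipped.

    def splitspan(chunks, maxgap):
        """chunks: sorted (offset, length) pairs; returns merged read ranges."""
        ranges = []
        start, end = chunks[0][0], chunks[0][0] + chunks[0][1]
        for offset, length in chunks[1:]:
            if offset - end > maxgap:
                # gap too large: finish the current range, start a new one
                ranges.append((start, end))
                start = offset
            end = offset + length
        ranges.append((start, end))
        return ranges

    splitspan([(0, 10), (12, 5), (500, 8)], maxgap=64)  # -> [(0, 17), (500, 508)]
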
395 395 # Functions receiving (ui, features) that extensions can register to impact
396 396 # the ability to load repositories with custom requirements. Only
397 397 # functions defined in loaded extensions are called.
398 398 #
399 399 # The function receives a set of requirement strings that the repository
400 400 # is capable of opening. Functions will typically add elements to the
401 401 # set to reflect that the extension knows how to handle those requirements.
402 402 featuresetupfuncs = set()
403 403
404 404 def makelocalrepository(baseui, path, intents=None):
405 405 """Create a local repository object.
406 406
407 407 Given arguments needed to construct a local repository, this function
408 408 performs various early repository loading functionality (such as
409 409 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
410 410 the repository can be opened, derives a type suitable for representing
411 411 that repository, and returns an instance of it.
412 412
413 413 The returned object conforms to the ``repository.completelocalrepository``
414 414 interface.
415 415
416 416 The repository type is derived by calling a series of factory functions
417 417 for each aspect/interface of the final repository. These are defined by
418 418 ``REPO_INTERFACES``.
419 419
420 420 Each factory function is called to produce a type implementing a specific
421 421 interface. The cumulative list of returned types will be combined into a
422 422 new type and that type will be instantiated to represent the local
423 423 repository.
424 424
425 425 The factory functions each receive various state that may be consulted
426 426 as part of deriving a type.
427 427
428 428 Extensions should wrap these factory functions to customize repository type
429 429 creation. Note that an extension's wrapped function may be called even if
430 430 that extension is not loaded for the repo being constructed. Extensions
431 431 should check if their ``__name__`` appears in the
432 432 ``extensionmodulenames`` set passed to the factory function and no-op if
433 433 not.
434 434 """
435 435 ui = baseui.copy()
436 436 # Prevent copying repo configuration.
437 437 ui.copy = baseui.copy
438 438
439 439 # Working directory VFS rooted at repository root.
440 440 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
441 441
442 442 # Main VFS for .hg/ directory.
443 443 hgpath = wdirvfs.join(b'.hg')
444 444 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
445 445
446 446 # The .hg/ path should exist and should be a directory. All other
447 447 # cases are errors.
448 448 if not hgvfs.isdir():
449 449 try:
450 450 hgvfs.stat()
451 451 except OSError as e:
452 452 if e.errno != errno.ENOENT:
453 453 raise
454 454
455 455 raise error.RepoError(_(b'repository %s not found') % path)
456 456
457 457 # .hg/requires file contains a newline-delimited list of
458 458 # features/capabilities the opener (us) must have in order to use
459 459 # the repository. This file was introduced in Mercurial 0.9.2,
460 460 # which means very old repositories may not have one. We assume
461 461 # a missing file translates to no requirements.
462 462 try:
463 463 requirements = set(hgvfs.read(b'requires').splitlines())
464 464 except IOError as e:
465 465 if e.errno != errno.ENOENT:
466 466 raise
467 467 requirements = set()
468 468
469 469 # The .hg/hgrc file may load extensions or contain config options
470 470 # that influence repository construction. Attempt to load it and
471 471 # process any new extensions that it may have pulled in.
472 472 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
473 473 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
474 474 extensions.loadall(ui)
475 475 extensions.populateui(ui)
476 476
477 477 # Set of module names of extensions loaded for this repository.
478 478 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
479 479
480 480 supportedrequirements = gathersupportedrequirements(ui)
481 481
482 482 # We first validate the requirements are known.
483 483 ensurerequirementsrecognized(requirements, supportedrequirements)
484 484
485 485 # Then we validate that the known set is reasonable to use together.
486 486 ensurerequirementscompatible(ui, requirements)
487 487
488 488 # TODO there are unhandled edge cases related to opening repositories with
489 489 # shared storage. If storage is shared, we should also test for requirements
490 490 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
491 491 # that repo, as that repo may load extensions needed to open it. This is a
492 492 # bit complicated because we don't want the other hgrc to overwrite settings
493 493 # in this hgrc.
494 494 #
495 495 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
496 496 # file when sharing repos. But if a requirement is added after the share is
497 497 # performed, thereby introducing a new requirement for the opener, we
498 498 # will not see that and could encounter a run-time error interacting with
499 499 # that shared store since it has an unknown-to-us requirement.
500 500
501 501 # At this point, we know we should be capable of opening the repository.
502 502 # Now get on with doing that.
503 503
504 504 features = set()
505 505
506 506 # The "store" part of the repository holds versioned data. How it is
507 507 # accessed is determined by various requirements. The ``shared`` or
508 508 # ``relshared`` requirements indicate the store lives in the path contained
509 509 # in the ``.hg/sharedpath`` file. This is an absolute path for
510 510 # ``shared`` and relative to ``.hg/`` for ``relshared``.
511 511 if b'shared' in requirements or b'relshared' in requirements:
512 512 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
513 513 if b'relshared' in requirements:
514 514 sharedpath = hgvfs.join(sharedpath)
515 515
516 516 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
517 517
518 518 if not sharedvfs.exists():
519 519 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
520 520 b'directory %s') % sharedvfs.base)
521 521
522 522 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
523 523
524 524 storebasepath = sharedvfs.base
525 525 cachepath = sharedvfs.join(b'cache')
526 526 else:
527 527 storebasepath = hgvfs.base
528 528 cachepath = hgvfs.join(b'cache')
529 529 wcachepath = hgvfs.join(b'wcache')
530 530
531 531
532 532 # The store has changed over time and the exact layout is dictated by
533 533 # requirements. The store interface abstracts differences across all
534 534 # of them.
535 535 store = makestore(requirements, storebasepath,
536 536 lambda base: vfsmod.vfs(base, cacheaudited=True))
537 537 hgvfs.createmode = store.createmode
538 538
539 539 storevfs = store.vfs
540 540 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
541 541
542 542 # The cache vfs is used to manage cache files.
543 543 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
544 544 cachevfs.createmode = store.createmode
545 545 # The cache vfs is used to manage cache files related to the working copy
546 546 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
547 547 wcachevfs.createmode = store.createmode
548 548
549 549 # Now resolve the type for the repository object. We do this by repeatedly
550 550 # calling a factory function to produce types for specific aspects of the
551 551 # repo's operation. The aggregate returned types are used as base classes
552 552 # for a dynamically-derived type, which will represent our new repository.
553 553
554 554 bases = []
555 555 extrastate = {}
556 556
557 557 for iface, fn in REPO_INTERFACES:
558 558 # We pass all potentially useful state to give extensions tons of
559 559 # flexibility.
560 560 typ = fn()(ui=ui,
561 561 intents=intents,
562 562 requirements=requirements,
563 563 features=features,
564 564 wdirvfs=wdirvfs,
565 565 hgvfs=hgvfs,
566 566 store=store,
567 567 storevfs=storevfs,
568 568 storeoptions=storevfs.options,
569 569 cachevfs=cachevfs,
570 570 wcachevfs=wcachevfs,
571 571 extensionmodulenames=extensionmodulenames,
572 572 extrastate=extrastate,
573 573 baseclasses=bases)
574 574
575 575 if not isinstance(typ, type):
576 576 raise error.ProgrammingError('unable to construct type for %s' %
577 577 iface)
578 578
579 579 bases.append(typ)
580 580
581 581 # type() allows you to use characters in type names that wouldn't be
582 582 # recognized as Python symbols in source code. We abuse that to add
583 583 # rich information about our constructed repo.
584 584 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
585 585 wdirvfs.base,
586 586 b','.join(sorted(requirements))))
587 587
588 588 cls = type(name, tuple(bases), {})
589 589
590 590 return cls(
591 591 baseui=baseui,
592 592 ui=ui,
593 593 origroot=path,
594 594 wdirvfs=wdirvfs,
595 595 hgvfs=hgvfs,
596 596 requirements=requirements,
597 597 supportedrequirements=supportedrequirements,
598 598 sharedpath=storebasepath,
599 599 store=store,
600 600 cachevfs=cachevfs,
601 601 wcachevfs=wcachevfs,
602 602 features=features,
603 603 intents=intents)
604 604
605 605 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
606 606 """Load hgrc files/content into a ui instance.
607 607
608 608 This is called during repository opening to load any additional
609 609 config files or settings relevant to the current repository.
610 610
611 611 Returns a bool indicating whether any additional configs were loaded.
612 612
613 613 Extensions should monkeypatch this function to modify how per-repo
614 614 configs are loaded. For example, an extension may wish to pull in
615 615 configs from alternate files or sources.
616 616 """
617 617 try:
618 618 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
619 619 return True
620 620 except IOError:
621 621 return False
622 622
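
A hedged sketch of the monkeypatching the docstring above invites (extension code; the extra file name is hypothetical):

    def extsetup(ui):
        origloadhgrc = localrepo.loadhgrc
        def loadhgrc(ui, wdirvfs, hgvfs, requirements):
            loaded = origloadhgrc(ui, wdirvfs, hgvfs, requirements)
            try:
                # pull in one additional, hypothetical config file
                ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
                return True
            except IOError:
                return loaded
        localrepo.loadhgrc = loadhgrc
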
623 623 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
624 624 """Perform additional actions after .hg/hgrc is loaded.
625 625
626 626 This function is called during repository loading immediately after
627 627 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
628 628
629 629 The function can be used to validate configs, automatically add
630 630 options (including extensions) based on requirements, etc.
631 631 """
632 632
633 633 # Map of requirements to list of extensions to load automatically when
634 634 # requirement is present.
635 635 autoextensions = {
636 636 b'largefiles': [b'largefiles'],
637 637 b'lfs': [b'lfs'],
638 638 }
639 639
640 640 for requirement, names in sorted(autoextensions.items()):
641 641 if requirement not in requirements:
642 642 continue
643 643
644 644 for name in names:
645 645 if not ui.hasconfig(b'extensions', name):
646 646 ui.setconfig(b'extensions', name, b'', source='autoload')
647 647
648 648 def gathersupportedrequirements(ui):
649 649 """Determine the complete set of recognized requirements."""
650 650 # Start with all requirements supported by this file.
651 651 supported = set(localrepository._basesupported)
652 652
653 653 # Execute ``featuresetupfuncs`` entries if they belong to an extension
654 654 # relevant to this ui instance.
655 655 modules = {m.__name__ for n, m in extensions.extensions(ui)}
656 656
657 657 for fn in featuresetupfuncs:
658 658 if fn.__module__ in modules:
659 659 fn(ui, supported)
660 660
661 661 # Add derived requirements from registered compression engines.
662 662 for name in util.compengines:
663 663 engine = util.compengines[name]
664 664 if engine.available() and engine.revlogheader():
665 665 supported.add(b'exp-compression-%s' % name)
666 666 if engine.name() == 'zstd':
667 667 supported.add(b'revlog-compression-zstd')
668 668
669 669 return supported
670 670
671 671 def ensurerequirementsrecognized(requirements, supported):
672 672 """Validate that a set of local requirements is recognized.
673 673
674 674 Receives a set of requirements. Raises an ``error.RepoError`` if there
675 675 exists any requirement in that set that currently loaded code doesn't
676 676 recognize.
677 677
678 678 Returns a set of supported requirements.
679 679 """
680 680 missing = set()
681 681
682 682 for requirement in requirements:
683 683 if requirement in supported:
684 684 continue
685 685
686 686 if not requirement or not requirement[0:1].isalnum():
687 687 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
688 688
689 689 missing.add(requirement)
690 690
691 691 if missing:
692 692 raise error.RequirementError(
693 693 _(b'repository requires features unknown to this Mercurial: %s') %
694 694 b' '.join(sorted(missing)),
695 695 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
696 696 b'for more information'))
697 697
698 698 def ensurerequirementscompatible(ui, requirements):
699 699 """Validates that a set of recognized requirements is mutually compatible.
700 700
701 701 Some requirements may not be compatible with others or require
702 702 config options that aren't enabled. This function is called during
703 703 repository opening to ensure that the set of requirements needed
704 704 to open a repository is sane and compatible with config options.
705 705
706 706 Extensions can monkeypatch this function to perform additional
707 707 checking.
708 708
709 709 ``error.RepoError`` should be raised on failure.
710 710 """
711 711 if b'exp-sparse' in requirements and not sparse.enabled:
712 712 raise error.RepoError(_(b'repository is using sparse feature but '
713 713 b'sparse is not enabled; enable the '
714 714 b'"sparse" extensions to access'))
715 715
716 716 def makestore(requirements, path, vfstype):
717 717 """Construct a storage object for a repository."""
718 718 if b'store' in requirements:
719 719 if b'fncache' in requirements:
720 720 return storemod.fncachestore(path, vfstype,
721 721 b'dotencode' in requirements)
722 722
723 723 return storemod.encodedstore(path, vfstype)
724 724
725 725 return storemod.basicstore(path, vfstype)
726 726
727 727 def resolvestorevfsoptions(ui, requirements, features):
728 728 """Resolve the options to pass to the store vfs opener.
729 729
730 730 The returned dict is used to influence behavior of the storage layer.
731 731 """
732 732 options = {}
733 733
734 734 if b'treemanifest' in requirements:
735 735 options[b'treemanifest'] = True
736 736
737 737 # experimental config: format.manifestcachesize
738 738 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
739 739 if manifestcachesize is not None:
740 740 options[b'manifestcachesize'] = manifestcachesize
741 741
742 742 # In the absence of another requirement superseding a revlog-related
743 743 # requirement, we have to assume the repo is using revlog version 0.
744 744 # This revlog format is super old and we don't bother trying to parse
745 745 # opener options for it because those options wouldn't do anything
746 746 # meaningful on such old repos.
747 747 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
748 748 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
749 749
750 750 return options
751 751
752 752 def resolverevlogstorevfsoptions(ui, requirements, features):
753 753 """Resolve opener options specific to revlogs."""
754 754
755 755 options = {}
756 756 options[b'flagprocessors'] = {}
757 757
758 758 if b'revlogv1' in requirements:
759 759 options[b'revlogv1'] = True
760 760 if REVLOGV2_REQUIREMENT in requirements:
761 761 options[b'revlogv2'] = True
762 762
763 763 if b'generaldelta' in requirements:
764 764 options[b'generaldelta'] = True
765 765
766 766 # experimental config: format.chunkcachesize
767 767 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
768 768 if chunkcachesize is not None:
769 769 options[b'chunkcachesize'] = chunkcachesize
770 770
771 771 deltabothparents = ui.configbool(b'storage',
772 772 b'revlog.optimize-delta-parent-choice')
773 773 options[b'deltabothparents'] = deltabothparents
774 774
775 775 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
776 776 lazydeltabase = False
777 777 if lazydelta:
778 778 lazydeltabase = ui.configbool(b'storage',
779 779 b'revlog.reuse-external-delta-parent')
780 780 if lazydeltabase is None:
781 781 lazydeltabase = not scmutil.gddeltaconfig(ui)
782 782 options[b'lazydelta'] = lazydelta
783 783 options[b'lazydeltabase'] = lazydeltabase
784 784
785 785 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
786 786 if 0 <= chainspan:
787 787 options[b'maxdeltachainspan'] = chainspan
788 788
789 789 mmapindexthreshold = ui.configbytes(b'experimental',
790 790 b'mmapindexthreshold')
791 791 if mmapindexthreshold is not None:
792 792 options[b'mmapindexthreshold'] = mmapindexthreshold
793 793
794 794 withsparseread = ui.configbool(b'experimental', b'sparse-read')
795 795 srdensitythres = float(ui.config(b'experimental',
796 796 b'sparse-read.density-threshold'))
797 797 srmingapsize = ui.configbytes(b'experimental',
798 798 b'sparse-read.min-gap-size')
799 799 options[b'with-sparse-read'] = withsparseread
800 800 options[b'sparse-read-density-threshold'] = srdensitythres
801 801 options[b'sparse-read-min-gap-size'] = srmingapsize
802 802
803 803 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
804 804 options[b'sparse-revlog'] = sparserevlog
805 805 if sparserevlog:
806 806 options[b'generaldelta'] = True
807 807
808 808 maxchainlen = None
809 809 if sparserevlog:
810 810 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
811 811 # experimental config: format.maxchainlen
812 812 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
813 813 if maxchainlen is not None:
814 814 options[b'maxchainlen'] = maxchainlen
815 815
816 816 for r in requirements:
817 817 # we allow multiple compression engine requirements to co-exist because,
818 818 # strictly speaking, revlogs seem to support mixed compression styles.
819 819 #
820 820 # The compression used for new entries will be "the last one"
821 821 prefix = r.startswith
822 822 if prefix('revlog-compression-') or prefix('exp-compression-'):
823 823 options[b'compengine'] = r.split('-', 2)[2]
824 824
825 825 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
826 826 if options[b'zlib.level'] is not None:
827 827 if not (0 <= options[b'zlib.level'] <= 9):
828 828 msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
829 829 raise error.Abort(msg % options[b'zlib.level'])
830 830 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
831 831 if options[b'zstd.level'] is not None:
832 832 if not (0 <= options[b'zstd.level'] <= 22):
833 833 msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
834 834 raise error.Abort(msg % options[b'zstd.level'])
835 835
836 836 if repository.NARROW_REQUIREMENT in requirements:
837 837 options[b'enableellipsis'] = True
838 838
839 839 return options
840 840
841 841 def makemain(**kwargs):
842 842 """Produce a type conforming to ``ilocalrepositorymain``."""
843 843 return localrepository
844 844
845 845 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
846 846 class revlogfilestorage(object):
847 847 """File storage when using revlogs."""
848 848
849 849 def file(self, path):
850 850 if path[0] == b'/':
851 851 path = path[1:]
852 852
853 853 return filelog.filelog(self.svfs, path)
854 854
855 855 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
856 856 class revlognarrowfilestorage(object):
857 857 """File storage when using revlogs and narrow files."""
858 858
859 859 def file(self, path):
860 860 if path[0] == b'/':
861 861 path = path[1:]
862 862
863 863 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
864 864
865 865 def makefilestorage(requirements, features, **kwargs):
866 866 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
867 867 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
868 868 features.add(repository.REPO_FEATURE_STREAM_CLONE)
869 869
870 870 if repository.NARROW_REQUIREMENT in requirements:
871 871 return revlognarrowfilestorage
872 872 else:
873 873 return revlogfilestorage
874 874
875 875 # List of repository interfaces and factory functions for them. Each
876 876 # will be called in order during ``makelocalrepository()`` to iteratively
877 877 # derive the final type for a local repository instance. We capture the
878 878 # function as a lambda so we don't hold a reference and the module-level
879 879 # functions can be wrapped.
880 880 REPO_INTERFACES = [
881 881 (repository.ilocalrepositorymain, lambda: makemain),
882 882 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
883 883 ]
884 884
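
A toy model of the derivation performed in makelocalrepository(): each factory contributes a base class, and the final repository type is assembled with type() (class names invented):

    class mainaspect(object):
        pass

    class storageaspect(object):
        pass

    # each factory's returned type becomes a base of the derived repo type
    bases = [mainaspect, storageaspect]
    derivedrepo = type('derivedrepo', tuple(bases), {})
    repo = derivedrepo()
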
885 885 @interfaceutil.implementer(repository.ilocalrepositorymain)
886 886 class localrepository(object):
887 887 """Main class for representing local repositories.
888 888
889 889 All local repositories are instances of this class.
890 890
891 891 Constructed on its own, instances of this class are not usable as
892 892 repository objects. To obtain a usable repository object, call
893 893 ``hg.repository()``, ``localrepo.instance()``, or
894 894 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
895 895 ``instance()`` adds support for creating new repositories.
896 896 ``hg.repository()`` adds more extension integration, including calling
897 897 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
898 898 used.
899 899 """
900 900
901 901 # obsolete experimental requirements:
902 902 # - manifestv2: An experimental new manifest format that allowed
903 903 # for stem compression of long paths. Experiment ended up not
904 904 # being successful (repository sizes went up due to worse delta
905 905 # chains), and the code was deleted in 4.6.
906 906 supportedformats = {
907 907 'revlogv1',
908 908 'generaldelta',
909 909 'treemanifest',
910 910 REVLOGV2_REQUIREMENT,
911 911 SPARSEREVLOG_REQUIREMENT,
912 912 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
913 913 }
914 914 _basesupported = supportedformats | {
915 915 'store',
916 916 'fncache',
917 917 'shared',
918 918 'relshared',
919 919 'dotencode',
920 920 'exp-sparse',
921 921 'internal-phase'
922 922 }
923 923
924 924 # list of prefixes for files which can be written without 'wlock'
925 925 # Extensions should extend this list when needed
926 926 _wlockfreeprefix = {
927 927 # We might consider requiring 'wlock' for the next
928 928 # two, but pretty much all the existing code assume
929 929 # wlock is not needed so we keep them excluded for
930 930 # now.
931 931 'hgrc',
932 932 'requires',
933 933 # XXX cache is a complicated business; someone
934 934 # should investigate this in depth at some point
935 935 'cache/',
936 936 # XXX shouldn't dirstate be covered by the wlock?
937 937 'dirstate',
938 938 # XXX bisect was still a bit too messy at the time
939 939 # this changeset was introduced. Someone should fix
940 940 # the remaining bit and drop this line
941 941 'bisect.state',
942 942 }
943 943
944 944 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
945 945 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
946 946 features, intents=None):
947 947 """Create a new local repository instance.
948 948
949 949 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
950 950 or ``localrepo.makelocalrepository()`` for obtaining a new repository
951 951 object.
952 952
953 953 Arguments:
954 954
955 955 baseui
956 956 ``ui.ui`` instance that ``ui`` argument was based off of.
957 957
958 958 ui
959 959 ``ui.ui`` instance for use by the repository.
960 960
961 961 origroot
962 962 ``bytes`` path to working directory root of this repository.
963 963
964 964 wdirvfs
965 965 ``vfs.vfs`` rooted at the working directory.
966 966
967 967 hgvfs
968 968 ``vfs.vfs`` rooted at .hg/
969 969
970 970 requirements
971 971 ``set`` of bytestrings representing repository opening requirements.
972 972
973 973 supportedrequirements
974 974 ``set`` of bytestrings representing repository requirements that we
975 975 know how to open. May be a superset of ``requirements``.
976 976
977 977 sharedpath
978 978 ``bytes`` Defining path to storage base directory. Points to a
979 979 ``.hg/`` directory somewhere.
980 980
981 981 store
982 982 ``store.basicstore`` (or derived) instance providing access to
983 983 versioned storage.
984 984
985 985 cachevfs
986 986 ``vfs.vfs`` used for cache files.
987 987
988 988 wcachevfs
989 989 ``vfs.vfs`` used for cache files related to the working copy.
990 990
991 991 features
992 992 ``set`` of bytestrings defining features/capabilities of this
993 993 instance.
994 994
995 995 intents
996 996 ``set`` of system strings indicating what this repo will be used
997 997 for.
998 998 """
999 999 self.baseui = baseui
1000 1000 self.ui = ui
1001 1001 self.origroot = origroot
1002 1002 # vfs rooted at working directory.
1003 1003 self.wvfs = wdirvfs
1004 1004 self.root = wdirvfs.base
1005 1005 # vfs rooted at .hg/. Used to access most non-store paths.
1006 1006 self.vfs = hgvfs
1007 1007 self.path = hgvfs.base
1008 1008 self.requirements = requirements
1009 1009 self.supported = supportedrequirements
1010 1010 self.sharedpath = sharedpath
1011 1011 self.store = store
1012 1012 self.cachevfs = cachevfs
1013 1013 self.wcachevfs = wcachevfs
1014 1014 self.features = features
1015 1015
1016 1016 self.filtername = None
1017 1017
1018 1018 if (self.ui.configbool('devel', 'all-warnings') or
1019 1019 self.ui.configbool('devel', 'check-locks')):
1020 1020 self.vfs.audit = self._getvfsward(self.vfs.audit)
1021 1021 # A list of callbacks to shape the phase if no data were found.
1022 1022 # Callbacks are in the form: func(repo, roots) --> processed root.
1023 1023 # This list is to be filled by extensions during repo setup.
1024 1024 self._phasedefaults = []
1025 1025
1026 1026 color.setup(self.ui)
1027 1027
1028 1028 self.spath = self.store.path
1029 1029 self.svfs = self.store.vfs
1030 1030 self.sjoin = self.store.join
1031 1031 if (self.ui.configbool('devel', 'all-warnings') or
1032 1032 self.ui.configbool('devel', 'check-locks')):
1033 1033 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
1034 1034 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1035 1035 else: # standard vfs
1036 1036 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1037 1037
1038 1038 self._dirstatevalidatewarned = False
1039 1039
1040 1040 self._branchcaches = branchmap.BranchMapCache()
1041 1041 self._revbranchcache = None
1042 1042 self._filterpats = {}
1043 1043 self._datafilters = {}
1044 1044 self._transref = self._lockref = self._wlockref = None
1045 1045
1046 1046 # A cache for various files under .hg/ that tracks file changes,
1047 1047 # (used by the filecache decorator)
1048 1048 #
1049 1049 # Maps a property name to its util.filecacheentry
1050 1050 self._filecache = {}
1051 1051
1052 1052 # holds sets of revisions to be filtered;
1053 1053 # should be cleared when something might have changed the filter value:
1054 1054 # - new changesets,
1055 1055 # - phase change,
1056 1056 # - new obsolescence marker,
1057 1057 # - working directory parent change,
1058 1058 # - bookmark changes
1059 1059 self.filteredrevcache = {}
1060 1060
1061 1061 # post-dirstate-status hooks
1062 1062 self._postdsstatus = []
1063 1063
1064 1064 # generic mapping between names and nodes
1065 1065 self.names = namespaces.namespaces()
1066 1066
1067 1067 # Key to signature value.
1068 1068 self._sparsesignaturecache = {}
1069 1069 # Signature to cached matcher instance.
1070 1070 self._sparsematchercache = {}
1071 1071
1072 1072 self._extrafilterid = repoview.extrafilter(ui)
1073 1073
1074 1074 def _getvfsward(self, origfunc):
1075 1075 """build a ward for self.vfs"""
1076 1076 rref = weakref.ref(self)
1077 1077 def checkvfs(path, mode=None):
1078 1078 ret = origfunc(path, mode=mode)
1079 1079 repo = rref()
1080 1080 if (repo is None
1081 1081 or not util.safehasattr(repo, '_wlockref')
1082 1082 or not util.safehasattr(repo, '_lockref')):
1083 1083 return
1084 1084 if mode in (None, 'r', 'rb'):
1085 1085 return
1086 1086 if path.startswith(repo.path):
1087 1087 # truncate name relative to the repository (.hg)
1088 1088 path = path[len(repo.path) + 1:]
1089 1089 if path.startswith('cache/'):
1090 1090 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1091 1091 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1092 1092 if path.startswith('journal.') or path.startswith('undo.'):
1093 1093 # journal is covered by 'lock'
1094 1094 if repo._currentlock(repo._lockref) is None:
1095 1095 repo.ui.develwarn('write with no lock: "%s"' % path,
1096 1096 stacklevel=3, config='check-locks')
1097 1097 elif repo._currentlock(repo._wlockref) is None:
1098 1098 # rest of vfs files are covered by 'wlock'
1099 1099 #
1100 1100 # exclude special files
1101 1101 for prefix in self._wlockfreeprefix:
1102 1102 if path.startswith(prefix):
1103 1103 return
1104 1104 repo.ui.develwarn('write with no wlock: "%s"' % path,
1105 1105 stacklevel=3, config='check-locks')
1106 1106 return ret
1107 1107 return checkvfs
1108 1108
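
A toy model of the "ward" pattern used above and in _getsvfsward(): wrap a callable so every invocation is audited after delegating (names invented):

    def makeward(origfunc, check):
        def ward(path, mode=None):
            ret = origfunc(path, mode=mode)
            check(path, mode)   # e.g. emit a devel warning on lock misuse
            return ret
        return ward
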
1109 1109 def _getsvfsward(self, origfunc):
1110 1110 """build a ward for self.svfs"""
1111 1111 rref = weakref.ref(self)
1112 1112 def checksvfs(path, mode=None):
1113 1113 ret = origfunc(path, mode=mode)
1114 1114 repo = rref()
1115 1115 if repo is None or not util.safehasattr(repo, '_lockref'):
1116 1116 return
1117 1117 if mode in (None, 'r', 'rb'):
1118 1118 return
1119 1119 if path.startswith(repo.sharedpath):
1120 1120 # truncate name relative to the repository (.hg)
1121 1121 path = path[len(repo.sharedpath) + 1:]
1122 1122 if repo._currentlock(repo._lockref) is None:
1123 1123 repo.ui.develwarn('write with no lock: "%s"' % path,
1124 1124 stacklevel=4)
1125 1125 return ret
1126 1126 return checksvfs
1127 1127
1128 1128 def close(self):
1129 1129 self._writecaches()
1130 1130
1131 1131 def _writecaches(self):
1132 1132 if self._revbranchcache:
1133 1133 self._revbranchcache.write()
1134 1134
1135 1135 def _restrictcapabilities(self, caps):
1136 1136 if self.ui.configbool('experimental', 'bundle2-advertise'):
1137 1137 caps = set(caps)
1138 1138 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1139 1139 role='client'))
1140 1140 caps.add('bundle2=' + urlreq.quote(capsblob))
1141 1141 return caps
1142 1142
1143 1143 def _writerequirements(self):
1144 1144 scmutil.writerequires(self.vfs, self.requirements)
1145 1145
1146 1146 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1147 1147 # self -> auditor -> self._checknested -> self
1148 1148
1149 1149 @property
1150 1150 def auditor(self):
1151 1151 # This is only used by context.workingctx.match in order to
1152 1152 # detect files in subrepos.
1153 1153 return pathutil.pathauditor(self.root, callback=self._checknested)
1154 1154
1155 1155 @property
1156 1156 def nofsauditor(self):
1157 1157 # This is only used by context.basectx.match in order to detect
1158 1158 # files in subrepos.
1159 1159 return pathutil.pathauditor(self.root, callback=self._checknested,
1160 1160 realfs=False, cached=True)
1161 1161
1162 1162 def _checknested(self, path):
1163 1163 """Determine if path is a legal nested repository."""
1164 1164 if not path.startswith(self.root):
1165 1165 return False
1166 1166 subpath = path[len(self.root) + 1:]
1167 1167 normsubpath = util.pconvert(subpath)
1168 1168
1169 1169 # XXX: Checking against the current working copy is wrong in
1170 1170 # the sense that it can reject things like
1171 1171 #
1172 1172 # $ hg cat -r 10 sub/x.txt
1173 1173 #
1174 1174 # if sub/ is no longer a subrepository in the working copy
1175 1175 # parent revision.
1176 1176 #
1177 1177 # However, it can of course also allow things that would have
1178 1178 # been rejected before, such as the above cat command if sub/
1179 1179 # is a subrepository now, but was a normal directory before.
1180 1180 # The old path auditor would have rejected by mistake since it
1181 1181 # panics when it sees sub/.hg/.
1182 1182 #
1183 1183 # All in all, checking against the working copy seems sensible
1184 1184 # since we want to prevent access to nested repositories on
1185 1185 # the filesystem *now*.
1186 1186 ctx = self[None]
1187 1187 parts = util.splitpath(subpath)
1188 1188 while parts:
1189 1189 prefix = '/'.join(parts)
1190 1190 if prefix in ctx.substate:
1191 1191 if prefix == normsubpath:
1192 1192 return True
1193 1193 else:
1194 1194 sub = ctx.sub(prefix)
1195 1195 return sub.checknested(subpath[len(prefix) + 1:])
1196 1196 else:
1197 1197 parts.pop()
1198 1198 return False
1199 1199
1200 1200 def peer(self):
1201 1201 return localpeer(self) # not cached to avoid reference cycle
1202 1202
1203 1203 def unfiltered(self):
1204 1204 """Return unfiltered version of the repository
1205 1205
1206 1206 Intended to be overwritten by filtered repo."""
1207 1207 return self
1208 1208
1209 1209 def filtered(self, name, visibilityexceptions=None):
1210 1210 """Return a filtered version of a repository
1211 1211
1212 1212 The `name` parameter is the identifier of the requested view. This
1213 1213 will return a repoview object set "exactly" to the specified view.
1214 1214
1215 1215 This function does not apply recursive filtering to a repository. For
1216 1216 example calling `repo.filtered("served")` will return a repoview using
1217 1217 the "served" view, regardless of the initial view used by `repo`.
1218 1218
1219 1219 In other words, there is always only one level of `repoview` "filtering".
1220 1220 """
1221 1221 if self._extrafilterid is not None and '%' not in name:
1222 1222 name = name + '%' + self._extrafilterid
1223 1223
1224 1224 cls = repoview.newtype(self.unfiltered().__class__)
1225 1225 return cls(self, name, visibilityexceptions)
1226 1226
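
A hypothetical illustration of the "one level of filtering" rule stated above:

    served = repo.filtered('served')
    narrower = served.filtered('visible')
    # narrower is a 'visible' view of the *unfiltered* repo,
    # not a view stacked on top of 'served'
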
1227 1227 @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
1228 1228 ('bookmarks', ''), ('00changelog.i', ''))
1229 1229 def _bookmarks(self):
1230 # Since the multiple files involved in the transaction cannot be
1231 # written atomically (with current repository format), there is a race
1232 # condition here.
1233 #
1234 # 1) changelog content A is read
1235 # 2) outside transaction update changelog to content B
1236 # 3) outside transaction update bookmark file referring to content B
1237 # 4) bookmarks file content is read and filtered against changelog-A
1238 #
1239 # When this happens, bookmarks against nodes missing from A are dropped.
1240 #
1241 # Having this happen during a read is not great, but it becomes worse
1242 # when it happens during a write, because the bookmarks pointing to the
1243 # "unknown" nodes will be dropped for good. However, writes happen
1244 # within locks. This locking makes it possible to have a race-free,
1245 # consistent read. For this purpose, data read from disk before locking
1246 # is "invalidated" right after the locks are taken. These invalidations
1247 # are "light": the `filecache` mechanism keeps the data in memory and
1248 # will reuse it if the underlying files did not change. Not parsing the
1249 # same data multiple times helps performance.
1250 #
1251 # Unfortunately, in the case described above, the files tracked by the
1252 # bookmarks file cache might not have changed, but the in-memory
1253 # content is still "wrong" because we used an older changelog content
1254 # to process the on-disk data. So after locking, the changelog would be
1255 # refreshed but `_bookmarks` would be preserved.
1256 # Adding `00changelog.i` to the list of tracked files is not
1257 # enough, because at the time we build the content for `_bookmarks` in
1258 # (4), the changelog file has already diverged from the content used
1259 # for loading `changelog` in (1).
1260 #
1261 # To prevent the issue, we force the changelog to be explicitly
1262 # reloaded while computing `_bookmarks`. The data race can still happen
1263 # without the lock (with a narrower window), but it would no longer go
1264 # undetected during the lock time refresh.
1265 #
1266 # The new schedule is as follows:
1267 #
1268 # 1) filecache logic detect that `_bookmarks` needs to be computed
1269 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1270 # 3) We force `changelog` filecache to be tested
1271 # 4) cachestat for `changelog` are captured (for changelog)
1272 # 5) `_bookmarks` is computed and cached
1273 #
1274 # The step in (3) ensures we have a changelog at least as recent as the
1275 # cachestat computed in (1). As a result, at locking time:
1276 # * if the changelog did not change since (1) -> we can reuse the data
1277 # * otherwise -> the bookmarks get refreshed.
1278 self._refreshchangelog()
1230 1279 return bookmarks.bmstore(self)
1231 1280
1232 1281 def _refreshchangelog(self):
1233 1282 """make sure the in memory changelog match the on-disk one"""
1234 1283 if ('changelog' in vars(self) and self.currenttransaction() is None):
1235 1284 del self.changelog
1236 1285
1237 1286 @property
1238 1287 def _activebookmark(self):
1239 1288 return self._bookmarks.active
1240 1289
1241 1290 # _phasesets depend on changelog. what we need is to call
1242 1291 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1243 1292 # can't be easily expressed in filecache mechanism.
1244 1293 @storecache('phaseroots', '00changelog.i')
1245 1294 def _phasecache(self):
1246 1295 return phases.phasecache(self, self._phasedefaults)
1247 1296
1248 1297 @storecache('obsstore')
1249 1298 def obsstore(self):
1250 1299 return obsolete.makestore(self.ui, self)
1251 1300
1252 1301 @storecache('00changelog.i')
1253 1302 def changelog(self):
1254 1303 return changelog.changelog(self.svfs,
1255 1304 trypending=txnutil.mayhavepending(self.root))
1256 1305
1257 1306 @storecache('00manifest.i')
1258 1307 def manifestlog(self):
1259 1308 rootstore = manifest.manifestrevlog(self.svfs)
1260 1309 return manifest.manifestlog(self.svfs, self, rootstore,
1261 1310 self._storenarrowmatch)
1262 1311
1263 1312 @repofilecache('dirstate')
1264 1313 def dirstate(self):
1265 1314 return self._makedirstate()
1266 1315
1267 1316 def _makedirstate(self):
1268 1317 """Extension point for wrapping the dirstate per-repo."""
1269 1318 sparsematchfn = lambda: sparse.matcher(self)
1270 1319
1271 1320 return dirstate.dirstate(self.vfs, self.ui, self.root,
1272 1321 self._dirstatevalidate, sparsematchfn)
1273 1322
1274 1323 def _dirstatevalidate(self, node):
1275 1324 try:
1276 1325 self.changelog.rev(node)
1277 1326 return node
1278 1327 except error.LookupError:
1279 1328 if not self._dirstatevalidatewarned:
1280 1329 self._dirstatevalidatewarned = True
1281 1330 self.ui.warn(_("warning: ignoring unknown"
1282 1331 " working parent %s!\n") % short(node))
1283 1332 return nullid
1284 1333
1285 1334 @storecache(narrowspec.FILENAME)
1286 1335 def narrowpats(self):
1287 1336 """matcher patterns for this repository's narrowspec
1288 1337
1289 1338 A tuple of (includes, excludes).
1290 1339 """
1291 1340 return narrowspec.load(self)
1292 1341
1293 1342 @storecache(narrowspec.FILENAME)
1294 1343 def _storenarrowmatch(self):
1295 1344 if repository.NARROW_REQUIREMENT not in self.requirements:
1296 1345 return matchmod.always()
1297 1346 include, exclude = self.narrowpats
1298 1347 return narrowspec.match(self.root, include=include, exclude=exclude)
1299 1348
1300 1349 @storecache(narrowspec.FILENAME)
1301 1350 def _narrowmatch(self):
1302 1351 if repository.NARROW_REQUIREMENT not in self.requirements:
1303 1352 return matchmod.always()
1304 1353 narrowspec.checkworkingcopynarrowspec(self)
1305 1354 include, exclude = self.narrowpats
1306 1355 return narrowspec.match(self.root, include=include, exclude=exclude)
1307 1356
1308 1357 def narrowmatch(self, match=None, includeexact=False):
1309 1358 """matcher corresponding the the repo's narrowspec
1310 1359
1311 1360 If `match` is given, then that will be intersected with the narrow
1312 1361 matcher.
1313 1362
1314 1363 If `includeexact` is True, then any exact matches from `match` will
1315 1364 be included even if they're outside the narrowspec.
1316 1365 """
1317 1366 if match:
1318 1367 if includeexact and not self._narrowmatch.always():
1319 1368 # do not exclude explicitly-specified paths so that they can
1320 1369 # be warned later on
1321 1370 em = matchmod.exact(match.files())
1322 1371 nm = matchmod.unionmatcher([self._narrowmatch, em])
1323 1372 return matchmod.intersectmatchers(match, nm)
1324 1373 return matchmod.intersectmatchers(match, self._narrowmatch)
1325 1374 return self._narrowmatch
1326 1375
1327 1376 def setnarrowpats(self, newincludes, newexcludes):
1328 1377 narrowspec.save(self, newincludes, newexcludes)
1329 1378 self.invalidate(clearfilecache=True)
1330 1379
1331 1380 def __getitem__(self, changeid):
1332 1381 if changeid is None:
1333 1382 return context.workingctx(self)
1334 1383 if isinstance(changeid, context.basectx):
1335 1384 return changeid
1336 1385 if isinstance(changeid, slice):
1337 1386 # wdirrev isn't contiguous so the slice shouldn't include it
1338 1387 return [self[i]
1339 1388 for i in pycompat.xrange(*changeid.indices(len(self)))
1340 1389 if i not in self.changelog.filteredrevs]
1341 1390 try:
1342 1391 if isinstance(changeid, int):
1343 1392 node = self.changelog.node(changeid)
1344 1393 rev = changeid
1345 1394 elif changeid == 'null':
1346 1395 node = nullid
1347 1396 rev = nullrev
1348 1397 elif changeid == 'tip':
1349 1398 node = self.changelog.tip()
1350 1399 rev = self.changelog.rev(node)
1351 1400 elif changeid == '.':
1352 1401 # this is a hack to delay/avoid loading obsmarkers
1353 1402 # when we know that '.' won't be hidden
1354 1403 node = self.dirstate.p1()
1355 1404 rev = self.unfiltered().changelog.rev(node)
1356 1405 elif len(changeid) == 20:
1357 1406 try:
1358 1407 node = changeid
1359 1408 rev = self.changelog.rev(changeid)
1360 1409 except error.FilteredLookupError:
1361 1410 changeid = hex(changeid) # for the error message
1362 1411 raise
1363 1412 except LookupError:
1364 1413 # check if it might have come from damaged dirstate
1365 1414 #
1366 1415 # XXX we could avoid the unfiltered if we had a recognizable
1367 1416 # exception for filtered changeset access
1368 1417 if (self.local()
1369 1418 and changeid in self.unfiltered().dirstate.parents()):
1370 1419 msg = _("working directory has unknown parent '%s'!")
1371 1420 raise error.Abort(msg % short(changeid))
1372 1421 changeid = hex(changeid) # for the error message
1373 1422 raise
1374 1423
1375 1424 elif len(changeid) == 40:
1376 1425 node = bin(changeid)
1377 1426 rev = self.changelog.rev(node)
1378 1427 else:
1379 1428 raise error.ProgrammingError(
1380 1429 "unsupported changeid '%s' of type %s" %
1381 1430 (changeid, type(changeid)))
1382 1431
1383 1432 return context.changectx(self, rev, node)
1384 1433
1385 1434 except (error.FilteredIndexError, error.FilteredLookupError):
1386 1435 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1387 1436 % pycompat.bytestr(changeid))
1388 1437 except (IndexError, LookupError):
1389 1438 raise error.RepoLookupError(
1390 1439 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1391 1440 except error.WdirUnsupported:
1392 1441 return context.workingctx(self)
1393 1442
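
Hypothetical lookups accepted by the branches above (repo and node are assumptions):

    repo[None]       # workingctx for the working directory
    repo[0]          # changectx by integer revision
    repo['tip']      # changectx for the tip changeset
    repo['null']     # the null changeset
    repo[node]       # 20-byte binary node or 40-byte hex node
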
1394 1443 def __contains__(self, changeid):
1395 1444 """True if the given changeid exists
1396 1445
1397 1446 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1398 1447 is specified.
1399 1448 """
1400 1449 try:
1401 1450 self[changeid]
1402 1451 return True
1403 1452 except error.RepoLookupError:
1404 1453 return False
1405 1454
1406 1455 def __nonzero__(self):
1407 1456 return True
1408 1457
1409 1458 __bool__ = __nonzero__
1410 1459
1411 1460 def __len__(self):
1412 1461 # no need to pay the cost of repoview.changelog
1413 1462 unfi = self.unfiltered()
1414 1463 return len(unfi.changelog)
1415 1464
1416 1465 def __iter__(self):
1417 1466 return iter(self.changelog)
1418 1467
1419 1468 def revs(self, expr, *args):
1420 1469 '''Find revisions matching a revset.
1421 1470
1422 1471 The revset is specified as a string ``expr`` that may contain
1423 1472 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1424 1473
1425 1474 Revset aliases from the configuration are not expanded. To expand
1426 1475 user aliases, consider calling ``scmutil.revrange()`` or
1427 1476 ``repo.anyrevs([expr], user=True)``.
1428 1477
1429 1478 Returns a revset.abstractsmartset, which is a list-like interface
1430 1479 that contains integer revisions.
1431 1480 '''
1432 1481 tree = revsetlang.spectree(expr, *args)
1433 1482 return revset.makematcher(tree)(self)
1434 1483
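# Example (hedged sketch): '%d' and '%s' are formatspec escapes for an
# integer revision and a string respectively (see revsetlang.formatspec);
# the returned smartset iterates integer revisions:
#
#   for rev in repo.revs('ancestors(%d) and branch(%s)', 42, 'default'):
#       node = repo.changelog.node(rev)
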
1435 1484 def set(self, expr, *args):
1436 1485 '''Find revisions matching a revset and emit changectx instances.
1437 1486
1438 1487 This is a convenience wrapper around ``revs()`` that iterates the
1439 1488 result and is a generator of changectx instances.
1440 1489
1441 1490 Revset aliases from the configuration are not expanded. To expand
1442 1491 user aliases, consider calling ``scmutil.revrange()``.
1443 1492 '''
1444 1493 for r in self.revs(expr, *args):
1445 1494 yield self[r]
1446 1495
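# Example (illustrative): set() yields changectx objects rather than
# integer revisions:
#
#   hexes = [ctx.hex() for ctx in repo.set('%d::%d', 10, 20)]
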
1447 1496 def anyrevs(self, specs, user=False, localalias=None):
1448 1497 '''Find revisions matching one of the given revsets.
1449 1498
1450 1499 Revset aliases from the configuration are not expanded by default. To
1451 1500 expand user aliases, specify ``user=True``. To provide some local
1452 1501 definitions overriding user aliases, set ``localalias`` to
1453 1502 ``{name: definitionstring}``.
1454 1503 '''
1455 1504 if user:
1456 1505 m = revset.matchany(self.ui, specs,
1457 1506 lookup=revset.lookupfn(self),
1458 1507 localalias=localalias)
1459 1508 else:
1460 1509 m = revset.matchany(None, specs, localalias=localalias)
1461 1510 return m(self)
1462 1511
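# Example (sketch; the alias name 'mine' is hypothetical): expanding
# user aliases while overriding one locally:
#
#   revs = repo.anyrevs(['mine() and public()'], user=True,
#                       localalias={'mine': 'author(alice)'})
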
1463 1512 def url(self):
1464 1513 return 'file:' + self.root
1465 1514
1466 1515 def hook(self, name, throw=False, **args):
1467 1516 """Call a hook, passing this repo instance.
1468 1517
1469 1518 This is a convenience method to aid invoking hooks. Extensions likely
1470 1519 won't call this unless they have registered a custom hook or are
1471 1520 replacing code that is expected to call a hook.
1472 1521 """
1473 1522 return hook.hook(self.ui, self, name, throw, **args)
1474 1523
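# Example (sketch; 'myext-update' is a hypothetical hook name that an
# extension might have registered):
#
#   repo.hook('myext-update', throw=False, node=hex(newnode))
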
1475 1524 @filteredpropertycache
1476 1525 def _tagscache(self):
1477 1526 '''Returns a tagscache object that contains various tags-related
1478 1527 caches.'''
1479 1528
1480 1529 # This simplifies its cache management by having one decorated
1481 1530 # function (this one) and the rest simply fetch things from it.
1482 1531 class tagscache(object):
1483 1532 def __init__(self):
1484 1533 # These two define the set of tags for this repository. tags
1485 1534 # maps tag name to node; tagtypes maps tag name to 'global' or
1486 1535 # 'local'. (Global tags are defined by .hgtags across all
1487 1536 # heads, and local tags are defined in .hg/localtags.)
1488 1537 # They constitute the in-memory cache of tags.
1489 1538 self.tags = self.tagtypes = None
1490 1539
1491 1540 self.nodetagscache = self.tagslist = None
1492 1541
1493 1542 cache = tagscache()
1494 1543 cache.tags, cache.tagtypes = self._findtags()
1495 1544
1496 1545 return cache
1497 1546
1498 1547 def tags(self):
1499 1548 '''return a mapping of tag to node'''
1500 1549 t = {}
1501 1550 if self.changelog.filteredrevs:
1502 1551 tags, tt = self._findtags()
1503 1552 else:
1504 1553 tags = self._tagscache.tags
1505 1554 rev = self.changelog.rev
1506 1555 for k, v in tags.iteritems():
1507 1556 try:
1508 1557 # ignore tags to unknown nodes
1509 1558 rev(v)
1510 1559 t[k] = v
1511 1560 except (error.LookupError, ValueError):
1512 1561 pass
1513 1562 return t
1514 1563
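# Example (illustrative): the result is a plain {tagname: node} mapping:
#
#   for name, node in sorted(repo.tags().iteritems()):
#       rev = repo.changelog.rev(node)
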
1515 1564 def _findtags(self):
1516 1565 '''Do the hard work of finding tags. Return a pair of dicts
1517 1566 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1518 1567 maps tag name to a string like \'global\' or \'local\'.
1519 1568 Subclasses or extensions are free to add their own tags, but
1520 1569 should be aware that the returned dicts will be retained for the
1521 1570 duration of the localrepo object.'''
1522 1571
1523 1572 # XXX what tagtype should subclasses/extensions use? Currently
1524 1573 # mq and bookmarks add tags, but do not set the tagtype at all.
1525 1574 # Should each extension invent its own tag type? Should there
1526 1575 # be one tagtype for all such "virtual" tags? Or is the status
1527 1576 # quo fine?
1528 1577
1529 1578
1530 1579 # map tag name to (node, hist)
1531 1580 alltags = tagsmod.findglobaltags(self.ui, self)
1532 1581 # map tag name to tag type
1533 1582 tagtypes = dict((tag, 'global') for tag in alltags)
1534 1583
1535 1584 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1536 1585
1537 1586 # Build the return dicts. Have to re-encode tag names because
1538 1587 # the tags module always uses UTF-8 (in order not to lose info
1539 1588 # writing to the cache), but the rest of Mercurial wants them in
1540 1589 # local encoding.
1541 1590 tags = {}
1542 1591 for (name, (node, hist)) in alltags.iteritems():
1543 1592 if node != nullid:
1544 1593 tags[encoding.tolocal(name)] = node
1545 1594 tags['tip'] = self.changelog.tip()
1546 1595 tagtypes = dict([(encoding.tolocal(name), value)
1547 1596 for (name, value) in tagtypes.iteritems()])
1548 1597 return (tags, tagtypes)
1549 1598
1550 1599 def tagtype(self, tagname):
1551 1600 '''
1552 1601 return the type of the given tag. result can be:
1553 1602
1554 1603 'local' : a local tag
1555 1604 'global' : a global tag
1556 1605 None : tag does not exist
1557 1606 '''
1558 1607
1559 1608 return self._tagscache.tagtypes.get(tagname)
1560 1609
1561 1610 def tagslist(self):
1562 1611 '''return a list of tags ordered by revision'''
1563 1612 if not self._tagscache.tagslist:
1564 1613 l = []
1565 1614 for t, n in self.tags().iteritems():
1566 1615 l.append((self.changelog.rev(n), t, n))
1567 1616 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1568 1617
1569 1618 return self._tagscache.tagslist
1570 1619
1571 1620 def nodetags(self, node):
1572 1621 '''return the tags associated with a node'''
1573 1622 if not self._tagscache.nodetagscache:
1574 1623 nodetagscache = {}
1575 1624 for t, n in self._tagscache.tags.iteritems():
1576 1625 nodetagscache.setdefault(n, []).append(t)
1577 1626 for tags in nodetagscache.itervalues():
1578 1627 tags.sort()
1579 1628 self._tagscache.nodetagscache = nodetagscache
1580 1629 return self._tagscache.nodetagscache.get(node, [])
1581 1630
1582 1631 def nodebookmarks(self, node):
1583 1632 """return the list of bookmarks pointing to the specified node"""
1584 1633 return self._bookmarks.names(node)
1585 1634
1586 1635 def branchmap(self):
1587 1636 '''returns a dictionary {branch: [branchheads]} with branchheads
1588 1637 ordered by increasing revision number'''
1589 1638 return self._branchcaches[self]
1590 1639
1591 1640 @unfilteredmethod
1592 1641 def revbranchcache(self):
1593 1642 if not self._revbranchcache:
1594 1643 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1595 1644 return self._revbranchcache
1596 1645
1597 1646 def branchtip(self, branch, ignoremissing=False):
1598 1647 '''return the tip node for a given branch
1599 1648
1600 1649 If ignoremissing is True, then this method will not raise an error.
1601 1650 This is helpful for callers that only expect None for a missing branch
1602 1651 (e.g. namespace).
1603 1652
1604 1653 '''
1605 1654 try:
1606 1655 return self.branchmap().branchtip(branch)
1607 1656 except KeyError:
1608 1657 if not ignoremissing:
1609 1658 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1610 1659 else:
1611 1660 pass
1612 1661
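# Example (illustrative): with ignoremissing=True a missing branch
# yields None instead of raising RepoLookupError:
#
#   tip = repo.branchtip('default', ignoremissing=True)
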
1613 1662 def lookup(self, key):
1614 1663 node = scmutil.revsymbol(self, key).node()
1615 1664 if node is None:
1616 1665 raise error.RepoLookupError(_("unknown revision '%s'") % key)
1617 1666 return node
1618 1667
1619 1668 def lookupbranch(self, key):
1620 1669 if self.branchmap().hasbranch(key):
1621 1670 return key
1622 1671
1623 1672 return scmutil.revsymbol(self, key).branch()
1624 1673
1625 1674 def known(self, nodes):
1626 1675 cl = self.changelog
1627 1676 nm = cl.nodemap
1628 1677 filtered = cl.filteredrevs
1629 1678 result = []
1630 1679 for n in nodes:
1631 1680 r = nm.get(n)
1632 1681 resp = not (r is None or r in filtered)
1633 1682 result.append(resp)
1634 1683 return result
1635 1684
1636 1685 def local(self):
1637 1686 return self
1638 1687
1639 1688 def publishing(self):
1640 1689 # it's safe (and desirable) to trust the publish flag unconditionally
1641 1690 # so that we don't finalize changes shared between users via ssh or nfs
1642 1691 return self.ui.configbool('phases', 'publish', untrusted=True)
1643 1692
1644 1693 def cancopy(self):
1645 1694 # so statichttprepo's override of local() works
1646 1695 if not self.local():
1647 1696 return False
1648 1697 if not self.publishing():
1649 1698 return True
1650 1699 # if publishing we can't copy if there is filtered content
1651 1700 return not self.filtered('visible').changelog.filteredrevs
1652 1701
1653 1702 def shared(self):
1654 1703 '''the type of shared repository (None if not shared)'''
1655 1704 if self.sharedpath != self.path:
1656 1705 return 'store'
1657 1706 return None
1658 1707
1659 1708 def wjoin(self, f, *insidef):
1660 1709 return self.vfs.reljoin(self.root, f, *insidef)
1661 1710
1662 1711 def setparents(self, p1, p2=nullid):
1663 1712 with self.dirstate.parentchange():
1664 1713 copies = self.dirstate.setparents(p1, p2)
1665 1714 pctx = self[p1]
1666 1715 if copies:
1667 1716 # Adjust copy records; the dirstate cannot do it, as it
1668 1717 # requires access to the parents' manifests. Preserve them
1669 1718 # only for entries added to the first parent.
1670 1719 for f in copies:
1671 1720 if f not in pctx and copies[f] in pctx:
1672 1721 self.dirstate.copy(copies[f], f)
1673 1722 if p2 == nullid:
1674 1723 for f, s in sorted(self.dirstate.copies().items()):
1675 1724 if f not in pctx and s not in pctx:
1676 1725 self.dirstate.copy(None, f)
1677 1726
1678 1727 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1679 1728 """changeid must be a changeset revision, if specified.
1680 1729 fileid can be a file revision or node."""
1681 1730 return context.filectx(self, path, changeid, fileid,
1682 1731 changectx=changectx)
1683 1732
1684 1733 def getcwd(self):
1685 1734 return self.dirstate.getcwd()
1686 1735
1687 1736 def pathto(self, f, cwd=None):
1688 1737 return self.dirstate.pathto(f, cwd)
1689 1738
1690 1739 def _loadfilter(self, filter):
1691 1740 if filter not in self._filterpats:
1692 1741 l = []
1693 1742 for pat, cmd in self.ui.configitems(filter):
1694 1743 if cmd == '!':
1695 1744 continue
1696 1745 mf = matchmod.match(self.root, '', [pat])
1697 1746 fn = None
1698 1747 params = cmd
1699 1748 for name, filterfn in self._datafilters.iteritems():
1700 1749 if cmd.startswith(name):
1701 1750 fn = filterfn
1702 1751 params = cmd[len(name):].lstrip()
1703 1752 break
1704 1753 if not fn:
1705 1754 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1706 1755 # Wrap old filters not supporting keyword arguments
1707 1756 if not pycompat.getargspec(fn)[2]:
1708 1757 oldfn = fn
1709 1758 fn = lambda s, c, **kwargs: oldfn(s, c)
1710 1759 l.append((mf, fn, params))
1711 1760 self._filterpats[filter] = l
1712 1761 return self._filterpats[filter]
1713 1762
1714 1763 def _filter(self, filterpats, filename, data):
1715 1764 for mf, fn, cmd in filterpats:
1716 1765 if mf(filename):
1717 1766 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1718 1767 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1719 1768 break
1720 1769
1721 1770 return data
1722 1771
1723 1772 @unfilteredpropertycache
1724 1773 def _encodefilterpats(self):
1725 1774 return self._loadfilter('encode')
1726 1775
1727 1776 @unfilteredpropertycache
1728 1777 def _decodefilterpats(self):
1729 1778 return self._loadfilter('decode')
1730 1779
1731 1780 def adddatafilter(self, name, filter):
1732 1781 self._datafilters[name] = filter
1733 1782
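# Example (sketch, mirroring how extensions such as eol use this
# machinery): a named data filter is matched by prefix against the
# command string from the [encode]/[decode] config sections; the name
# 'myfix:' is hypothetical:
#
#   def myfix(s, cmd, ui=None, repo=None, filename=None, **kwargs):
#       return s.replace('\r\n', '\n')
#   repo.adddatafilter('myfix:', myfix)
#
#   # and in hgrc:
#   # [encode]
#   # **.txt = myfix:
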
1734 1783 def wread(self, filename):
1735 1784 if self.wvfs.islink(filename):
1736 1785 data = self.wvfs.readlink(filename)
1737 1786 else:
1738 1787 data = self.wvfs.read(filename)
1739 1788 return self._filter(self._encodefilterpats, filename, data)
1740 1789
1741 1790 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1742 1791 """write ``data`` into ``filename`` in the working directory
1743 1792
1744 1793 This returns the length of the written (possibly decoded) data.
1745 1794 """
1746 1795 data = self._filter(self._decodefilterpats, filename, data)
1747 1796 if 'l' in flags:
1748 1797 self.wvfs.symlink(data, filename)
1749 1798 else:
1750 1799 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1751 1800 **kwargs)
1752 1801 if 'x' in flags:
1753 1802 self.wvfs.setflags(filename, False, True)
1754 1803 else:
1755 1804 self.wvfs.setflags(filename, False, False)
1756 1805 return len(data)
1757 1806
1758 1807 def wwritedata(self, filename, data):
1759 1808 return self._filter(self._decodefilterpats, filename, data)
1760 1809
1761 1810 def currenttransaction(self):
1762 1811 """return the current transaction or None if none exists"""
1763 1812 if self._transref:
1764 1813 tr = self._transref()
1765 1814 else:
1766 1815 tr = None
1767 1816
1768 1817 if tr and tr.running():
1769 1818 return tr
1770 1819 return None
1771 1820
1772 1821 def transaction(self, desc, report=None):
1773 1822 if (self.ui.configbool('devel', 'all-warnings')
1774 1823 or self.ui.configbool('devel', 'check-locks')):
1775 1824 if self._currentlock(self._lockref) is None:
1776 1825 raise error.ProgrammingError('transaction requires locking')
1777 1826 tr = self.currenttransaction()
1778 1827 if tr is not None:
1779 1828 return tr.nest(name=desc)
1780 1829
1781 1830 # abort here if the journal already exists
1782 1831 if self.svfs.exists("journal"):
1783 1832 raise error.RepoError(
1784 1833 _("abandoned transaction found"),
1785 1834 hint=_("run 'hg recover' to clean up transaction"))
1786 1835
1787 1836 idbase = "%.40f#%f" % (random.random(), time.time())
1788 1837 ha = hex(hashlib.sha1(idbase).digest())
1789 1838 txnid = 'TXN:' + ha
1790 1839 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1791 1840
1792 1841 self._writejournal(desc)
1793 1842 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1794 1843 if report:
1795 1844 rp = report
1796 1845 else:
1797 1846 rp = self.ui.warn
1798 1847 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1799 1848 # we must avoid cyclic reference between repo and transaction.
1800 1849 reporef = weakref.ref(self)
1801 1850 # Code to track tag movement
1802 1851 #
1803 1852 # Since tags are all handled as file content, it is actually quite hard
1804 1853 # to track their movement from a code perspective. So we fall back to
1805 1854 # tracking at the repository level. One could envision tracking changes
1806 1855 # to the '.hgtags' file through changegroup application, but that fails
1807 1856 # to cope with cases where a transaction exposes new heads without a
1808 1857 # changegroup being involved (eg: phase movement).
1809 1858 #
1810 1859 # For now, we gate the feature behind a flag since it likely comes
1811 1860 # with performance impacts. The current code runs more often than
1812 1861 # needed and does not use caches as much as it could. The current focus
1813 1862 # is on the behavior of the feature, so we disable it by default. The
1814 1863 # flag will be removed when we are happy with the performance impact.
1815 1864 #
1816 1865 # Once this feature is no longer experimental move the following
1817 1866 # documentation to the appropriate help section:
1818 1867 #
1819 1868 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1820 1869 # tags (new or changed or deleted tags). In addition the details of
1821 1870 # these changes are made available in a file at:
1822 1871 # ``REPOROOT/.hg/changes/tags.changes``.
1823 1872 # Make sure you check for HG_TAG_MOVED before reading that file, as it
1824 1873 # might exist from a previous transaction even if no tags were touched
1825 1874 # in this one. Changes are recorded in a line-based format::
1826 1875 #
1827 1876 # <action> <hex-node> <tag-name>\n
1828 1877 #
1829 1878 # Actions are defined as follows:
1830 1879 # "-R": tag is removed,
1831 1880 # "+A": tag is added,
1832 1881 # "-M": tag is moved (old value),
1833 1882 # "+M": tag is moved (new value),
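#
# For instance (hedged sketch), a Python hook could consume the file
# along these lines:
#
#   import os
#   if os.environ.get('HG_TAG_MOVED'):
#       with open('.hg/changes/tags.changes') as fp:
#           for line in fp:
#               action, node, name = line.rstrip('\n').split(' ', 2)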
1834 1883 tracktags = lambda x: None
1835 1884 # experimental config: experimental.hook-track-tags
1836 1885 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1837 1886 if desc != 'strip' and shouldtracktags:
1838 1887 oldheads = self.changelog.headrevs()
1839 1888 def tracktags(tr2):
1840 1889 repo = reporef()
1841 1890 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1842 1891 newheads = repo.changelog.headrevs()
1843 1892 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1844 1893 # note: we compare lists here.
1845 1894 # As we do it only once, building a set would not be cheaper.
1846 1895 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1847 1896 if changes:
1848 1897 tr2.hookargs['tag_moved'] = '1'
1849 1898 with repo.vfs('changes/tags.changes', 'w',
1850 1899 atomictemp=True) as changesfile:
1851 1900 # note: we do not register the file with the transaction
1852 1901 # because we need it to still exist when the transaction
1853 1902 # is closed (for txnclose hooks)
1854 1903 tagsmod.writediff(changesfile, changes)
1855 1904 def validate(tr2):
1856 1905 """will run pre-closing hooks"""
1857 1906 # XXX the transaction API is a bit lacking here so we take a hacky
1858 1907 # path for now
1859 1908 #
1860 1909 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1861 1910 # dict is copied before these run. In addition we need the data
1862 1911 # available to in-memory hooks too.
1863 1912 #
1864 1913 # Moreover, we also need to make sure this runs before txnclose
1865 1914 # hooks and there is no "pending" mechanism that would execute
1866 1915 # logic only if hooks are about to run.
1867 1916 #
1868 1917 # Fixing this limitation of the transaction is also needed to track
1869 1918 # other families of changes (bookmarks, phases, obsolescence).
1870 1919 #
1871 1920 # This will have to be fixed before we remove the experimental
1872 1921 # gating.
1873 1922 tracktags(tr2)
1874 1923 repo = reporef()
1875 1924 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1876 1925 scmutil.enforcesinglehead(repo, tr2, desc)
1877 1926 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1878 1927 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1879 1928 args = tr.hookargs.copy()
1880 1929 args.update(bookmarks.preparehookargs(name, old, new))
1881 1930 repo.hook('pretxnclose-bookmark', throw=True,
1882 1931 **pycompat.strkwargs(args))
1883 1932 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1884 1933 cl = repo.unfiltered().changelog
1885 1934 for rev, (old, new) in tr.changes['phases'].items():
1886 1935 args = tr.hookargs.copy()
1887 1936 node = hex(cl.node(rev))
1888 1937 args.update(phases.preparehookargs(node, old, new))
1889 1938 repo.hook('pretxnclose-phase', throw=True,
1890 1939 **pycompat.strkwargs(args))
1891 1940
1892 1941 repo.hook('pretxnclose', throw=True,
1893 1942 **pycompat.strkwargs(tr.hookargs))
1894 1943 def releasefn(tr, success):
1895 1944 repo = reporef()
1896 1945 if success:
1897 1946 # this should be explicitly invoked here, because
1898 1947 # in-memory changes aren't written out when closing
1899 1948 # the transaction, if tr.addfilegenerator (via
1900 1949 # dirstate.write or so) isn't invoked while the
1901 1950 # transaction is running
1902 1951 repo.dirstate.write(None)
1903 1952 else:
1904 1953 # discard all changes (including ones already written
1905 1954 # out) in this transaction
1906 1955 narrowspec.restorebackup(self, 'journal.narrowspec')
1907 1956 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1908 1957 repo.dirstate.restorebackup(None, 'journal.dirstate')
1909 1958
1910 1959 repo.invalidate(clearfilecache=True)
1911 1960
1912 1961 tr = transaction.transaction(rp, self.svfs, vfsmap,
1913 1962 "journal",
1914 1963 "undo",
1915 1964 aftertrans(renames),
1916 1965 self.store.createmode,
1917 1966 validator=validate,
1918 1967 releasefn=releasefn,
1919 1968 checkambigfiles=_cachedfiles,
1920 1969 name=desc)
1921 1970 tr.changes['origrepolen'] = len(self)
1922 1971 tr.changes['obsmarkers'] = set()
1923 1972 tr.changes['phases'] = {}
1924 1973 tr.changes['bookmarks'] = {}
1925 1974
1926 1975 tr.hookargs['txnid'] = txnid
1927 1976 tr.hookargs['txnname'] = desc
1928 1977 # note: writing the fncache only during finalize means that the file is
1929 1978 # outdated when running hooks. As fncache is used for streaming clones,
1930 1979 # this is not expected to break anything that happens during the hooks.
1931 1980 tr.addfinalize('flush-fncache', self.store.write)
1932 1981 def txnclosehook(tr2):
1933 1982 """To be run if transaction is successful, will schedule a hook run
1934 1983 """
1935 1984 # Don't reference tr2 in hook() so we don't hold a reference.
1936 1985 # This reduces memory consumption when there are multiple
1937 1986 # transactions per lock. This can likely go away if issue5045
1938 1987 # fixes the function accumulation.
1939 1988 hookargs = tr2.hookargs
1940 1989
1941 1990 def hookfunc():
1942 1991 repo = reporef()
1943 1992 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1944 1993 bmchanges = sorted(tr.changes['bookmarks'].items())
1945 1994 for name, (old, new) in bmchanges:
1946 1995 args = tr.hookargs.copy()
1947 1996 args.update(bookmarks.preparehookargs(name, old, new))
1948 1997 repo.hook('txnclose-bookmark', throw=False,
1949 1998 **pycompat.strkwargs(args))
1950 1999
1951 2000 if hook.hashook(repo.ui, 'txnclose-phase'):
1952 2001 cl = repo.unfiltered().changelog
1953 2002 phasemv = sorted(tr.changes['phases'].items())
1954 2003 for rev, (old, new) in phasemv:
1955 2004 args = tr.hookargs.copy()
1956 2005 node = hex(cl.node(rev))
1957 2006 args.update(phases.preparehookargs(node, old, new))
1958 2007 repo.hook('txnclose-phase', throw=False,
1959 2008 **pycompat.strkwargs(args))
1960 2009
1961 2010 repo.hook('txnclose', throw=False,
1962 2011 **pycompat.strkwargs(hookargs))
1963 2012 reporef()._afterlock(hookfunc)
1964 2013 tr.addfinalize('txnclose-hook', txnclosehook)
1965 2014 # Include a leading "-" to make it happen before the transaction summary
1966 2015 # reports registered via scmutil.registersummarycallback() whose names
1967 2016 # are 00-txnreport etc. That way, the caches will be warm when the
1968 2017 # callbacks run.
1969 2018 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1970 2019 def txnaborthook(tr2):
1971 2020 """To be run if transaction is aborted
1972 2021 """
1973 2022 reporef().hook('txnabort', throw=False,
1974 2023 **pycompat.strkwargs(tr2.hookargs))
1975 2024 tr.addabort('txnabort-hook', txnaborthook)
1976 2025 # avoid eager cache invalidation. in-memory data should be identical
1977 2026 # to stored data if transaction has no error.
1978 2027 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1979 2028 self._transref = weakref.ref(tr)
1980 2029 scmutil.registersummarycallback(self, tr, desc)
1981 2030 return tr
1982 2031
1983 2032 def _journalfiles(self):
1984 2033 return ((self.svfs, 'journal'),
1985 2034 (self.svfs, 'journal.narrowspec'),
1986 2035 (self.vfs, 'journal.narrowspec.dirstate'),
1987 2036 (self.vfs, 'journal.dirstate'),
1988 2037 (self.vfs, 'journal.branch'),
1989 2038 (self.vfs, 'journal.desc'),
1990 2039 (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
1991 2040 (self.svfs, 'journal.phaseroots'))
1992 2041
1993 2042 def undofiles(self):
1994 2043 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1995 2044
1996 2045 @unfilteredmethod
1997 2046 def _writejournal(self, desc):
1998 2047 self.dirstate.savebackup(None, 'journal.dirstate')
1999 2048 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
2000 2049 narrowspec.savebackup(self, 'journal.narrowspec')
2001 2050 self.vfs.write("journal.branch",
2002 2051 encoding.fromlocal(self.dirstate.branch()))
2003 2052 self.vfs.write("journal.desc",
2004 2053 "%d\n%s\n" % (len(self), desc))
2005 2054 bookmarksvfs = bookmarks.bookmarksvfs(self)
2006 2055 bookmarksvfs.write("journal.bookmarks",
2007 2056 bookmarksvfs.tryread("bookmarks"))
2008 2057 self.svfs.write("journal.phaseroots",
2009 2058 self.svfs.tryread("phaseroots"))
2010 2059
2011 2060 def recover(self):
2012 2061 with self.lock():
2013 2062 if self.svfs.exists("journal"):
2014 2063 self.ui.status(_("rolling back interrupted transaction\n"))
2015 2064 vfsmap = {'': self.svfs,
2016 2065 'plain': self.vfs,}
2017 2066 transaction.rollback(self.svfs, vfsmap, "journal",
2018 2067 self.ui.warn,
2019 2068 checkambigfiles=_cachedfiles)
2020 2069 self.invalidate()
2021 2070 return True
2022 2071 else:
2023 2072 self.ui.warn(_("no interrupted transaction available\n"))
2024 2073 return False
2025 2074
2026 2075 def rollback(self, dryrun=False, force=False):
2027 2076 wlock = lock = dsguard = None
2028 2077 try:
2029 2078 wlock = self.wlock()
2030 2079 lock = self.lock()
2031 2080 if self.svfs.exists("undo"):
2032 2081 dsguard = dirstateguard.dirstateguard(self, 'rollback')
2033 2082
2034 2083 return self._rollback(dryrun, force, dsguard)
2035 2084 else:
2036 2085 self.ui.warn(_("no rollback information available\n"))
2037 2086 return 1
2038 2087 finally:
2039 2088 release(dsguard, lock, wlock)
2040 2089
2041 2090 @unfilteredmethod # Until we get smarter cache management
2042 2091 def _rollback(self, dryrun, force, dsguard):
2043 2092 ui = self.ui
2044 2093 try:
2045 2094 args = self.vfs.read('undo.desc').splitlines()
2046 2095 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2047 2096 if len(args) >= 3:
2048 2097 detail = args[2]
2049 2098 oldtip = oldlen - 1
2050 2099
2051 2100 if detail and ui.verbose:
2052 2101 msg = (_('repository tip rolled back to revision %d'
2053 2102 ' (undo %s: %s)\n')
2054 2103 % (oldtip, desc, detail))
2055 2104 else:
2056 2105 msg = (_('repository tip rolled back to revision %d'
2057 2106 ' (undo %s)\n')
2058 2107 % (oldtip, desc))
2059 2108 except IOError:
2060 2109 msg = _('rolling back unknown transaction\n')
2061 2110 desc = None
2062 2111
2063 2112 if not force and self['.'] != self['tip'] and desc == 'commit':
2064 2113 raise error.Abort(
2065 2114 _('rollback of last commit while not checked out '
2066 2115 'may lose data'), hint=_('use -f to force'))
2067 2116
2068 2117 ui.status(msg)
2069 2118 if dryrun:
2070 2119 return 0
2071 2120
2072 2121 parents = self.dirstate.parents()
2073 2122 self.destroying()
2074 2123 vfsmap = {'plain': self.vfs, '': self.svfs}
2075 2124 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2076 2125 checkambigfiles=_cachedfiles)
2077 2126 bookmarksvfs = bookmarks.bookmarksvfs(self)
2078 2127 if bookmarksvfs.exists('undo.bookmarks'):
2079 2128 bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2080 2129 if self.svfs.exists('undo.phaseroots'):
2081 2130 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2082 2131 self.invalidate()
2083 2132
2084 2133 parentgone = any(p not in self.changelog.nodemap for p in parents)
2085 2134 if parentgone:
2086 2135 # prevent dirstateguard from overwriting the already restored one
2087 2136 dsguard.close()
2088 2137
2089 2138 narrowspec.restorebackup(self, 'undo.narrowspec')
2090 2139 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2091 2140 self.dirstate.restorebackup(None, 'undo.dirstate')
2092 2141 try:
2093 2142 branch = self.vfs.read('undo.branch')
2094 2143 self.dirstate.setbranch(encoding.tolocal(branch))
2095 2144 except IOError:
2096 2145 ui.warn(_('named branch could not be reset: '
2097 2146 'current branch is still \'%s\'\n')
2098 2147 % self.dirstate.branch())
2099 2148
2100 2149 parents = tuple([p.rev() for p in self[None].parents()])
2101 2150 if len(parents) > 1:
2102 2151 ui.status(_('working directory now based on '
2103 2152 'revisions %d and %d\n') % parents)
2104 2153 else:
2105 2154 ui.status(_('working directory now based on '
2106 2155 'revision %d\n') % parents)
2107 2156 mergemod.mergestate.clean(self, self['.'].node())
2108 2157
2109 2158 # TODO: if we know which new heads may result from this rollback, pass
2110 2159 # them to destroy(), which will prevent the branchhead cache from being
2111 2160 # invalidated.
2112 2161 self.destroyed()
2113 2162 return 0
2114 2163
2115 2164 def _buildcacheupdater(self, newtransaction):
2116 2165 """called during transaction to build the callback updating cache
2117 2166
2118 2167 Lives on the repository to help extensions that might want to augment
2119 2168 this logic. For this purpose, the created transaction is passed to the
2120 2169 method.
2121 2170 """
2122 2171 # we must avoid cyclic reference between repo and transaction.
2123 2172 reporef = weakref.ref(self)
2124 2173 def updater(tr):
2125 2174 repo = reporef()
2126 2175 repo.updatecaches(tr)
2127 2176 return updater
2128 2177
2129 2178 @unfilteredmethod
2130 2179 def updatecaches(self, tr=None, full=False):
2131 2180 """warm appropriate caches
2132 2181
2133 2182 If this function is called after a transaction has closed, the
2134 2183 transaction will be available in the 'tr' argument. This can be used to
2135 2184 selectively update caches relevant to the changes in that transaction.
2136 2185
2137 2186 If 'full' is set, make sure all caches the function knows about have
2138 2187 up-to-date data, even the ones usually loaded more lazily.
2139 2188 """
2140 2189 if tr is not None and tr.hookargs.get('source') == 'strip':
2141 2190 # During strip, many caches are invalid but
2142 2191 # later call to `destroyed` will refresh them.
2143 2192 return
2144 2193
2145 2194 if tr is None or tr.changes['origrepolen'] < len(self):
2146 2195 # accessing the 'served' branchmap should refresh all the others.
2147 2196 self.ui.debug('updating the branch cache\n')
2148 2197 self.filtered('served').branchmap()
2149 2198 self.filtered('served.hidden').branchmap()
2150 2199
2151 2200 if full:
2152 2201 unfi = self.unfiltered()
2153 2202 rbc = unfi.revbranchcache()
2154 2203 for r in unfi.changelog:
2155 2204 rbc.branchinfo(r)
2156 2205 rbc.write()
2157 2206
2158 2207 # ensure the working copy parents are in the manifestfulltextcache
2159 2208 for ctx in self['.'].parents():
2160 2209 ctx.manifest() # accessing the manifest is enough
2161 2210
2162 2211 # accessing fnode cache warms the cache
2163 2212 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2164 2213 # accessing tags warms the cache
2165 2214 self.tags()
2166 2215 self.filtered('served').tags()
2167 2216
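# Example: 'hg debugupdatecaches' reaches this code path with full=True
# (sketch):
#
#   repo.updatecaches(full=True)
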
2168 2217 def invalidatecaches(self):
2169 2218
2170 2219 if r'_tagscache' in vars(self):
2171 2220 # can't use delattr on proxy
2172 2221 del self.__dict__[r'_tagscache']
2173 2222
2174 2223 self._branchcaches.clear()
2175 2224 self.invalidatevolatilesets()
2176 2225 self._sparsesignaturecache.clear()
2177 2226
2178 2227 def invalidatevolatilesets(self):
2179 2228 self.filteredrevcache.clear()
2180 2229 obsolete.clearobscaches(self)
2181 2230
2182 2231 def invalidatedirstate(self):
2183 2232 '''Invalidates the dirstate, causing the next call to dirstate
2184 2233 to check if it was modified since the last time it was read,
2185 2234 rereading it if it has.
2186 2235
2187 2236 This differs from dirstate.invalidate() in that it doesn't always
2188 2237 reread the dirstate. Use dirstate.invalidate() if you want to
2189 2238 explicitly read the dirstate again (i.e. restoring it to a previous
2190 2239 known good state).'''
2191 2240 if hasunfilteredcache(self, r'dirstate'):
2192 2241 for k in self.dirstate._filecache:
2193 2242 try:
2194 2243 delattr(self.dirstate, k)
2195 2244 except AttributeError:
2196 2245 pass
2197 2246 delattr(self.unfiltered(), r'dirstate')
2198 2247
2199 2248 def invalidate(self, clearfilecache=False):
2200 2249 '''Invalidates both store and non-store parts other than dirstate
2201 2250
2202 2251 If a transaction is running, invalidation of store is omitted,
2203 2252 because discarding in-memory changes might cause inconsistency
2204 2253 (e.g. incomplete fncache causes unintentional failure, but
2205 2254 redundant one doesn't).
2206 2255 '''
2207 2256 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2208 2257 for k in list(self._filecache.keys()):
2209 2258 # dirstate is invalidated separately in invalidatedirstate()
2210 2259 if k == 'dirstate':
2211 2260 continue
2212 2261 if (k == 'changelog' and
2213 2262 self.currenttransaction() and
2214 2263 self.changelog._delayed):
2215 2264 # The changelog object may store unwritten revisions. We don't
2216 2265 # want to lose them.
2217 2266 # TODO: Solve the problem instead of working around it.
2218 2267 continue
2219 2268
2220 2269 if clearfilecache:
2221 2270 del self._filecache[k]
2222 2271 try:
2223 2272 delattr(unfiltered, k)
2224 2273 except AttributeError:
2225 2274 pass
2226 2275 self.invalidatecaches()
2227 2276 if not self.currenttransaction():
2228 2277 # TODO: Changing contents of store outside transaction
2229 2278 # causes inconsistency. We should make in-memory store
2230 2279 # changes detectable, and abort if changed.
2231 2280 self.store.invalidatecaches()
2232 2281
2233 2282 def invalidateall(self):
2234 2283 '''Fully invalidates both store and non-store parts, causing the
2235 2284 subsequent operation to reread any outside changes.'''
2236 2285 # extension should hook this to invalidate its caches
2237 2286 self.invalidate()
2238 2287 self.invalidatedirstate()
2239 2288
2240 2289 @unfilteredmethod
2241 2290 def _refreshfilecachestats(self, tr):
2242 2291 """Reload stats of cached files so that they are flagged as valid"""
2243 2292 for k, ce in self._filecache.items():
2244 2293 k = pycompat.sysstr(k)
2245 2294 if k == r'dirstate' or k not in self.__dict__:
2246 2295 continue
2247 2296 ce.refresh()
2248 2297
2249 2298 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2250 2299 inheritchecker=None, parentenvvar=None):
2251 2300 parentlock = None
2252 2301 # the contents of parentenvvar are used by the underlying lock to
2253 2302 # determine whether it can be inherited
2254 2303 if parentenvvar is not None:
2255 2304 parentlock = encoding.environ.get(parentenvvar)
2256 2305
2257 2306 timeout = 0
2258 2307 warntimeout = 0
2259 2308 if wait:
2260 2309 timeout = self.ui.configint("ui", "timeout")
2261 2310 warntimeout = self.ui.configint("ui", "timeout.warn")
2262 2311 # internal config: ui.signal-safe-lock
2263 2312 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2264 2313
2265 2314 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2266 2315 releasefn=releasefn,
2267 2316 acquirefn=acquirefn, desc=desc,
2268 2317 inheritchecker=inheritchecker,
2269 2318 parentlock=parentlock,
2270 2319 signalsafe=signalsafe)
2271 2320 return l
2272 2321
2273 2322 def _afterlock(self, callback):
2274 2323 """add a callback to be run when the repository is fully unlocked
2275 2324
2276 2325 The callback will be executed when the outermost lock is released
2277 2326 (with wlock being higher level than 'lock')."""
2278 2327 for ref in (self._wlockref, self._lockref):
2279 2328 l = ref and ref()
2280 2329 if l and l.held:
2281 2330 l.postrelease.append(callback)
2282 2331 break
2283 2332 else: # no lock has been found.
2284 2333 callback()
2285 2334
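# Example (sketch of this internal helper; the hook name is
# hypothetical): deferring work until the outermost lock is released:
#
#   def notify():
#       repo.hook('myext-unlocked', throw=False)
#   repo._afterlock(notify)
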
2286 2335 def lock(self, wait=True):
2287 2336 '''Lock the repository store (.hg/store) and return a weak reference
2288 2337 to the lock. Use this before modifying the store (e.g. committing or
2289 2338 stripping). If you are opening a transaction, get a lock as well.
2290 2339
2291 2340 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2292 2341 'wlock' first to avoid a deadlock hazard.'''
2293 2342 l = self._currentlock(self._lockref)
2294 2343 if l is not None:
2295 2344 l.lock()
2296 2345 return l
2297 2346
2298 2347 l = self._lock(vfs=self.svfs,
2299 2348 lockname="lock",
2300 2349 wait=wait,
2301 2350 releasefn=None,
2302 2351 acquirefn=self.invalidate,
2303 2352 desc=_('repository %s') % self.origroot)
2304 2353 self._lockref = weakref.ref(l)
2305 2354 return l
2306 2355
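# Example: the canonical ordering when both locks are needed, as used
# by commit() below (the transaction description is illustrative):
#
#   with repo.wlock(), repo.lock():
#       with repo.transaction('my-change'):
#           ...
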
2307 2356 def _wlockchecktransaction(self):
2308 2357 if self.currenttransaction() is not None:
2309 2358 raise error.LockInheritanceContractViolation(
2310 2359 'wlock cannot be inherited in the middle of a transaction')
2311 2360
2312 2361 def wlock(self, wait=True):
2313 2362 '''Lock the non-store parts of the repository (everything under
2314 2363 .hg except .hg/store) and return a weak reference to the lock.
2315 2364
2316 2365 Use this before modifying files in .hg.
2317 2366
2318 2367 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2319 2368 'wlock' first to avoid a deadlock hazard.'''
2320 2369 l = self._wlockref and self._wlockref()
2321 2370 if l is not None and l.held:
2322 2371 l.lock()
2323 2372 return l
2324 2373
2325 2374 # We do not need to check for non-waiting lock acquisition. Such
2326 2375 # acquisitions would not cause a deadlock as they would just fail.
2327 2376 if wait and (self.ui.configbool('devel', 'all-warnings')
2328 2377 or self.ui.configbool('devel', 'check-locks')):
2329 2378 if self._currentlock(self._lockref) is not None:
2330 2379 self.ui.develwarn('"wlock" acquired after "lock"')
2331 2380
2332 2381 def unlock():
2333 2382 if self.dirstate.pendingparentchange():
2334 2383 self.dirstate.invalidate()
2335 2384 else:
2336 2385 self.dirstate.write(None)
2337 2386
2338 2387 self._filecache['dirstate'].refresh()
2339 2388
2340 2389 l = self._lock(self.vfs, "wlock", wait, unlock,
2341 2390 self.invalidatedirstate, _('working directory of %s') %
2342 2391 self.origroot,
2343 2392 inheritchecker=self._wlockchecktransaction,
2344 2393 parentenvvar='HG_WLOCK_LOCKER')
2345 2394 self._wlockref = weakref.ref(l)
2346 2395 return l
2347 2396
2348 2397 def _currentlock(self, lockref):
2349 2398 """Returns the lock if it's held, or None if it's not."""
2350 2399 if lockref is None:
2351 2400 return None
2352 2401 l = lockref()
2353 2402 if l is None or not l.held:
2354 2403 return None
2355 2404 return l
2356 2405
2357 2406 def currentwlock(self):
2358 2407 """Returns the wlock if it's held, or None if it's not."""
2359 2408 return self._currentlock(self._wlockref)
2360 2409
2361 2410 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
2362 2411 includecopymeta):
2363 2412 """
2364 2413 commit an individual file as part of a larger transaction
2365 2414 """
2366 2415
2367 2416 fname = fctx.path()
2368 2417 fparent1 = manifest1.get(fname, nullid)
2369 2418 fparent2 = manifest2.get(fname, nullid)
2370 2419 if isinstance(fctx, context.filectx):
2371 2420 node = fctx.filenode()
2372 2421 if node in [fparent1, fparent2]:
2373 2422 self.ui.debug('reusing %s filelog entry\n' % fname)
2374 2423 if ((fparent1 != nullid and
2375 2424 manifest1.flags(fname) != fctx.flags()) or
2376 2425 (fparent2 != nullid and
2377 2426 manifest2.flags(fname) != fctx.flags())):
2378 2427 changelist.append(fname)
2379 2428 return node
2380 2429
2381 2430 flog = self.file(fname)
2382 2431 meta = {}
2383 2432 cfname = fctx.copysource()
2384 2433 if cfname and cfname != fname:
2385 2434 # Mark the new revision of this file as a copy of another
2386 2435 # file. This copy data will effectively act as a parent
2387 2436 # of this new revision. If this is a merge, the first
2388 2437 # parent will be the nullid (meaning "look up the copy data")
2389 2438 # and the second one will be the other parent. For example:
2390 2439 #
2391 2440 # 0 --- 1 --- 3 rev1 changes file foo
2392 2441 # \ / rev2 renames foo to bar and changes it
2393 2442 # \- 2 -/ rev3 should have bar with all changes and
2394 2443 # should record that bar descends from
2395 2444 # bar in rev2 and foo in rev1
2396 2445 #
2397 2446 # this allows this merge to succeed:
2398 2447 #
2399 2448 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2400 2449 # \ / merging rev3 and rev4 should use bar@rev2
2401 2450 # \- 2 --- 4 as the merge base
2402 2451 #
2403 2452
2404 2453 cnode = manifest1.get(cfname)
2405 2454 newfparent = fparent2
2406 2455
2407 2456 if manifest2: # branch merge
2408 2457 if fparent2 == nullid or cnode is None: # copied on remote side
2409 2458 if cfname in manifest2:
2410 2459 cnode = manifest2[cfname]
2411 2460 newfparent = fparent1
2412 2461
2413 2462 # Here, we used to search backwards through history to try to find
2414 2463 # where the file copy came from if the source of a copy was not in
2415 2464 # the parent directory. However, this doesn't actually make sense to
2416 2465 # do (what does a copy from something not in your working copy even
2417 2466 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2418 2467 # the user that copy information was dropped, so if they didn't
2419 2468 # expect this outcome it can be fixed, but this is the correct
2420 2469 # behavior in this circumstance.
2421 2470
2422 2471 if cnode:
2423 2472 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
2424 2473 if includecopymeta:
2425 2474 meta["copy"] = cfname
2426 2475 meta["copyrev"] = hex(cnode)
2427 2476 fparent1, fparent2 = nullid, newfparent
2428 2477 else:
2429 2478 self.ui.warn(_("warning: can't find ancestor for '%s' "
2430 2479 "copied from '%s'!\n") % (fname, cfname))
2431 2480
2432 2481 elif fparent1 == nullid:
2433 2482 fparent1, fparent2 = fparent2, nullid
2434 2483 elif fparent2 != nullid:
2435 2484 # is one parent an ancestor of the other?
2436 2485 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2437 2486 if fparent1 in fparentancestors:
2438 2487 fparent1, fparent2 = fparent2, nullid
2439 2488 elif fparent2 in fparentancestors:
2440 2489 fparent2 = nullid
2441 2490
2442 2491 # is the file changed?
2443 2492 text = fctx.data()
2444 2493 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2445 2494 changelist.append(fname)
2446 2495 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2447 2496 # are just the flags changed during merge?
2448 2497 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2449 2498 changelist.append(fname)
2450 2499
2451 2500 return fparent1
2452 2501
2453 2502 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2454 2503 """check for commit arguments that aren't committable"""
2455 2504 if match.isexact() or match.prefix():
2456 2505 matched = set(status.modified + status.added + status.removed)
2457 2506
2458 2507 for f in match.files():
2459 2508 f = self.dirstate.normalize(f)
2460 2509 if f == '.' or f in matched or f in wctx.substate:
2461 2510 continue
2462 2511 if f in status.deleted:
2463 2512 fail(f, _('file not found!'))
2464 2513 if f in vdirs: # visited directory
2465 2514 d = f + '/'
2466 2515 for mf in matched:
2467 2516 if mf.startswith(d):
2468 2517 break
2469 2518 else:
2470 2519 fail(f, _("no match under directory!"))
2471 2520 elif f not in self.dirstate:
2472 2521 fail(f, _("file not tracked!"))
2473 2522
2474 2523 @unfilteredmethod
2475 2524 def commit(self, text="", user=None, date=None, match=None, force=False,
2476 2525 editor=False, extra=None):
2477 2526 """Add a new revision to current repository.
2478 2527
2479 2528 Revision information is gathered from the working directory,
2480 2529 match can be used to filter the committed files. If editor is
2481 2530 supplied, it is called to get a commit message.
2482 2531 """
2483 2532 if extra is None:
2484 2533 extra = {}
2485 2534
2486 2535 def fail(f, msg):
2487 2536 raise error.Abort('%s: %s' % (f, msg))
2488 2537
2489 2538 if not match:
2490 2539 match = matchmod.always()
2491 2540
2492 2541 if not force:
2493 2542 vdirs = []
2494 2543 match.explicitdir = vdirs.append
2495 2544 match.bad = fail
2496 2545
2497 2546 # lock() for recent changelog (see issue4368)
2498 2547 with self.wlock(), self.lock():
2499 2548 wctx = self[None]
2500 2549 merge = len(wctx.parents()) > 1
2501 2550
2502 2551 if not force and merge and not match.always():
2503 2552 raise error.Abort(_('cannot partially commit a merge '
2504 2553 '(do not specify files or patterns)'))
2505 2554
2506 2555 status = self.status(match=match, clean=force)
2507 2556 if force:
2508 2557 status.modified.extend(status.clean) # mq may commit clean files
2509 2558
2510 2559 # check subrepos
2511 2560 subs, commitsubs, newstate = subrepoutil.precommit(
2512 2561 self.ui, wctx, status, match, force=force)
2513 2562
2514 2563 # make sure all explicit patterns are matched
2515 2564 if not force:
2516 2565 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2517 2566
2518 2567 cctx = context.workingcommitctx(self, status,
2519 2568 text, user, date, extra)
2520 2569
2521 2570 # internal config: ui.allowemptycommit
2522 2571 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2523 2572 or extra.get('close') or merge or cctx.files()
2524 2573 or self.ui.configbool('ui', 'allowemptycommit'))
2525 2574 if not allowemptycommit:
2526 2575 return None
2527 2576
2528 2577 if merge and cctx.deleted():
2529 2578 raise error.Abort(_("cannot commit merge with missing files"))
2530 2579
2531 2580 ms = mergemod.mergestate.read(self)
2532 2581 mergeutil.checkunresolved(ms)
2533 2582
2534 2583 if editor:
2535 2584 cctx._text = editor(self, cctx, subs)
2536 2585 edited = (text != cctx._text)
2537 2586
2538 2587 # Save commit message in case this transaction gets rolled back
2539 2588 # (e.g. by a pretxncommit hook). Leave the content alone on
2540 2589 # the assumption that the user will use the same editor again.
2541 2590 msgfn = self.savecommitmessage(cctx._text)
2542 2591
2543 2592 # commit subs and write new state
2544 2593 if subs:
2545 2594 uipathfn = scmutil.getuipathfn(self)
2546 2595 for s in sorted(commitsubs):
2547 2596 sub = wctx.sub(s)
2548 2597 self.ui.status(_('committing subrepository %s\n') %
2549 2598 uipathfn(subrepoutil.subrelpath(sub)))
2550 2599 sr = sub.commit(cctx._text, user, date)
2551 2600 newstate[s] = (newstate[s][0], sr)
2552 2601 subrepoutil.writestate(self, newstate)
2553 2602
2554 2603 p1, p2 = self.dirstate.parents()
2555 2604 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2556 2605 try:
2557 2606 self.hook("precommit", throw=True, parent1=hookp1,
2558 2607 parent2=hookp2)
2559 2608 with self.transaction('commit'):
2560 2609 ret = self.commitctx(cctx, True)
2561 2610 # update bookmarks, dirstate and mergestate
2562 2611 bookmarks.update(self, [p1, p2], ret)
2563 2612 cctx.markcommitted(ret)
2564 2613 ms.reset()
2565 2614 except: # re-raises
2566 2615 if edited:
2567 2616 self.ui.write(
2568 2617 _('note: commit message saved in %s\n') % msgfn)
2569 2618 raise
2570 2619
2571 2620 def commithook():
2572 2621 # hack for commands that use a temporary commit (eg: histedit):
2573 2622 # the temporary commit may have been stripped before the hook runs
2574 2623 if self.changelog.hasnode(ret):
2575 2624 self.hook("commit", node=hex(ret), parent1=hookp1,
2576 2625 parent2=hookp2)
2577 2626 self._afterlock(commithook)
2578 2627 return ret
2579 2628
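# Example (hedged sketch): a minimal programmatic commit; commit()
# acquires wlock and lock itself and returns the new node, or None if
# there was nothing to commit:
#
#   node = repo.commit(text='fix typo', user='alice <alice@example.com>')
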
2580 2629 @unfilteredmethod
2581 2630 def commitctx(self, ctx, error=False, origctx=None):
2582 2631 """Add a new revision to current repository.
2583 2632 Revision information is passed via the context argument.
2584 2633
2585 2634 ctx.files() should list all files involved in this commit, i.e.
2586 2635 modified/added/removed files. On merge, it may be wider than the
2587 2636 ctx.files() to be committed, since any file nodes derived directly
2588 2637 from p1 or p2 are excluded from the committed ctx.files().
2589 2638
2590 2639 origctx is for convert to work around the problem that bug
2591 2640 fixes to the files list in changesets change hashes. For
2592 2641 convert to be the identity, it can pass an origctx and this
2593 2642 function will use the same files list when it makes sense to
2594 2643 do so.
2595 2644 """
2596 2645
2597 2646 p1, p2 = ctx.p1(), ctx.p2()
2598 2647 user = ctx.user()
2599 2648
2600 2649 writecopiesto = self.ui.config('experimental', 'copies.write-to')
2601 2650 writefilecopymeta = writecopiesto != 'changeset-only'
2602 2651 writechangesetcopy = (writecopiesto in
2603 2652 ('changeset-only', 'compatibility'))
2604 2653 p1copies, p2copies = None, None
2605 2654 if writechangesetcopy:
2606 2655 p1copies = ctx.p1copies()
2607 2656 p2copies = ctx.p2copies()
2608 2657 filesadded, filesremoved = None, None
2609 2658 with self.lock(), self.transaction("commit") as tr:
2610 2659 trp = weakref.proxy(tr)
2611 2660
2612 2661 if ctx.manifestnode():
2613 2662 # reuse an existing manifest revision
2614 2663 self.ui.debug('reusing known manifest\n')
2615 2664 mn = ctx.manifestnode()
2616 2665 files = ctx.files()
2617 2666 if writechangesetcopy:
2618 2667 filesadded = ctx.filesadded()
2619 2668 filesremoved = ctx.filesremoved()
2620 2669 elif ctx.files():
2621 2670 m1ctx = p1.manifestctx()
2622 2671 m2ctx = p2.manifestctx()
2623 2672 mctx = m1ctx.copy()
2624 2673
2625 2674 m = mctx.read()
2626 2675 m1 = m1ctx.read()
2627 2676 m2 = m2ctx.read()
2628 2677
2629 2678 # check in files
2630 2679 added = []
2631 2680 changed = []
2632 2681 removed = list(ctx.removed())
2633 2682 linkrev = len(self)
2634 2683 self.ui.note(_("committing files:\n"))
2635 2684 uipathfn = scmutil.getuipathfn(self)
2636 2685 for f in sorted(ctx.modified() + ctx.added()):
2637 2686 self.ui.note(uipathfn(f) + "\n")
2638 2687 try:
2639 2688 fctx = ctx[f]
2640 2689 if fctx is None:
2641 2690 removed.append(f)
2642 2691 else:
2643 2692 added.append(f)
2644 2693 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2645 2694 trp, changed,
2646 2695 writefilecopymeta)
2647 2696 m.setflag(f, fctx.flags())
2648 2697 except OSError:
2649 2698 self.ui.warn(_("trouble committing %s!\n") %
2650 2699 uipathfn(f))
2651 2700 raise
2652 2701 except IOError as inst:
2653 2702 errcode = getattr(inst, 'errno', errno.ENOENT)
2654 2703 if error or errcode and errcode != errno.ENOENT:
2655 2704 self.ui.warn(_("trouble committing %s!\n") %
2656 2705 uipathfn(f))
2657 2706 raise
2658 2707
2659 2708 # update manifest
2660 2709 removed = [f for f in removed if f in m1 or f in m2]
2661 2710 drop = sorted([f for f in removed if f in m])
2662 2711 for f in drop:
2663 2712 del m[f]
2664 2713 if p2.rev() != nullrev:
2665 2714 @util.cachefunc
2666 2715 def mas():
2667 2716 p1n = p1.node()
2668 2717 p2n = p2.node()
2669 2718 cahs = self.changelog.commonancestorsheads(p1n, p2n)
2670 2719 if not cahs:
2671 2720 cahs = [nullrev]
2672 2721 return [self[r].manifest() for r in cahs]
2673 2722 def deletionfromparent(f):
2674 2723 # When a file is removed relative to p1 in a merge, this
2675 2724 # function determines whether the absence is due to a
2676 2725 # deletion from a parent, or whether the merge commit
2677 2726 # itself deletes the file. We decide this by doing a
2678 2727 # simplified three way merge of the manifest entry for
2679 2728 # the file. There are two ways we decide the merge
2680 2729 # itself didn't delete a file:
2681 2730 # - neither parent (nor the merge) contain the file
2682 2731 # - exactly one parent contains the file, and that
2683 2732 # parent has the same filelog entry as the merge
2684 2733 # ancestor (or all of them if there are two). In other
2685 2734 # words, that parent left the file unchanged while the
2686 2735 # other one deleted it.
2687 2736 # One way to think about this is that deleting a file is
2688 2737 # similar to emptying it, so the list of changed files
2689 2738 # should be similar either way. The computation
2690 2739 # described above is not done directly in _filecommit
2691 2740 # when creating the list of changed files, however
2692 2741 # it does something very similar by comparing filelog
2693 2742 # nodes.
2694 2743 if f in m1:
2695 2744 return (f not in m2
2696 2745 and all(f in ma and ma.find(f) == m1.find(f)
2697 2746 for ma in mas()))
2698 2747 elif f in m2:
2699 2748 return all(f in ma and ma.find(f) == m2.find(f)
2700 2749 for ma in mas())
2701 2750 else:
2702 2751 return True
2703 2752 removed = [f for f in removed if not deletionfromparent(f)]
2704 2753
2705 2754 files = changed + removed
2706 2755 md = None
2707 2756 if not files:
2708 2757 # if no "files" actually changed in terms of the changelog,
2709 2758 # try hard to detect an unmodified manifest entry so that the
2710 2759 # exact same commit can be reproduced later by convert.
2711 2760 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2712 2761 if not files and md:
2713 2762 self.ui.debug('not reusing manifest (no file change in '
2714 2763 'changelog, but manifest differs)\n')
2715 2764 if files or md:
2716 2765 self.ui.note(_("committing manifest\n"))
2717 2766 # we're using narrowmatch here since it's already applied at
2718 2767 # other stages (such as dirstate.walk), so we're already
2719 2768 # ignoring things outside of narrowspec in most cases. The
2720 2769 # one case where we might have files outside the narrowspec
2721 2770 # at this point is merges, and we already error out in the
2722 2771 # case where the merge has files outside of the narrowspec,
2723 2772 # so this is safe.
2724 2773 mn = mctx.write(trp, linkrev,
2725 2774 p1.manifestnode(), p2.manifestnode(),
2726 2775 added, drop, match=self.narrowmatch())
2727 2776
2728 2777 if writechangesetcopy:
2729 2778 filesadded = [f for f in changed
2730 2779 if not (f in m1 or f in m2)]
2731 2780 filesremoved = removed
2732 2781 else:
2733 2782 self.ui.debug('reusing manifest from p1 (listed files '
2734 2783 'actually unchanged)\n')
2735 2784 mn = p1.manifestnode()
2736 2785 else:
2737 2786 self.ui.debug('reusing manifest from p1 (no file change)\n')
2738 2787 mn = p1.manifestnode()
2739 2788 files = []
2740 2789
2741 2790 if writecopiesto == 'changeset-only':
2742 2791 # If writing only to changeset extras, use None to indicate that
2743 2792 # no entry should be written. If writing to both, write an empty
2744 2793 # entry to prevent the reader from falling back to reading
2745 2794 # filelogs.
2746 2795 p1copies = p1copies or None
2747 2796 p2copies = p2copies or None
2748 2797 filesadded = filesadded or None
2749 2798 filesremoved = filesremoved or None
2750 2799
2751 2800 if origctx and origctx.manifestnode() == mn:
2752 2801 files = origctx.files()
2753 2802
2754 2803 # update changelog
2755 2804 self.ui.note(_("committing changelog\n"))
2756 2805 self.changelog.delayupdate(tr)
2757 2806 n = self.changelog.add(mn, files, ctx.description(),
2758 2807 trp, p1.node(), p2.node(),
2759 2808 user, ctx.date(), ctx.extra().copy(),
2760 2809 p1copies, p2copies, filesadded, filesremoved)
2761 2810 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2762 2811 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2763 2812 parent2=xp2)
2764 2813 # set the new commit in its proper phase
2765 2814 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2766 2815 if targetphase:
2767 2816 # retract boundary does not alter parent changesets.
2768 2817 # if a parent has a higher phase, the resulting phase will
2769 2818 # be compliant anyway
2770 2819 #
2771 2820 # if minimal phase was 0 we don't need to retract anything
2772 2821 phases.registernew(self, tr, targetphase, [n])
2773 2822 return n
2774 2823
2775 2824 @unfilteredmethod
2776 2825 def destroying(self):
2777 2826 '''Inform the repository that nodes are about to be destroyed.
2778 2827 Intended for use by strip and rollback, so there's a common
2779 2828 place for anything that has to be done before destroying history.
2780 2829
2781 2830 This is mostly useful for saving state that is in memory and waiting
2782 2831 to be flushed when the current lock is released. Because a call to
2783 2832 destroyed is imminent, the repo will be invalidated causing those
2784 2833 changes to stay in memory (waiting for the next unlock), or vanish
2785 2834 completely.
2786 2835 '''
2787 2836 # When using the same lock to commit and strip, the phasecache is left
2788 2837 # dirty after committing. Then when we strip, the repo is invalidated,
2789 2838 # causing those changes to disappear.
2790 2839 if '_phasecache' in vars(self):
2791 2840 self._phasecache.write()
2792 2841
2793 2842 @unfilteredmethod
2794 2843 def destroyed(self):
2795 2844 '''Inform the repository that nodes have been destroyed.
2796 2845 Intended for use by strip and rollback, so there's a common
2797 2846 place for anything that has to be done after destroying history.
2798 2847 '''
2799 2848 # When one tries to:
2800 2849 # 1) destroy nodes thus calling this method (e.g. strip)
2801 2850 # 2) use phasecache somewhere (e.g. commit)
2802 2851 #
2803 2852 # then 2) will fail because the phasecache contains nodes that were
2804 2853 # removed. We can either remove phasecache from the filecache,
2805 2854 # causing it to reload next time it is accessed, or simply filter
2806 2855 # the removed nodes now and write the updated cache.
2807 2856 self._phasecache.filterunknown(self)
2808 2857 self._phasecache.write()
2809 2858
2810 2859 # refresh all repository caches
2811 2860 self.updatecaches()
2812 2861
2813 2862 # Ensure the persistent tag cache is updated. Doing it now
2814 2863 # means that the tag cache only has to worry about destroyed
2815 2864 # heads immediately after a strip/rollback. That in turn
2816 2865 # guarantees that "cachetip == currenttip" (comparing both rev
2817 2866 # and node) always means no nodes have been added or destroyed.
2818 2867
2819 2868 # XXX this is suboptimal when qrefresh'ing: we strip the current
2820 2869 # head, refresh the tag cache, then immediately add a new head.
2821 2870 # But I think doing it this way is necessary for the "instant
2822 2871 # tag cache retrieval" case to work.
2823 2872 self.invalidate()
2824 2873
2825 2874 def status(self, node1='.', node2=None, match=None,
2826 2875 ignored=False, clean=False, unknown=False,
2827 2876 listsubrepos=False):
2828 2877 '''a convenience method that calls node1.status(node2)'''
2829 2878 return self[node1].status(node2, match, ignored, clean, unknown,
2830 2879 listsubrepos)
2831 2880
2832 2881 def addpostdsstatus(self, ps):
2833 2882 """Add a callback to run within the wlock, at the point at which status
2834 2883 fixups happen.
2835 2884
2836 2885 On status completion, callback(wctx, status) will be called with the
2837 2886 wlock held, unless the dirstate has changed from underneath or the wlock
2838 2887 couldn't be grabbed.
2839 2888
2840 2889 Callbacks should not capture and use a cached copy of the dirstate --
2841 2890 it might change in the meantime. Instead, they should access the
2842 2891 dirstate via wctx.repo().dirstate.
2843 2892
2844 2893 This list is emptied out after each status run -- extensions should
2845 2894 make sure they add to this list each time dirstate.status is called.
2846 2895 Extensions should also make sure they don't call this for statuses
2847 2896 that don't involve the dirstate.
2848 2897 """
2849 2898
2850 2899 # The list is located here for uniqueness reasons -- it is actually
2851 2900 # managed by the workingctx, but that isn't unique per-repo.
2852 2901 self._postdsstatus.append(ps)
2853 2902
2854 2903 def postdsstatus(self):
2855 2904 """Used by workingctx to get the list of post-dirstate-status hooks."""
2856 2905 return self._postdsstatus
2857 2906
2858 2907 def clearpostdsstatus(self):
2859 2908 """Used by workingctx to clear post-dirstate-status hooks."""
2860 2909 del self._postdsstatus[:]
2861 2910
2862 2911 def heads(self, start=None):
2863 2912 if start is None:
2864 2913 cl = self.changelog
2865 2914 headrevs = reversed(cl.headrevs())
2866 2915 return [cl.node(rev) for rev in headrevs]
2867 2916
2868 2917 heads = self.changelog.heads(start)
2869 2918 # sort the output in rev descending order
2870 2919 return sorted(heads, key=self.changelog.rev, reverse=True)
2871 2920
2872 2921 def branchheads(self, branch=None, start=None, closed=False):
2873 2922 '''return a (possibly filtered) list of heads for the given branch
2874 2923
2875 2924 Heads are returned in topological order, from newest to oldest.
2876 2925 If branch is None, use the dirstate branch.
2877 2926 If start is not None, return only heads reachable from start.
2878 2927 If closed is True, return heads that are marked as closed as well.
2879 2928 '''
2880 2929 if branch is None:
2881 2930 branch = self[None].branch()
2882 2931 branches = self.branchmap()
2883 2932 if not branches.hasbranch(branch):
2884 2933 return []
2885 2934 # the cache returns heads ordered lowest to highest
2886 2935 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2887 2936 if start is not None:
2888 2937 # filter out the heads that cannot be reached from startrev
2889 2938 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2890 2939 bheads = [h for h in bheads if h in fbheads]
2891 2940 return bheads
2892 2941
2893 2942 def branches(self, nodes):
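        # For each starting node, walk first parents until reaching a merge
        # or a root, recording a (start, stop, p1-of-stop, p2-of-stop)
        # tuple per node. Historically used by the legacy 'branches' wire
        # protocol command.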
2894 2943 if not nodes:
2895 2944 nodes = [self.changelog.tip()]
2896 2945 b = []
2897 2946 for n in nodes:
2898 2947 t = n
2899 2948 while True:
2900 2949 p = self.changelog.parents(n)
2901 2950 if p[1] != nullid or p[0] == nullid:
2902 2951 b.append((t, n, p[0], p[1]))
2903 2952 break
2904 2953 n = p[0]
2905 2954 return b
2906 2955
2907 2956 def between(self, pairs):
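        # For each (top, bottom) pair, walk first parents from top toward
        # bottom, sampling nodes at exponentially growing distances
        # (1, 2, 4, 8, ...). This lets legacy discovery (the 'between'
        # wire command) narrow down a linear range in O(log n) round trips.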
2908 2957 r = []
2909 2958
2910 2959 for top, bottom in pairs:
2911 2960 n, l, i = top, [], 0
2912 2961 f = 1
2913 2962
2914 2963 while n != bottom and n != nullid:
2915 2964 p = self.changelog.parents(n)[0]
2916 2965 if i == f:
2917 2966 l.append(n)
2918 2967 f = f * 2
2919 2968 n = p
2920 2969 i += 1
2921 2970
2922 2971 r.append(l)
2923 2972
2924 2973 return r
2925 2974
2926 2975 def checkpush(self, pushop):
2927 2976 """Extensions can override this function if additional checks have
2928 2977 to be performed before pushing, or call it if they override push
2929 2978 command.
2930 2979 """
2931 2980
2932 2981 @unfilteredpropertycache
2933 2982 def prepushoutgoinghooks(self):
2934 2983 """Return util.hooks consists of a pushop with repo, remote, outgoing
2935 2984 methods, which are called before pushing changesets.
2936 2985 """
2937 2986 return util.hooks()
2938 2987
2939 2988 def pushkey(self, namespace, key, old, new):
2940 2989 try:
2941 2990 tr = self.currenttransaction()
2942 2991 hookargs = {}
2943 2992 if tr is not None:
2944 2993 hookargs.update(tr.hookargs)
2945 2994 hookargs = pycompat.strkwargs(hookargs)
2946 2995 hookargs[r'namespace'] = namespace
2947 2996 hookargs[r'key'] = key
2948 2997 hookargs[r'old'] = old
2949 2998 hookargs[r'new'] = new
2950 2999 self.hook('prepushkey', throw=True, **hookargs)
2951 3000 except error.HookAbort as exc:
2952 3001 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2953 3002 if exc.hint:
2954 3003 self.ui.write_err(_("(%s)\n") % exc.hint)
2955 3004 return False
2956 3005 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2957 3006 ret = pushkey.push(self, namespace, key, old, new)
2958 3007 def runhook():
2959 3008 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2960 3009 ret=ret)
2961 3010 self._afterlock(runhook)
2962 3011 return ret
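    # Hypothetical usage sketch (not part of this module): moving a
    # bookmark through the pushkey protocol, which fires the prepushkey
    # and pushkey hooks around the update (oldhex/newhex are hex node
    # strings):
    #
    #   repo.pushkey('bookmarks', 'book-A', oldhex, newhex)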
2963 3012
2964 3013 def listkeys(self, namespace):
2965 3014 self.hook('prelistkeys', throw=True, namespace=namespace)
2966 3015 self.ui.debug('listing keys for "%s"\n' % namespace)
2967 3016 values = pushkey.list(self, namespace)
2968 3017 self.hook('listkeys', namespace=namespace, values=values)
2969 3018 return values
2970 3019
2971 3020 def debugwireargs(self, one, two, three=None, four=None, five=None):
2972 3021 '''used to test argument passing over the wire'''
2973 3022 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2974 3023 pycompat.bytestr(four),
2975 3024 pycompat.bytestr(five))
2976 3025
2977 3026 def savecommitmessage(self, text):
2978 3027 fp = self.vfs('last-message.txt', 'wb')
2979 3028 try:
2980 3029 fp.write(text)
2981 3030 finally:
2982 3031 fp.close()
2983 3032 return self.pathto(fp.name[len(self.root) + 1:])
2984 3033
2985 3034 # used to avoid circular references so destructors work
2986 3035 def aftertrans(files):
2987 3036 renamefiles = [tuple(t) for t in files]
2988 3037 def a():
2989 3038 for vfs, src, dest in renamefiles:
2990 3039 # if src and dest refer to the same file, vfs.rename is a no-op,
2991 3040 # leaving both src and dest on disk. delete dest to make sure
2992 3041 # the rename couldn't be such a no-op.
2993 3042 vfs.tryunlink(dest)
2994 3043 try:
2995 3044 vfs.rename(src, dest)
2996 3045 except OSError: # journal file does not yet exist
2997 3046 pass
2998 3047 return a
2999 3048
3000 3049 def undoname(fn):
3001 3050 base, name = os.path.split(fn)
3002 3051 assert name.startswith('journal')
3003 3052 return os.path.join(base, name.replace('journal', 'undo', 1))
3004 3053
3005 3054 def instance(ui, path, create, intents=None, createopts=None):
3006 3055 localpath = util.urllocalpath(path)
3007 3056 if create:
3008 3057 createrepository(ui, localpath, createopts=createopts)
3009 3058
3010 3059 return makelocalrepository(ui, localpath, intents=intents)
3011 3060
3012 3061 def islocal(path):
3013 3062 return True
3014 3063
3015 3064 def defaultcreateopts(ui, createopts=None):
3016 3065 """Populate the default creation options for a repository.
3017 3066
3018 3067 A dictionary of explicitly requested creation options can be passed
3019 3068 in. Missing keys will be populated.
3020 3069 """
3021 3070 createopts = dict(createopts or {})
3022 3071
3023 3072 if 'backend' not in createopts:
3024 3073 # experimental config: storage.new-repo-backend
3025 3074 createopts['backend'] = ui.config('storage', 'new-repo-backend')
3026 3075
3027 3076 return createopts
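# Hypothetical usage sketch (not part of this module): callers populate
# default creation options before computing requirements, mirroring what
# createrepository() below does:
#
#   createopts = defaultcreateopts(ui, createopts={'narrowfiles': True})
#   requirements = newreporequirements(ui, createopts=createopts)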
3028 3077
3029 3078 def newreporequirements(ui, createopts):
3030 3079 """Determine the set of requirements for a new local repository.
3031 3080
3032 3081 Extensions can wrap this function to specify custom requirements for
3033 3082 new repositories.
3034 3083 """
3035 3084 # If the repo is being created from a shared repository, we copy
3036 3085 # its requirements.
3037 3086 if 'sharedrepo' in createopts:
3038 3087 requirements = set(createopts['sharedrepo'].requirements)
3039 3088 if createopts.get('sharedrelative'):
3040 3089 requirements.add('relshared')
3041 3090 else:
3042 3091 requirements.add('shared')
3043 3092
3044 3093 return requirements
3045 3094
3046 3095 if 'backend' not in createopts:
3047 3096 raise error.ProgrammingError('backend key not present in createopts; '
3048 3097 'was defaultcreateopts() called?')
3049 3098
3050 3099 if createopts['backend'] != 'revlogv1':
3051 3100 raise error.Abort(_('unable to determine repository requirements for '
3052 3101 'storage backend: %s') % createopts['backend'])
3053 3102
3054 3103 requirements = {'revlogv1'}
3055 3104 if ui.configbool('format', 'usestore'):
3056 3105 requirements.add('store')
3057 3106 if ui.configbool('format', 'usefncache'):
3058 3107 requirements.add('fncache')
3059 3108 if ui.configbool('format', 'dotencode'):
3060 3109 requirements.add('dotencode')
3061 3110
3062 3111 compengine = ui.config('format', 'revlog-compression')
3063 3112 if compengine not in util.compengines:
3064 3113 raise error.Abort(_('compression engine %s defined by '
3065 3114 'format.revlog-compression not available') %
3066 3115 compengine,
3067 3116 hint=_('run "hg debuginstall" to list available '
3068 3117 'compression engines'))
3069 3118
3070 3119 # zlib is the historical default and doesn't need an explicit requirement.
3071 3120 elif compengine == 'zstd':
3072 3121 requirements.add('revlog-compression-zstd')
3073 3122 elif compengine != 'zlib':
3074 3123 requirements.add('exp-compression-%s' % compengine)
3075 3124
3076 3125 if scmutil.gdinitconfig(ui):
3077 3126 requirements.add('generaldelta')
3078 3127 if ui.configbool('format', 'sparse-revlog'):
3079 3128 requirements.add(SPARSEREVLOG_REQUIREMENT)
3080 3129 if ui.configbool('experimental', 'treemanifest'):
3081 3130 requirements.add('treemanifest')
3082 3131
3083 3132 revlogv2 = ui.config('experimental', 'revlogv2')
3084 3133 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
3085 3134 requirements.remove('revlogv1')
3086 3135 # generaldelta is implied by revlogv2.
3087 3136 requirements.discard('generaldelta')
3088 3137 requirements.add(REVLOGV2_REQUIREMENT)
3089 3138 # experimental config: format.internal-phase
3090 3139 if ui.configbool('format', 'internal-phase'):
3091 3140 requirements.add('internal-phase')
3092 3141
3093 3142 if createopts.get('narrowfiles'):
3094 3143 requirements.add(repository.NARROW_REQUIREMENT)
3095 3144
3096 3145 if createopts.get('lfs'):
3097 3146 requirements.add('lfs')
3098 3147
3099 3148 if ui.configbool('format', 'bookmarks-in-store'):
3100 3149 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3101 3150
3102 3151 return requirements
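# Hypothetical extension sketch (not part of this module): the docstring
# above invites wrapping, so an extension could add its own requirement
# to every new repository:
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, ui, createopts):
#       requirements = orig(ui, createopts)
#       requirements.add('exp-myextension')
#       return requirements
#
#   extensions.wrapfunction(localrepo, 'newreporequirements',
#                           _newreporequirements)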
3103 3152
3104 3153 def filterknowncreateopts(ui, createopts):
3105 3154 """Filters a dict of repo creation options against options that are known.
3106 3155
3107 3156 Receives a dict of repo creation options and returns a dict of those
3108 3157 options that we don't know how to handle.
3109 3158
3110 3159 This function is called as part of repository creation. If the
3111 3160 returned dict contains any items, repository creation will not
3112 3161 be allowed, as it means there was a request to create a repository
3113 3162 with options not recognized by loaded code.
3114 3163
3115 3164 Extensions can wrap this function to filter out creation options
3116 3165 they know how to handle.
3117 3166 """
3118 3167 known = {
3119 3168 'backend',
3120 3169 'lfs',
3121 3170 'narrowfiles',
3122 3171 'sharedrepo',
3123 3172 'sharedrelative',
3124 3173 'shareditems',
3125 3174 'shallowfilestore',
3126 3175 }
3127 3176
3128 3177 return {k: v for k, v in createopts.items() if k not in known}
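# Hypothetical extension sketch (not part of this module): an extension
# handling a custom creation option would strip it from the unknown set
# here so repository creation is allowed to proceed:
#
#   def _filterknowncreateopts(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop('myext-option', None)
#       return unknown
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                           _filterknowncreateopts)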
3129 3178
3130 3179 def createrepository(ui, path, createopts=None):
3131 3180 """Create a new repository in a vfs.
3132 3181
3133 3182 ``path`` path to the new repo's working directory.
3134 3183 ``createopts`` options for the new repository.
3135 3184
3136 3185 The following keys for ``createopts`` are recognized:
3137 3186
3138 3187 backend
3139 3188 The storage backend to use.
3140 3189 lfs
3141 3190 Repository will be created with ``lfs`` requirement. The lfs extension
3142 3191 will automatically be loaded when the repository is accessed.
3143 3192 narrowfiles
3144 3193 Set up repository to support narrow file storage.
3145 3194 sharedrepo
3146 3195 Repository object from which storage should be shared.
3147 3196 sharedrelative
3148 3197 Boolean indicating if the path to the shared repo should be
3149 3198 stored as relative. By default, the pointer to the "parent" repo
3150 3199 is stored as an absolute path.
3151 3200 shareditems
3152 3201 Set of items to share to the new repository (in addition to storage).
3153 3202 shallowfilestore
3154 3203 Indicates that storage for files should be shallow (not all ancestor
3155 3204 revisions are known).
3156 3205 """
3157 3206 createopts = defaultcreateopts(ui, createopts=createopts)
3158 3207
3159 3208 unknownopts = filterknowncreateopts(ui, createopts)
3160 3209
3161 3210 if not isinstance(unknownopts, dict):
3162 3211 raise error.ProgrammingError('filterknowncreateopts() did not return '
3163 3212 'a dict')
3164 3213
3165 3214 if unknownopts:
3166 3215 raise error.Abort(_('unable to create repository because of unknown '
3167 3216 'creation option: %s') %
3168 3217 ', '.join(sorted(unknownopts)),
3169 3218 hint=_('is a required extension not loaded?'))
3170 3219
3171 3220 requirements = newreporequirements(ui, createopts=createopts)
3172 3221
3173 3222 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3174 3223
3175 3224 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3176 3225 if hgvfs.exists():
3177 3226 raise error.RepoError(_('repository %s already exists') % path)
3178 3227
3179 3228 if 'sharedrepo' in createopts:
3180 3229 sharedpath = createopts['sharedrepo'].sharedpath
3181 3230
3182 3231 if createopts.get('sharedrelative'):
3183 3232 try:
3184 3233 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3185 3234 except (IOError, ValueError) as e:
3186 3235 # ValueError is raised on Windows if the drive letters differ
3187 3236 # on each path.
3188 3237 raise error.Abort(_('cannot calculate relative path'),
3189 3238 hint=stringutil.forcebytestr(e))
3190 3239
3191 3240 if not wdirvfs.exists():
3192 3241 wdirvfs.makedirs()
3193 3242
3194 3243 hgvfs.makedir(notindexed=True)
3195 3244 if 'sharedrepo' not in createopts:
3196 3245 hgvfs.mkdir(b'cache')
3197 3246 hgvfs.mkdir(b'wcache')
3198 3247
3199 3248 if b'store' in requirements and 'sharedrepo' not in createopts:
3200 3249 hgvfs.mkdir(b'store')
3201 3250
3202 3251 # We create an invalid changelog outside the store so very old
3203 3252 # Mercurial versions (which didn't know about the requirements
3204 3253 # file) encounter an error on reading the changelog. This
3205 3254 # effectively locks out old clients and prevents them from
3206 3255 # mucking with a repo in an unknown format.
3207 3256 #
3208 3257 # The revlog header has version 2, which won't be recognized by
3209 3258 # such old clients.
3210 3259 hgvfs.append(b'00changelog.i',
3211 3260 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3212 3261 b'layout')
3213 3262
3214 3263 scmutil.writerequires(hgvfs, requirements)
3215 3264
3216 3265 # Write out file telling readers where to find the shared store.
3217 3266 if 'sharedrepo' in createopts:
3218 3267 hgvfs.write(b'sharedpath', sharedpath)
3219 3268
3220 3269 if createopts.get('shareditems'):
3221 3270 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3222 3271 hgvfs.write(b'shared', shared)
3223 3272
3224 3273 def poisonrepository(repo):
3225 3274 """Poison a repository instance so it can no longer be used."""
3226 3275 # Perform any cleanup on the instance.
3227 3276 repo.close()
3228 3277
3229 3278 # Our strategy is to replace the type of the object with one that
3230 3279 # has all attribute lookups result in error.
3231 3280 #
3232 3281 # But we have to allow the close() method because some constructors
3233 3282 # of repos call close() on repo references.
3234 3283 class poisonedrepository(object):
3235 3284 def __getattribute__(self, item):
3236 3285 if item == r'close':
3237 3286 return object.__getattribute__(self, item)
3238 3287
3239 3288 raise error.ProgrammingError('repo instances should not be used '
3240 3289 'after unshare')
3241 3290
3242 3291 def close(self):
3243 3292 pass
3244 3293
3245 3294 # We may have a repoview, which intercepts __setattr__. So be sure
3246 3295 # we operate at the lowest level possible.
3247 3296 object.__setattr__(repo, r'__class__', poisonedrepository)
@@ -1,245 +1,246 b''
1 1 ==================================
2 2 Test corner cases around bookmarks
3 3 ==================================
4 4
5 5 This test file is meant to gather tests around bookmarks that are
6 6 specific enough to not find a place elsewhere.
7 7
8 8 Test bookmark/changelog race condition
9 9 ======================================
10 10
11 11 The data from the bookmark file are filtered to only contain bookmarks
12 12 whose nodes are known to the changelog. If the cache invalidation between
13 13 these two reads goes wrong, bookmarks can be dropped.
14 14
15 15 global setup
16 16 ------------
17 17
18 18 $ cat >> $HGRCPATH << EOF
19 19 > [ui]
20 20 > ssh = "$PYTHON" "$TESTDIR/dummyssh"
21 21 > [server]
22 22 > concurrent-push-mode=check-related
23 23 > EOF
24 24
25 25 Setup
26 26 -----
27 27
28 28 initial repository setup
29 29
30 30 $ hg init bookrace-server
31 31 $ cd bookrace-server
32 32 $ echo a > a
33 33 $ hg add a
34 34 $ hg commit -m root
35 35 $ echo a >> a
36 36 $ hg bookmark book-A
37 37 $ hg commit -m A0
38 38 $ hg up 'desc(root)'
39 39 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 40 (leaving bookmark book-A)
41 41 $ echo b > b
42 42 $ hg add b
43 43 $ hg bookmark book-B
44 44 $ hg commit -m B0
45 45 created new head
46 46 $ hg up null
47 47 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
48 48 (leaving bookmark book-B)
49 49 $ hg phase --public --rev 'all()'
50 50 $ hg log -G
51 51 o changeset: 2:c79985706978
52 52 | bookmark: book-B
53 53 | tag: tip
54 54 | parent: 0:6569b5a81c7e
55 55 | user: test
56 56 | date: Thu Jan 01 00:00:00 1970 +0000
57 57 | summary: B0
58 58 |
59 59 | o changeset: 1:39c28d785860
60 60 |/ bookmark: book-A
61 61 | user: test
62 62 | date: Thu Jan 01 00:00:00 1970 +0000
63 63 | summary: A0
64 64 |
65 65 o changeset: 0:6569b5a81c7e
66 66 user: test
67 67 date: Thu Jan 01 00:00:00 1970 +0000
68 68 summary: root
69 69
70 70 $ hg book
71 71 book-A 1:39c28d785860
72 72 book-B 2:c79985706978
73 73 $ cd ..
74 74
75 75 Add a new changeset on each bookmark in distinct clones
76 76
77 77 $ hg clone ssh://user@dummy/bookrace-server client-A
78 78 requesting all changes
79 79 adding changesets
80 80 adding manifests
81 81 adding file changes
82 82 added 3 changesets with 3 changes to 2 files (+1 heads)
83 83 new changesets 6569b5a81c7e:c79985706978
84 84 updating to branch default
85 85 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
86 86 $ hg -R client-A update book-A
87 87 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
88 88 (activating bookmark book-A)
89 89 $ echo a >> client-A/a
90 90 $ hg -R client-A commit -m A1
91 91 $ hg clone ssh://user@dummy/bookrace-server client-B
92 92 requesting all changes
93 93 adding changesets
94 94 adding manifests
95 95 adding file changes
96 96 added 3 changesets with 3 changes to 2 files (+1 heads)
97 97 new changesets 6569b5a81c7e:c79985706978
98 98 updating to branch default
99 99 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 100 $ hg -R client-B update book-B
101 101 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 102 (activating bookmark book-B)
103 103 $ echo b >> client-B/b
104 104 $ hg -R client-B commit -m B1
105 105
106 106 extension to reproduce the race
107 107 -------------------------------
108 108
109 109 If two processes are pushing, we want to make sure the following happens:
110 110
111 111 * process A reads the changelog
112 112 * process B does its full push
113 113 * process A reads the bookmarks
114 114 * process A proceeds with the rest of the push
115 115
116 116 We build a server-side extension for this purpose:
117 117
118 118 $ cat > bookrace.py << EOF
119 119 > import atexit
120 120 > import os
121 121 > import time
122 122 > from mercurial import error, extensions, bookmarks
123 123 >
124 124 > def wait(repo):
125 125 > if not os.path.exists('push-A-started'):
126 126 > assert repo._currentlock(repo._lockref) is None
127 127 > assert repo._currentlock(repo._wlockref) is None
128 128 > repo.ui.status(b'setting raced push up\n')
129 129 > with open('push-A-started', 'w'):
130 130 > pass
131 131 > clock = 300
132 132 > while not os.path.exists('push-B-done'):
133 133 > clock -= 1
134 134 > if clock <= 0:
135 135 > raise error.Abort("race scenario timed out")
136 136 > time.sleep(0.1)
137 137 >
138 138 > def reposetup(ui, repo):
139 139 > class racedrepo(repo.__class__):
140 140 > @property
141 141 > def _bookmarks(self):
142 142 > wait(self)
143 143 > return super(racedrepo, self)._bookmarks
144 144 > repo.__class__ = racedrepo
145 145 >
146 146 > def e():
147 147 > with open('push-A-done', 'w'):
148 148 > pass
149 149 > atexit.register(e)
150 150 > EOF
151 151
152 152 Actual test
153 153 -----------
154 154
155 155 Start the raced push.
156 156
157 157 $ cat >> bookrace-server/.hg/hgrc << EOF
158 158 > [extensions]
159 159 > bookrace=$TESTTMP/bookrace.py
160 160 > EOF
161 161 $ hg push -R client-A -r book-A >push-output.txt 2>&1 &
162 162
163 163 Wait up to 30 seconds for that push to start.
164 164
165 165 $ clock=30
166 166 $ while [ ! -f push-A-started ] && [ $clock -gt 0 ] ; do
167 167 > clock=`expr $clock - 1`
168 168 > sleep 1
169 169 > done
170 170
171 171 Do the other push.
172 172
173 173 $ cat >> bookrace-server/.hg/hgrc << EOF
174 174 > [extensions]
175 175 > bookrace=!
176 176 > EOF
177 177
178 178 $ hg push -R client-B -r book-B
179 179 pushing to ssh://user@dummy/bookrace-server
180 180 searching for changes
181 181 remote: adding changesets
182 182 remote: adding manifests
183 183 remote: adding file changes
184 184 remote: added 1 changesets with 1 changes to 1 files
185 185 updating bookmark book-B
186 186
187 187 Signal the raced push that we are done (it waits up to 30 seconds).
188 188
189 189 $ touch push-B-done
190 190
191 191 Wait for the raced push to finish (within the remainder of the initial 30 seconds).
192 192
193 193 $ while [ ! -f push-A-done ] && [ $clock -gt 0 ] ; do
194 194 > clock=`expr $clock - 1`
195 195 > sleep 1
196 196 > done
197 197
198 198 Check raced push output.
199 199
200 200 $ cat push-output.txt
201 201 pushing to ssh://user@dummy/bookrace-server
202 202 searching for changes
203 remote has heads on branch 'default' that are not known locally: f26c3b5167d1
203 204 remote: setting raced push up
204 205 remote: adding changesets
205 206 remote: adding manifests
206 207 remote: adding file changes
207 208 remote: added 1 changesets with 1 changes to 1 files
208 209 updating bookmark book-A
209 210
210 211 Check result of the push.
211 212
212 213 $ hg -R bookrace-server log -G
213 214 o changeset: 4:9ce3b28c16de
214 215 | bookmark: book-A
215 216 | tag: tip
216 217 | parent: 1:39c28d785860
217 218 | user: test
218 219 | date: Thu Jan 01 00:00:00 1970 +0000
219 220 | summary: A1
220 221 |
221 222 | o changeset: 3:f26c3b5167d1
222 | | bookmark: book-B (false !)
223 | | bookmark: book-B
223 224 | | user: test
224 225 | | date: Thu Jan 01 00:00:00 1970 +0000
225 226 | | summary: B1
226 227 | |
227 228 | o changeset: 2:c79985706978
228 229 | | parent: 0:6569b5a81c7e
229 230 | | user: test
230 231 | | date: Thu Jan 01 00:00:00 1970 +0000
231 232 | | summary: B0
232 233 | |
233 234 o | changeset: 1:39c28d785860
234 235 |/ user: test
235 236 | date: Thu Jan 01 00:00:00 1970 +0000
236 237 | summary: A0
237 238 |
238 239 o changeset: 0:6569b5a81c7e
239 240 user: test
240 241 date: Thu Jan 01 00:00:00 1970 +0000
241 242 summary: root
242 243
243 244 $ hg -R bookrace-server book
244 245 book-A 4:9ce3b28c16de
245 book-B 3:f26c3b5167d1 (false !)
246 book-B 3:f26c3b5167d1