narrow: include working copy narrowspec in transaction journal...
Martin von Zweigbergk
r41264:3b35ebdb default
@@ -1,3085 +1,3089 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
79 79 release = lockmod.release
80 80 urlerr = util.urlerr
81 81 urlreq = util.urlreq
82 82
83 83 # set of (path, vfs-location) tuples. vfs-location is:
84 84 # - 'plain' for vfs relative paths
85 85 # - '' for svfs relative paths
86 86 _cachedfiles = set()
87 87
88 88 class _basefilecache(scmutil.filecache):
89 89 """All filecache usage on repo are done for logic that should be unfiltered
90 90 """
91 91 def __get__(self, repo, type=None):
92 92 if repo is None:
93 93 return self
94 94 # proxy to unfiltered __dict__ since filtered repo has no entry
95 95 unfi = repo.unfiltered()
96 96 try:
97 97 return unfi.__dict__[self.sname]
98 98 except KeyError:
99 99 pass
100 100 return super(_basefilecache, self).__get__(unfi, type)
101 101
102 102 def set(self, repo, value):
103 103 return super(_basefilecache, self).set(repo.unfiltered(), value)
104 104
105 105 class repofilecache(_basefilecache):
106 106 """filecache for files in .hg but outside of .hg/store"""
107 107 def __init__(self, *paths):
108 108 super(repofilecache, self).__init__(*paths)
109 109 for path in paths:
110 110 _cachedfiles.add((path, 'plain'))
111 111
112 112 def join(self, obj, fname):
113 113 return obj.vfs.join(fname)
114 114
115 115 class storecache(_basefilecache):
116 116 """filecache for files in the store"""
117 117 def __init__(self, *paths):
118 118 super(storecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, ''))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.sjoin(fname)
124 124
125 125 def isfilecached(repo, name):
126 126 """check if a repo has already cached "name" filecache-ed property
127 127
128 128 This returns (cachedobj-or-None, iscached) tuple.
129 129 """
130 130 cacheentry = repo.unfiltered()._filecache.get(name, None)
131 131 if not cacheentry:
132 132 return None, False
133 133 return cacheentry.obj, True
134 134
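A minimal usage sketch of the filecache helpers above (illustrative only,
not part of localrepo.py; any localrepository instance works for `repo`):

    def reportdirstatecache(repo):
        # isfilecached() peeks at the cache without populating it; plain
        # attribute access (repo.dirstate) would load and cache the value.
        obj, cached = isfilecached(repo, 'dirstate')
        if cached:
            repo.ui.write(b'dirstate already cached\n')
        else:
            repo.ui.write(b'dirstate not loaded yet\n')
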
135 135 class unfilteredpropertycache(util.propertycache):
136 136 """propertycache that apply to unfiltered repo only"""
137 137
138 138 def __get__(self, repo, type=None):
139 139 unfi = repo.unfiltered()
140 140 if unfi is repo:
141 141 return super(unfilteredpropertycache, self).__get__(unfi)
142 142 return getattr(unfi, self.name)
143 143
144 144 class filteredpropertycache(util.propertycache):
145 145 """propertycache that must take filtering in account"""
146 146
147 147 def cachevalue(self, obj, value):
148 148 object.__setattr__(obj, self.name, value)
149 149
150 150
151 151 def hasunfilteredcache(repo, name):
152 152 """check if a repo has an unfilteredpropertycache value for <name>"""
153 153 return name in vars(repo.unfiltered())
154 154
155 155 def unfilteredmethod(orig):
156 156 """decorate method that always need to be run on unfiltered version"""
157 157 def wrapper(repo, *args, **kwargs):
158 158 return orig(repo.unfiltered(), *args, **kwargs)
159 159 return wrapper
160 160
161 161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
162 162 'unbundle'}
163 163 legacycaps = moderncaps.union({'changegroupsubset'})
164 164
165 165 @interfaceutil.implementer(repository.ipeercommandexecutor)
166 166 class localcommandexecutor(object):
167 167 def __init__(self, peer):
168 168 self._peer = peer
169 169 self._sent = False
170 170 self._closed = False
171 171
172 172 def __enter__(self):
173 173 return self
174 174
175 175 def __exit__(self, exctype, excvalue, exctb):
176 176 self.close()
177 177
178 178 def callcommand(self, command, args):
179 179 if self._sent:
180 180 raise error.ProgrammingError('callcommand() cannot be used after '
181 181 'sendcommands()')
182 182
183 183 if self._closed:
184 184 raise error.ProgrammingError('callcommand() cannot be used after '
185 185 'close()')
186 186
187 187 # We don't need to support anything fancy. Just call the named
188 188 # method on the peer and return a resolved future.
189 189 fn = getattr(self._peer, pycompat.sysstr(command))
190 190
191 191 f = pycompat.futures.Future()
192 192
193 193 try:
194 194 result = fn(**pycompat.strkwargs(args))
195 195 except Exception:
196 196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
197 197 else:
198 198 f.set_result(result)
199 199
200 200 return f
201 201
202 202 def sendcommands(self):
203 203 self._sent = True
204 204
205 205 def close(self):
206 206 self._closed = True
207 207
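A short sketch of driving localcommandexecutor through the executor
protocol (illustrative; `peer` would be a localpeer as defined below):

    def lookuptip(peer):
        with peer.commandexecutor() as e:
            f = e.callcommand(b'lookup', {b'key': b'tip'})
            e.sendcommands()
        # The local executor resolves futures eagerly, so the result is
        # available as soon as the command has been issued.
        return f.result()
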
208 208 @interfaceutil.implementer(repository.ipeercommands)
209 209 class localpeer(repository.peer):
210 210 '''peer for a local repo; reflects only the most recent API'''
211 211
212 212 def __init__(self, repo, caps=None):
213 213 super(localpeer, self).__init__()
214 214
215 215 if caps is None:
216 216 caps = moderncaps.copy()
217 217 self._repo = repo.filtered('served')
218 218 self.ui = repo.ui
219 219 self._caps = repo._restrictcapabilities(caps)
220 220
221 221 # Begin of _basepeer interface.
222 222
223 223 def url(self):
224 224 return self._repo.url()
225 225
226 226 def local(self):
227 227 return self._repo
228 228
229 229 def peer(self):
230 230 return self
231 231
232 232 def canpush(self):
233 233 return True
234 234
235 235 def close(self):
236 236 self._repo.close()
237 237
238 238 # End of _basepeer interface.
239 239
240 240 # Begin of _basewirecommands interface.
241 241
242 242 def branchmap(self):
243 243 return self._repo.branchmap()
244 244
245 245 def capabilities(self):
246 246 return self._caps
247 247
248 248 def clonebundles(self):
249 249 return self._repo.tryread('clonebundles.manifest')
250 250
251 251 def debugwireargs(self, one, two, three=None, four=None, five=None):
252 252 """Used to test argument passing over the wire"""
253 253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
254 254 pycompat.bytestr(four),
255 255 pycompat.bytestr(five))
256 256
257 257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
258 258 **kwargs):
259 259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
260 260 common=common, bundlecaps=bundlecaps,
261 261 **kwargs)[1]
262 262 cb = util.chunkbuffer(chunks)
263 263
264 264 if exchange.bundle2requested(bundlecaps):
265 265 # When requesting a bundle2, getbundle returns a stream to make the
266 266 # wire level function happier. We need to build a proper object
267 267 # from it in local peer.
268 268 return bundle2.getunbundler(self.ui, cb)
269 269 else:
270 270 return changegroup.getunbundler('01', cb, None)
271 271
272 272 def heads(self):
273 273 return self._repo.heads()
274 274
275 275 def known(self, nodes):
276 276 return self._repo.known(nodes)
277 277
278 278 def listkeys(self, namespace):
279 279 return self._repo.listkeys(namespace)
280 280
281 281 def lookup(self, key):
282 282 return self._repo.lookup(key)
283 283
284 284 def pushkey(self, namespace, key, old, new):
285 285 return self._repo.pushkey(namespace, key, old, new)
286 286
287 287 def stream_out(self):
288 288 raise error.Abort(_('cannot perform stream clone against local '
289 289 'peer'))
290 290
291 291 def unbundle(self, bundle, heads, url):
292 292 """apply a bundle on a repo
293 293
294 294 This function handles the repo locking itself."""
295 295 try:
296 296 try:
297 297 bundle = exchange.readbundle(self.ui, bundle, None)
298 298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
299 299 if util.safehasattr(ret, 'getchunks'):
300 300 # This is a bundle20 object, turn it into an unbundler.
301 301 # This little dance should be dropped eventually when the
302 302 # API is finally improved.
303 303 stream = util.chunkbuffer(ret.getchunks())
304 304 ret = bundle2.getunbundler(self.ui, stream)
305 305 return ret
306 306 except Exception as exc:
307 307 # If the exception contains output salvaged from a bundle2
308 308 # reply, we need to make sure it is printed before continuing
309 309 # to fail. So we build a bundle2 with such output and consume
310 310 # it directly.
311 311 #
312 312 # This is not very elegant but allows a "simple" solution for
313 313 # issue4594
314 314 output = getattr(exc, '_bundle2salvagedoutput', ())
315 315 if output:
316 316 bundler = bundle2.bundle20(self._repo.ui)
317 317 for out in output:
318 318 bundler.addpart(out)
319 319 stream = util.chunkbuffer(bundler.getchunks())
320 320 b = bundle2.getunbundler(self.ui, stream)
321 321 bundle2.processbundle(self._repo, b)
322 322 raise
323 323 except error.PushRaced as exc:
324 324 raise error.ResponseError(_('push failed:'),
325 325 stringutil.forcebytestr(exc))
326 326
327 327 # End of _basewirecommands interface.
328 328
329 329 # Begin of peer interface.
330 330
331 331 def commandexecutor(self):
332 332 return localcommandexecutor(self)
333 333
334 334 # End of peer interface.
335 335
336 336 @interfaceutil.implementer(repository.ipeerlegacycommands)
337 337 class locallegacypeer(localpeer):
338 338 '''peer extension which implements legacy methods too; used for tests with
339 339 restricted capabilities'''
340 340
341 341 def __init__(self, repo):
342 342 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
343 343
344 344 # Begin of baselegacywirecommands interface.
345 345
346 346 def between(self, pairs):
347 347 return self._repo.between(pairs)
348 348
349 349 def branches(self, nodes):
350 350 return self._repo.branches(nodes)
351 351
352 352 def changegroup(self, nodes, source):
353 353 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
354 354 missingheads=self._repo.heads())
355 355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356 356
357 357 def changegroupsubset(self, bases, heads, source):
358 358 outgoing = discovery.outgoing(self._repo, missingroots=bases,
359 359 missingheads=heads)
360 360 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
361 361
362 362 # End of baselegacywirecommands interface.
363 363
364 364 # Increment the sub-version when the revlog v2 format changes to lock out old
365 365 # clients.
366 366 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
367 367
368 368 # A repository with the sparserevlog feature will have delta chains that
369 369 # can spread over a larger span. Sparse reading cuts these large spans into
370 370 # pieces, so that each piece isn't too big.
371 371 # Without the sparserevlog capability, reading from the repository could use
372 372 # huge amounts of memory, because the whole span would be read at once,
373 373 # including all the intermediate revisions that aren't pertinent for the chain.
374 374 # This is why once a repository has enabled sparse-read, it becomes required.
375 375 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
376 376
377 377 # Functions receiving (ui, features) that extensions can register to impact
378 378 # the ability to load repositories with custom requirements. Only
379 379 # functions defined in loaded extensions are called.
380 380 #
381 381 # The function receives a set of requirement strings that the repository
382 382 # is capable of opening. Functions will typically add elements to the
383 383 # set to reflect that the extension knows how to handle those requirements.
384 384 featuresetupfuncs = set()
385 385
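As an illustration of the registration contract described above, a
hypothetical extension (module and requirement names invented for this
sketch) could register itself like this:

    # In a hypothetical extension module:
    from mercurial import localrepo

    def featuresetup(ui, features):
        # Declare that this extension knows how to open repositories
        # carrying the (invented) 'exp-myfeature' requirement.
        features.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)
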
386 386 def makelocalrepository(baseui, path, intents=None):
387 387 """Create a local repository object.
388 388
389 389 Given arguments needed to construct a local repository, this function
390 390 performs various early repository loading functionality (such as
391 391 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
392 392 the repository can be opened, derives a type suitable for representing
393 393 that repository, and returns an instance of it.
394 394
395 395 The returned object conforms to the ``repository.completelocalrepository``
396 396 interface.
397 397
398 398 The repository type is derived by calling a series of factory functions
399 399 for each aspect/interface of the final repository. These are defined by
400 400 ``REPO_INTERFACES``.
401 401
402 402 Each factory function is called to produce a type implementing a specific
403 403 interface. The cumulative list of returned types will be combined into a
404 404 new type and that type will be instantiated to represent the local
405 405 repository.
406 406
407 407 The factory functions each receive various state that may be consulted
408 408 as part of deriving a type.
409 409
410 410 Extensions should wrap these factory functions to customize repository type
411 411 creation. Note that an extension's wrapped function may be called even if
412 412 that extension is not loaded for the repo being constructed. Extensions
413 413 should check if their ``__name__`` appears in the
414 414 ``extensionmodulenames`` set passed to the factory function and no-op if
415 415 not.
416 416 """
417 417 ui = baseui.copy()
418 418 # Prevent copying repo configuration.
419 419 ui.copy = baseui.copy
420 420
421 421 # Working directory VFS rooted at repository root.
422 422 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
423 423
424 424 # Main VFS for .hg/ directory.
425 425 hgpath = wdirvfs.join(b'.hg')
426 426 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
427 427
428 428 # The .hg/ path should exist and should be a directory. All other
429 429 # cases are errors.
430 430 if not hgvfs.isdir():
431 431 try:
432 432 hgvfs.stat()
433 433 except OSError as e:
434 434 if e.errno != errno.ENOENT:
435 435 raise
436 436
437 437 raise error.RepoError(_(b'repository %s not found') % path)
438 438
439 439 # .hg/requires file contains a newline-delimited list of
440 440 # features/capabilities the opener (us) must have in order to use
441 441 # the repository. This file was introduced in Mercurial 0.9.2,
442 442 # which means very old repositories may not have one. We assume
443 443 # a missing file translates to no requirements.
444 444 try:
445 445 requirements = set(hgvfs.read(b'requires').splitlines())
446 446 except IOError as e:
447 447 if e.errno != errno.ENOENT:
448 448 raise
449 449 requirements = set()
450 450
451 451 # The .hg/hgrc file may load extensions or contain config options
452 452 # that influence repository construction. Attempt to load it and
453 453 # process any new extensions that it may have pulled in.
454 454 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
455 455 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
456 456 extensions.loadall(ui)
457 457 extensions.populateui(ui)
458 458
459 459 # Set of module names of extensions loaded for this repository.
460 460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
461 461
462 462 supportedrequirements = gathersupportedrequirements(ui)
463 463
464 464 # We first validate the requirements are known.
465 465 ensurerequirementsrecognized(requirements, supportedrequirements)
466 466
467 467 # Then we validate that the known set is reasonable to use together.
468 468 ensurerequirementscompatible(ui, requirements)
469 469
470 470 # TODO there are unhandled edge cases related to opening repositories with
471 471 # shared storage. If storage is shared, we should also test for requirements
472 472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
473 473 # that repo, as that repo may load extensions needed to open it. This is a
474 474 # bit complicated because we don't want the other hgrc to overwrite settings
475 475 # in this hgrc.
476 476 #
477 477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
478 478 # file when sharing repos. But if a requirement is added after the share is
479 479 # performed, thereby introducing a new requirement for the opener, we may
480 480 # not see that and could encounter a run-time error interacting with
481 481 # that shared store since it has an unknown-to-us requirement.
482 482
483 483 # At this point, we know we should be capable of opening the repository.
484 484 # Now get on with doing that.
485 485
486 486 features = set()
487 487
488 488 # The "store" part of the repository holds versioned data. How it is
489 489 # accessed is determined by various requirements. The ``shared`` or
490 490 # ``relshared`` requirements indicate the store lives in the path contained
491 491 # in the ``.hg/sharedpath`` file. This is an absolute path for
492 492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
493 493 if b'shared' in requirements or b'relshared' in requirements:
494 494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
495 495 if b'relshared' in requirements:
496 496 sharedpath = hgvfs.join(sharedpath)
497 497
498 498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
499 499
500 500 if not sharedvfs.exists():
501 501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
502 502 b'directory %s') % sharedvfs.base)
503 503
504 504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
505 505
506 506 storebasepath = sharedvfs.base
507 507 cachepath = sharedvfs.join(b'cache')
508 508 else:
509 509 storebasepath = hgvfs.base
510 510 cachepath = hgvfs.join(b'cache')
511 511 wcachepath = hgvfs.join(b'wcache')
512 512
513 513
514 514 # The store has changed over time and the exact layout is dictated by
515 515 # requirements. The store interface abstracts differences across all
516 516 # of them.
517 517 store = makestore(requirements, storebasepath,
518 518 lambda base: vfsmod.vfs(base, cacheaudited=True))
519 519 hgvfs.createmode = store.createmode
520 520
521 521 storevfs = store.vfs
522 522 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
523 523
524 524 # The cache vfs is used to manage cache files.
525 525 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
526 526 cachevfs.createmode = store.createmode
527 527 # The cache vfs is used to manage cache files related to the working copy
528 528 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
529 529 wcachevfs.createmode = store.createmode
530 530
531 531 # Now resolve the type for the repository object. We do this by repeatedly
532 532 # calling a factory function to produce types for specific aspects of the
533 533 # repo's operation. The aggregate returned types are used as base classes
534 534 # for a dynamically-derived type, which will represent our new repository.
535 535
536 536 bases = []
537 537 extrastate = {}
538 538
539 539 for iface, fn in REPO_INTERFACES:
540 540 # We pass all potentially useful state to give extensions tons of
541 541 # flexibility.
542 542 typ = fn()(ui=ui,
543 543 intents=intents,
544 544 requirements=requirements,
545 545 features=features,
546 546 wdirvfs=wdirvfs,
547 547 hgvfs=hgvfs,
548 548 store=store,
549 549 storevfs=storevfs,
550 550 storeoptions=storevfs.options,
551 551 cachevfs=cachevfs,
552 552 wcachevfs=wcachevfs,
553 553 extensionmodulenames=extensionmodulenames,
554 554 extrastate=extrastate,
555 555 baseclasses=bases)
556 556
557 557 if not isinstance(typ, type):
558 558 raise error.ProgrammingError('unable to construct type for %s' %
559 559 iface)
560 560
561 561 bases.append(typ)
562 562
563 563 # type() allows you to use characters in type names that wouldn't be
564 564 # recognized as Python symbols in source code. We abuse that to add
565 565 # rich information about our constructed repo.
566 566 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
567 567 wdirvfs.base,
568 568 b','.join(sorted(requirements))))
569 569
570 570 cls = type(name, tuple(bases), {})
571 571
572 572 return cls(
573 573 baseui=baseui,
574 574 ui=ui,
575 575 origroot=path,
576 576 wdirvfs=wdirvfs,
577 577 hgvfs=hgvfs,
578 578 requirements=requirements,
579 579 supportedrequirements=supportedrequirements,
580 580 sharedpath=storebasepath,
581 581 store=store,
582 582 cachevfs=cachevfs,
583 583 wcachevfs=wcachevfs,
584 584 features=features,
585 585 intents=intents)
586 586
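Per the guidance in makelocalrepository()'s docstring, an extension can
wrap one of the factory functions and no-op when it is not loaded for the
repo being constructed. A sketch (the derived class is left empty):

    # In a hypothetical extension module:
    from mercurial import extensions, localrepo

    def wrapmakemain(orig, **kwargs):
        cls = orig(**kwargs)
        # The wrapper may run even when this extension is not enabled
        # for the repo being opened, so check before customizing.
        if __name__ not in kwargs['extensionmodulenames']:
            return cls
        return type('myderivedrepo', (cls,), {})

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makemain', wrapmakemain)
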
587 587 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
588 588 """Load hgrc files/content into a ui instance.
589 589
590 590 This is called during repository opening to load any additional
591 591 config files or settings relevant to the current repository.
592 592
593 593 Returns a bool indicating whether any additional configs were loaded.
594 594
595 595 Extensions should monkeypatch this function to modify how per-repo
596 596 configs are loaded. For example, an extension may wish to pull in
597 597 configs from alternate files or sources.
598 598 """
599 599 try:
600 600 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
601 601 return True
602 602 except IOError:
603 603 return False
604 604
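Per the docstring above, extensions may monkeypatch loadhgrc. A sketch
that pulls in one extra config file ('hgrc-extra' is an invented name):

    # In a hypothetical extension module:
    from mercurial import extensions, localrepo

    def loadhgrcextra(orig, ui, wdirvfs, hgvfs, requirements):
        loaded = orig(ui, wdirvfs, hgvfs, requirements)
        try:
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            return True
        except IOError:
            return loaded

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', loadhgrcextra)
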
605 605 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
606 606 """Perform additional actions after .hg/hgrc is loaded.
607 607
608 608 This function is called during repository loading immediately after
609 609 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
610 610
611 611 The function can be used to validate configs, automatically add
612 612 options (including extensions) based on requirements, etc.
613 613 """
614 614
615 615 # Map of requirements to list of extensions to load automatically when
616 616 # requirement is present.
617 617 autoextensions = {
618 618 b'largefiles': [b'largefiles'],
619 619 b'lfs': [b'lfs'],
620 620 }
621 621
622 622 for requirement, names in sorted(autoextensions.items()):
623 623 if requirement not in requirements:
624 624 continue
625 625
626 626 for name in names:
627 627 if not ui.hasconfig(b'extensions', name):
628 628 ui.setconfig(b'extensions', name, b'', source='autoload')
629 629
630 630 def gathersupportedrequirements(ui):
631 631 """Determine the complete set of recognized requirements."""
632 632 # Start with all requirements supported by this file.
633 633 supported = set(localrepository._basesupported)
634 634
635 635 # Execute ``featuresetupfuncs`` entries if they belong to an extension
636 636 # relevant to this ui instance.
637 637 modules = {m.__name__ for n, m in extensions.extensions(ui)}
638 638
639 639 for fn in featuresetupfuncs:
640 640 if fn.__module__ in modules:
641 641 fn(ui, supported)
642 642
643 643 # Add derived requirements from registered compression engines.
644 644 for name in util.compengines:
645 645 engine = util.compengines[name]
646 646 if engine.revlogheader():
647 647 supported.add(b'exp-compression-%s' % name)
648 648
649 649 return supported
650 650
651 651 def ensurerequirementsrecognized(requirements, supported):
652 652 """Validate that a set of local requirements is recognized.
653 653
654 654 Receives a set of requirements. Raises an ``error.RepoError`` if there
655 655 exists any requirement in that set that currently loaded code doesn't
656 656 recognize.
657 657
658 658 Returns a set of supported requirements.
659 659 """
660 660 missing = set()
661 661
662 662 for requirement in requirements:
663 663 if requirement in supported:
664 664 continue
665 665
666 666 if not requirement or not requirement[0:1].isalnum():
667 667 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
668 668
669 669 missing.add(requirement)
670 670
671 671 if missing:
672 672 raise error.RequirementError(
673 673 _(b'repository requires features unknown to this Mercurial: %s') %
674 674 b' '.join(sorted(missing)),
675 675 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
676 676 b'for more information'))
677 677
678 678 def ensurerequirementscompatible(ui, requirements):
679 679 """Validates that a set of recognized requirements is mutually compatible.
680 680
681 681 Some requirements may not be compatible with others or require
682 682 config options that aren't enabled. This function is called during
683 683 repository opening to ensure that the set of requirements needed
684 684 to open a repository is sane and compatible with config options.
685 685
686 686 Extensions can monkeypatch this function to perform additional
687 687 checking.
688 688
689 689 ``error.RepoError`` should be raised on failure.
690 690 """
691 691 if b'exp-sparse' in requirements and not sparse.enabled:
692 692 raise error.RepoError(_(b'repository is using sparse feature but '
693 693 b'sparse is not enabled; enable the '
694 694 b'"sparse" extensions to access'))
695 695
696 696 def makestore(requirements, path, vfstype):
697 697 """Construct a storage object for a repository."""
698 698 if b'store' in requirements:
699 699 if b'fncache' in requirements:
700 700 return storemod.fncachestore(path, vfstype,
701 701 b'dotencode' in requirements)
702 702
703 703 return storemod.encodedstore(path, vfstype)
704 704
705 705 return storemod.basicstore(path, vfstype)
706 706
707 707 def resolvestorevfsoptions(ui, requirements, features):
708 708 """Resolve the options to pass to the store vfs opener.
709 709
710 710 The returned dict is used to influence behavior of the storage layer.
711 711 """
712 712 options = {}
713 713
714 714 if b'treemanifest' in requirements:
715 715 options[b'treemanifest'] = True
716 716
717 717 # experimental config: format.manifestcachesize
718 718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
719 719 if manifestcachesize is not None:
720 720 options[b'manifestcachesize'] = manifestcachesize
721 721
722 722 # In the absence of another requirement superseding a revlog-related
723 723 # requirement, we have to assume the repo is using revlog version 0.
724 724 # This revlog format is super old and we don't bother trying to parse
725 725 # opener options for it because those options wouldn't do anything
726 726 # meaningful on such old repos.
727 727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
728 728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
729 729
730 730 return options
731 731
732 732 def resolverevlogstorevfsoptions(ui, requirements, features):
733 733 """Resolve opener options specific to revlogs."""
734 734
735 735 options = {}
736 736 options[b'flagprocessors'] = {}
737 737
738 738 if b'revlogv1' in requirements:
739 739 options[b'revlogv1'] = True
740 740 if REVLOGV2_REQUIREMENT in requirements:
741 741 options[b'revlogv2'] = True
742 742
743 743 if b'generaldelta' in requirements:
744 744 options[b'generaldelta'] = True
745 745
746 746 # experimental config: format.chunkcachesize
747 747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
748 748 if chunkcachesize is not None:
749 749 options[b'chunkcachesize'] = chunkcachesize
750 750
751 751 deltabothparents = ui.configbool(b'storage',
752 752 b'revlog.optimize-delta-parent-choice')
753 753 options[b'deltabothparents'] = deltabothparents
754 754
755 755 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
756 756
757 757 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
758 758 if 0 <= chainspan:
759 759 options[b'maxdeltachainspan'] = chainspan
760 760
761 761 mmapindexthreshold = ui.configbytes(b'storage', b'mmap-threshold')
762 762 if mmapindexthreshold is not None:
763 763 options[b'mmapindexthreshold'] = mmapindexthreshold
764 764
765 765 withsparseread = ui.configbool(b'experimental', b'sparse-read')
766 766 srdensitythres = float(ui.config(b'experimental',
767 767 b'sparse-read.density-threshold'))
768 768 srmingapsize = ui.configbytes(b'experimental',
769 769 b'sparse-read.min-gap-size')
770 770 options[b'with-sparse-read'] = withsparseread
771 771 options[b'sparse-read-density-threshold'] = srdensitythres
772 772 options[b'sparse-read-min-gap-size'] = srmingapsize
773 773
774 774 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
775 775 options[b'sparse-revlog'] = sparserevlog
776 776 if sparserevlog:
777 777 options[b'generaldelta'] = True
778 778
779 779 maxchainlen = None
780 780 if sparserevlog:
781 781 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
782 782 # experimental config: format.maxchainlen
783 783 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
784 784 if maxchainlen is not None:
785 785 options[b'maxchainlen'] = maxchainlen
786 786
787 787 for r in requirements:
788 788 if r.startswith(b'exp-compression-'):
789 789 options[b'compengine'] = r[len(b'exp-compression-'):]
790 790
791 791 if repository.NARROW_REQUIREMENT in requirements:
792 792 options[b'enableellipsis'] = True
793 793
794 794 return options
795 795
796 796 def makemain(**kwargs):
797 797 """Produce a type conforming to ``ilocalrepositorymain``."""
798 798 return localrepository
799 799
800 800 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
801 801 class revlogfilestorage(object):
802 802 """File storage when using revlogs."""
803 803
804 804 def file(self, path):
805 805 if path[0] == b'/':
806 806 path = path[1:]
807 807
808 808 return filelog.filelog(self.svfs, path)
809 809
810 810 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
811 811 class revlognarrowfilestorage(object):
812 812 """File storage when using revlogs and narrow files."""
813 813
814 814 def file(self, path):
815 815 if path[0] == b'/':
816 816 path = path[1:]
817 817
818 818 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
819 819
820 820 def makefilestorage(requirements, features, **kwargs):
821 821 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
822 822 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
823 823 features.add(repository.REPO_FEATURE_STREAM_CLONE)
824 824
825 825 if repository.NARROW_REQUIREMENT in requirements:
826 826 return revlognarrowfilestorage
827 827 else:
828 828 return revlogfilestorage
829 829
830 830 # List of repository interfaces and factory functions for them. Each
831 831 # will be called in order during ``makelocalrepository()`` to iteratively
832 832 # derive the final type for a local repository instance. We capture the
833 833 # function as a lambda so we don't hold a reference and the module-level
834 834 # functions can be wrapped.
835 835 REPO_INTERFACES = [
836 836 (repository.ilocalrepositorymain, lambda: makemain),
837 837 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
838 838 ]
839 839
840 840 @interfaceutil.implementer(repository.ilocalrepositorymain)
841 841 class localrepository(object):
842 842 """Main class for representing local repositories.
843 843
844 844 All local repositories are instances of this class.
845 845
846 846 Constructed on its own, instances of this class are not usable as
847 847 repository objects. To obtain a usable repository object, call
848 848 ``hg.repository()``, ``localrepo.instance()``, or
849 849 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
850 850 ``instance()`` adds support for creating new repositories.
851 851 ``hg.repository()`` adds more extension integration, including calling
852 852 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
853 853 used.
854 854 """
855 855
856 856 # obsolete experimental requirements:
857 857 # - manifestv2: An experimental new manifest format that allowed
858 858 # for stem compression of long paths. Experiment ended up not
859 859 # being successful (repository sizes went up due to worse delta
860 860 # chains), and the code was deleted in 4.6.
861 861 supportedformats = {
862 862 'revlogv1',
863 863 'generaldelta',
864 864 'treemanifest',
865 865 REVLOGV2_REQUIREMENT,
866 866 SPARSEREVLOG_REQUIREMENT,
867 867 }
868 868 _basesupported = supportedformats | {
869 869 'store',
870 870 'fncache',
871 871 'shared',
872 872 'relshared',
873 873 'dotencode',
874 874 'exp-sparse',
875 875 'internal-phase'
876 876 }
877 877
878 878 # list of prefixes for files which can be written without 'wlock'
879 879 # Extensions should extend this list when needed
880 880 _wlockfreeprefix = {
881 881 # We might consider requiring 'wlock' for the next
882 882 # two, but pretty much all the existing code assumes
883 883 # wlock is not needed so we keep them excluded for
884 884 # now.
885 885 'hgrc',
886 886 'requires',
887 887 # XXX cache is a complicated business; someone
888 888 # should investigate this in depth at some point
889 889 'cache/',
890 890 # XXX shouldn't dirstate be covered by the wlock?
891 891 'dirstate',
892 892 # XXX bisect was still a bit too messy at the time
893 893 # this changeset was introduced. Someone should fix
894 894 # the remaining bit and drop this line
895 895 'bisect.state',
896 896 }
897 897
898 898 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
899 899 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
900 900 features, intents=None):
901 901 """Create a new local repository instance.
902 902
903 903 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
904 904 or ``localrepo.makelocalrepository()`` for obtaining a new repository
905 905 object.
906 906
907 907 Arguments:
908 908
909 909 baseui
910 910 ``ui.ui`` instance that ``ui`` argument was based off of.
911 911
912 912 ui
913 913 ``ui.ui`` instance for use by the repository.
914 914
915 915 origroot
916 916 ``bytes`` path to working directory root of this repository.
917 917
918 918 wdirvfs
919 919 ``vfs.vfs`` rooted at the working directory.
920 920
921 921 hgvfs
922 922 ``vfs.vfs`` rooted at .hg/
923 923
924 924 requirements
925 925 ``set`` of bytestrings representing repository opening requirements.
926 926
927 927 supportedrequirements
928 928 ``set`` of bytestrings representing repository requirements that we
929 929 know how to open. May be a superset of ``requirements``.
930 930
931 931 sharedpath
932 932 ``bytes`` Defining path to storage base directory. Points to a
933 933 ``.hg/`` directory somewhere.
934 934
935 935 store
936 936 ``store.basicstore`` (or derived) instance providing access to
937 937 versioned storage.
938 938
939 939 cachevfs
940 940 ``vfs.vfs`` used for cache files.
941 941
942 942 wcachevfs
943 943 ``vfs.vfs`` used for cache files related to the working copy.
944 944
945 945 features
946 946 ``set`` of bytestrings defining features/capabilities of this
947 947 instance.
948 948
949 949 intents
950 950 ``set`` of system strings indicating what this repo will be used
951 951 for.
952 952 """
953 953 self.baseui = baseui
954 954 self.ui = ui
955 955 self.origroot = origroot
956 956 # vfs rooted at working directory.
957 957 self.wvfs = wdirvfs
958 958 self.root = wdirvfs.base
959 959 # vfs rooted at .hg/. Used to access most non-store paths.
960 960 self.vfs = hgvfs
961 961 self.path = hgvfs.base
962 962 self.requirements = requirements
963 963 self.supported = supportedrequirements
964 964 self.sharedpath = sharedpath
965 965 self.store = store
966 966 self.cachevfs = cachevfs
967 967 self.wcachevfs = wcachevfs
968 968 self.features = features
969 969
970 970 self.filtername = None
971 971
972 972 if (self.ui.configbool('devel', 'all-warnings') or
973 973 self.ui.configbool('devel', 'check-locks')):
974 974 self.vfs.audit = self._getvfsward(self.vfs.audit)
975 975 # A list of callbacks to shape the phase if no data were found.
976 976 # Callbacks are in the form: func(repo, roots) --> processed root.
977 977 # This list is to be filled by extensions during repo setup
978 978 self._phasedefaults = []
979 979
980 980 color.setup(self.ui)
981 981
982 982 self.spath = self.store.path
983 983 self.svfs = self.store.vfs
984 984 self.sjoin = self.store.join
985 985 if (self.ui.configbool('devel', 'all-warnings') or
986 986 self.ui.configbool('devel', 'check-locks')):
987 987 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
988 988 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
989 989 else: # standard vfs
990 990 self.svfs.audit = self._getsvfsward(self.svfs.audit)
991 991
992 992 self._dirstatevalidatewarned = False
993 993
994 994 self._branchcaches = {}
995 995 self._revbranchcache = None
996 996 self._filterpats = {}
997 997 self._datafilters = {}
998 998 self._transref = self._lockref = self._wlockref = None
999 999
1000 1000 # A cache for various files under .hg/ that tracks file changes,
1001 1001 # (used by the filecache decorator)
1002 1002 #
1003 1003 # Maps a property name to its util.filecacheentry
1004 1004 self._filecache = {}
1005 1005
1006 1006 # hold sets of revisions to be filtered
1007 1007 # should be cleared when something might have changed the filter value:
1008 1008 # - new changesets,
1009 1009 # - phase change,
1010 1010 # - new obsolescence marker,
1011 1011 # - working directory parent change,
1012 1012 # - bookmark changes
1013 1013 self.filteredrevcache = {}
1014 1014
1015 1015 # post-dirstate-status hooks
1016 1016 self._postdsstatus = []
1017 1017
1018 1018 # generic mapping between names and nodes
1019 1019 self.names = namespaces.namespaces()
1020 1020
1021 1021 # Key to signature value.
1022 1022 self._sparsesignaturecache = {}
1023 1023 # Signature to cached matcher instance.
1024 1024 self._sparsematchercache = {}
1025 1025
1026 1026 def _getvfsward(self, origfunc):
1027 1027 """build a ward for self.vfs"""
1028 1028 rref = weakref.ref(self)
1029 1029 def checkvfs(path, mode=None):
1030 1030 ret = origfunc(path, mode=mode)
1031 1031 repo = rref()
1032 1032 if (repo is None
1033 1033 or not util.safehasattr(repo, '_wlockref')
1034 1034 or not util.safehasattr(repo, '_lockref')):
1035 1035 return
1036 1036 if mode in (None, 'r', 'rb'):
1037 1037 return
1038 1038 if path.startswith(repo.path):
1039 1039 # truncate name relative to the repository (.hg)
1040 1040 path = path[len(repo.path) + 1:]
1041 1041 if path.startswith('cache/'):
1042 1042 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1043 1043 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1044 1044 if path.startswith('journal.') or path.startswith('undo.'):
1045 1045 # journal is covered by 'lock'
1046 1046 if repo._currentlock(repo._lockref) is None:
1047 1047 repo.ui.develwarn('write with no lock: "%s"' % path,
1048 1048 stacklevel=3, config='check-locks')
1049 1049 elif repo._currentlock(repo._wlockref) is None:
1050 1050 # rest of vfs files are covered by 'wlock'
1051 1051 #
1052 1052 # exclude special files
1053 1053 for prefix in self._wlockfreeprefix:
1054 1054 if path.startswith(prefix):
1055 1055 return
1056 1056 repo.ui.develwarn('write with no wlock: "%s"' % path,
1057 1057 stacklevel=3, config='check-locks')
1058 1058 return ret
1059 1059 return checkvfs
1060 1060
1061 1061 def _getsvfsward(self, origfunc):
1062 1062 """build a ward for self.svfs"""
1063 1063 rref = weakref.ref(self)
1064 1064 def checksvfs(path, mode=None):
1065 1065 ret = origfunc(path, mode=mode)
1066 1066 repo = rref()
1067 1067 if repo is None or not util.safehasattr(repo, '_lockref'):
1068 1068 return
1069 1069 if mode in (None, 'r', 'rb'):
1070 1070 return
1071 1071 if path.startswith(repo.sharedpath):
1072 1072 # truncate name relative to the repository (.hg)
1073 1073 path = path[len(repo.sharedpath) + 1:]
1074 1074 if repo._currentlock(repo._lockref) is None:
1075 1075 repo.ui.develwarn('write with no lock: "%s"' % path,
1076 1076 stacklevel=4)
1077 1077 return ret
1078 1078 return checksvfs
1079 1079
1080 1080 def close(self):
1081 1081 self._writecaches()
1082 1082
1083 1083 def _writecaches(self):
1084 1084 if self._revbranchcache:
1085 1085 self._revbranchcache.write()
1086 1086
1087 1087 def _restrictcapabilities(self, caps):
1088 1088 if self.ui.configbool('experimental', 'bundle2-advertise'):
1089 1089 caps = set(caps)
1090 1090 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1091 1091 role='client'))
1092 1092 caps.add('bundle2=' + urlreq.quote(capsblob))
1093 1093 return caps
1094 1094
1095 1095 def _writerequirements(self):
1096 1096 scmutil.writerequires(self.vfs, self.requirements)
1097 1097
1098 1098 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1099 1099 # self -> auditor -> self._checknested -> self
1100 1100
1101 1101 @property
1102 1102 def auditor(self):
1103 1103 # This is only used by context.workingctx.match in order to
1104 1104 # detect files in subrepos.
1105 1105 return pathutil.pathauditor(self.root, callback=self._checknested)
1106 1106
1107 1107 @property
1108 1108 def nofsauditor(self):
1109 1109 # This is only used by context.basectx.match in order to detect
1110 1110 # files in subrepos.
1111 1111 return pathutil.pathauditor(self.root, callback=self._checknested,
1112 1112 realfs=False, cached=True)
1113 1113
1114 1114 def _checknested(self, path):
1115 1115 """Determine if path is a legal nested repository."""
1116 1116 if not path.startswith(self.root):
1117 1117 return False
1118 1118 subpath = path[len(self.root) + 1:]
1119 1119 normsubpath = util.pconvert(subpath)
1120 1120
1121 1121 # XXX: Checking against the current working copy is wrong in
1122 1122 # the sense that it can reject things like
1123 1123 #
1124 1124 # $ hg cat -r 10 sub/x.txt
1125 1125 #
1126 1126 # if sub/ is no longer a subrepository in the working copy
1127 1127 # parent revision.
1128 1128 #
1129 1129 # However, it can of course also allow things that would have
1130 1130 # been rejected before, such as the above cat command if sub/
1131 1131 # is a subrepository now, but was a normal directory before.
1132 1132 # The old path auditor would have rejected by mistake since it
1133 1133 # panics when it sees sub/.hg/.
1134 1134 #
1135 1135 # All in all, checking against the working copy seems sensible
1136 1136 # since we want to prevent access to nested repositories on
1137 1137 # the filesystem *now*.
1138 1138 ctx = self[None]
1139 1139 parts = util.splitpath(subpath)
1140 1140 while parts:
1141 1141 prefix = '/'.join(parts)
1142 1142 if prefix in ctx.substate:
1143 1143 if prefix == normsubpath:
1144 1144 return True
1145 1145 else:
1146 1146 sub = ctx.sub(prefix)
1147 1147 return sub.checknested(subpath[len(prefix) + 1:])
1148 1148 else:
1149 1149 parts.pop()
1150 1150 return False
1151 1151
1152 1152 def peer(self):
1153 1153 return localpeer(self) # not cached to avoid reference cycle
1154 1154
1155 1155 def unfiltered(self):
1156 1156 """Return unfiltered version of the repository
1157 1157
1158 1158 Intended to be overwritten by filtered repo."""
1159 1159 return self
1160 1160
1161 1161 def filtered(self, name, visibilityexceptions=None):
1162 1162 """Return a filtered version of a repository"""
1163 1163 cls = repoview.newtype(self.unfiltered().__class__)
1164 1164 return cls(self, name, visibilityexceptions)
1165 1165
1166 1166 @repofilecache('bookmarks', 'bookmarks.current')
1167 1167 def _bookmarks(self):
1168 1168 return bookmarks.bmstore(self)
1169 1169
1170 1170 @property
1171 1171 def _activebookmark(self):
1172 1172 return self._bookmarks.active
1173 1173
1174 1174 # _phasesets depend on changelog. what we need is to call
1175 1175 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1176 1176 # can't be easily expressed in filecache mechanism.
1177 1177 @storecache('phaseroots', '00changelog.i')
1178 1178 def _phasecache(self):
1179 1179 return phases.phasecache(self, self._phasedefaults)
1180 1180
1181 1181 @storecache('obsstore')
1182 1182 def obsstore(self):
1183 1183 return obsolete.makestore(self.ui, self)
1184 1184
1185 1185 @storecache('00changelog.i')
1186 1186 def changelog(self):
1187 1187 return changelog.changelog(self.svfs,
1188 1188 trypending=txnutil.mayhavepending(self.root))
1189 1189
1190 1190 @storecache('00manifest.i')
1191 1191 def manifestlog(self):
1192 1192 rootstore = manifest.manifestrevlog(self.svfs)
1193 1193 return manifest.manifestlog(self.svfs, self, rootstore,
1194 1194 self.narrowmatch())
1195 1195
1196 1196 @repofilecache('dirstate')
1197 1197 def dirstate(self):
1198 1198 return self._makedirstate()
1199 1199
1200 1200 def _makedirstate(self):
1201 1201 """Extension point for wrapping the dirstate per-repo."""
1202 1202 sparsematchfn = lambda: sparse.matcher(self)
1203 1203
1204 1204 return dirstate.dirstate(self.vfs, self.ui, self.root,
1205 1205 self._dirstatevalidate, sparsematchfn)
1206 1206
1207 1207 def _dirstatevalidate(self, node):
1208 1208 try:
1209 1209 self.changelog.rev(node)
1210 1210 return node
1211 1211 except error.LookupError:
1212 1212 if not self._dirstatevalidatewarned:
1213 1213 self._dirstatevalidatewarned = True
1214 1214 self.ui.warn(_("warning: ignoring unknown"
1215 1215 " working parent %s!\n") % short(node))
1216 1216 return nullid
1217 1217
1218 1218 @storecache(narrowspec.FILENAME)
1219 1219 def narrowpats(self):
1220 1220 """matcher patterns for this repository's narrowspec
1221 1221
1222 1222 A tuple of (includes, excludes).
1223 1223 """
1224 1224 return narrowspec.load(self)
1225 1225
1226 1226 @storecache(narrowspec.FILENAME)
1227 1227 def _narrowmatch(self):
1228 1228 if repository.NARROW_REQUIREMENT not in self.requirements:
1229 1229 return matchmod.always(self.root, '')
1230 1230 narrowspec.checkworkingcopynarrowspec(self)
1231 1231 include, exclude = self.narrowpats
1232 1232 return narrowspec.match(self.root, include=include, exclude=exclude)
1233 1233
1234 1234 def narrowmatch(self, match=None, includeexact=False):
1235 1235 """matcher corresponding the the repo's narrowspec
1236 1236
1237 1237 If `match` is given, then that will be intersected with the narrow
1238 1238 matcher.
1239 1239
1240 1240 If `includeexact` is True, then any exact matches from `match` will
1241 1241 be included even if they're outside the narrowspec.
1242 1242 """
1243 1243 if match:
1244 1244 if includeexact and not self._narrowmatch.always():
1245 1245 # do not exclude explicitly-specified paths so that they can
1246 1246 # be warned later on
1247 1247 em = matchmod.exact(match._root, match._cwd, match.files())
1248 1248 nm = matchmod.unionmatcher([self._narrowmatch, em])
1249 1249 return matchmod.intersectmatchers(match, nm)
1250 1250 return matchmod.intersectmatchers(match, self._narrowmatch)
1251 1251 return self._narrowmatch
1252 1252
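An illustrative helper showing narrowmatch()'s intersection semantics
(a sketch, not part of the class; paths outside the narrow clone drop
out of the result):

    def narrowedpaths(repo, paths):
        # Build an exact matcher for the requested paths and intersect
        # it with the repo's narrow matcher.
        em = matchmod.exact(repo.root, repo.root, paths)
        nm = repo.narrowmatch(em)
        return [p for p in paths if nm(p)]
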
1253 1253 def setnarrowpats(self, newincludes, newexcludes):
1254 1254 narrowspec.save(self, newincludes, newexcludes)
1255 1255 narrowspec.copytoworkingcopy(self, self.currenttransaction())
1256 1256 self.invalidate(clearfilecache=True)
1257 1257 # So the next access won't be considered a conflict
1258 1258 # TODO: It seems like there should be a way of doing this that
1259 1259 # doesn't involve replacing these attributes.
1260 1260 self.narrowpats = newincludes, newexcludes
1261 1261 self._narrowmatch = narrowspec.match(self.root, include=newincludes,
1262 1262 exclude=newexcludes)
1263 1263
1264 1264 def __getitem__(self, changeid):
1265 1265 if changeid is None:
1266 1266 return context.workingctx(self)
1267 1267 if isinstance(changeid, context.basectx):
1268 1268 return changeid
1269 1269 if isinstance(changeid, slice):
1270 1270 # wdirrev isn't contiguous so the slice shouldn't include it
1271 1271 return [self[i]
1272 1272 for i in pycompat.xrange(*changeid.indices(len(self)))
1273 1273 if i not in self.changelog.filteredrevs]
1274 1274 try:
1275 1275 if isinstance(changeid, int):
1276 1276 node = self.changelog.node(changeid)
1277 1277 rev = changeid
1278 1278 elif changeid == 'null':
1279 1279 node = nullid
1280 1280 rev = nullrev
1281 1281 elif changeid == 'tip':
1282 1282 node = self.changelog.tip()
1283 1283 rev = self.changelog.rev(node)
1284 1284 elif changeid == '.':
1285 1285 # this is a hack to delay/avoid loading obsmarkers
1286 1286 # when we know that '.' won't be hidden
1287 1287 node = self.dirstate.p1()
1288 1288 rev = self.unfiltered().changelog.rev(node)
1289 1289 elif len(changeid) == 20:
1290 1290 try:
1291 1291 node = changeid
1292 1292 rev = self.changelog.rev(changeid)
1293 1293 except error.FilteredLookupError:
1294 1294 changeid = hex(changeid) # for the error message
1295 1295 raise
1296 1296 except LookupError:
1297 1297 # check if it might have come from damaged dirstate
1298 1298 #
1299 1299 # XXX we could avoid the unfiltered if we had a recognizable
1300 1300 # exception for filtered changeset access
1301 1301 if (self.local()
1302 1302 and changeid in self.unfiltered().dirstate.parents()):
1303 1303 msg = _("working directory has unknown parent '%s'!")
1304 1304 raise error.Abort(msg % short(changeid))
1305 1305 changeid = hex(changeid) # for the error message
1306 1306 raise
1307 1307
1308 1308 elif len(changeid) == 40:
1309 1309 node = bin(changeid)
1310 1310 rev = self.changelog.rev(node)
1311 1311 else:
1312 1312 raise error.ProgrammingError(
1313 1313 "unsupported changeid '%s' of type %s" %
1314 1314 (changeid, type(changeid)))
1315 1315
1316 1316 return context.changectx(self, rev, node)
1317 1317
1318 1318 except (error.FilteredIndexError, error.FilteredLookupError):
1319 1319 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1320 1320 % pycompat.bytestr(changeid))
1321 1321 except (IndexError, LookupError):
1322 1322 raise error.RepoLookupError(
1323 1323 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1324 1324 except error.WdirUnsupported:
1325 1325 return context.workingctx(self)
1326 1326
1327 1327 def __contains__(self, changeid):
1328 1328 """True if the given changeid exists
1329 1329
1330 1330 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1331 1331 is specified.
1332 1332 """
1333 1333 try:
1334 1334 self[changeid]
1335 1335 return True
1336 1336 except error.RepoLookupError:
1337 1337 return False
1338 1338
1339 1339 def __nonzero__(self):
1340 1340 return True
1341 1341
1342 1342 __bool__ = __nonzero__
1343 1343
1344 1344 def __len__(self):
1345 1345 # no need to pay the cost of repoview.changelog
1346 1346 unfi = self.unfiltered()
1347 1347 return len(unfi.changelog)
1348 1348
1349 1349 def __iter__(self):
1350 1350 return iter(self.changelog)
1351 1351
1352 1352 def revs(self, expr, *args):
1353 1353 '''Find revisions matching a revset.
1354 1354
1355 1355 The revset is specified as a string ``expr`` that may contain
1356 1356 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1357 1357
1358 1358 Revset aliases from the configuration are not expanded. To expand
1359 1359 user aliases, consider calling ``scmutil.revrange()`` or
1360 1360 ``repo.anyrevs([expr], user=True)``.
1361 1361
1362 1362 Returns a revset.abstractsmartset, which is a list-like interface
1363 1363 that contains integer revisions.
1364 1364 '''
1365 1365 tree = revsetlang.spectree(expr, *args)
1366 1366 return revset.makematcher(tree)(self)
1367 1367
1368 1368 def set(self, expr, *args):
1369 1369 '''Find revisions matching a revset and emit changectx instances.
1370 1370
1371 1371 This is a convenience wrapper around ``revs()`` that iterates the
1372 1372 result and is a generator of changectx instances.
1373 1373
1374 1374 Revset aliases from the configuration are not expanded. To expand
1375 1375 user aliases, consider calling ``scmutil.revrange()``.
1376 1376 '''
1377 1377 for r in self.revs(expr, *args):
1378 1378 yield self[r]
1379 1379
1380 1380 def anyrevs(self, specs, user=False, localalias=None):
1381 1381 '''Find revisions matching one of the given revsets.
1382 1382
1383 1383 Revset aliases from the configuration are not expanded by default. To
1384 1384 expand user aliases, specify ``user=True``. To provide some local
1385 1385 definitions overriding user aliases, set ``localalias`` to
1386 1386 ``{name: definitionstring}``.
1387 1387 '''
1388 1388 if user:
1389 1389 m = revset.matchany(self.ui, specs,
1390 1390 lookup=revset.lookupfn(self),
1391 1391 localalias=localalias)
1392 1392 else:
1393 1393 m = revset.matchany(None, specs, localalias=localalias)
1394 1394 return m(self)
1395 1395
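A small sketch of the revset helpers in use (illustrative query; revs()
and set() handle the %s escaping via revsetlang.formatspec):

    def draftsforfile(repo, path):
        # set() runs the same query as revs() but yields changectx
        # instances instead of integer revisions.
        for ctx in repo.set(b'draft() and file(%s)', path):
            yield ctx
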
1396 1396 def url(self):
1397 1397 return 'file:' + self.root
1398 1398
1399 1399 def hook(self, name, throw=False, **args):
1400 1400 """Call a hook, passing this repo instance.
1401 1401
1402 1402 This a convenience method to aid invoking hooks. Extensions likely
1403 1403 won't call this unless they have registered a custom hook or are
1404 1404 replacing code that is expected to call a hook.
1405 1405 """
1406 1406 return hook.hook(self.ui, self, name, throw, **args)
1407 1407
1408 1408 @filteredpropertycache
1409 1409 def _tagscache(self):
1410 1410 '''Returns a tagscache object that contains various tag-related
1411 1411 caches.'''
1412 1412
1413 1413 # This simplifies its cache management by having one decorated
1414 1414 # function (this one) and the rest simply fetch things from it.
1415 1415 class tagscache(object):
1416 1416 def __init__(self):
1417 1417 # These two define the set of tags for this repository. tags
1418 1418 # maps tag name to node; tagtypes maps tag name to 'global' or
1419 1419 # 'local'. (Global tags are defined by .hgtags across all
1420 1420 # heads, and local tags are defined in .hg/localtags.)
1421 1421 # They constitute the in-memory cache of tags.
1422 1422 self.tags = self.tagtypes = None
1423 1423
1424 1424 self.nodetagscache = self.tagslist = None
1425 1425
1426 1426 cache = tagscache()
1427 1427 cache.tags, cache.tagtypes = self._findtags()
1428 1428
1429 1429 return cache
1430 1430
1431 1431 def tags(self):
1432 1432 '''return a mapping of tag to node'''
1433 1433 t = {}
1434 1434 if self.changelog.filteredrevs:
1435 1435 tags, tt = self._findtags()
1436 1436 else:
1437 1437 tags = self._tagscache.tags
1438 1438 rev = self.changelog.rev
1439 1439 for k, v in tags.iteritems():
1440 1440 try:
1441 1441 # ignore tags to unknown nodes
1442 1442 rev(v)
1443 1443 t[k] = v
1444 1444 except (error.LookupError, ValueError):
1445 1445 pass
1446 1446 return t
1447 1447
1448 1448 def _findtags(self):
1449 1449 '''Do the hard work of finding tags. Return a pair of dicts
1450 1450 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1451 1451 maps tag name to a string like \'global\' or \'local\'.
1452 1452 Subclasses or extensions are free to add their own tags, but
1453 1453 should be aware that the returned dicts will be retained for the
1454 1454 duration of the localrepo object.'''
1455 1455
1456 1456 # XXX what tagtype should subclasses/extensions use? Currently
1457 1457 # mq and bookmarks add tags, but do not set the tagtype at all.
1458 1458 # Should each extension invent its own tag type? Should there
1459 1459 # be one tagtype for all such "virtual" tags? Or is the status
1460 1460 # quo fine?
1461 1461
1462 1462
1463 1463 # map tag name to (node, hist)
1464 1464 alltags = tagsmod.findglobaltags(self.ui, self)
1465 1465 # map tag name to tag type
1466 1466 tagtypes = dict((tag, 'global') for tag in alltags)
1467 1467
1468 1468 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1469 1469
1470 1470 # Build the return dicts. Have to re-encode tag names because
1471 1471 # the tags module always uses UTF-8 (in order not to lose info
1472 1472 # writing to the cache), but the rest of Mercurial wants them in
1473 1473 # local encoding.
1474 1474 tags = {}
1475 1475 for (name, (node, hist)) in alltags.iteritems():
1476 1476 if node != nullid:
1477 1477 tags[encoding.tolocal(name)] = node
1478 1478 tags['tip'] = self.changelog.tip()
1479 1479 tagtypes = dict([(encoding.tolocal(name), value)
1480 1480 for (name, value) in tagtypes.iteritems()])
1481 1481 return (tags, tagtypes)
1482 1482
1483 1483 def tagtype(self, tagname):
1484 1484 '''
1485 1485 return the type of the given tag. result can be:
1486 1486
1487 1487 'local' : a local tag
1488 1488 'global' : a global tag
1489 1489 None : tag does not exist
1490 1490 '''
1491 1491
1492 1492 return self._tagscache.tagtypes.get(tagname)
1493 1493
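# Illustrative sketch (not part of this module): tags() and tagtype()
# together expose the whole tag table. This assumes 'repo' is a local
# repository instance and that hex comes from mercurial.node:
#
#   for name, node in sorted(repo.tags().iteritems()):
#       kind = repo.tagtype(name) or 'unknown'
#       repo.ui.write("%s %s %s\n" % (name, hex(node), kind))
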
1494 1494 def tagslist(self):
1495 1495 '''return a list of tags ordered by revision'''
1496 1496 if not self._tagscache.tagslist:
1497 1497 l = []
1498 1498 for t, n in self.tags().iteritems():
1499 1499 l.append((self.changelog.rev(n), t, n))
1500 1500 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1501 1501
1502 1502 return self._tagscache.tagslist
1503 1503
1504 1504 def nodetags(self, node):
1505 1505 '''return the tags associated with a node'''
1506 1506 if not self._tagscache.nodetagscache:
1507 1507 nodetagscache = {}
1508 1508 for t, n in self._tagscache.tags.iteritems():
1509 1509 nodetagscache.setdefault(n, []).append(t)
1510 1510 for tags in nodetagscache.itervalues():
1511 1511 tags.sort()
1512 1512 self._tagscache.nodetagscache = nodetagscache
1513 1513 return self._tagscache.nodetagscache.get(node, [])
1514 1514
1515 1515 def nodebookmarks(self, node):
1516 1516 """return the list of bookmarks pointing to the specified node"""
1517 1517 return self._bookmarks.names(node)
1518 1518
1519 1519 def branchmap(self):
1520 1520 '''returns a dictionary {branch: [branchheads]} with branchheads
1521 1521 ordered by increasing revision number'''
1522 1522 branchmap.updatecache(self)
1523 1523 return self._branchcaches[self.filtername]
1524 1524
1525 1525 @unfilteredmethod
1526 1526 def revbranchcache(self):
1527 1527 if not self._revbranchcache:
1528 1528 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1529 1529 return self._revbranchcache
1530 1530
1531 1531 def branchtip(self, branch, ignoremissing=False):
1532 1532 '''return the tip node for a given branch
1533 1533
1534 1534 If ignoremissing is True, then this method will not raise an error.
1535 1535 This is helpful for callers that only expect None for a missing branch
1536 1536 (e.g. namespace).
1537 1537
1538 1538 '''
1539 1539 try:
1540 1540 return self.branchmap().branchtip(branch)
1541 1541 except KeyError:
1542 1542 if not ignoremissing:
1543 1543 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1544 1544 else:
1545 1545 pass
1546 1546
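# Illustrative sketch (not part of this module): branchmap() and
# branchtip() give per-branch head information. Assuming 'repo' is a
# local repository instance and hex comes from mercurial.node:
#
#   for branch in repo.branchmap():
#       tip = repo.branchtip(branch, ignoremissing=True)
#       if tip is not None:
#           repo.ui.write("%s %s\n" % (branch, hex(tip)))
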
1547 1547 def lookup(self, key):
1548 1548 return scmutil.revsymbol(self, key).node()
1549 1549
1550 1550 def lookupbranch(self, key):
1551 1551 if key in self.branchmap():
1552 1552 return key
1553 1553
1554 1554 return scmutil.revsymbol(self, key).branch()
1555 1555
1556 1556 def known(self, nodes):
1557 1557 cl = self.changelog
1558 1558 nm = cl.nodemap
1559 1559 filtered = cl.filteredrevs
1560 1560 result = []
1561 1561 for n in nodes:
1562 1562 r = nm.get(n)
1563 1563 resp = not (r is None or r in filtered)
1564 1564 result.append(resp)
1565 1565 return result
1566 1566
1567 1567 def local(self):
1568 1568 return self
1569 1569
1570 1570 def publishing(self):
1571 1571 # it's safe (and desirable) to trust the publish flag unconditionally
1572 1572 # so that we don't finalize changes shared between users via ssh or nfs
1573 1573 return self.ui.configbool('phases', 'publish', untrusted=True)
1574 1574
1575 1575 def cancopy(self):
1576 1576 # so statichttprepo's override of local() works
1577 1577 if not self.local():
1578 1578 return False
1579 1579 if not self.publishing():
1580 1580 return True
1581 1581 # if publishing we can't copy if there is filtered content
1582 1582 return not self.filtered('visible').changelog.filteredrevs
1583 1583
1584 1584 def shared(self):
1585 1585 '''the type of shared repository (None if not shared)'''
1586 1586 if self.sharedpath != self.path:
1587 1587 return 'store'
1588 1588 return None
1589 1589
1590 1590 def wjoin(self, f, *insidef):
1591 1591 return self.vfs.reljoin(self.root, f, *insidef)
1592 1592
1593 1593 def setparents(self, p1, p2=nullid):
1594 1594 with self.dirstate.parentchange():
1595 1595 copies = self.dirstate.setparents(p1, p2)
1596 1596 pctx = self[p1]
1597 1597 if copies:
1598 1598 # Adjust copy records; the dirstate cannot do it, as it
1599 1599 # requires access to the parents' manifests. Preserve them
1600 1600 # only for entries added to the first parent.
1601 1601 for f in copies:
1602 1602 if f not in pctx and copies[f] in pctx:
1603 1603 self.dirstate.copy(copies[f], f)
1604 1604 if p2 == nullid:
1605 1605 for f, s in sorted(self.dirstate.copies().items()):
1606 1606 if f not in pctx and s not in pctx:
1607 1607 self.dirstate.copy(None, f)
1608 1608
1609 1609 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1610 1610 """changeid must be a changeset revision, if specified.
1611 1611 fileid can be a file revision or node."""
1612 1612 return context.filectx(self, path, changeid, fileid,
1613 1613 changectx=changectx)
1614 1614
1615 1615 def getcwd(self):
1616 1616 return self.dirstate.getcwd()
1617 1617
1618 1618 def pathto(self, f, cwd=None):
1619 1619 return self.dirstate.pathto(f, cwd)
1620 1620
1621 1621 def _loadfilter(self, filter):
1622 1622 if filter not in self._filterpats:
1623 1623 l = []
1624 1624 for pat, cmd in self.ui.configitems(filter):
1625 1625 if cmd == '!':
1626 1626 continue
1627 1627 mf = matchmod.match(self.root, '', [pat])
1628 1628 fn = None
1629 1629 params = cmd
1630 1630 for name, filterfn in self._datafilters.iteritems():
1631 1631 if cmd.startswith(name):
1632 1632 fn = filterfn
1633 1633 params = cmd[len(name):].lstrip()
1634 1634 break
1635 1635 if not fn:
1636 1636 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1637 1637 # Wrap old filters not supporting keyword arguments
1638 1638 if not pycompat.getargspec(fn)[2]:
1639 1639 oldfn = fn
1640 1640 fn = lambda s, c, **kwargs: oldfn(s, c)
1641 1641 l.append((mf, fn, params))
1642 1642 self._filterpats[filter] = l
1643 1643 return self._filterpats[filter]
1644 1644
1645 1645 def _filter(self, filterpats, filename, data):
1646 1646 for mf, fn, cmd in filterpats:
1647 1647 if mf(filename):
1648 1648 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1649 1649 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1650 1650 break
1651 1651
1652 1652 return data
1653 1653
1654 1654 @unfilteredpropertycache
1655 1655 def _encodefilterpats(self):
1656 1656 return self._loadfilter('encode')
1657 1657
1658 1658 @unfilteredpropertycache
1659 1659 def _decodefilterpats(self):
1660 1660 return self._loadfilter('decode')
1661 1661
1662 1662 def adddatafilter(self, name, filter):
1663 1663 self._datafilters[name] = filter
1664 1664
1665 1665 def wread(self, filename):
1666 1666 if self.wvfs.islink(filename):
1667 1667 data = self.wvfs.readlink(filename)
1668 1668 else:
1669 1669 data = self.wvfs.read(filename)
1670 1670 return self._filter(self._encodefilterpats, filename, data)
1671 1671
1672 1672 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1673 1673 """write ``data`` into ``filename`` in the working directory
1674 1674
1675 1675 This returns the length of the written (possibly decoded) data.
1676 1676 """
1677 1677 data = self._filter(self._decodefilterpats, filename, data)
1678 1678 if 'l' in flags:
1679 1679 self.wvfs.symlink(data, filename)
1680 1680 else:
1681 1681 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1682 1682 **kwargs)
1683 1683 if 'x' in flags:
1684 1684 self.wvfs.setflags(filename, False, True)
1685 1685 else:
1686 1686 self.wvfs.setflags(filename, False, False)
1687 1687 return len(data)
1688 1688
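# Illustrative sketch (not part of this module): 'flags' uses 'l' for
# symlinks and 'x' for executables, so writing an executable script to
# the working directory could look like:
#
#   repo.wwrite('build.sh', '#!/bin/sh\n', 'x')
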
1689 1689 def wwritedata(self, filename, data):
1690 1690 return self._filter(self._decodefilterpats, filename, data)
1691 1691
1692 1692 def currenttransaction(self):
1693 1693 """return the current transaction or None if non exists"""
1694 1694 if self._transref:
1695 1695 tr = self._transref()
1696 1696 else:
1697 1697 tr = None
1698 1698
1699 1699 if tr and tr.running():
1700 1700 return tr
1701 1701 return None
1702 1702
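# Illustrative sketch (not part of this module): callers can probe for a
# running transaction before opening a (possibly nested) one; note that
# transaction() below requires the store lock to be held. The transaction
# name 'my-operation' is hypothetical:
#
#   if repo.currenttransaction() is None:
#       with repo.lock():
#           with repo.transaction('my-operation'):
#               ...
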
1703 1703 def transaction(self, desc, report=None):
1704 1704 if (self.ui.configbool('devel', 'all-warnings')
1705 1705 or self.ui.configbool('devel', 'check-locks')):
1706 1706 if self._currentlock(self._lockref) is None:
1707 1707 raise error.ProgrammingError('transaction requires locking')
1708 1708 tr = self.currenttransaction()
1709 1709 if tr is not None:
1710 1710 return tr.nest(name=desc)
1711 1711
1712 1712 # abort here if the journal already exists
1713 1713 if self.svfs.exists("journal"):
1714 1714 raise error.RepoError(
1715 1715 _("abandoned transaction found"),
1716 1716 hint=_("run 'hg recover' to clean up transaction"))
1717 1717
1718 1718 idbase = "%.40f#%f" % (random.random(), time.time())
1719 1719 ha = hex(hashlib.sha1(idbase).digest())
1720 1720 txnid = 'TXN:' + ha
1721 1721 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1722 1722
1723 1723 self._writejournal(desc)
1724 1724 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1725 1725 if report:
1726 1726 rp = report
1727 1727 else:
1728 1728 rp = self.ui.warn
1729 1729 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1730 1730 # we must avoid cyclic reference between repo and transaction.
1731 1731 reporef = weakref.ref(self)
1732 1732 # Code to track tag movement
1733 1733 #
1734 1734 # Since tags are all handled as file content, it is actually quite hard
1735 1735 # to track these movements from a code perspective. So we fall back to
1736 1736 # tracking at the repository level. One could envision tracking changes
1737 1737 # to the '.hgtags' file through changegroup apply, but that fails to
1738 1738 # cope with cases where a transaction exposes new heads without a
1739 1739 # changegroup being involved (e.g. phase movement).
1740 1740 #
1741 1741 # For now, we gate the feature behind a flag since it likely comes
1742 1742 # with performance impacts. The current code runs more often than
1743 1743 # needed and does not use caches as much as it could. The current focus
1744 1744 # is on the behavior of the feature, so we disable it by default. The
1745 1745 # flag will be removed when we are happy with the performance impact.
1746 1746 #
1747 1747 # Once this feature is no longer experimental, move the following
1748 1748 # documentation to the appropriate help section:
1749 1749 #
1750 1750 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1751 1751 # tags (new or changed or deleted tags). In addition the details of
1752 1752 # these changes are made available in a file at:
1753 1753 # ``REPOROOT/.hg/changes/tags.changes``.
1754 1754 # Make sure you check for HG_TAG_MOVED before reading that file, as it
1755 1755 # might exist from a previous transaction even if no tags were touched
1756 1756 # in this one. Changes are recorded in a line-based format::
1757 1757 #
1758 1758 # <action> <hex-node> <tag-name>\n
1759 1759 #
1760 1760 # Actions are defined as follows:
1761 1761 # "-R": tag is removed,
1762 1762 # "+A": tag is added,
1763 1763 # "-M": tag is moved (old value),
1764 1764 # "+M": tag is moved (new value),
1765 1765 tracktags = lambda x: None
1766 1766 # experimental config: experimental.hook-track-tags
1767 1767 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1768 1768 if desc != 'strip' and shouldtracktags:
1769 1769 oldheads = self.changelog.headrevs()
1770 1770 def tracktags(tr2):
1771 1771 repo = reporef()
1772 1772 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1773 1773 newheads = repo.changelog.headrevs()
1774 1774 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1775 1775 # note: we compare lists here.
1776 1776 # As we do it only once, building a set would not be cheaper.
1777 1777 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1778 1778 if changes:
1779 1779 tr2.hookargs['tag_moved'] = '1'
1780 1780 with repo.vfs('changes/tags.changes', 'w',
1781 1781 atomictemp=True) as changesfile:
1782 1782 # note: we do not register the file with the transaction
1783 1783 # because we need it to still exist after the transaction
1784 1784 # is closed (for txnclose hooks)
1785 1785 tagsmod.writediff(changesfile, changes)
1786 1786 def validate(tr2):
1787 1787 """will run pre-closing hooks"""
1788 1788 # XXX the transaction API is a bit lacking here so we take a hacky
1789 1789 # path for now
1790 1790 #
1791 1791 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1792 1792 # dict is copied before these run. In addition we need the data
1793 1793 # available to in-memory hooks too.
1794 1794 #
1795 1795 # Moreover, we also need to make sure this runs before txnclose
1796 1796 # hooks and there is no "pending" mechanism that would execute
1797 1797 # logic only if hooks are about to run.
1798 1798 #
1799 1799 # Fixing this limitation of the transaction is also needed to track
1800 1800 # other families of changes (bookmarks, phases, obsolescence).
1801 1801 #
1802 1802 # This will have to be fixed before we remove the experimental
1803 1803 # gating.
1804 1804 tracktags(tr2)
1805 1805 repo = reporef()
1806 1806 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1807 1807 scmutil.enforcesinglehead(repo, tr2, desc)
1808 1808 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1809 1809 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1810 1810 args = tr.hookargs.copy()
1811 1811 args.update(bookmarks.preparehookargs(name, old, new))
1812 1812 repo.hook('pretxnclose-bookmark', throw=True,
1813 1813 txnname=desc,
1814 1814 **pycompat.strkwargs(args))
1815 1815 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1816 1816 cl = repo.unfiltered().changelog
1817 1817 for rev, (old, new) in tr.changes['phases'].items():
1818 1818 args = tr.hookargs.copy()
1819 1819 node = hex(cl.node(rev))
1820 1820 args.update(phases.preparehookargs(node, old, new))
1821 1821 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1822 1822 **pycompat.strkwargs(args))
1823 1823
1824 1824 repo.hook('pretxnclose', throw=True,
1825 1825 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1826 1826 def releasefn(tr, success):
1827 1827 repo = reporef()
1828 1828 if success:
1829 1829 # this should be explicitly invoked here, because
1830 1830 # in-memory changes aren't written out at closing
1831 1831 # transaction, if tr.addfilegenerator (via
1832 1832 # dirstate.write or so) isn't invoked while
1833 1833 # transaction running
1834 1834 repo.dirstate.write(None)
1835 1835 else:
1836 1836 # discard all changes (including ones already written
1837 1837 # out) in this transaction
1838 1838 narrowspec.restorebackup(self, 'journal.narrowspec')
1839 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1839 1840 repo.dirstate.restorebackup(None, 'journal.dirstate')
1840 1841
1841 1842 repo.invalidate(clearfilecache=True)
1842 1843
1843 1844 tr = transaction.transaction(rp, self.svfs, vfsmap,
1844 1845 "journal",
1845 1846 "undo",
1846 1847 aftertrans(renames),
1847 1848 self.store.createmode,
1848 1849 validator=validate,
1849 1850 releasefn=releasefn,
1850 1851 checkambigfiles=_cachedfiles,
1851 1852 name=desc)
1852 1853 tr.changes['origrepolen'] = len(self)
1853 1854 tr.changes['obsmarkers'] = set()
1854 1855 tr.changes['phases'] = {}
1855 1856 tr.changes['bookmarks'] = {}
1856 1857
1857 1858 tr.hookargs['txnid'] = txnid
1858 1859 # note: writing the fncache only during finalize means that the file is
1859 1860 # outdated when running hooks. As fncache is used for streaming clone,
1860 1861 # this is not expected to break anything that happens during the hooks.
1861 1862 tr.addfinalize('flush-fncache', self.store.write)
1862 1863 def txnclosehook(tr2):
1863 1864 """To be run if transaction is successful, will schedule a hook run
1864 1865 """
1865 1866 # Don't reference tr2 in hook() so we don't hold a reference.
1866 1867 # This reduces memory consumption when there are multiple
1867 1868 # transactions per lock. This can likely go away if issue5045
1868 1869 # fixes the function accumulation.
1869 1870 hookargs = tr2.hookargs
1870 1871
1871 1872 def hookfunc():
1872 1873 repo = reporef()
1873 1874 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1874 1875 bmchanges = sorted(tr.changes['bookmarks'].items())
1875 1876 for name, (old, new) in bmchanges:
1876 1877 args = tr.hookargs.copy()
1877 1878 args.update(bookmarks.preparehookargs(name, old, new))
1878 1879 repo.hook('txnclose-bookmark', throw=False,
1879 1880 txnname=desc, **pycompat.strkwargs(args))
1880 1881
1881 1882 if hook.hashook(repo.ui, 'txnclose-phase'):
1882 1883 cl = repo.unfiltered().changelog
1883 1884 phasemv = sorted(tr.changes['phases'].items())
1884 1885 for rev, (old, new) in phasemv:
1885 1886 args = tr.hookargs.copy()
1886 1887 node = hex(cl.node(rev))
1887 1888 args.update(phases.preparehookargs(node, old, new))
1888 1889 repo.hook('txnclose-phase', throw=False, txnname=desc,
1889 1890 **pycompat.strkwargs(args))
1890 1891
1891 1892 repo.hook('txnclose', throw=False, txnname=desc,
1892 1893 **pycompat.strkwargs(hookargs))
1893 1894 reporef()._afterlock(hookfunc)
1894 1895 tr.addfinalize('txnclose-hook', txnclosehook)
1895 1896 # Include a leading "-" to make it happen before the transaction summary
1896 1897 # reports registered via scmutil.registersummarycallback() whose names
1897 1898 # are 00-txnreport etc. That way, the caches will be warm when the
1898 1899 # callbacks run.
1899 1900 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1900 1901 def txnaborthook(tr2):
1901 1902 """To be run if transaction is aborted
1902 1903 """
1903 1904 reporef().hook('txnabort', throw=False, txnname=desc,
1904 1905 **pycompat.strkwargs(tr2.hookargs))
1905 1906 tr.addabort('txnabort-hook', txnaborthook)
1906 1907 # avoid eager cache invalidation. in-memory data should be identical
1907 1908 # to stored data if transaction has no error.
1908 1909 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1909 1910 self._transref = weakref.ref(tr)
1910 1911 scmutil.registersummarycallback(self, tr, desc)
1911 1912 return tr
1912 1913
1913 1914 def _journalfiles(self):
1914 1915 return ((self.svfs, 'journal'),
1915 1916 (self.svfs, 'journal.narrowspec'),
1917 (self.vfs, 'journal.narrowspec.dirstate'),
1916 1918 (self.vfs, 'journal.dirstate'),
1917 1919 (self.vfs, 'journal.branch'),
1918 1920 (self.vfs, 'journal.desc'),
1919 1921 (self.vfs, 'journal.bookmarks'),
1920 1922 (self.svfs, 'journal.phaseroots'))
1921 1923
1922 1924 def undofiles(self):
1923 1925 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1924 1926
1925 1927 @unfilteredmethod
1926 1928 def _writejournal(self, desc):
1927 1929 self.dirstate.savebackup(None, 'journal.dirstate')
1930 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
1928 1931 narrowspec.savebackup(self, 'journal.narrowspec')
1929 1932 self.vfs.write("journal.branch",
1930 1933 encoding.fromlocal(self.dirstate.branch()))
1931 1934 self.vfs.write("journal.desc",
1932 1935 "%d\n%s\n" % (len(self), desc))
1933 1936 self.vfs.write("journal.bookmarks",
1934 1937 self.vfs.tryread("bookmarks"))
1935 1938 self.svfs.write("journal.phaseroots",
1936 1939 self.svfs.tryread("phaseroots"))
1937 1940
1938 1941 def recover(self):
1939 1942 with self.lock():
1940 1943 if self.svfs.exists("journal"):
1941 1944 self.ui.status(_("rolling back interrupted transaction\n"))
1942 1945 vfsmap = {'': self.svfs,
1943 1946 'plain': self.vfs,}
1944 1947 transaction.rollback(self.svfs, vfsmap, "journal",
1945 1948 self.ui.warn,
1946 1949 checkambigfiles=_cachedfiles)
1947 1950 self.invalidate()
1948 1951 return True
1949 1952 else:
1950 1953 self.ui.warn(_("no interrupted transaction available\n"))
1951 1954 return False
1952 1955
1953 1956 def rollback(self, dryrun=False, force=False):
1954 1957 wlock = lock = dsguard = None
1955 1958 try:
1956 1959 wlock = self.wlock()
1957 1960 lock = self.lock()
1958 1961 if self.svfs.exists("undo"):
1959 1962 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1960 1963
1961 1964 return self._rollback(dryrun, force, dsguard)
1962 1965 else:
1963 1966 self.ui.warn(_("no rollback information available\n"))
1964 1967 return 1
1965 1968 finally:
1966 1969 release(dsguard, lock, wlock)
1967 1970
1968 1971 @unfilteredmethod # Until we get smarter cache management
1969 1972 def _rollback(self, dryrun, force, dsguard):
1970 1973 ui = self.ui
1971 1974 try:
1972 1975 args = self.vfs.read('undo.desc').splitlines()
1973 1976 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1974 1977 if len(args) >= 3:
1975 1978 detail = args[2]
1976 1979 oldtip = oldlen - 1
1977 1980
1978 1981 if detail and ui.verbose:
1979 1982 msg = (_('repository tip rolled back to revision %d'
1980 1983 ' (undo %s: %s)\n')
1981 1984 % (oldtip, desc, detail))
1982 1985 else:
1983 1986 msg = (_('repository tip rolled back to revision %d'
1984 1987 ' (undo %s)\n')
1985 1988 % (oldtip, desc))
1986 1989 except IOError:
1987 1990 msg = _('rolling back unknown transaction\n')
1988 1991 desc = None
1989 1992
1990 1993 if not force and self['.'] != self['tip'] and desc == 'commit':
1991 1994 raise error.Abort(
1992 1995 _('rollback of last commit while not checked out '
1993 1996 'may lose data'), hint=_('use -f to force'))
1994 1997
1995 1998 ui.status(msg)
1996 1999 if dryrun:
1997 2000 return 0
1998 2001
1999 2002 parents = self.dirstate.parents()
2000 2003 self.destroying()
2001 2004 vfsmap = {'plain': self.vfs, '': self.svfs}
2002 2005 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2003 2006 checkambigfiles=_cachedfiles)
2004 2007 if self.vfs.exists('undo.bookmarks'):
2005 2008 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2006 2009 if self.svfs.exists('undo.phaseroots'):
2007 2010 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2008 2011 self.invalidate()
2009 2012
2010 2013 parentgone = (parents[0] not in self.changelog.nodemap or
2011 2014 parents[1] not in self.changelog.nodemap)
2012 2015 if parentgone:
2013 2016 # prevent dirstateguard from overwriting already restored one
2014 2017 dsguard.close()
2015 2018
2016 2019 narrowspec.restorebackup(self, 'undo.narrowspec')
2020 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2017 2021 self.dirstate.restorebackup(None, 'undo.dirstate')
2018 2022 try:
2019 2023 branch = self.vfs.read('undo.branch')
2020 2024 self.dirstate.setbranch(encoding.tolocal(branch))
2021 2025 except IOError:
2022 2026 ui.warn(_('named branch could not be reset: '
2023 2027 'current branch is still \'%s\'\n')
2024 2028 % self.dirstate.branch())
2025 2029
2026 2030 parents = tuple([p.rev() for p in self[None].parents()])
2027 2031 if len(parents) > 1:
2028 2032 ui.status(_('working directory now based on '
2029 2033 'revisions %d and %d\n') % parents)
2030 2034 else:
2031 2035 ui.status(_('working directory now based on '
2032 2036 'revision %d\n') % parents)
2033 2037 mergemod.mergestate.clean(self, self['.'].node())
2034 2038
2035 2039 # TODO: if we know which new heads may result from this rollback, pass
2036 2040 # them to destroy(), which will prevent the branchhead cache from being
2037 2041 # invalidated.
2038 2042 self.destroyed()
2039 2043 return 0
2040 2044
2041 2045 def _buildcacheupdater(self, newtransaction):
2042 2046 """called during transaction to build the callback updating cache
2043 2047
2044 2048 Lives on the repository to help extensions that might want to augment
2045 2049 this logic. For this purpose, the created transaction is passed to the
2046 2050 method.
2047 2051 """
2048 2052 # we must avoid cyclic reference between repo and transaction.
2049 2053 reporef = weakref.ref(self)
2050 2054 def updater(tr):
2051 2055 repo = reporef()
2052 2056 repo.updatecaches(tr)
2053 2057 return updater
2054 2058
2055 2059 @unfilteredmethod
2056 2060 def updatecaches(self, tr=None, full=False):
2057 2061 """warm appropriate caches
2058 2062
2059 2063 If this function is called after a transaction closed, the transaction
2060 2064 will be available in the 'tr' argument. This can be used to selectively
2061 2065 update caches relevant to the changes in that transaction.
2062 2066
2063 2067 If 'full' is set, make sure all caches the function knows about have
2064 2068 up-to-date data, even the ones usually loaded more lazily.
2065 2069 """
2066 2070 if tr is not None and tr.hookargs.get('source') == 'strip':
2067 2071 # During strip, many caches are invalid but
2068 2072 # later call to `destroyed` will refresh them.
2069 2073 return
2070 2074
2071 2075 if tr is None or tr.changes['origrepolen'] < len(self):
2072 2076 # updating the unfiltered branchmap should refresh all the others.
2073 2077 self.ui.debug('updating the branch cache\n')
2074 2078 branchmap.updatecache(self.filtered('served'))
2075 2079
2076 2080 if full:
2077 2081 rbc = self.revbranchcache()
2078 2082 for r in self.changelog:
2079 2083 rbc.branchinfo(r)
2080 2084 rbc.write()
2081 2085
2082 2086 # ensure the working copy parents are in the manifestfulltextcache
2083 2087 for ctx in self['.'].parents():
2084 2088 ctx.manifest() # accessing the manifest is enough
2085 2089
2086 2090 def invalidatecaches(self):
2087 2091
2088 2092 if r'_tagscache' in vars(self):
2089 2093 # can't use delattr on proxy
2090 2094 del self.__dict__[r'_tagscache']
2091 2095
2092 2096 self.unfiltered()._branchcaches.clear()
2093 2097 self.invalidatevolatilesets()
2094 2098 self._sparsesignaturecache.clear()
2095 2099
2096 2100 def invalidatevolatilesets(self):
2097 2101 self.filteredrevcache.clear()
2098 2102 obsolete.clearobscaches(self)
2099 2103
2100 2104 def invalidatedirstate(self):
2101 2105 '''Invalidates the dirstate, causing the next call to dirstate
2102 2106 to check if it was modified since the last time it was read,
2103 2107 rereading it if it has.
2104 2108
2105 2109 This is different from dirstate.invalidate() in that it doesn't always
2106 2110 reread the dirstate. Use dirstate.invalidate() if you want to
2107 2111 explicitly read the dirstate again (i.e. restoring it to a previous
2108 2112 known good state).'''
2109 2113 if hasunfilteredcache(self, r'dirstate'):
2110 2114 for k in self.dirstate._filecache:
2111 2115 try:
2112 2116 delattr(self.dirstate, k)
2113 2117 except AttributeError:
2114 2118 pass
2115 2119 delattr(self.unfiltered(), r'dirstate')
2116 2120
2117 2121 def invalidate(self, clearfilecache=False):
2118 2122 '''Invalidates both store and non-store parts other than dirstate
2119 2123
2120 2124 If a transaction is running, invalidation of store is omitted,
2121 2125 because discarding in-memory changes might cause inconsistency
2122 2126 (e.g. incomplete fncache causes unintentional failure, but
2123 2127 redundant one doesn't).
2124 2128 '''
2125 2129 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2126 2130 for k in list(self._filecache.keys()):
2127 2131 # dirstate is invalidated separately in invalidatedirstate()
2128 2132 if k == 'dirstate':
2129 2133 continue
2130 2134 if (k == 'changelog' and
2131 2135 self.currenttransaction() and
2132 2136 self.changelog._delayed):
2133 2137 # The changelog object may store unwritten revisions. We don't
2134 2138 # want to lose them.
2135 2139 # TODO: Solve the problem instead of working around it.
2136 2140 continue
2137 2141
2138 2142 if clearfilecache:
2139 2143 del self._filecache[k]
2140 2144 try:
2141 2145 delattr(unfiltered, k)
2142 2146 except AttributeError:
2143 2147 pass
2144 2148 self.invalidatecaches()
2145 2149 if not self.currenttransaction():
2146 2150 # TODO: Changing contents of store outside transaction
2147 2151 # causes inconsistency. We should make in-memory store
2148 2152 # changes detectable, and abort if changed.
2149 2153 self.store.invalidatecaches()
2150 2154
2151 2155 def invalidateall(self):
2152 2156 '''Fully invalidates both store and non-store parts, causing the
2153 2157 subsequent operation to reread any outside changes.'''
2154 2158 # extension should hook this to invalidate its caches
2155 2159 self.invalidate()
2156 2160 self.invalidatedirstate()
2157 2161
2158 2162 @unfilteredmethod
2159 2163 def _refreshfilecachestats(self, tr):
2160 2164 """Reload stats of cached files so that they are flagged as valid"""
2161 2165 for k, ce in self._filecache.items():
2162 2166 k = pycompat.sysstr(k)
2163 2167 if k == r'dirstate' or k not in self.__dict__:
2164 2168 continue
2165 2169 ce.refresh()
2166 2170
2167 2171 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2168 2172 inheritchecker=None, parentenvvar=None):
2169 2173 parentlock = None
2170 2174 # the contents of parentenvvar are used by the underlying lock to
2171 2175 # determine whether it can be inherited
2172 2176 if parentenvvar is not None:
2173 2177 parentlock = encoding.environ.get(parentenvvar)
2174 2178
2175 2179 timeout = 0
2176 2180 warntimeout = 0
2177 2181 if wait:
2178 2182 timeout = self.ui.configint("ui", "timeout")
2179 2183 warntimeout = self.ui.configint("ui", "timeout.warn")
2180 2184 # internal config: ui.signal-safe-lock
2181 2185 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2182 2186
2183 2187 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2184 2188 releasefn=releasefn,
2185 2189 acquirefn=acquirefn, desc=desc,
2186 2190 inheritchecker=inheritchecker,
2187 2191 parentlock=parentlock,
2188 2192 signalsafe=signalsafe)
2189 2193 return l
2190 2194
2191 2195 def _afterlock(self, callback):
2192 2196 """add a callback to be run when the repository is fully unlocked
2193 2197
2194 2198 The callback will be executed when the outermost lock is released
2195 2199 (with wlock being higher level than 'lock')."""
2196 2200 for ref in (self._wlockref, self._lockref):
2197 2201 l = ref and ref()
2198 2202 if l and l.held:
2199 2203 l.postrelease.append(callback)
2200 2204 break
2201 2205 else: # no lock has been found.
2202 2206 callback()
2203 2207
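# Illustrative sketch (not part of this module): _afterlock() defers a
# callback until the outermost lock is released (or runs it immediately
# when no lock is held), e.g.:
#
#   def notify():
#       repo.ui.status('locks released\n')
#   repo._afterlock(notify)
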
2204 2208 def lock(self, wait=True):
2205 2209 '''Lock the repository store (.hg/store) and return a weak reference
2206 2210 to the lock. Use this before modifying the store (e.g. committing or
2207 2211 stripping). If you are opening a transaction, get a lock as well.
2208 2212
2209 2213 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2210 2214 'wlock' first to avoid a dead-lock hazard.'''
2211 2215 l = self._currentlock(self._lockref)
2212 2216 if l is not None:
2213 2217 l.lock()
2214 2218 return l
2215 2219
2216 2220 l = self._lock(self.svfs, "lock", wait, None,
2217 2221 self.invalidate, _('repository %s') % self.origroot)
2218 2222 self._lockref = weakref.ref(l)
2219 2223 return l
2220 2224
2221 2225 def _wlockchecktransaction(self):
2222 2226 if self.currenttransaction() is not None:
2223 2227 raise error.LockInheritanceContractViolation(
2224 2228 'wlock cannot be inherited in the middle of a transaction')
2225 2229
2226 2230 def wlock(self, wait=True):
2227 2231 '''Lock the non-store parts of the repository (everything under
2228 2232 .hg except .hg/store) and return a weak reference to the lock.
2229 2233
2230 2234 Use this before modifying files in .hg.
2231 2235
2232 2236 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2233 2237 'wlock' first to avoid a dead-lock hazard.'''
2234 2238 l = self._wlockref and self._wlockref()
2235 2239 if l is not None and l.held:
2236 2240 l.lock()
2237 2241 return l
2238 2242
2239 2243 # We do not need to check for non-waiting lock acquisition. Such
2240 2244 # acquisitions would not cause a dead-lock as they would just fail.
2241 2245 if wait and (self.ui.configbool('devel', 'all-warnings')
2242 2246 or self.ui.configbool('devel', 'check-locks')):
2243 2247 if self._currentlock(self._lockref) is not None:
2244 2248 self.ui.develwarn('"wlock" acquired after "lock"')
2245 2249
2246 2250 def unlock():
2247 2251 if self.dirstate.pendingparentchange():
2248 2252 self.dirstate.invalidate()
2249 2253 else:
2250 2254 self.dirstate.write(None)
2251 2255
2252 2256 self._filecache['dirstate'].refresh()
2253 2257
2254 2258 l = self._lock(self.vfs, "wlock", wait, unlock,
2255 2259 self.invalidatedirstate, _('working directory of %s') %
2256 2260 self.origroot,
2257 2261 inheritchecker=self._wlockchecktransaction,
2258 2262 parentenvvar='HG_WLOCK_LOCKER')
2259 2263 self._wlockref = weakref.ref(l)
2260 2264 return l
2261 2265
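# Illustrative sketch (not part of this module): per the docstrings above,
# always take wlock before lock to avoid the dead-lock hazard:
#
#   with repo.wlock():
#       with repo.lock():
#           ...  # modify .hg and .hg/store here
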
2262 2266 def _currentlock(self, lockref):
2263 2267 """Returns the lock if it's held, or None if it's not."""
2264 2268 if lockref is None:
2265 2269 return None
2266 2270 l = lockref()
2267 2271 if l is None or not l.held:
2268 2272 return None
2269 2273 return l
2270 2274
2271 2275 def currentwlock(self):
2272 2276 """Returns the wlock if it's held, or None if it's not."""
2273 2277 return self._currentlock(self._wlockref)
2274 2278
2275 2279 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2276 2280 """
2277 2281 commit an individual file as part of a larger transaction
2278 2282 """
2279 2283
2280 2284 fname = fctx.path()
2281 2285 fparent1 = manifest1.get(fname, nullid)
2282 2286 fparent2 = manifest2.get(fname, nullid)
2283 2287 if isinstance(fctx, context.filectx):
2284 2288 node = fctx.filenode()
2285 2289 if node in [fparent1, fparent2]:
2286 2290 self.ui.debug('reusing %s filelog entry\n' % fname)
2287 2291 if manifest1.flags(fname) != fctx.flags():
2288 2292 changelist.append(fname)
2289 2293 return node
2290 2294
2291 2295 flog = self.file(fname)
2292 2296 meta = {}
2293 2297 copy = fctx.renamed()
2294 2298 if copy and copy[0] != fname:
2295 2299 # Mark the new revision of this file as a copy of another
2296 2300 # file. This copy data will effectively act as a parent
2297 2301 # of this new revision. If this is a merge, the first
2298 2302 # parent will be the nullid (meaning "look up the copy data")
2299 2303 # and the second one will be the other parent. For example:
2300 2304 #
2301 2305 # 0 --- 1 --- 3 rev1 changes file foo
2302 2306 # \ / rev2 renames foo to bar and changes it
2303 2307 # \- 2 -/ rev3 should have bar with all changes and
2304 2308 # should record that bar descends from
2305 2309 # bar in rev2 and foo in rev1
2306 2310 #
2307 2311 # this allows this merge to succeed:
2308 2312 #
2309 2313 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2310 2314 # \ / merging rev3 and rev4 should use bar@rev2
2311 2315 # \- 2 --- 4 as the merge base
2312 2316 #
2313 2317
2314 2318 cfname = copy[0]
2315 2319 crev = manifest1.get(cfname)
2316 2320 newfparent = fparent2
2317 2321
2318 2322 if manifest2: # branch merge
2319 2323 if fparent2 == nullid or crev is None: # copied on remote side
2320 2324 if cfname in manifest2:
2321 2325 crev = manifest2[cfname]
2322 2326 newfparent = fparent1
2323 2327
2324 2328 # Here, we used to search backwards through history to try to find
2325 2329 # where the file copy came from if the source of a copy was not in
2326 2330 # the parent directory. However, this doesn't actually make sense to
2327 2331 # do (what does a copy from something not in your working copy even
2328 2332 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2329 2333 # the user that copy information was dropped, so if they didn't
2330 2334 # expect this outcome it can be fixed, but this is the correct
2331 2335 # behavior in this circumstance.
2332 2336
2333 2337 if crev:
2334 2338 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2335 2339 meta["copy"] = cfname
2336 2340 meta["copyrev"] = hex(crev)
2337 2341 fparent1, fparent2 = nullid, newfparent
2338 2342 else:
2339 2343 self.ui.warn(_("warning: can't find ancestor for '%s' "
2340 2344 "copied from '%s'!\n") % (fname, cfname))
2341 2345
2342 2346 elif fparent1 == nullid:
2343 2347 fparent1, fparent2 = fparent2, nullid
2344 2348 elif fparent2 != nullid:
2345 2349 # is one parent an ancestor of the other?
2346 2350 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2347 2351 if fparent1 in fparentancestors:
2348 2352 fparent1, fparent2 = fparent2, nullid
2349 2353 elif fparent2 in fparentancestors:
2350 2354 fparent2 = nullid
2351 2355
2352 2356 # is the file changed?
2353 2357 text = fctx.data()
2354 2358 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2355 2359 changelist.append(fname)
2356 2360 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2357 2361 # are just the flags changed during merge?
2358 2362 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2359 2363 changelist.append(fname)
2360 2364
2361 2365 return fparent1
2362 2366
2363 2367 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2364 2368 """check for commit arguments that aren't committable"""
2365 2369 if match.isexact() or match.prefix():
2366 2370 matched = set(status.modified + status.added + status.removed)
2367 2371
2368 2372 for f in match.files():
2369 2373 f = self.dirstate.normalize(f)
2370 2374 if f == '.' or f in matched or f in wctx.substate:
2371 2375 continue
2372 2376 if f in status.deleted:
2373 2377 fail(f, _('file not found!'))
2374 2378 if f in vdirs: # visited directory
2375 2379 d = f + '/'
2376 2380 for mf in matched:
2377 2381 if mf.startswith(d):
2378 2382 break
2379 2383 else:
2380 2384 fail(f, _("no match under directory!"))
2381 2385 elif f not in self.dirstate:
2382 2386 fail(f, _("file not tracked!"))
2383 2387
2384 2388 @unfilteredmethod
2385 2389 def commit(self, text="", user=None, date=None, match=None, force=False,
2386 2390 editor=False, extra=None):
2387 2391 """Add a new revision to current repository.
2388 2392
2389 2393 Revision information is gathered from the working directory,
2390 2394 match can be used to filter the committed files. If editor is
2391 2395 supplied, it is called to get a commit message.
2392 2396 """
2393 2397 if extra is None:
2394 2398 extra = {}
2395 2399
2396 2400 def fail(f, msg):
2397 2401 raise error.Abort('%s: %s' % (f, msg))
2398 2402
2399 2403 if not match:
2400 2404 match = matchmod.always(self.root, '')
2401 2405
2402 2406 if not force:
2403 2407 vdirs = []
2404 2408 match.explicitdir = vdirs.append
2405 2409 match.bad = fail
2406 2410
2407 2411 wlock = lock = tr = None
2408 2412 try:
2409 2413 wlock = self.wlock()
2410 2414 lock = self.lock() # for recent changelog (see issue4368)
2411 2415
2412 2416 wctx = self[None]
2413 2417 merge = len(wctx.parents()) > 1
2414 2418
2415 2419 if not force and merge and not match.always():
2416 2420 raise error.Abort(_('cannot partially commit a merge '
2417 2421 '(do not specify files or patterns)'))
2418 2422
2419 2423 status = self.status(match=match, clean=force)
2420 2424 if force:
2421 2425 status.modified.extend(status.clean) # mq may commit clean files
2422 2426
2423 2427 # check subrepos
2424 2428 subs, commitsubs, newstate = subrepoutil.precommit(
2425 2429 self.ui, wctx, status, match, force=force)
2426 2430
2427 2431 # make sure all explicit patterns are matched
2428 2432 if not force:
2429 2433 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2430 2434
2431 2435 cctx = context.workingcommitctx(self, status,
2432 2436 text, user, date, extra)
2433 2437
2434 2438 # internal config: ui.allowemptycommit
2435 2439 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2436 2440 or extra.get('close') or merge or cctx.files()
2437 2441 or self.ui.configbool('ui', 'allowemptycommit'))
2438 2442 if not allowemptycommit:
2439 2443 return None
2440 2444
2441 2445 if merge and cctx.deleted():
2442 2446 raise error.Abort(_("cannot commit merge with missing files"))
2443 2447
2444 2448 ms = mergemod.mergestate.read(self)
2445 2449 mergeutil.checkunresolved(ms)
2446 2450
2447 2451 if editor:
2448 2452 cctx._text = editor(self, cctx, subs)
2449 2453 edited = (text != cctx._text)
2450 2454
2451 2455 # Save commit message in case this transaction gets rolled back
2452 2456 # (e.g. by a pretxncommit hook). Leave the content alone on
2453 2457 # the assumption that the user will use the same editor again.
2454 2458 msgfn = self.savecommitmessage(cctx._text)
2455 2459
2456 2460 # commit subs and write new state
2457 2461 if subs:
2458 2462 for s in sorted(commitsubs):
2459 2463 sub = wctx.sub(s)
2460 2464 self.ui.status(_('committing subrepository %s\n') %
2461 2465 subrepoutil.subrelpath(sub))
2462 2466 sr = sub.commit(cctx._text, user, date)
2463 2467 newstate[s] = (newstate[s][0], sr)
2464 2468 subrepoutil.writestate(self, newstate)
2465 2469
2466 2470 p1, p2 = self.dirstate.parents()
2467 2471 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2468 2472 try:
2469 2473 self.hook("precommit", throw=True, parent1=hookp1,
2470 2474 parent2=hookp2)
2471 2475 tr = self.transaction('commit')
2472 2476 ret = self.commitctx(cctx, True)
2473 2477 except: # re-raises
2474 2478 if edited:
2475 2479 self.ui.write(
2476 2480 _('note: commit message saved in %s\n') % msgfn)
2477 2481 raise
2478 2482 # update bookmarks, dirstate and mergestate
2479 2483 bookmarks.update(self, [p1, p2], ret)
2480 2484 cctx.markcommitted(ret)
2481 2485 ms.reset()
2482 2486 tr.close()
2483 2487
2484 2488 finally:
2485 2489 lockmod.release(tr, lock, wlock)
2486 2490
2487 2491 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2488 2492 # hack for commands that use a temporary commit (e.g. histedit):
2489 2493 # the temporary commit got stripped before the hook was released
2490 2494 if self.changelog.hasnode(ret):
2491 2495 self.hook("commit", node=node, parent1=parent1,
2492 2496 parent2=parent2)
2493 2497 self._afterlock(commithook)
2494 2498 return ret
2495 2499
2496 2500 @unfilteredmethod
2497 2501 def commitctx(self, ctx, error=False):
2498 2502 """Add a new revision to current repository.
2499 2503 Revision information is passed via the context argument.
2500 2504
2501 2505 ctx.files() should list all files involved in this commit, i.e.
2502 2506 modified/added/removed files. On merge, it may be wider than the
2503 2507 set of files actually committed, since any file nodes derived directly
2504 2508 from p1 or p2 are excluded from the committed ctx.files().
2505 2509 """
2506 2510
2507 2511 tr = None
2508 2512 p1, p2 = ctx.p1(), ctx.p2()
2509 2513 user = ctx.user()
2510 2514
2511 2515 lock = self.lock()
2512 2516 try:
2513 2517 tr = self.transaction("commit")
2514 2518 trp = weakref.proxy(tr)
2515 2519
2516 2520 if ctx.manifestnode():
2517 2521 # reuse an existing manifest revision
2518 2522 self.ui.debug('reusing known manifest\n')
2519 2523 mn = ctx.manifestnode()
2520 2524 files = ctx.files()
2521 2525 elif ctx.files():
2522 2526 m1ctx = p1.manifestctx()
2523 2527 m2ctx = p2.manifestctx()
2524 2528 mctx = m1ctx.copy()
2525 2529
2526 2530 m = mctx.read()
2527 2531 m1 = m1ctx.read()
2528 2532 m2 = m2ctx.read()
2529 2533
2530 2534 # check in files
2531 2535 added = []
2532 2536 changed = []
2533 2537 removed = list(ctx.removed())
2534 2538 linkrev = len(self)
2535 2539 self.ui.note(_("committing files:\n"))
2536 2540 for f in sorted(ctx.modified() + ctx.added()):
2537 2541 self.ui.note(f + "\n")
2538 2542 try:
2539 2543 fctx = ctx[f]
2540 2544 if fctx is None:
2541 2545 removed.append(f)
2542 2546 else:
2543 2547 added.append(f)
2544 2548 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2545 2549 trp, changed)
2546 2550 m.setflag(f, fctx.flags())
2547 2551 except OSError as inst:
2548 2552 self.ui.warn(_("trouble committing %s!\n") % f)
2549 2553 raise
2550 2554 except IOError as inst:
2551 2555 errcode = getattr(inst, 'errno', errno.ENOENT)
2552 2556 if error or errcode and errcode != errno.ENOENT:
2553 2557 self.ui.warn(_("trouble committing %s!\n") % f)
2554 2558 raise
2555 2559
2556 2560 # update manifest
2557 2561 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2558 2562 drop = [f for f in removed if f in m]
2559 2563 for f in drop:
2560 2564 del m[f]
2561 2565 files = changed + removed
2562 2566 md = None
2563 2567 if not files:
2564 2568 # if no "files" actually changed in terms of the changelog,
2565 2569 # try hard to detect unmodified manifest entry so that the
2566 2570 # exact same commit can be reproduced later on convert.
2567 2571 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2568 2572 if not files and md:
2569 2573 self.ui.debug('not reusing manifest (no file change in '
2570 2574 'changelog, but manifest differs)\n')
2571 2575 if files or md:
2572 2576 self.ui.note(_("committing manifest\n"))
2573 2577 # we're using narrowmatch here since it's already applied at
2574 2578 # other stages (such as dirstate.walk), so we're already
2575 2579 # ignoring things outside of narrowspec in most cases. The
2576 2580 # one case where we might have files outside the narrowspec
2577 2581 # at this point is merges, and we already error out in the
2578 2582 # case where the merge has files outside of the narrowspec,
2579 2583 # so this is safe.
2580 2584 mn = mctx.write(trp, linkrev,
2581 2585 p1.manifestnode(), p2.manifestnode(),
2582 2586 added, drop, match=self.narrowmatch())
2583 2587 else:
2584 2588 self.ui.debug('reusing manifest from p1 (listed files '
2585 2589 'actually unchanged)\n')
2586 2590 mn = p1.manifestnode()
2587 2591 else:
2588 2592 self.ui.debug('reusing manifest from p1 (no file change)\n')
2589 2593 mn = p1.manifestnode()
2590 2594 files = []
2591 2595
2592 2596 # update changelog
2593 2597 self.ui.note(_("committing changelog\n"))
2594 2598 self.changelog.delayupdate(tr)
2595 2599 n = self.changelog.add(mn, files, ctx.description(),
2596 2600 trp, p1.node(), p2.node(),
2597 2601 user, ctx.date(), ctx.extra().copy())
2598 2602 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2599 2603 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2600 2604 parent2=xp2)
2601 2605 # set the new commit in its proper phase
2602 2606 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2603 2607 if targetphase:
2604 2608 # retract boundary does not alter the parent changeset.
2605 2609 # if a parent has a higher phase, the resulting phase will
2606 2610 # be compliant anyway
2607 2611 #
2608 2612 # if minimal phase was 0 we don't need to retract anything
2609 2613 phases.registernew(self, tr, targetphase, [n])
2610 2614 tr.close()
2611 2615 return n
2612 2616 finally:
2613 2617 if tr:
2614 2618 tr.release()
2615 2619 lock.release()
2616 2620
2617 2621 @unfilteredmethod
2618 2622 def destroying(self):
2619 2623 '''Inform the repository that nodes are about to be destroyed.
2620 2624 Intended for use by strip and rollback, so there's a common
2621 2625 place for anything that has to be done before destroying history.
2622 2626
2623 2627 This is mostly useful for saving state that is in memory and waiting
2624 2628 to be flushed when the current lock is released. Because a call to
2625 2629 destroyed is imminent, the repo will be invalidated causing those
2626 2630 changes to stay in memory (waiting for the next unlock), or vanish
2627 2631 completely.
2628 2632 '''
2629 2633 # When using the same lock to commit and strip, the phasecache is left
2630 2634 # dirty after committing. Then when we strip, the repo is invalidated,
2631 2635 # causing those changes to disappear.
2632 2636 if '_phasecache' in vars(self):
2633 2637 self._phasecache.write()
2634 2638
2635 2639 @unfilteredmethod
2636 2640 def destroyed(self):
2637 2641 '''Inform the repository that nodes have been destroyed.
2638 2642 Intended for use by strip and rollback, so there's a common
2639 2643 place for anything that has to be done after destroying history.
2640 2644 '''
2641 2645 # When one tries to:
2642 2646 # 1) destroy nodes thus calling this method (e.g. strip)
2643 2647 # 2) use phasecache somewhere (e.g. commit)
2644 2648 #
2645 2649 # then 2) will fail because the phasecache contains nodes that were
2646 2650 # removed. We can either remove phasecache from the filecache,
2647 2651 # causing it to reload next time it is accessed, or simply filter
2648 2652 # the removed nodes now and write the updated cache.
2649 2653 self._phasecache.filterunknown(self)
2650 2654 self._phasecache.write()
2651 2655
2652 2656 # refresh all repository caches
2653 2657 self.updatecaches()
2654 2658
2655 2659 # Ensure the persistent tag cache is updated. Doing it now
2656 2660 # means that the tag cache only has to worry about destroyed
2657 2661 # heads immediately after a strip/rollback. That in turn
2658 2662 # guarantees that "cachetip == currenttip" (comparing both rev
2659 2663 # and node) always means no nodes have been added or destroyed.
2660 2664
2661 2665 # XXX this is suboptimal when qrefresh'ing: we strip the current
2662 2666 # head, refresh the tag cache, then immediately add a new head.
2663 2667 # But I think doing it this way is necessary for the "instant
2664 2668 # tag cache retrieval" case to work.
2665 2669 self.invalidate()
2666 2670
2667 2671 def status(self, node1='.', node2=None, match=None,
2668 2672 ignored=False, clean=False, unknown=False,
2669 2673 listsubrepos=False):
2670 2674 '''a convenience method that calls node1.status(node2)'''
2671 2675 return self[node1].status(node2, match, ignored, clean, unknown,
2672 2676 listsubrepos)
2673 2677
2674 2678 def addpostdsstatus(self, ps):
2675 2679 """Add a callback to run within the wlock, at the point at which status
2676 2680 fixups happen.
2677 2681
2678 2682 On status completion, callback(wctx, status) will be called with the
2679 2683 wlock held, unless the dirstate has changed from underneath or the wlock
2680 2684 couldn't be grabbed.
2681 2685
2682 2686 Callbacks should not capture and use a cached copy of the dirstate --
2683 2687 it might change in the meanwhile. Instead, they should access the
2684 2688 dirstate via wctx.repo().dirstate.
2685 2689
2686 2690 This list is emptied out after each status run -- extensions should
2687 2691 make sure they add to this list each time dirstate.status is called.
2688 2692 Extensions should also make sure they don't call this for statuses
2689 2693 that don't involve the dirstate.
2690 2694 """
2691 2695
2692 2696 # The list is located here for uniqueness reasons -- it is actually
2693 2697 # managed by the workingctx, but that isn't unique per-repo.
2694 2698 self._postdsstatus.append(ps)
2695 2699
2696 2700 def postdsstatus(self):
2697 2701 """Used by workingctx to get the list of post-dirstate-status hooks."""
2698 2702 return self._postdsstatus
2699 2703
2700 2704 def clearpostdsstatus(self):
2701 2705 """Used by workingctx to clear post-dirstate-status hooks."""
2702 2706 del self._postdsstatus[:]
2703 2707
2704 2708 def heads(self, start=None):
2705 2709 if start is None:
2706 2710 cl = self.changelog
2707 2711 headrevs = reversed(cl.headrevs())
2708 2712 return [cl.node(rev) for rev in headrevs]
2709 2713
2710 2714 heads = self.changelog.heads(start)
2711 2715 # sort the output in rev descending order
2712 2716 return sorted(heads, key=self.changelog.rev, reverse=True)
2713 2717
2714 2718 def branchheads(self, branch=None, start=None, closed=False):
2715 2719 '''return a (possibly filtered) list of heads for the given branch
2716 2720
2717 2721 Heads are returned in topological order, from newest to oldest.
2718 2722 If branch is None, use the dirstate branch.
2719 2723 If start is not None, return only heads reachable from start.
2720 2724 If closed is True, return heads that are marked as closed as well.
2721 2725 '''
2722 2726 if branch is None:
2723 2727 branch = self[None].branch()
2724 2728 branches = self.branchmap()
2725 2729 if branch not in branches:
2726 2730 return []
2727 2731 # the cache returns heads ordered lowest to highest
2728 2732 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2729 2733 if start is not None:
2730 2734 # filter out the heads that cannot be reached from startrev
2731 2735 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2732 2736 bheads = [h for h in bheads if h in fbheads]
2733 2737 return bheads
2734 2738
2735 2739 def branches(self, nodes):
2736 2740 if not nodes:
2737 2741 nodes = [self.changelog.tip()]
2738 2742 b = []
2739 2743 for n in nodes:
2740 2744 t = n
2741 2745 while True:
2742 2746 p = self.changelog.parents(n)
2743 2747 if p[1] != nullid or p[0] == nullid:
2744 2748 b.append((t, n, p[0], p[1]))
2745 2749 break
2746 2750 n = p[0]
2747 2751 return b
2748 2752
2749 2753 def between(self, pairs):
2750 2754 r = []
2751 2755
2752 2756 for top, bottom in pairs:
2753 2757 n, l, i = top, [], 0
2754 2758 f = 1
2755 2759
2756 2760 while n != bottom and n != nullid:
2757 2761 p = self.changelog.parents(n)[0]
2758 2762 if i == f:
2759 2763 l.append(n)
2760 2764 f = f * 2
2761 2765 n = p
2762 2766 i += 1
2763 2767
2764 2768 r.append(l)
2765 2769
2766 2770 return r
2767 2771
2768 2772 def checkpush(self, pushop):
2769 2773 """Extensions can override this function if additional checks have
2770 2774 to be performed before pushing, or call it if they override push
2771 2775 command.
2772 2776 """
2773 2777
2774 2778 @unfilteredpropertycache
2775 2779 def prepushoutgoinghooks(self):
2776 2780 """Return util.hooks consists of a pushop with repo, remote, outgoing
2777 2781 methods, which are called before pushing changesets.
2778 2782 """
2779 2783 return util.hooks()
2780 2784
2781 2785 def pushkey(self, namespace, key, old, new):
2782 2786 try:
2783 2787 tr = self.currenttransaction()
2784 2788 hookargs = {}
2785 2789 if tr is not None:
2786 2790 hookargs.update(tr.hookargs)
2787 2791 hookargs = pycompat.strkwargs(hookargs)
2788 2792 hookargs[r'namespace'] = namespace
2789 2793 hookargs[r'key'] = key
2790 2794 hookargs[r'old'] = old
2791 2795 hookargs[r'new'] = new
2792 2796 self.hook('prepushkey', throw=True, **hookargs)
2793 2797 except error.HookAbort as exc:
2794 2798 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2795 2799 if exc.hint:
2796 2800 self.ui.write_err(_("(%s)\n") % exc.hint)
2797 2801 return False
2798 2802 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2799 2803 ret = pushkey.push(self, namespace, key, old, new)
2800 2804 def runhook():
2801 2805 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2802 2806 ret=ret)
2803 2807 self._afterlock(runhook)
2804 2808 return ret
2805 2809
2806 2810 def listkeys(self, namespace):
2807 2811 self.hook('prelistkeys', throw=True, namespace=namespace)
2808 2812 self.ui.debug('listing keys for "%s"\n' % namespace)
2809 2813 values = pushkey.list(self, namespace)
2810 2814 self.hook('listkeys', namespace=namespace, values=values)
2811 2815 return values
2812 2816
2813 2817 def debugwireargs(self, one, two, three=None, four=None, five=None):
2814 2818 '''used to test argument passing over the wire'''
2815 2819 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2816 2820 pycompat.bytestr(four),
2817 2821 pycompat.bytestr(five))
2818 2822
2819 2823 def savecommitmessage(self, text):
2820 2824 fp = self.vfs('last-message.txt', 'wb')
2821 2825 try:
2822 2826 fp.write(text)
2823 2827 finally:
2824 2828 fp.close()
2825 2829 return self.pathto(fp.name[len(self.root) + 1:])
2826 2830
2827 2831 # used to avoid circular references so destructors work
2828 2832 def aftertrans(files):
2829 2833 renamefiles = [tuple(t) for t in files]
2830 2834 def a():
2831 2835 for vfs, src, dest in renamefiles:
2832 2836 # if src and dest refer to the same file, vfs.rename is a no-op,
2833 2837 # leaving both src and dest on disk. Delete dest to make sure
2834 2838 # the rename cannot be such a no-op.
2835 2839 vfs.tryunlink(dest)
2836 2840 try:
2837 2841 vfs.rename(src, dest)
2838 2842 except OSError: # journal file does not yet exist
2839 2843 pass
2840 2844 return a
2841 2845
2842 2846 def undoname(fn):
2843 2847 base, name = os.path.split(fn)
2844 2848 assert name.startswith('journal')
2845 2849 return os.path.join(base, name.replace('journal', 'undo', 1))
2846 2850
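# Illustrative sketch (not part of this module): undoname() maps journal
# file names to their undo counterparts, for example:
#
#   undoname('journal.dirstate')   -> 'undo.dirstate'
#   undoname('.hg/journal.branch') -> '.hg/undo.branch'
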
2847 2851 def instance(ui, path, create, intents=None, createopts=None):
2848 2852 localpath = util.urllocalpath(path)
2849 2853 if create:
2850 2854 createrepository(ui, localpath, createopts=createopts)
2851 2855
2852 2856 return makelocalrepository(ui, localpath, intents=intents)
2853 2857
2854 2858 def islocal(path):
2855 2859 return True
2856 2860
2857 2861 def defaultcreateopts(ui, createopts=None):
2858 2862 """Populate the default creation options for a repository.
2859 2863
2860 2864 A dictionary of explicitly requested creation options can be passed
2861 2865 in. Missing keys will be populated.
2862 2866 """
2863 2867 createopts = dict(createopts or {})
2864 2868
2865 2869 if 'backend' not in createopts:
2866 2870 # experimental config: storage.new-repo-backend
2867 2871 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2868 2872
2869 2873 return createopts
2870 2874
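# Illustrative sketch (not part of this module): repository creation
# typically chains these helpers; 'narrowfiles' is one of the recognized
# creation options:
#
#   createopts = defaultcreateopts(ui, createopts={'narrowfiles': True})
#   requirements = newreporequirements(ui, createopts)
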
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements

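# Editor's sketch (hypothetical extension code, not part of this module): the
# docstring above invites wrapping; the usual pattern is:
#
#   from mercurial import extensions, localrepo
#
#   def wrappedreqs(orig, ui, createopts):
#       requirements = orig(ui, createopts)
#       requirements.add('exp-myfeature')  # hypothetical requirement name
#       return requirements
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements', wrappedreqs)
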
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

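# Editor's sketch (hypothetical, mirroring the wrapping pattern shown above):
# an extension that understands a custom creation option drops it from the
# "unknown" dict so that repository creation can proceed:
#
#   def wrappedfilter(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop('myext-storage', None)  # hypothetical option we handle
#       return unknown
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                               wrappedfilter)
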
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

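# Editor's sketch: end-to-end creation of a narrow repository, then opening
# it; the path is hypothetical and error handling is omitted:
#
#   createrepository(ui, b'/path/to/repo',
#                    createopts={'narrowfiles': True})
#   repo = instance(ui, b'/path/to/repo', create=False)
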
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one where
    # every attribute lookup raises an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
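# Editor's sketch of the observable behavior after poisoning (hypothetical
# repo object):
#
#   poisonrepository(repo)
#   repo.close()   # still permitted; close() stays callable
#   repo.status()  # raises error.ProgrammingError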