localrepo: micro-optimize __len__() to bypass repoview...
Yuya Nishihara
r35754:29f57ce4 default
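
The diff below makes localrepository.__len__() fetch the changelog through self.unfiltered() rather than self.changelog. On a filtered repository (a repoview), attribute access on changelog goes through the view's proxy machinery, yet the revision count is identical for the filtered and unfiltered views, since hidden revisions are skipped during iteration rather than removed from the revlog. A minimal sketch of the idea, using simplified stand-in classes (not the real localrepo/repoview implementations):

    # Stand-in classes; the real proxy construction in repoview.py is
    # costlier than this, which is what the optimization avoids paying.
    class fakechangelog(object):
        def __init__(self, nrevs, filteredrevs=frozenset()):
            self._nrevs = nrevs
            self.filteredrevs = filteredrevs

        def __len__(self):
            return self._nrevs  # filtered revisions still count here

    class fakerepo(object):
        def __init__(self, nrevs):
            self.changelog = fakechangelog(nrevs)

        def unfiltered(self):
            return self

    class fakerepoview(object):
        def __init__(self, unfi):
            self._unfi = unfi

        def unfiltered(self):
            return self._unfi

        @property
        def changelog(self):
            # stand-in for the per-access filtered changelog proxy
            return fakechangelog(len(self._unfi.changelog),
                                 filteredrevs=frozenset([0]))

        def __len__(self):
            # the optimized path: bypass the changelog proxy entirely
            return len(self.unfiltered().changelog)

    view = fakerepoview(fakerepo(1000))
    assert len(view) == 1000  # same answer, cheaper lookup
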
@@ -1,2276 +1,2278 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepo,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67
68 68 release = lockmod.release
69 69 urlerr = util.urlerr
70 70 urlreq = util.urlreq
71 71
72 72 # set of (path, vfs-location) tuples. vfs-location is:
73 73 # - 'plain' for vfs relative paths
74 74 # - '' for svfs relative paths
75 75 _cachedfiles = set()
76 76
77 77 class _basefilecache(scmutil.filecache):
78 78 """All filecache usage on repo are done for logic that should be unfiltered
79 79 """
80 80 def __get__(self, repo, type=None):
81 81 if repo is None:
82 82 return self
83 83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 84 def __set__(self, repo, value):
85 85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 86 def __delete__(self, repo):
87 87 return super(_basefilecache, self).__delete__(repo.unfiltered())
88 88
89 89 class repofilecache(_basefilecache):
90 90 """filecache for files in .hg but outside of .hg/store"""
91 91 def __init__(self, *paths):
92 92 super(repofilecache, self).__init__(*paths)
93 93 for path in paths:
94 94 _cachedfiles.add((path, 'plain'))
95 95
96 96 def join(self, obj, fname):
97 97 return obj.vfs.join(fname)
98 98
99 99 class storecache(_basefilecache):
100 100 """filecache for files in the store"""
101 101 def __init__(self, *paths):
102 102 super(storecache, self).__init__(*paths)
103 103 for path in paths:
104 104 _cachedfiles.add((path, ''))
105 105
106 106 def join(self, obj, fname):
107 107 return obj.sjoin(fname)
108 108
109 109 def isfilecached(repo, name):
110 110 """check if a repo has already cached "name" filecache-ed property
111 111
112 112 This returns (cachedobj-or-None, iscached) tuple.
113 113 """
114 114 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 115 if not cacheentry:
116 116 return None, False
117 117 return cacheentry.obj, True
118 118
119 119 class unfilteredpropertycache(util.propertycache):
120 120 """propertycache that apply to unfiltered repo only"""
121 121
122 122 def __get__(self, repo, type=None):
123 123 unfi = repo.unfiltered()
124 124 if unfi is repo:
125 125 return super(unfilteredpropertycache, self).__get__(unfi)
126 126 return getattr(unfi, self.name)
127 127
128 128 class filteredpropertycache(util.propertycache):
129 129 """propertycache that must take filtering in account"""
130 130
131 131 def cachevalue(self, obj, value):
132 132 object.__setattr__(obj, self.name, value)
133 133
134 134
135 135 def hasunfilteredcache(repo, name):
136 136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 137 return name in vars(repo.unfiltered())
138 138
139 139 def unfilteredmethod(orig):
140 140 """decorate method that always need to be run on unfiltered version"""
141 141 def wrapper(repo, *args, **kwargs):
142 142 return orig(repo.unfiltered(), *args, **kwargs)
143 143 return wrapper
144 144
145 145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 146 'unbundle'}
147 147 legacycaps = moderncaps.union({'changegroupsubset'})
148 148
149 149 class localpeer(repository.peer):
150 150 '''peer for a local repo; reflects only the most recent API'''
151 151
152 152 def __init__(self, repo, caps=None):
153 153 super(localpeer, self).__init__()
154 154
155 155 if caps is None:
156 156 caps = moderncaps.copy()
157 157 self._repo = repo.filtered('served')
158 158 self._ui = repo.ui
159 159 self._caps = repo._restrictcapabilities(caps)
160 160
161 161 # Begin of _basepeer interface.
162 162
163 163 @util.propertycache
164 164 def ui(self):
165 165 return self._ui
166 166
167 167 def url(self):
168 168 return self._repo.url()
169 169
170 170 def local(self):
171 171 return self._repo
172 172
173 173 def peer(self):
174 174 return self
175 175
176 176 def canpush(self):
177 177 return True
178 178
179 179 def close(self):
180 180 self._repo.close()
181 181
182 182 # End of _basepeer interface.
183 183
184 184 # Begin of _basewirecommands interface.
185 185
186 186 def branchmap(self):
187 187 return self._repo.branchmap()
188 188
189 189 def capabilities(self):
190 190 return self._caps
191 191
192 192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 193 """Used to test argument passing over the wire"""
194 194 return "%s %s %s %s %s" % (one, two, three, four, five)
195 195
196 196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 197 **kwargs):
198 198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 199 common=common, bundlecaps=bundlecaps,
200 200 **kwargs)
201 201 cb = util.chunkbuffer(chunks)
202 202
203 203 if exchange.bundle2requested(bundlecaps):
204 204 # When requesting a bundle2, getbundle returns a stream to make the
205 205 # wire-level function happier. We need to build a proper object
206 206 # from it in the local peer.
207 207 return bundle2.getunbundler(self.ui, cb)
208 208 else:
209 209 return changegroup.getunbundler('01', cb, None)
210 210
211 211 def heads(self):
212 212 return self._repo.heads()
213 213
214 214 def known(self, nodes):
215 215 return self._repo.known(nodes)
216 216
217 217 def listkeys(self, namespace):
218 218 return self._repo.listkeys(namespace)
219 219
220 220 def lookup(self, key):
221 221 return self._repo.lookup(key)
222 222
223 223 def pushkey(self, namespace, key, old, new):
224 224 return self._repo.pushkey(namespace, key, old, new)
225 225
226 226 def stream_out(self):
227 227 raise error.Abort(_('cannot perform stream clone against local '
228 228 'peer'))
229 229
230 230 def unbundle(self, cg, heads, url):
231 231 """apply a bundle on a repo
232 232
233 233 This function handles the repo locking itself."""
234 234 try:
235 235 try:
236 236 cg = exchange.readbundle(self.ui, cg, None)
237 237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 238 if util.safehasattr(ret, 'getchunks'):
239 239 # This is a bundle20 object, turn it into an unbundler.
240 240 # This little dance should be dropped eventually when the
241 241 # API is finally improved.
242 242 stream = util.chunkbuffer(ret.getchunks())
243 243 ret = bundle2.getunbundler(self.ui, stream)
244 244 return ret
245 245 except Exception as exc:
246 246 # If the exception contains output salvaged from a bundle2
247 247 # reply, we need to make sure it is printed before continuing
248 248 # to fail. So we build a bundle2 with such output and consume
249 249 # it directly.
250 250 #
251 251 # This is not very elegant but allows a "simple" solution for
252 252 # issue4594
253 253 output = getattr(exc, '_bundle2salvagedoutput', ())
254 254 if output:
255 255 bundler = bundle2.bundle20(self._repo.ui)
256 256 for out in output:
257 257 bundler.addpart(out)
258 258 stream = util.chunkbuffer(bundler.getchunks())
259 259 b = bundle2.getunbundler(self.ui, stream)
260 260 bundle2.processbundle(self._repo, b)
261 261 raise
262 262 except error.PushRaced as exc:
263 263 raise error.ResponseError(_('push failed:'), str(exc))
264 264
265 265 # End of _basewirecommands interface.
266 266
267 267 # Begin of peer interface.
268 268
269 269 def iterbatch(self):
270 270 return peer.localiterbatcher(self)
271 271
272 272 # End of peer interface.
273 273
274 274 class locallegacypeer(repository.legacypeer, localpeer):
275 275 '''peer extension which implements legacy methods too; used for tests with
276 276 restricted capabilities'''
277 277
278 278 def __init__(self, repo):
279 279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280 280
281 281 # Begin of baselegacywirecommands interface.
282 282
283 283 def between(self, pairs):
284 284 return self._repo.between(pairs)
285 285
286 286 def branches(self, nodes):
287 287 return self._repo.branches(nodes)
288 288
289 289 def changegroup(self, basenodes, source):
290 290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 291 missingheads=self._repo.heads())
292 292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293 293
294 294 def changegroupsubset(self, bases, heads, source):
295 295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 296 missingheads=heads)
297 297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298 298
299 299 # End of baselegacywirecommands interface.
300 300
301 301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 302 # clients.
303 303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304 304
305 305 class localrepository(object):
306 306
307 307 supportedformats = {
308 308 'revlogv1',
309 309 'generaldelta',
310 310 'treemanifest',
311 311 'manifestv2',
312 312 REVLOGV2_REQUIREMENT,
313 313 }
314 314 _basesupported = supportedformats | {
315 315 'store',
316 316 'fncache',
317 317 'shared',
318 318 'relshared',
319 319 'dotencode',
320 320 'exp-sparse',
321 321 }
322 322 openerreqs = {
323 323 'revlogv1',
324 324 'generaldelta',
325 325 'treemanifest',
326 326 'manifestv2',
327 327 }
328 328
329 329 # a list of (ui, featureset) functions.
330 330 # only functions defined in modules of enabled extensions are invoked
331 331 featuresetupfuncs = set()
332 332
333 333 # list of prefixes for files which can be written without 'wlock'
334 334 # Extensions should extend this list when needed
335 335 _wlockfreeprefix = {
336 336 # We might consider requiring 'wlock' for the next
337 337 # two, but pretty much all the existing code assumes
338 338 # wlock is not needed so we keep them excluded for
339 339 # now.
340 340 'hgrc',
341 341 'requires',
342 342 # XXX cache is a complicated business; someone
343 343 # should investigate this in depth at some point
344 344 'cache/',
345 345 # XXX shouldn't dirstate be covered by the wlock?
346 346 'dirstate',
347 347 # XXX bisect was still a bit too messy at the time
348 348 # this changeset was introduced. Someone should fix
349 349 # the remaining bit and drop this line
350 350 'bisect.state',
351 351 }
352 352
353 353 def __init__(self, baseui, path, create=False):
354 354 self.requirements = set()
355 355 self.filtername = None
356 356 # wvfs: rooted at the repository root, used to access the working copy
357 357 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
358 358 # vfs: rooted at .hg, used to access repo files outside of .hg/store
359 359 self.vfs = None
360 360 # svfs: usually rooted at .hg/store, used to access repository history
361 361 # If this is a shared repository, this vfs may point to another
362 362 # repository's .hg/store directory.
363 363 self.svfs = None
364 364 self.root = self.wvfs.base
365 365 self.path = self.wvfs.join(".hg")
366 366 self.origroot = path
367 367 # This is only used by context.workingctx.match in order to
368 368 # detect files in subrepos.
369 369 self.auditor = pathutil.pathauditor(
370 370 self.root, callback=self._checknested)
371 371 # This is only used by context.basectx.match in order to detect
372 372 # files in subrepos.
373 373 self.nofsauditor = pathutil.pathauditor(
374 374 self.root, callback=self._checknested, realfs=False, cached=True)
375 375 self.baseui = baseui
376 376 self.ui = baseui.copy()
377 377 self.ui.copy = baseui.copy # prevent copying repo configuration
378 378 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
379 379 if (self.ui.configbool('devel', 'all-warnings') or
380 380 self.ui.configbool('devel', 'check-locks')):
381 381 self.vfs.audit = self._getvfsward(self.vfs.audit)
382 382 # A list of callbacks to shape the phase if no data were found.
383 383 # Callbacks are in the form: func(repo, roots) --> processed root.
384 384 # This list is to be filled by extensions during repo setup
385 385 self._phasedefaults = []
386 386 try:
387 387 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
388 388 self._loadextensions()
389 389 except IOError:
390 390 pass
391 391
392 392 if self.featuresetupfuncs:
393 393 self.supported = set(self._basesupported) # use private copy
394 394 extmods = set(m.__name__ for n, m
395 395 in extensions.extensions(self.ui))
396 396 for setupfunc in self.featuresetupfuncs:
397 397 if setupfunc.__module__ in extmods:
398 398 setupfunc(self.ui, self.supported)
399 399 else:
400 400 self.supported = self._basesupported
401 401 color.setup(self.ui)
402 402
403 403 # Add compression engines.
404 404 for name in util.compengines:
405 405 engine = util.compengines[name]
406 406 if engine.revlogheader():
407 407 self.supported.add('exp-compression-%s' % name)
408 408
409 409 if not self.vfs.isdir():
410 410 if create:
411 411 self.requirements = newreporequirements(self)
412 412
413 413 if not self.wvfs.exists():
414 414 self.wvfs.makedirs()
415 415 self.vfs.makedir(notindexed=True)
416 416
417 417 if 'store' in self.requirements:
418 418 self.vfs.mkdir("store")
419 419
420 420 # create an invalid changelog
421 421 self.vfs.append(
422 422 "00changelog.i",
423 423 '\0\0\0\2' # represents revlogv2
424 424 ' dummy changelog to prevent using the old repo layout'
425 425 )
426 426 else:
427 427 raise error.RepoError(_("repository %s not found") % path)
428 428 elif create:
429 429 raise error.RepoError(_("repository %s already exists") % path)
430 430 else:
431 431 try:
432 432 self.requirements = scmutil.readrequires(
433 433 self.vfs, self.supported)
434 434 except IOError as inst:
435 435 if inst.errno != errno.ENOENT:
436 436 raise
437 437
438 438 cachepath = self.vfs.join('cache')
439 439 self.sharedpath = self.path
440 440 try:
441 441 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
442 442 if 'relshared' in self.requirements:
443 443 sharedpath = self.vfs.join(sharedpath)
444 444 vfs = vfsmod.vfs(sharedpath, realpath=True)
445 445 cachepath = vfs.join('cache')
446 446 s = vfs.base
447 447 if not vfs.exists():
448 448 raise error.RepoError(
449 449 _('.hg/sharedpath points to nonexistent directory %s') % s)
450 450 self.sharedpath = s
451 451 except IOError as inst:
452 452 if inst.errno != errno.ENOENT:
453 453 raise
454 454
455 455 if 'exp-sparse' in self.requirements and not sparse.enabled:
456 456 raise error.RepoError(_('repository is using sparse feature but '
457 457 'sparse is not enabled; enable the '
458 458 '"sparse" extensions to access'))
459 459
460 460 self.store = store.store(
461 461 self.requirements, self.sharedpath,
462 462 lambda base: vfsmod.vfs(base, cacheaudited=True))
463 463 self.spath = self.store.path
464 464 self.svfs = self.store.vfs
465 465 self.sjoin = self.store.join
466 466 self.vfs.createmode = self.store.createmode
467 467 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
468 468 self.cachevfs.createmode = self.store.createmode
469 469 if (self.ui.configbool('devel', 'all-warnings') or
470 470 self.ui.configbool('devel', 'check-locks')):
471 471 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
472 472 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
473 473 else: # standard vfs
474 474 self.svfs.audit = self._getsvfsward(self.svfs.audit)
475 475 self._applyopenerreqs()
476 476 if create:
477 477 self._writerequirements()
478 478
479 479 self._dirstatevalidatewarned = False
480 480
481 481 self._branchcaches = {}
482 482 self._revbranchcache = None
483 483 self.filterpats = {}
484 484 self._datafilters = {}
485 485 self._transref = self._lockref = self._wlockref = None
486 486
487 487 # A cache for various files under .hg/ that tracks file changes,
488 488 # (used by the filecache decorator)
489 489 #
490 490 # Maps a property name to its util.filecacheentry
491 491 self._filecache = {}
492 492
493 493 # hold sets of revisions to be filtered
494 494 # should be cleared when something might have changed the filter value:
495 495 # - new changesets,
496 496 # - phase change,
497 497 # - new obsolescence marker,
498 498 # - working directory parent change,
499 499 # - bookmark changes
500 500 self.filteredrevcache = {}
501 501
502 502 # post-dirstate-status hooks
503 503 self._postdsstatus = []
504 504
505 505 # generic mapping between names and nodes
506 506 self.names = namespaces.namespaces()
507 507
508 508 # Key to signature value.
509 509 self._sparsesignaturecache = {}
510 510 # Signature to cached matcher instance.
511 511 self._sparsematchercache = {}
512 512
513 513 def _getvfsward(self, origfunc):
514 514 """build a ward for self.vfs"""
515 515 rref = weakref.ref(self)
516 516 def checkvfs(path, mode=None):
517 517 ret = origfunc(path, mode=mode)
518 518 repo = rref()
519 519 if (repo is None
520 520 or not util.safehasattr(repo, '_wlockref')
521 521 or not util.safehasattr(repo, '_lockref')):
522 522 return
523 523 if mode in (None, 'r', 'rb'):
524 524 return
525 525 if path.startswith(repo.path):
526 526 # truncate name relative to the repository (.hg)
527 527 path = path[len(repo.path) + 1:]
528 528 if path.startswith('cache/'):
529 529 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
530 530 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
531 531 if path.startswith('journal.'):
532 532 # journal is covered by 'lock'
533 533 if repo._currentlock(repo._lockref) is None:
534 534 repo.ui.develwarn('write with no lock: "%s"' % path,
535 535 stacklevel=2, config='check-locks')
536 536 elif repo._currentlock(repo._wlockref) is None:
537 537 # rest of vfs files are covered by 'wlock'
538 538 #
539 539 # exclude special files
540 540 for prefix in self._wlockfreeprefix:
541 541 if path.startswith(prefix):
542 542 return
543 543 repo.ui.develwarn('write with no wlock: "%s"' % path,
544 544 stacklevel=2, config='check-locks')
545 545 return ret
546 546 return checkvfs
547 547
548 548 def _getsvfsward(self, origfunc):
549 549 """build a ward for self.svfs"""
550 550 rref = weakref.ref(self)
551 551 def checksvfs(path, mode=None):
552 552 ret = origfunc(path, mode=mode)
553 553 repo = rref()
554 554 if repo is None or not util.safehasattr(repo, '_lockref'):
555 555 return
556 556 if mode in (None, 'r', 'rb'):
557 557 return
558 558 if path.startswith(repo.sharedpath):
559 559 # truncate name relative to the repository (.hg)
560 560 path = path[len(repo.sharedpath) + 1:]
561 561 if repo._currentlock(repo._lockref) is None:
562 562 repo.ui.develwarn('write with no lock: "%s"' % path,
563 563 stacklevel=3)
564 564 return ret
565 565 return checksvfs
566 566
567 567 def close(self):
568 568 self._writecaches()
569 569
570 570 def _loadextensions(self):
571 571 extensions.loadall(self.ui)
572 572
573 573 def _writecaches(self):
574 574 if self._revbranchcache:
575 575 self._revbranchcache.write()
576 576
577 577 def _restrictcapabilities(self, caps):
578 578 if self.ui.configbool('experimental', 'bundle2-advertise'):
579 579 caps = set(caps)
580 580 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
581 581 caps.add('bundle2=' + urlreq.quote(capsblob))
582 582 return caps
583 583
584 584 def _applyopenerreqs(self):
585 585 self.svfs.options = dict((r, 1) for r in self.requirements
586 586 if r in self.openerreqs)
587 587 # experimental config: format.chunkcachesize
588 588 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
589 589 if chunkcachesize is not None:
590 590 self.svfs.options['chunkcachesize'] = chunkcachesize
591 591 # experimental config: format.maxchainlen
592 592 maxchainlen = self.ui.configint('format', 'maxchainlen')
593 593 if maxchainlen is not None:
594 594 self.svfs.options['maxchainlen'] = maxchainlen
595 595 # experimental config: format.manifestcachesize
596 596 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
597 597 if manifestcachesize is not None:
598 598 self.svfs.options['manifestcachesize'] = manifestcachesize
599 599 # experimental config: format.aggressivemergedeltas
600 600 aggressivemergedeltas = self.ui.configbool('format',
601 601 'aggressivemergedeltas')
602 602 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
603 603 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
604 604 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
605 605 if 0 <= chainspan:
606 606 self.svfs.options['maxdeltachainspan'] = chainspan
607 607 mmapindexthreshold = self.ui.configbytes('experimental',
608 608 'mmapindexthreshold')
609 609 if mmapindexthreshold is not None:
610 610 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
611 611 withsparseread = self.ui.configbool('experimental', 'sparse-read')
612 612 srdensitythres = float(self.ui.config('experimental',
613 613 'sparse-read.density-threshold'))
614 614 srmingapsize = self.ui.configbytes('experimental',
615 615 'sparse-read.min-gap-size')
616 616 self.svfs.options['with-sparse-read'] = withsparseread
617 617 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
618 618 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
619 619
620 620 for r in self.requirements:
621 621 if r.startswith('exp-compression-'):
622 622 self.svfs.options['compengine'] = r[len('exp-compression-'):]
623 623
624 624 # TODO move "revlogv2" to openerreqs once finalized.
625 625 if REVLOGV2_REQUIREMENT in self.requirements:
626 626 self.svfs.options['revlogv2'] = True
627 627
628 628 def _writerequirements(self):
629 629 scmutil.writerequires(self.vfs, self.requirements)
630 630
631 631 def _checknested(self, path):
632 632 """Determine if path is a legal nested repository."""
633 633 if not path.startswith(self.root):
634 634 return False
635 635 subpath = path[len(self.root) + 1:]
636 636 normsubpath = util.pconvert(subpath)
637 637
638 638 # XXX: Checking against the current working copy is wrong in
639 639 # the sense that it can reject things like
640 640 #
641 641 # $ hg cat -r 10 sub/x.txt
642 642 #
643 643 # if sub/ is no longer a subrepository in the working copy
644 644 # parent revision.
645 645 #
646 646 # However, it can of course also allow things that would have
647 647 # been rejected before, such as the above cat command if sub/
648 648 # is a subrepository now, but was a normal directory before.
649 649 # The old path auditor would have rejected by mistake since it
650 650 # panics when it sees sub/.hg/.
651 651 #
652 652 # All in all, checking against the working copy seems sensible
653 653 # since we want to prevent access to nested repositories on
654 654 # the filesystem *now*.
655 655 ctx = self[None]
656 656 parts = util.splitpath(subpath)
657 657 while parts:
658 658 prefix = '/'.join(parts)
659 659 if prefix in ctx.substate:
660 660 if prefix == normsubpath:
661 661 return True
662 662 else:
663 663 sub = ctx.sub(prefix)
664 664 return sub.checknested(subpath[len(prefix) + 1:])
665 665 else:
666 666 parts.pop()
667 667 return False
668 668
669 669 def peer(self):
670 670 return localpeer(self) # not cached to avoid reference cycle
671 671
672 672 def unfiltered(self):
673 673 """Return unfiltered version of the repository
674 674
675 675 Intended to be overwritten by filtered repo."""
676 676 return self
677 677
678 678 def filtered(self, name, visibilityexceptions=None):
679 679 """Return a filtered version of a repository"""
680 680 cls = repoview.newtype(self.unfiltered().__class__)
681 681 return cls(self, name, visibilityexceptions)
682 682
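# Hypothetical usage sketch for the two entry points above; 'served'
# and 'visible' are standard repoview filter names, and unfiltered()
# always resolves to the same underlying repository object:
#
#     served = repo.filtered('served')    # e.g. also hides secret csets
#     visible = repo.filtered('visible')  # hides obsolete changesets
#     assert served.unfiltered() is repo.unfiltered()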
683 683 @repofilecache('bookmarks', 'bookmarks.current')
684 684 def _bookmarks(self):
685 685 return bookmarks.bmstore(self)
686 686
687 687 @property
688 688 def _activebookmark(self):
689 689 return self._bookmarks.active
690 690
691 691 # _phasesets depend on changelog. What we need is to call
692 692 # _phasecache.invalidate() if '00changelog.i' was changed, but it
693 693 # can't be easily expressed in the filecache mechanism.
694 694 @storecache('phaseroots', '00changelog.i')
695 695 def _phasecache(self):
696 696 return phases.phasecache(self, self._phasedefaults)
697 697
698 698 @storecache('obsstore')
699 699 def obsstore(self):
700 700 return obsolete.makestore(self.ui, self)
701 701
702 702 @storecache('00changelog.i')
703 703 def changelog(self):
704 704 return changelog.changelog(self.svfs,
705 705 trypending=txnutil.mayhavepending(self.root))
706 706
707 707 def _constructmanifest(self):
708 708 # This is a temporary function while we migrate from manifest to
709 709 # manifestlog. It allows bundlerepo and unionrepo to intercept the
710 710 # manifest creation.
711 711 return manifest.manifestrevlog(self.svfs)
712 712
713 713 @storecache('00manifest.i')
714 714 def manifestlog(self):
715 715 return manifest.manifestlog(self.svfs, self)
716 716
717 717 @repofilecache('dirstate')
718 718 def dirstate(self):
719 719 sparsematchfn = lambda: sparse.matcher(self)
720 720
721 721 return dirstate.dirstate(self.vfs, self.ui, self.root,
722 722 self._dirstatevalidate, sparsematchfn)
723 723
724 724 def _dirstatevalidate(self, node):
725 725 try:
726 726 self.changelog.rev(node)
727 727 return node
728 728 except error.LookupError:
729 729 if not self._dirstatevalidatewarned:
730 730 self._dirstatevalidatewarned = True
731 731 self.ui.warn(_("warning: ignoring unknown"
732 732 " working parent %s!\n") % short(node))
733 733 return nullid
734 734
735 735 def __getitem__(self, changeid):
736 736 if changeid is None:
737 737 return context.workingctx(self)
738 738 if isinstance(changeid, slice):
739 739 # wdirrev isn't contiguous so the slice shouldn't include it
740 740 return [context.changectx(self, i)
741 741 for i in xrange(*changeid.indices(len(self)))
742 742 if i not in self.changelog.filteredrevs]
743 743 try:
744 744 return context.changectx(self, changeid)
745 745 except error.WdirUnsupported:
746 746 return context.workingctx(self)
747 747
748 748 def __contains__(self, changeid):
749 749 """True if the given changeid exists
750 750
751 751 error.LookupError is raised if an ambiguous node is specified.
752 752 """
753 753 try:
754 754 self[changeid]
755 755 return True
756 756 except error.RepoLookupError:
757 757 return False
758 758
759 759 def __nonzero__(self):
760 760 return True
761 761
762 762 __bool__ = __nonzero__
763 763
764 764 def __len__(self):
765 return len(self.changelog)
765 # no need to pay the cost of repoview.changelog
766 unfi = self.unfiltered()
767 return len(unfi.changelog)
766 768
767 769 def __iter__(self):
768 770 return iter(self.changelog)
769 771
770 772 def revs(self, expr, *args):
771 773 '''Find revisions matching a revset.
772 774
773 775 The revset is specified as a string ``expr`` that may contain
774 776 %-formatting to escape certain types. See ``revsetlang.formatspec``.
775 777
776 778 Revset aliases from the configuration are not expanded. To expand
777 779 user aliases, consider calling ``scmutil.revrange()`` or
778 780 ``repo.anyrevs([expr], user=True)``.
779 781
780 782 Returns a revset.abstractsmartset, which is a list-like interface
781 783 that contains integer revisions.
782 784 '''
783 785 expr = revsetlang.formatspec(expr, *args)
784 786 m = revset.match(None, expr)
785 787 return m(self)
786 788
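# Hedged examples of the %-formatting accepted by revs(): %d escapes an
# integer revision and %s a quoted string (revsetlang.formatspec has the
# full table). 'startrev' here is an assumed caller-supplied revision:
#
#     repo.revs('draft() and descendants(%d)', startrev)
#     repo.revs('branch(%s) and not obsolete()', 'default')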
787 789 def set(self, expr, *args):
788 790 '''Find revisions matching a revset and emit changectx instances.
789 791
790 792 This is a convenience wrapper around ``revs()`` that iterates the
791 793 result and is a generator of changectx instances.
792 794
793 795 Revset aliases from the configuration are not expanded. To expand
794 796 user aliases, consider calling ``scmutil.revrange()``.
795 797 '''
796 798 for r in self.revs(expr, *args):
797 799 yield self[r]
798 800
799 801 def anyrevs(self, specs, user=False, localalias=None):
800 802 '''Find revisions matching one of the given revsets.
801 803
802 804 Revset aliases from the configuration are not expanded by default. To
803 805 expand user aliases, specify ``user=True``. To provide some local
804 806 definitions overriding user aliases, set ``localalias`` to
805 807 ``{name: definitionstring}``.
806 808 '''
807 809 if user:
808 810 m = revset.matchany(self.ui, specs, repo=self,
809 811 localalias=localalias)
810 812 else:
811 813 m = revset.matchany(None, specs, localalias=localalias)
812 814 return m(self)
813 815
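# Hypothetical sketch of anyrevs() with a local alias shadowing any
# user-configured alias of the same name ('releases' is an assumed
# alias, not one defined by Mercurial itself):
#
#     aliases = {'releases': 'tag() and public()'}
#     repo.anyrevs(['releases', '.'], user=True, localalias=aliases)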
814 816 def url(self):
815 817 return 'file:' + self.root
816 818
817 819 def hook(self, name, throw=False, **args):
818 820 """Call a hook, passing this repo instance.
819 821
820 822 This is a convenience method to aid invoking hooks. Extensions likely
821 823 won't call this unless they have registered a custom hook or are
822 824 replacing code that is expected to call a hook.
823 825 """
824 826 return hook.hook(self.ui, self, name, throw, **args)
825 827
826 828 @filteredpropertycache
827 829 def _tagscache(self):
828 830 '''Returns a tagscache object that contains various tags related
829 831 caches.'''
830 832
831 833 # This simplifies its cache management by having one decorated
832 834 # function (this one) and the rest simply fetch things from it.
833 835 class tagscache(object):
834 836 def __init__(self):
835 837 # These two define the set of tags for this repository. tags
836 838 # maps tag name to node; tagtypes maps tag name to 'global' or
837 839 # 'local'. (Global tags are defined by .hgtags across all
838 840 # heads, and local tags are defined in .hg/localtags.)
839 841 # They constitute the in-memory cache of tags.
840 842 self.tags = self.tagtypes = None
841 843
842 844 self.nodetagscache = self.tagslist = None
843 845
844 846 cache = tagscache()
845 847 cache.tags, cache.tagtypes = self._findtags()
846 848
847 849 return cache
848 850
849 851 def tags(self):
850 852 '''return a mapping of tag to node'''
851 853 t = {}
852 854 if self.changelog.filteredrevs:
853 855 tags, tt = self._findtags()
854 856 else:
855 857 tags = self._tagscache.tags
856 858 for k, v in tags.iteritems():
857 859 try:
858 860 # ignore tags to unknown nodes
859 861 self.changelog.rev(v)
860 862 t[k] = v
861 863 except (error.LookupError, ValueError):
862 864 pass
863 865 return t
864 866
865 867 def _findtags(self):
866 868 '''Do the hard work of finding tags. Return a pair of dicts
867 869 (tags, tagtypes) where tags maps tag name to node, and tagtypes
868 870 maps tag name to a string like \'global\' or \'local\'.
869 871 Subclasses or extensions are free to add their own tags, but
870 872 should be aware that the returned dicts will be retained for the
871 873 duration of the localrepo object.'''
872 874
873 875 # XXX what tagtype should subclasses/extensions use? Currently
874 876 # mq and bookmarks add tags, but do not set the tagtype at all.
875 877 # Should each extension invent its own tag type? Should there
876 878 # be one tagtype for all such "virtual" tags? Or is the status
877 879 # quo fine?
878 880
879 881
880 882 # map tag name to (node, hist)
881 883 alltags = tagsmod.findglobaltags(self.ui, self)
882 884 # map tag name to tag type
883 885 tagtypes = dict((tag, 'global') for tag in alltags)
884 886
885 887 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
886 888
887 889 # Build the return dicts. Have to re-encode tag names because
888 890 # the tags module always uses UTF-8 (in order not to lose info
889 891 # writing to the cache), but the rest of Mercurial wants them in
890 892 # local encoding.
891 893 tags = {}
892 894 for (name, (node, hist)) in alltags.iteritems():
893 895 if node != nullid:
894 896 tags[encoding.tolocal(name)] = node
895 897 tags['tip'] = self.changelog.tip()
896 898 tagtypes = dict([(encoding.tolocal(name), value)
897 899 for (name, value) in tagtypes.iteritems()])
898 900 return (tags, tagtypes)
899 901
900 902 def tagtype(self, tagname):
901 903 '''
902 904 return the type of the given tag. result can be:
903 905
904 906 'local' : a local tag
905 907 'global' : a global tag
906 908 None : tag does not exist
907 909 '''
908 910
909 911 return self._tagscache.tagtypes.get(tagname)
910 912
911 913 def tagslist(self):
912 914 '''return a list of tags ordered by revision'''
913 915 if not self._tagscache.tagslist:
914 916 l = []
915 917 for t, n in self.tags().iteritems():
916 918 l.append((self.changelog.rev(n), t, n))
917 919 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
918 920
919 921 return self._tagscache.tagslist
920 922
921 923 def nodetags(self, node):
922 924 '''return the tags associated with a node'''
923 925 if not self._tagscache.nodetagscache:
924 926 nodetagscache = {}
925 927 for t, n in self._tagscache.tags.iteritems():
926 928 nodetagscache.setdefault(n, []).append(t)
927 929 for tags in nodetagscache.itervalues():
928 930 tags.sort()
929 931 self._tagscache.nodetagscache = nodetagscache
930 932 return self._tagscache.nodetagscache.get(node, [])
931 933
932 934 def nodebookmarks(self, node):
933 935 """return the list of bookmarks pointing to the specified node"""
934 936 marks = []
935 937 for bookmark, n in self._bookmarks.iteritems():
936 938 if n == node:
937 939 marks.append(bookmark)
938 940 return sorted(marks)
939 941
940 942 def branchmap(self):
941 943 '''returns a dictionary {branch: [branchheads]} with branchheads
942 944 ordered by increasing revision number'''
943 945 branchmap.updatecache(self)
944 946 return self._branchcaches[self.filtername]
945 947
946 948 @unfilteredmethod
947 949 def revbranchcache(self):
948 950 if not self._revbranchcache:
949 951 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
950 952 return self._revbranchcache
951 953
952 954 def branchtip(self, branch, ignoremissing=False):
953 955 '''return the tip node for a given branch
954 956
955 957 If ignoremissing is True, then this method will not raise an error.
956 958 This is helpful for callers that only expect None for a missing branch
957 959 (e.g. namespace).
958 960
959 961 '''
960 962 try:
961 963 return self.branchmap().branchtip(branch)
962 964 except KeyError:
963 965 if not ignoremissing:
964 966 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
965 967 else:
966 968 pass
967 969
968 970 def lookup(self, key):
969 971 return self[key].node()
970 972
971 973 def lookupbranch(self, key, remote=None):
972 974 repo = remote or self
973 975 if key in repo.branchmap():
974 976 return key
975 977
976 978 repo = (remote and remote.local()) and remote or self
977 979 return repo[key].branch()
978 980
979 981 def known(self, nodes):
980 982 cl = self.changelog
981 983 nm = cl.nodemap
982 984 filtered = cl.filteredrevs
983 985 result = []
984 986 for n in nodes:
985 987 r = nm.get(n)
986 988 resp = not (r is None or r in filtered)
987 989 result.append(resp)
988 990 return result
989 991
990 992 def local(self):
991 993 return self
992 994
993 995 def publishing(self):
994 996 # it's safe (and desirable) to trust the publish flag unconditionally
995 997 # so that we don't finalize changes shared between users via ssh or nfs
996 998 return self.ui.configbool('phases', 'publish', untrusted=True)
997 999
998 1000 def cancopy(self):
999 1001 # so statichttprepo's override of local() works
1000 1002 if not self.local():
1001 1003 return False
1002 1004 if not self.publishing():
1003 1005 return True
1004 1006 # if publishing we can't copy if there is filtered content
1005 1007 return not self.filtered('visible').changelog.filteredrevs
1006 1008
1007 1009 def shared(self):
1008 1010 '''the type of shared repository (None if not shared)'''
1009 1011 if self.sharedpath != self.path:
1010 1012 return 'store'
1011 1013 return None
1012 1014
1013 1015 def wjoin(self, f, *insidef):
1014 1016 return self.vfs.reljoin(self.root, f, *insidef)
1015 1017
1016 1018 def file(self, f):
1017 1019 if f[0] == '/':
1018 1020 f = f[1:]
1019 1021 return filelog.filelog(self.svfs, f)
1020 1022
1021 1023 def changectx(self, changeid):
1022 1024 return self[changeid]
1023 1025
1024 1026 def setparents(self, p1, p2=nullid):
1025 1027 with self.dirstate.parentchange():
1026 1028 copies = self.dirstate.setparents(p1, p2)
1027 1029 pctx = self[p1]
1028 1030 if copies:
1029 1031 # Adjust copy records; the dirstate cannot do it, as it
1030 1032 # requires access to the parents' manifests. Preserve them
1031 1033 # only for entries added to the first parent.
1032 1034 for f in copies:
1033 1035 if f not in pctx and copies[f] in pctx:
1034 1036 self.dirstate.copy(copies[f], f)
1035 1037 if p2 == nullid:
1036 1038 for f, s in sorted(self.dirstate.copies().items()):
1037 1039 if f not in pctx and s not in pctx:
1038 1040 self.dirstate.copy(None, f)
1039 1041
1040 1042 def filectx(self, path, changeid=None, fileid=None):
1041 1043 """changeid can be a changeset revision, node, or tag.
1042 1044 fileid can be a file revision or node."""
1043 1045 return context.filectx(self, path, changeid, fileid)
1044 1046
1045 1047 def getcwd(self):
1046 1048 return self.dirstate.getcwd()
1047 1049
1048 1050 def pathto(self, f, cwd=None):
1049 1051 return self.dirstate.pathto(f, cwd)
1050 1052
1051 1053 def _loadfilter(self, filter):
1052 1054 if filter not in self.filterpats:
1053 1055 l = []
1054 1056 for pat, cmd in self.ui.configitems(filter):
1055 1057 if cmd == '!':
1056 1058 continue
1057 1059 mf = matchmod.match(self.root, '', [pat])
1058 1060 fn = None
1059 1061 params = cmd
1060 1062 for name, filterfn in self._datafilters.iteritems():
1061 1063 if cmd.startswith(name):
1062 1064 fn = filterfn
1063 1065 params = cmd[len(name):].lstrip()
1064 1066 break
1065 1067 if not fn:
1066 1068 fn = lambda s, c, **kwargs: util.filter(s, c)
1067 1069 # Wrap old filters not supporting keyword arguments
1068 1070 if not inspect.getargspec(fn)[2]:
1069 1071 oldfn = fn
1070 1072 fn = lambda s, c, **kwargs: oldfn(s, c)
1071 1073 l.append((mf, fn, params))
1072 1074 self.filterpats[filter] = l
1073 1075 return self.filterpats[filter]
1074 1076
1075 1077 def _filter(self, filterpats, filename, data):
1076 1078 for mf, fn, cmd in filterpats:
1077 1079 if mf(filename):
1078 1080 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1079 1081 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1080 1082 break
1081 1083
1082 1084 return data
1083 1085
1084 1086 @unfilteredpropertycache
1085 1087 def _encodefilterpats(self):
1086 1088 return self._loadfilter('encode')
1087 1089
1088 1090 @unfilteredpropertycache
1089 1091 def _decodefilterpats(self):
1090 1092 return self._loadfilter('decode')
1091 1093
1092 1094 def adddatafilter(self, name, filter):
1093 1095 self._datafilters[name] = filter
1094 1096
1095 1097 def wread(self, filename):
1096 1098 if self.wvfs.islink(filename):
1097 1099 data = self.wvfs.readlink(filename)
1098 1100 else:
1099 1101 data = self.wvfs.read(filename)
1100 1102 return self._filter(self._encodefilterpats, filename, data)
1101 1103
1102 1104 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1103 1105 """write ``data`` into ``filename`` in the working directory
1104 1106
1105 1107 This returns the length of the written (maybe decoded) data.
1106 1108 """
1107 1109 data = self._filter(self._decodefilterpats, filename, data)
1108 1110 if 'l' in flags:
1109 1111 self.wvfs.symlink(data, filename)
1110 1112 else:
1111 1113 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1112 1114 **kwargs)
1113 1115 if 'x' in flags:
1114 1116 self.wvfs.setflags(filename, False, True)
1115 1117 else:
1116 1118 self.wvfs.setflags(filename, False, False)
1117 1119 return len(data)
1118 1120
1119 1121 def wwritedata(self, filename, data):
1120 1122 return self._filter(self._decodefilterpats, filename, data)
1121 1123
1122 1124 def currenttransaction(self):
1123 1125 """return the current transaction or None if non exists"""
1124 1126 if self._transref:
1125 1127 tr = self._transref()
1126 1128 else:
1127 1129 tr = None
1128 1130
1129 1131 if tr and tr.running():
1130 1132 return tr
1131 1133 return None
1132 1134
1133 1135 def transaction(self, desc, report=None):
1134 1136 if (self.ui.configbool('devel', 'all-warnings')
1135 1137 or self.ui.configbool('devel', 'check-locks')):
1136 1138 if self._currentlock(self._lockref) is None:
1137 1139 raise error.ProgrammingError('transaction requires locking')
1138 1140 tr = self.currenttransaction()
1139 1141 if tr is not None:
1140 1142 return tr.nest()
1141 1143
1142 1144 # abort here if the journal already exists
1143 1145 if self.svfs.exists("journal"):
1144 1146 raise error.RepoError(
1145 1147 _("abandoned transaction found"),
1146 1148 hint=_("run 'hg recover' to clean up transaction"))
1147 1149
1148 1150 idbase = "%.40f#%f" % (random.random(), time.time())
1149 1151 ha = hex(hashlib.sha1(idbase).digest())
1150 1152 txnid = 'TXN:' + ha
1151 1153 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1152 1154
1153 1155 self._writejournal(desc)
1154 1156 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1155 1157 if report:
1156 1158 rp = report
1157 1159 else:
1158 1160 rp = self.ui.warn
1159 1161 vfsmap = {'plain': self.vfs} # root of .hg/
1160 1162 # we must avoid a cyclic reference between repo and transaction.
1161 1163 reporef = weakref.ref(self)
1162 1164 # Code to track tag movement
1163 1165 #
1164 1166 # Since tags are all handled as file content, it is actually quite hard
1165 1167 # to track these movements from a code perspective. So we fall back to
1166 1168 # tracking at the repository level. One could envision tracking changes
1167 1169 # to the '.hgtags' file through changegroup application, but that fails
1168 1170 # to cope with cases where a transaction exposes new heads without a
1169 1171 # changegroup being involved (e.g. phase movement).
1170 1172 #
1171 1173 # For now, we gate the feature behind a flag since this likely comes
1172 1174 # with performance impacts. The current code runs more often than needed
1173 1175 # and does not use caches as much as it could. The current focus is on
1174 1176 # the behavior of the feature so we disable it by default. The flag
1175 1177 # will be removed when we are happy with the performance impact.
1176 1178 #
1177 1179 # Once this feature is no longer experimental move the following
1178 1180 # documentation to the appropriate help section:
1179 1181 #
1180 1182 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1181 1183 # tags (new or changed or deleted tags). In addition the details of
1182 1184 # these changes are made available in a file at:
1183 1185 # ``REPOROOT/.hg/changes/tags.changes``.
1184 1186 # Make sure you check for HG_TAG_MOVED before reading that file as it
1185 1187 # might exist from a previous transaction even if no tags were touched
1186 1188 # in this one. Changes are recorded in a line-based format::
1187 1189 #
1188 1190 # <action> <hex-node> <tag-name>\n
1189 1191 #
1190 1192 # Actions are defined as follows:
1191 1193 # "-R": tag is removed,
1192 1194 # "+A": tag is added,
1193 1195 # "-M": tag is moved (old value),
1194 1196 # "+M": tag is moved (new value),
1195 1197 tracktags = lambda x: None
1196 1198 # experimental config: experimental.hook-track-tags
1197 1199 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1198 1200 if desc != 'strip' and shouldtracktags:
1199 1201 oldheads = self.changelog.headrevs()
1200 1202 def tracktags(tr2):
1201 1203 repo = reporef()
1202 1204 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1203 1205 newheads = repo.changelog.headrevs()
1204 1206 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1205 1207 # note: we compare lists here.
1206 1208 # As we do it only once, building a set would not be cheaper
1207 1209 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1208 1210 if changes:
1209 1211 tr2.hookargs['tag_moved'] = '1'
1210 1212 with repo.vfs('changes/tags.changes', 'w',
1211 1213 atomictemp=True) as changesfile:
1212 1214 # note: we do not register the file with the transaction
1213 1215 # because we need it to still exist when the transaction
1214 1216 # is closed (for txnclose hooks)
1215 1217 tagsmod.writediff(changesfile, changes)
1216 1218 def validate(tr2):
1217 1219 """will run pre-closing hooks"""
1218 1220 # XXX the transaction API is a bit lacking here so we take a hacky
1219 1221 # path for now
1220 1222 #
1221 1223 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1222 1224 # dict is copied before these run. In addition we need the data
1223 1225 # available to in-memory hooks too.
1224 1226 #
1225 1227 # Moreover, we also need to make sure this runs before txnclose
1226 1228 # hooks and there is no "pending" mechanism that would execute
1227 1229 # logic only if hooks are about to run.
1228 1230 #
1229 1231 # Fixing this limitation of the transaction is also needed to track
1230 1232 # other families of changes (bookmarks, phases, obsolescence).
1231 1233 #
1232 1234 # This will have to be fixed before we remove the experimental
1233 1235 # gating.
1234 1236 tracktags(tr2)
1235 1237 repo = reporef()
1236 1238 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1237 1239 scmutil.enforcesinglehead(repo, tr2, desc)
1238 1240 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1239 1241 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1240 1242 args = tr.hookargs.copy()
1241 1243 args.update(bookmarks.preparehookargs(name, old, new))
1242 1244 repo.hook('pretxnclose-bookmark', throw=True,
1243 1245 txnname=desc,
1244 1246 **pycompat.strkwargs(args))
1245 1247 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1246 1248 cl = repo.unfiltered().changelog
1247 1249 for rev, (old, new) in tr.changes['phases'].items():
1248 1250 args = tr.hookargs.copy()
1249 1251 node = hex(cl.node(rev))
1250 1252 args.update(phases.preparehookargs(node, old, new))
1251 1253 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1252 1254 **pycompat.strkwargs(args))
1253 1255
1254 1256 repo.hook('pretxnclose', throw=True,
1255 1257 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1256 1258 def releasefn(tr, success):
1257 1259 repo = reporef()
1258 1260 if success:
1259 1261 # this should be explicitly invoked here, because
1260 1262 # in-memory changes aren't written out at closing
1261 1263 # transaction, if tr.addfilegenerator (via
1262 1264 # dirstate.write or so) isn't invoked while
1263 1265 # transaction running
1264 1266 repo.dirstate.write(None)
1265 1267 else:
1266 1268 # discard all changes (including ones already written
1267 1269 # out) in this transaction
1268 1270 repo.dirstate.restorebackup(None, 'journal.dirstate')
1269 1271
1270 1272 repo.invalidate(clearfilecache=True)
1271 1273
1272 1274 tr = transaction.transaction(rp, self.svfs, vfsmap,
1273 1275 "journal",
1274 1276 "undo",
1275 1277 aftertrans(renames),
1276 1278 self.store.createmode,
1277 1279 validator=validate,
1278 1280 releasefn=releasefn,
1279 1281 checkambigfiles=_cachedfiles)
1280 1282 tr.changes['revs'] = xrange(0, 0)
1281 1283 tr.changes['obsmarkers'] = set()
1282 1284 tr.changes['phases'] = {}
1283 1285 tr.changes['bookmarks'] = {}
1284 1286
1285 1287 tr.hookargs['txnid'] = txnid
1286 1288 # note: writing the fncache only during finalize means that the file is
1287 1289 # outdated when running hooks. As fncache is used for streaming clone,
1288 1290 # this is not expected to break anything that happens during the hooks.
1289 1291 tr.addfinalize('flush-fncache', self.store.write)
1290 1292 def txnclosehook(tr2):
1291 1293 """To be run if transaction is successful, will schedule a hook run
1292 1294 """
1293 1295 # Don't reference tr2 in hook() so we don't hold a reference.
1294 1296 # This reduces memory consumption when there are multiple
1295 1297 # transactions per lock. This can likely go away if issue5045
1296 1298 # fixes the function accumulation.
1297 1299 hookargs = tr2.hookargs
1298 1300
1299 1301 def hookfunc():
1300 1302 repo = reporef()
1301 1303 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1302 1304 bmchanges = sorted(tr.changes['bookmarks'].items())
1303 1305 for name, (old, new) in bmchanges:
1304 1306 args = tr.hookargs.copy()
1305 1307 args.update(bookmarks.preparehookargs(name, old, new))
1306 1308 repo.hook('txnclose-bookmark', throw=False,
1307 1309 txnname=desc, **pycompat.strkwargs(args))
1308 1310
1309 1311 if hook.hashook(repo.ui, 'txnclose-phase'):
1310 1312 cl = repo.unfiltered().changelog
1311 1313 phasemv = sorted(tr.changes['phases'].items())
1312 1314 for rev, (old, new) in phasemv:
1313 1315 args = tr.hookargs.copy()
1314 1316 node = hex(cl.node(rev))
1315 1317 args.update(phases.preparehookargs(node, old, new))
1316 1318 repo.hook('txnclose-phase', throw=False, txnname=desc,
1317 1319 **pycompat.strkwargs(args))
1318 1320
1319 1321 repo.hook('txnclose', throw=False, txnname=desc,
1320 1322 **pycompat.strkwargs(hookargs))
1321 1323 reporef()._afterlock(hookfunc)
1322 1324 tr.addfinalize('txnclose-hook', txnclosehook)
1323 1325 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1324 1326 def txnaborthook(tr2):
1325 1327 """To be run if transaction is aborted
1326 1328 """
1327 1329 reporef().hook('txnabort', throw=False, txnname=desc,
1328 1330 **tr2.hookargs)
1329 1331 tr.addabort('txnabort-hook', txnaborthook)
1330 1332 # avoid eager cache invalidation. in-memory data should be identical
1331 1333 # to stored data if transaction has no error.
1332 1334 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1333 1335 self._transref = weakref.ref(tr)
1334 1336 scmutil.registersummarycallback(self, tr, desc)
1335 1337 return tr
1336 1338
1337 1339 def _journalfiles(self):
1338 1340 return ((self.svfs, 'journal'),
1339 1341 (self.vfs, 'journal.dirstate'),
1340 1342 (self.vfs, 'journal.branch'),
1341 1343 (self.vfs, 'journal.desc'),
1342 1344 (self.vfs, 'journal.bookmarks'),
1343 1345 (self.svfs, 'journal.phaseroots'))
1344 1346
1345 1347 def undofiles(self):
1346 1348 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1347 1349
1348 1350 @unfilteredmethod
1349 1351 def _writejournal(self, desc):
1350 1352 self.dirstate.savebackup(None, 'journal.dirstate')
1351 1353 self.vfs.write("journal.branch",
1352 1354 encoding.fromlocal(self.dirstate.branch()))
1353 1355 self.vfs.write("journal.desc",
1354 1356 "%d\n%s\n" % (len(self), desc))
1355 1357 self.vfs.write("journal.bookmarks",
1356 1358 self.vfs.tryread("bookmarks"))
1357 1359 self.svfs.write("journal.phaseroots",
1358 1360 self.svfs.tryread("phaseroots"))
1359 1361
1360 1362 def recover(self):
1361 1363 with self.lock():
1362 1364 if self.svfs.exists("journal"):
1363 1365 self.ui.status(_("rolling back interrupted transaction\n"))
1364 1366 vfsmap = {'': self.svfs,
1365 1367 'plain': self.vfs,}
1366 1368 transaction.rollback(self.svfs, vfsmap, "journal",
1367 1369 self.ui.warn,
1368 1370 checkambigfiles=_cachedfiles)
1369 1371 self.invalidate()
1370 1372 return True
1371 1373 else:
1372 1374 self.ui.warn(_("no interrupted transaction available\n"))
1373 1375 return False
1374 1376
1375 1377 def rollback(self, dryrun=False, force=False):
1376 1378 wlock = lock = dsguard = None
1377 1379 try:
1378 1380 wlock = self.wlock()
1379 1381 lock = self.lock()
1380 1382 if self.svfs.exists("undo"):
1381 1383 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1382 1384
1383 1385 return self._rollback(dryrun, force, dsguard)
1384 1386 else:
1385 1387 self.ui.warn(_("no rollback information available\n"))
1386 1388 return 1
1387 1389 finally:
1388 1390 release(dsguard, lock, wlock)
1389 1391
1390 1392 @unfilteredmethod # Until we get smarter cache management
1391 1393 def _rollback(self, dryrun, force, dsguard):
1392 1394 ui = self.ui
1393 1395 try:
1394 1396 args = self.vfs.read('undo.desc').splitlines()
1395 1397 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1396 1398 if len(args) >= 3:
1397 1399 detail = args[2]
1398 1400 oldtip = oldlen - 1
1399 1401
1400 1402 if detail and ui.verbose:
1401 1403 msg = (_('repository tip rolled back to revision %d'
1402 1404 ' (undo %s: %s)\n')
1403 1405 % (oldtip, desc, detail))
1404 1406 else:
1405 1407 msg = (_('repository tip rolled back to revision %d'
1406 1408 ' (undo %s)\n')
1407 1409 % (oldtip, desc))
1408 1410 except IOError:
1409 1411 msg = _('rolling back unknown transaction\n')
1410 1412 desc = None
1411 1413
1412 1414 if not force and self['.'] != self['tip'] and desc == 'commit':
1413 1415 raise error.Abort(
1414 1416 _('rollback of last commit while not checked out '
1415 1417 'may lose data'), hint=_('use -f to force'))
1416 1418
1417 1419 ui.status(msg)
1418 1420 if dryrun:
1419 1421 return 0
1420 1422
1421 1423 parents = self.dirstate.parents()
1422 1424 self.destroying()
1423 1425 vfsmap = {'plain': self.vfs, '': self.svfs}
1424 1426 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1425 1427 checkambigfiles=_cachedfiles)
1426 1428 if self.vfs.exists('undo.bookmarks'):
1427 1429 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1428 1430 if self.svfs.exists('undo.phaseroots'):
1429 1431 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1430 1432 self.invalidate()
1431 1433
1432 1434 parentgone = (parents[0] not in self.changelog.nodemap or
1433 1435 parents[1] not in self.changelog.nodemap)
1434 1436 if parentgone:
1435 1437 # prevent dirstateguard from overwriting already restored one
1436 1438 dsguard.close()
1437 1439
1438 1440 self.dirstate.restorebackup(None, 'undo.dirstate')
1439 1441 try:
1440 1442 branch = self.vfs.read('undo.branch')
1441 1443 self.dirstate.setbranch(encoding.tolocal(branch))
1442 1444 except IOError:
1443 1445 ui.warn(_('named branch could not be reset: '
1444 1446 'current branch is still \'%s\'\n')
1445 1447 % self.dirstate.branch())
1446 1448
1447 1449 parents = tuple([p.rev() for p in self[None].parents()])
1448 1450 if len(parents) > 1:
1449 1451 ui.status(_('working directory now based on '
1450 1452 'revisions %d and %d\n') % parents)
1451 1453 else:
1452 1454 ui.status(_('working directory now based on '
1453 1455 'revision %d\n') % parents)
1454 1456 mergemod.mergestate.clean(self, self['.'].node())
1455 1457
1456 1458 # TODO: if we know which new heads may result from this rollback, pass
1457 1459 # them to destroy(), which will prevent the branchhead cache from being
1458 1460 # invalidated.
1459 1461 self.destroyed()
1460 1462 return 0
1461 1463
1462 1464 def _buildcacheupdater(self, newtransaction):
1463 1465 """called during transaction to build the callback updating cache
1464 1466
1465 1467 Lives on the repository to help extensions that might want to augment
1466 1468 this logic. For this purpose, the created transaction is passed to the
1467 1469 method.
1468 1470 """
1469 1471 # we must avoid a cyclic reference between repo and transaction.
1470 1472 reporef = weakref.ref(self)
1471 1473 def updater(tr):
1472 1474 repo = reporef()
1473 1475 repo.updatecaches(tr)
1474 1476 return updater
1475 1477
1476 1478 @unfilteredmethod
1477 1479 def updatecaches(self, tr=None):
1478 1480 """warm appropriate caches
1479 1481
1480 1482 If this function is called after a transaction has closed, the transaction
1481 1483 will be available in the 'tr' argument. This can be used to selectively
1482 1484 update caches relevant to the changes in that transaction.
1483 1485 """
1484 1486 if tr is not None and tr.hookargs.get('source') == 'strip':
1485 1487 # During strip, many caches are invalid but
1486 1488 # later call to `destroyed` will refresh them.
1487 1489 return
1488 1490
1489 1491 if tr is None or tr.changes['revs']:
1490 1492 # updating the unfiltered branchmap should refresh all the others,
1491 1493 self.ui.debug('updating the branch cache\n')
1492 1494 branchmap.updatecache(self.filtered('served'))
1493 1495
1494 1496 def invalidatecaches(self):
1495 1497
1496 1498 if '_tagscache' in vars(self):
1497 1499 # can't use delattr on proxy
1498 1500 del self.__dict__['_tagscache']
1499 1501
1500 1502 self.unfiltered()._branchcaches.clear()
1501 1503 self.invalidatevolatilesets()
1502 1504 self._sparsesignaturecache.clear()
1503 1505
1504 1506 def invalidatevolatilesets(self):
1505 1507 self.filteredrevcache.clear()
1506 1508 obsolete.clearobscaches(self)
1507 1509
1508 1510 def invalidatedirstate(self):
1509 1511 '''Invalidates the dirstate, causing the next call to dirstate
1510 1512 to check if it was modified since the last time it was read,
1511 1513 rereading it if it has been.
1512 1514
1513 1515 This differs from dirstate.invalidate() in that it doesn't always
1514 1516 reread the dirstate. Use dirstate.invalidate() if you want to
1515 1517 explicitly read the dirstate again (i.e. to restore it to a
1516 1518 previously known good state).'''
1517 1519 if hasunfilteredcache(self, 'dirstate'):
1518 1520 for k in self.dirstate._filecache:
1519 1521 try:
1520 1522 delattr(self.dirstate, k)
1521 1523 except AttributeError:
1522 1524 pass
1523 1525 delattr(self.unfiltered(), 'dirstate')
1524 1526
1525 1527 def invalidate(self, clearfilecache=False):
1526 1528 '''Invalidates both store and non-store parts other than dirstate
1527 1529
1528 1530 If a transaction is running, invalidation of store is omitted,
1529 1531 because discarding in-memory changes might cause inconsistency
1530 1532 (e.g. incomplete fncache causes unintentional failure, but
1531 1533 a redundant one doesn't).
1532 1534 '''
1533 1535 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1534 1536 for k in list(self._filecache.keys()):
1535 1537 # dirstate is invalidated separately in invalidatedirstate()
1536 1538 if k == 'dirstate':
1537 1539 continue
1538 1540 if (k == 'changelog' and
1539 1541 self.currenttransaction() and
1540 1542 self.changelog._delayed):
1541 1543 # The changelog object may store unwritten revisions. We don't
1542 1544 # want to lose them.
1543 1545 # TODO: Solve the problem instead of working around it.
1544 1546 continue
1545 1547
1546 1548 if clearfilecache:
1547 1549 del self._filecache[k]
1548 1550 try:
1549 1551 delattr(unfiltered, k)
1550 1552 except AttributeError:
1551 1553 pass
1552 1554 self.invalidatecaches()
1553 1555 if not self.currenttransaction():
1554 1556 # TODO: Changing contents of store outside transaction
1555 1557 # causes inconsistency. We should make in-memory store
1556 1558 # changes detectable, and abort if changed.
1557 1559 self.store.invalidatecaches()
1558 1560
1559 1561 def invalidateall(self):
1560 1562 '''Fully invalidates both store and non-store parts, causing the
1561 1563 subsequent operation to reread any outside changes.'''
1562 1564 # extensions should hook this to invalidate their caches
1563 1565 self.invalidate()
1564 1566 self.invalidatedirstate()
1565 1567
1566 1568 @unfilteredmethod
1567 1569 def _refreshfilecachestats(self, tr):
1568 1570 """Reload stats of cached files so that they are flagged as valid"""
1569 1571 for k, ce in self._filecache.items():
1570 1572 if k == 'dirstate' or k not in self.__dict__:
1571 1573 continue
1572 1574 ce.refresh()
1573 1575
1574 1576 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1575 1577 inheritchecker=None, parentenvvar=None):
1576 1578 parentlock = None
1577 1579 # the contents of parentenvvar are used by the underlying lock to
1578 1580 # determine whether it can be inherited
1579 1581 if parentenvvar is not None:
1580 1582 parentlock = encoding.environ.get(parentenvvar)
1581 1583
1582 1584 timeout = 0
1583 1585 warntimeout = 0
1584 1586 if wait:
1585 1587 timeout = self.ui.configint("ui", "timeout")
1586 1588 warntimeout = self.ui.configint("ui", "timeout.warn")
1587 1589
1588 1590 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1589 1591 releasefn=releasefn,
1590 1592 acquirefn=acquirefn, desc=desc,
1591 1593 inheritchecker=inheritchecker,
1592 1594 parentlock=parentlock)
1593 1595 return l
1594 1596
1595 1597 def _afterlock(self, callback):
1596 1598 """add a callback to be run when the repository is fully unlocked
1597 1599
1598 1600 The callback will be executed when the outermost lock is released
1599 1601 (with wlock being higher level than 'lock')."""
1600 1602 for ref in (self._wlockref, self._lockref):
1601 1603 l = ref and ref()
1602 1604 if l and l.held:
1603 1605 l.postrelease.append(callback)
1604 1606 break
1605 1607 else: # no lock has been found.
1606 1608 callback()
1607 1609
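# Editorial sketch (not part of localrepo.py): the _afterlock contract with
# a hypothetical lock object exposing .held and .postrelease. The callback
# is queued on the first lock still held; with no lock held it runs at once.
class FakeLock(object):
    def __init__(self):
        self.held = 1
        self.postrelease = []
    def release(self):
        self.held -= 1
        if not self.held:
            for cb in self.postrelease:
                cb()

def callback():
    print('fully unlocked')

l = FakeLock()
for candidate in (l,):           # stands in for (wlock, lock)
    if candidate.held:
        candidate.postrelease.append(callback)
        break
else:
    callback()                   # no lock held: run immediately
l.release()                      # prints: fully unlocked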
1608 1610 def lock(self, wait=True):
1609 1611 '''Lock the repository store (.hg/store) and return a weak reference
1610 1612 to the lock. Use this before modifying the store (e.g. committing or
1611 1613 stripping). If you are opening a transaction, get a lock as well.
1612 1614
1613 1615 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1614 1616 'wlock' first to avoid a deadlock hazard.'''
1615 1617 l = self._currentlock(self._lockref)
1616 1618 if l is not None:
1617 1619 l.lock()
1618 1620 return l
1619 1621
1620 1622 l = self._lock(self.svfs, "lock", wait, None,
1621 1623 self.invalidate, _('repository %s') % self.origroot)
1622 1624 self._lockref = weakref.ref(l)
1623 1625 return l
1624 1626
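# Editorial sketch: the documented lock ordering, assuming an existing
# 'repo'. Always take wlock before lock to avoid the deadlock hazard the
# docstrings above describe; release in the reverse order.
def locked_operation(repo):
    wlock = lock = None
    try:
        wlock = repo.wlock()   # non-store lock first
        lock = repo.lock()     # then the store lock
        # ... modify store and working directory state here ...
    finally:
        if lock is not None:
            lock.release()
        if wlock is not None:
            wlock.release()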
1625 1627 def _wlockchecktransaction(self):
1626 1628 if self.currenttransaction() is not None:
1627 1629 raise error.LockInheritanceContractViolation(
1628 1630 'wlock cannot be inherited in the middle of a transaction')
1629 1631
1630 1632 def wlock(self, wait=True):
1631 1633 '''Lock the non-store parts of the repository (everything under
1632 1634 .hg except .hg/store) and return a weak reference to the lock.
1633 1635
1634 1636 Use this before modifying files in .hg.
1635 1637
1636 1638 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1637 1639 'wlock' first to avoid a deadlock hazard.'''
1638 1640 l = self._wlockref and self._wlockref()
1639 1641 if l is not None and l.held:
1640 1642 l.lock()
1641 1643 return l
1642 1644
1643 1645 # We do not need to check for non-waiting lock acquisition. Such
1644 1646 # an acquisition would not cause a deadlock, as it would just fail.
1645 1647 if wait and (self.ui.configbool('devel', 'all-warnings')
1646 1648 or self.ui.configbool('devel', 'check-locks')):
1647 1649 if self._currentlock(self._lockref) is not None:
1648 1650 self.ui.develwarn('"wlock" acquired after "lock"')
1649 1651
1650 1652 def unlock():
1651 1653 if self.dirstate.pendingparentchange():
1652 1654 self.dirstate.invalidate()
1653 1655 else:
1654 1656 self.dirstate.write(None)
1655 1657
1656 1658 self._filecache['dirstate'].refresh()
1657 1659
1658 1660 l = self._lock(self.vfs, "wlock", wait, unlock,
1659 1661 self.invalidatedirstate, _('working directory of %s') %
1660 1662 self.origroot,
1661 1663 inheritchecker=self._wlockchecktransaction,
1662 1664 parentenvvar='HG_WLOCK_LOCKER')
1663 1665 self._wlockref = weakref.ref(l)
1664 1666 return l
1665 1667
1666 1668 def _currentlock(self, lockref):
1667 1669 """Returns the lock if it's held, or None if it's not."""
1668 1670 if lockref is None:
1669 1671 return None
1670 1672 l = lockref()
1671 1673 if l is None or not l.held:
1672 1674 return None
1673 1675 return l
1674 1676
1675 1677 def currentwlock(self):
1676 1678 """Returns the wlock if it's held, or None if it's not."""
1677 1679 return self._currentlock(self._wlockref)
1678 1680
1679 1681 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1680 1682 """
1681 1683 commit an individual file as part of a larger transaction
1682 1684 """
1683 1685
1684 1686 fname = fctx.path()
1685 1687 fparent1 = manifest1.get(fname, nullid)
1686 1688 fparent2 = manifest2.get(fname, nullid)
1687 1689 if isinstance(fctx, context.filectx):
1688 1690 node = fctx.filenode()
1689 1691 if node in [fparent1, fparent2]:
1690 1692 self.ui.debug('reusing %s filelog entry\n' % fname)
1691 1693 if manifest1.flags(fname) != fctx.flags():
1692 1694 changelist.append(fname)
1693 1695 return node
1694 1696
1695 1697 flog = self.file(fname)
1696 1698 meta = {}
1697 1699 copy = fctx.renamed()
1698 1700 if copy and copy[0] != fname:
1699 1701 # Mark the new revision of this file as a copy of another
1700 1702 # file. This copy data will effectively act as a parent
1701 1703 # of this new revision. If this is a merge, the first
1702 1704 # parent will be the nullid (meaning "look up the copy data")
1703 1705 # and the second one will be the other parent. For example:
1704 1706 #
1705 1707 # 0 --- 1 --- 3 rev1 changes file foo
1706 1708 # \ / rev2 renames foo to bar and changes it
1707 1709 # \- 2 -/ rev3 should have bar with all changes and
1708 1710 # should record that bar descends from
1709 1711 # bar in rev2 and foo in rev1
1710 1712 #
1711 1713 # this allows this merge to succeed:
1712 1714 #
1713 1715 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1714 1716 # \ / merging rev3 and rev4 should use bar@rev2
1715 1717 # \- 2 --- 4 as the merge base
1716 1718 #
1717 1719
1718 1720 cfname = copy[0]
1719 1721 crev = manifest1.get(cfname)
1720 1722 newfparent = fparent2
1721 1723
1722 1724 if manifest2: # branch merge
1723 1725 if fparent2 == nullid or crev is None: # copied on remote side
1724 1726 if cfname in manifest2:
1725 1727 crev = manifest2[cfname]
1726 1728 newfparent = fparent1
1727 1729
1728 1730 # Here, we used to search backwards through history to try to find
1729 1731 # where the file copy came from if the source of a copy was not in
1730 1732 # the parent directory. However, this doesn't actually make sense to
1731 1733 # do (what does a copy from something not in your working copy even
1732 1734 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1733 1735 # the user that copy information was dropped, so if they didn't
1734 1736 # expect this outcome it can be fixed, but this is the correct
1735 1737 # behavior in this circumstance.
1736 1738
1737 1739 if crev:
1738 1740 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1739 1741 meta["copy"] = cfname
1740 1742 meta["copyrev"] = hex(crev)
1741 1743 fparent1, fparent2 = nullid, newfparent
1742 1744 else:
1743 1745 self.ui.warn(_("warning: can't find ancestor for '%s' "
1744 1746 "copied from '%s'!\n") % (fname, cfname))
1745 1747
1746 1748 elif fparent1 == nullid:
1747 1749 fparent1, fparent2 = fparent2, nullid
1748 1750 elif fparent2 != nullid:
1749 1751 # is one parent an ancestor of the other?
1750 1752 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1751 1753 if fparent1 in fparentancestors:
1752 1754 fparent1, fparent2 = fparent2, nullid
1753 1755 elif fparent2 in fparentancestors:
1754 1756 fparent2 = nullid
1755 1757
1756 1758 # is the file changed?
1757 1759 text = fctx.data()
1758 1760 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1759 1761 changelist.append(fname)
1760 1762 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1761 1763 # are just the flags changed during merge?
1762 1764 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1763 1765 changelist.append(fname)
1764 1766
1765 1767 return fparent1
1766 1768
1767 1769 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1768 1770 """check for commit arguments that aren't committable"""
1769 1771 if match.isexact() or match.prefix():
1770 1772 matched = set(status.modified + status.added + status.removed)
1771 1773
1772 1774 for f in match.files():
1773 1775 f = self.dirstate.normalize(f)
1774 1776 if f == '.' or f in matched or f in wctx.substate:
1775 1777 continue
1776 1778 if f in status.deleted:
1777 1779 fail(f, _('file not found!'))
1778 1780 if f in vdirs: # visited directory
1779 1781 d = f + '/'
1780 1782 for mf in matched:
1781 1783 if mf.startswith(d):
1782 1784 break
1783 1785 else:
1784 1786 fail(f, _("no match under directory!"))
1785 1787 elif f not in self.dirstate:
1786 1788 fail(f, _("file not tracked!"))
1787 1789
1788 1790 @unfilteredmethod
1789 1791 def commit(self, text="", user=None, date=None, match=None, force=False,
1790 1792 editor=False, extra=None):
1791 1793 """Add a new revision to current repository.
1792 1794
1793 1795 Revision information is gathered from the working directory,
1794 1796 match can be used to filter the committed files. If editor is
1795 1797 supplied, it is called to get a commit message.
1796 1798 """
1797 1799 if extra is None:
1798 1800 extra = {}
1799 1801
1800 1802 def fail(f, msg):
1801 1803 raise error.Abort('%s: %s' % (f, msg))
1802 1804
1803 1805 if not match:
1804 1806 match = matchmod.always(self.root, '')
1805 1807
1806 1808 if not force:
1807 1809 vdirs = []
1808 1810 match.explicitdir = vdirs.append
1809 1811 match.bad = fail
1810 1812
1811 1813 wlock = lock = tr = None
1812 1814 try:
1813 1815 wlock = self.wlock()
1814 1816 lock = self.lock() # for recent changelog (see issue4368)
1815 1817
1816 1818 wctx = self[None]
1817 1819 merge = len(wctx.parents()) > 1
1818 1820
1819 1821 if not force and merge and not match.always():
1820 1822 raise error.Abort(_('cannot partially commit a merge '
1821 1823 '(do not specify files or patterns)'))
1822 1824
1823 1825 status = self.status(match=match, clean=force)
1824 1826 if force:
1825 1827 status.modified.extend(status.clean) # mq may commit clean files
1826 1828
1827 1829 # check subrepos
1828 1830 subs, commitsubs, newstate = subrepo.precommit(
1829 1831 self.ui, wctx, status, match, force=force)
1830 1832
1831 1833 # make sure all explicit patterns are matched
1832 1834 if not force:
1833 1835 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1834 1836
1835 1837 cctx = context.workingcommitctx(self, status,
1836 1838 text, user, date, extra)
1837 1839
1838 1840 # internal config: ui.allowemptycommit
1839 1841 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1840 1842 or extra.get('close') or merge or cctx.files()
1841 1843 or self.ui.configbool('ui', 'allowemptycommit'))
1842 1844 if not allowemptycommit:
1843 1845 return None
1844 1846
1845 1847 if merge and cctx.deleted():
1846 1848 raise error.Abort(_("cannot commit merge with missing files"))
1847 1849
1848 1850 ms = mergemod.mergestate.read(self)
1849 1851 mergeutil.checkunresolved(ms)
1850 1852
1851 1853 if editor:
1852 1854 cctx._text = editor(self, cctx, subs)
1853 1855 edited = (text != cctx._text)
1854 1856
1855 1857 # Save commit message in case this transaction gets rolled back
1856 1858 # (e.g. by a pretxncommit hook). Leave the content alone on
1857 1859 # the assumption that the user will use the same editor again.
1858 1860 msgfn = self.savecommitmessage(cctx._text)
1859 1861
1860 1862 # commit subs and write new state
1861 1863 if subs:
1862 1864 for s in sorted(commitsubs):
1863 1865 sub = wctx.sub(s)
1864 1866 self.ui.status(_('committing subrepository %s\n') %
1865 1867 subrepo.subrelpath(sub))
1866 1868 sr = sub.commit(cctx._text, user, date)
1867 1869 newstate[s] = (newstate[s][0], sr)
1868 1870 subrepo.writestate(self, newstate)
1869 1871
1870 1872 p1, p2 = self.dirstate.parents()
1871 1873 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1872 1874 try:
1873 1875 self.hook("precommit", throw=True, parent1=hookp1,
1874 1876 parent2=hookp2)
1875 1877 tr = self.transaction('commit')
1876 1878 ret = self.commitctx(cctx, True)
1877 1879 except: # re-raises
1878 1880 if edited:
1879 1881 self.ui.write(
1880 1882 _('note: commit message saved in %s\n') % msgfn)
1881 1883 raise
1882 1884 # update bookmarks, dirstate and mergestate
1883 1885 bookmarks.update(self, [p1, p2], ret)
1884 1886 cctx.markcommitted(ret)
1885 1887 ms.reset()
1886 1888 tr.close()
1887 1889
1888 1890 finally:
1889 1891 lockmod.release(tr, lock, wlock)
1890 1892
1891 1893 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1892 1894 # hack for commands that use a temporary commit (eg: histedit):
1893 1895 # the temporary commit may be stripped before the hook is run
1894 1896 if self.changelog.hasnode(ret):
1895 1897 self.hook("commit", node=node, parent1=parent1,
1896 1898 parent2=parent2)
1897 1899 self._afterlock(commithook)
1898 1900 return ret
1899 1901
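# Editorial sketch: a minimal driver for commit() above, assuming an
# existing 'repo'. The message and user are illustrative; commit() returns
# the new node, or None when there is nothing to commit.
node = repo.commit(text='fix frobnicator', user='alice <alice@example.com>')
if node is None:
    repo.ui.status('nothing changed\n')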
1900 1902 @unfilteredmethod
1901 1903 def commitctx(self, ctx, error=False):
1902 1904 """Add a new revision to current repository.
1903 1905 Revision information is passed via the context argument.
1904 1906 """
1905 1907
1906 1908 tr = None
1907 1909 p1, p2 = ctx.p1(), ctx.p2()
1908 1910 user = ctx.user()
1909 1911
1910 1912 lock = self.lock()
1911 1913 try:
1912 1914 tr = self.transaction("commit")
1913 1915 trp = weakref.proxy(tr)
1914 1916
1915 1917 if ctx.manifestnode():
1916 1918 # reuse an existing manifest revision
1917 1919 mn = ctx.manifestnode()
1918 1920 files = ctx.files()
1919 1921 elif ctx.files():
1920 1922 m1ctx = p1.manifestctx()
1921 1923 m2ctx = p2.manifestctx()
1922 1924 mctx = m1ctx.copy()
1923 1925
1924 1926 m = mctx.read()
1925 1927 m1 = m1ctx.read()
1926 1928 m2 = m2ctx.read()
1927 1929
1928 1930 # check in files
1929 1931 added = []
1930 1932 changed = []
1931 1933 removed = list(ctx.removed())
1932 1934 linkrev = len(self)
1933 1935 self.ui.note(_("committing files:\n"))
1934 1936 for f in sorted(ctx.modified() + ctx.added()):
1935 1937 self.ui.note(f + "\n")
1936 1938 try:
1937 1939 fctx = ctx[f]
1938 1940 if fctx is None:
1939 1941 removed.append(f)
1940 1942 else:
1941 1943 added.append(f)
1942 1944 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1943 1945 trp, changed)
1944 1946 m.setflag(f, fctx.flags())
1945 1947 except OSError as inst:
1946 1948 self.ui.warn(_("trouble committing %s!\n") % f)
1947 1949 raise
1948 1950 except IOError as inst:
1949 1951 errcode = getattr(inst, 'errno', errno.ENOENT)
1950 1952 if error or errcode and errcode != errno.ENOENT:
1951 1953 self.ui.warn(_("trouble committing %s!\n") % f)
1952 1954 raise
1953 1955
1954 1956 # update manifest
1955 1957 self.ui.note(_("committing manifest\n"))
1956 1958 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1957 1959 drop = [f for f in removed if f in m]
1958 1960 for f in drop:
1959 1961 del m[f]
1960 1962 mn = mctx.write(trp, linkrev,
1961 1963 p1.manifestnode(), p2.manifestnode(),
1962 1964 added, drop)
1963 1965 files = changed + removed
1964 1966 else:
1965 1967 mn = p1.manifestnode()
1966 1968 files = []
1967 1969
1968 1970 # update changelog
1969 1971 self.ui.note(_("committing changelog\n"))
1970 1972 self.changelog.delayupdate(tr)
1971 1973 n = self.changelog.add(mn, files, ctx.description(),
1972 1974 trp, p1.node(), p2.node(),
1973 1975 user, ctx.date(), ctx.extra().copy())
1974 1976 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1975 1977 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1976 1978 parent2=xp2)
1977 1979 # set the new commit in its proper phase
1978 1980 targetphase = subrepo.newcommitphase(self.ui, ctx)
1979 1981 if targetphase:
1980 1982 # retracting the boundary does not alter parent changesets.
1981 1983 # if a parent has a higher phase, the resulting phase will
1982 1984 # be compliant anyway
1983 1985 #
1984 1986 # if minimal phase was 0 we don't need to retract anything
1985 1987 phases.registernew(self, tr, targetphase, [n])
1986 1988 tr.close()
1987 1989 return n
1988 1990 finally:
1989 1991 if tr:
1990 1992 tr.release()
1991 1993 lock.release()
1992 1994
1993 1995 @unfilteredmethod
1994 1996 def destroying(self):
1995 1997 '''Inform the repository that nodes are about to be destroyed.
1996 1998 Intended for use by strip and rollback, so there's a common
1997 1999 place for anything that has to be done before destroying history.
1998 2000
1999 2001 This is mostly useful for saving state that is in memory and waiting
2000 2002 to be flushed when the current lock is released. Because a call to
2001 2003 destroyed is imminent, the repo will be invalidated causing those
2002 2004 changes to stay in memory (waiting for the next unlock), or vanish
2003 2005 completely.
2004 2006 '''
2005 2007 # When using the same lock to commit and strip, the phasecache is left
2006 2008 # dirty after committing. Then when we strip, the repo is invalidated,
2007 2009 # causing those changes to disappear.
2008 2010 if '_phasecache' in vars(self):
2009 2011 self._phasecache.write()
2010 2012
2011 2013 @unfilteredmethod
2012 2014 def destroyed(self):
2013 2015 '''Inform the repository that nodes have been destroyed.
2014 2016 Intended for use by strip and rollback, so there's a common
2015 2017 place for anything that has to be done after destroying history.
2016 2018 '''
2017 2019 # When one tries to:
2018 2020 # 1) destroy nodes thus calling this method (e.g. strip)
2019 2021 # 2) use phasecache somewhere (e.g. commit)
2020 2022 #
2021 2023 # then 2) will fail because the phasecache contains nodes that were
2022 2024 # removed. We can either remove phasecache from the filecache,
2023 2025 # causing it to reload next time it is accessed, or simply filter
2024 2026 # the removed nodes now and write the updated cache.
2025 2027 self._phasecache.filterunknown(self)
2026 2028 self._phasecache.write()
2027 2029
2028 2030 # refresh all repository caches
2029 2031 self.updatecaches()
2030 2032
2031 2033 # Ensure the persistent tag cache is updated. Doing it now
2032 2034 # means that the tag cache only has to worry about destroyed
2033 2035 # heads immediately after a strip/rollback. That in turn
2034 2036 # guarantees that "cachetip == currenttip" (comparing both rev
2035 2037 # and node) always means no nodes have been added or destroyed.
2036 2038
2037 2039 # XXX this is suboptimal when qrefresh'ing: we strip the current
2038 2040 # head, refresh the tag cache, then immediately add a new head.
2039 2041 # But I think doing it this way is necessary for the "instant
2040 2042 # tag cache retrieval" case to work.
2041 2043 self.invalidate()
2042 2044
2043 2045 def walk(self, match, node=None):
2044 2046 '''
2045 2047 walk recursively through the directory tree or a given
2046 2048 changeset, finding all files matched by the match
2047 2049 function
2048 2050 '''
2049 2051 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2050 2052 return self[node].walk(match)
2051 2053
2052 2054 def status(self, node1='.', node2=None, match=None,
2053 2055 ignored=False, clean=False, unknown=False,
2054 2056 listsubrepos=False):
2055 2057 '''a convenience method that calls node1.status(node2)'''
2056 2058 return self[node1].status(node2, match, ignored, clean, unknown,
2057 2059 listsubrepos)
2058 2060
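# Editorial sketch: the status() convenience wrapper above, assuming an
# existing 'repo'. With no arguments it compares '.' against the working
# directory and returns a status object with modified/added/removed lists.
st = repo.status()
for f in st.modified:
    repo.ui.write('M %s\n' % f)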
2059 2061 def addpostdsstatus(self, ps):
2060 2062 """Add a callback to run within the wlock, at the point at which status
2061 2063 fixups happen.
2062 2064
2063 2065 On status completion, callback(wctx, status) will be called with the
2064 2066 wlock held, unless the dirstate has changed from underneath or the wlock
2065 2067 couldn't be grabbed.
2066 2068
2067 2069 Callbacks should not capture and use a cached copy of the dirstate --
2068 2070 it might change in the meantime. Instead, they should access the
2069 2071 dirstate via wctx.repo().dirstate.
2070 2072
2071 2073 This list is emptied out after each status run -- extensions should
2072 2074 make sure they add to this list each time dirstate.status is called.
2073 2075 Extensions should also make sure they don't call this for statuses
2074 2076 that don't involve the dirstate.
2075 2077 """
2076 2078
2077 2079 # The list is located here for uniqueness reasons -- it is actually
2078 2080 # managed by the workingctx, but that isn't unique per-repo.
2079 2081 self._postdsstatus.append(ps)
2080 2082
2081 2083 def postdsstatus(self):
2082 2084 """Used by workingctx to get the list of post-dirstate-status hooks."""
2083 2085 return self._postdsstatus
2084 2086
2085 2087 def clearpostdsstatus(self):
2086 2088 """Used by workingctx to clear post-dirstate-status hooks."""
2087 2089 del self._postdsstatus[:]
2088 2090
2089 2091 def heads(self, start=None):
2090 2092 if start is None:
2091 2093 cl = self.changelog
2092 2094 headrevs = reversed(cl.headrevs())
2093 2095 return [cl.node(rev) for rev in headrevs]
2094 2096
2095 2097 heads = self.changelog.heads(start)
2096 2098 # sort the output in rev descending order
2097 2099 return sorted(heads, key=self.changelog.rev, reverse=True)
2098 2100
2099 2101 def branchheads(self, branch=None, start=None, closed=False):
2100 2102 '''return a (possibly filtered) list of heads for the given branch
2101 2103
2102 2104 Heads are returned in topological order, from newest to oldest.
2103 2105 If branch is None, use the dirstate branch.
2104 2106 If start is not None, return only heads reachable from start.
2105 2107 If closed is True, return heads that are marked as closed as well.
2106 2108 '''
2107 2109 if branch is None:
2108 2110 branch = self[None].branch()
2109 2111 branches = self.branchmap()
2110 2112 if branch not in branches:
2111 2113 return []
2112 2114 # the cache returns heads ordered lowest to highest
2113 2115 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2114 2116 if start is not None:
2115 2117 # filter out the heads that cannot be reached from startrev
2116 2118 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2117 2119 bheads = [h for h in bheads if h in fbheads]
2118 2120 return bheads
2119 2121
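# Editorial sketch: using branchheads() above, assuming an existing 'repo'.
# Heads come back newest to oldest; closed=True also includes heads of
# closed branches. 'short' is the node helper imported at the top of this
# module.
for node in repo.branchheads('default', closed=True):
    repo.ui.write('%s\n' % short(node))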
2120 2122 def branches(self, nodes):
2121 2123 if not nodes:
2122 2124 nodes = [self.changelog.tip()]
2123 2125 b = []
2124 2126 for n in nodes:
2125 2127 t = n
2126 2128 while True:
2127 2129 p = self.changelog.parents(n)
2128 2130 if p[1] != nullid or p[0] == nullid:
2129 2131 b.append((t, n, p[0], p[1]))
2130 2132 break
2131 2133 n = p[0]
2132 2134 return b
2133 2135
2134 2136 def between(self, pairs):
2135 2137 r = []
2136 2138
2137 2139 for top, bottom in pairs:
2138 2140 n, l, i = top, [], 0
2139 2141 f = 1
2140 2142
2141 2143 while n != bottom and n != nullid:
2142 2144 p = self.changelog.parents(n)[0]
2143 2145 if i == f:
2144 2146 l.append(n)
2145 2147 f = f * 2
2146 2148 n = p
2147 2149 i += 1
2148 2150
2149 2151 r.append(l)
2150 2152
2151 2153 return r
2152 2154
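# Editorial sketch (not part of localrepo.py): what between() computes for
# a single (top, bottom) pair, reimplemented over a toy linear history
# 0 <- 1 <- ... <- 9. Walking from top towards bottom it keeps the nodes
# at distances 1, 2, 4, 8, ...: an exponentially thinning sample.
parent = {n: n - 1 for n in range(1, 10)}  # hypothetical parent map

def between_one(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom and n in parent:
        if i == f:
            l.append(n)
            f *= 2
        n = parent[n]
        i += 1
    return l

print(between_one(9, 0))  # [8, 7, 5, 1] -- distances 1, 2, 4, 8 from 9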
2153 2155 def checkpush(self, pushop):
2154 2156 """Extensions can override this function if additional checks have
2155 2157 to be performed before pushing, or call it if they override the push
2156 2158 command.
2157 2159 """
2158 2160
2159 2161 @unfilteredpropertycache
2160 2162 def prepushoutgoinghooks(self):
2161 2163 """Return util.hooks consists of a pushop with repo, remote, outgoing
2162 2164 methods, which are called before pushing changesets.
2163 2165 """
2164 2166 return util.hooks()
2165 2167
2166 2168 def pushkey(self, namespace, key, old, new):
2167 2169 try:
2168 2170 tr = self.currenttransaction()
2169 2171 hookargs = {}
2170 2172 if tr is not None:
2171 2173 hookargs.update(tr.hookargs)
2172 2174 hookargs['namespace'] = namespace
2173 2175 hookargs['key'] = key
2174 2176 hookargs['old'] = old
2175 2177 hookargs['new'] = new
2176 2178 self.hook('prepushkey', throw=True, **hookargs)
2177 2179 except error.HookAbort as exc:
2178 2180 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2179 2181 if exc.hint:
2180 2182 self.ui.write_err(_("(%s)\n") % exc.hint)
2181 2183 return False
2182 2184 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2183 2185 ret = pushkey.push(self, namespace, key, old, new)
2184 2186 def runhook():
2185 2187 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2186 2188 ret=ret)
2187 2189 self._afterlock(runhook)
2188 2190 return ret
2189 2191
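# Editorial sketch: exercising pushkey()/listkeys() above, assuming an
# existing 'repo'. 'bookmarks' is a standard pushkey namespace; values are
# hex nodes and an empty 'old' means "create the key".
marks = repo.listkeys('bookmarks')
new = repo['tip'].hex()
if repo.pushkey('bookmarks', 'stable', marks.get('stable', ''), new):
    repo.ui.status('bookmark stable moved\n')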
2190 2192 def listkeys(self, namespace):
2191 2193 self.hook('prelistkeys', throw=True, namespace=namespace)
2192 2194 self.ui.debug('listing keys for "%s"\n' % namespace)
2193 2195 values = pushkey.list(self, namespace)
2194 2196 self.hook('listkeys', namespace=namespace, values=values)
2195 2197 return values
2196 2198
2197 2199 def debugwireargs(self, one, two, three=None, four=None, five=None):
2198 2200 '''used to test argument passing over the wire'''
2199 2201 return "%s %s %s %s %s" % (one, two, three, four, five)
2200 2202
2201 2203 def savecommitmessage(self, text):
2202 2204 fp = self.vfs('last-message.txt', 'wb')
2203 2205 try:
2204 2206 fp.write(text)
2205 2207 finally:
2206 2208 fp.close()
2207 2209 return self.pathto(fp.name[len(self.root) + 1:])
2208 2210
2209 2211 # used to avoid circular references so destructors work
2210 2212 def aftertrans(files):
2211 2213 renamefiles = [tuple(t) for t in files]
2212 2214 def a():
2213 2215 for vfs, src, dest in renamefiles:
2214 2216 # if src and dest refer to the same file, vfs.rename is a no-op,
2215 2217 # leaving both src and dest on disk. delete dest to make sure
2216 2218 # the rename couldn't be such a no-op.
2217 2219 vfs.tryunlink(dest)
2218 2220 try:
2219 2221 vfs.rename(src, dest)
2220 2222 except OSError: # journal file does not yet exist
2221 2223 pass
2222 2224 return a
2223 2225
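# Editorial sketch (not part of localrepo.py): aftertrans() returns a plain
# closure over (vfs, src, dest) tuples, so the transaction holds no
# reference back to the repo. A hypothetical in-memory vfs makes the
# journal -> undo rename visible.
class MemVFS(object):
    def __init__(self):
        self.files = {'journal': 'data'}
    def tryunlink(self, name):
        self.files.pop(name, None)
    def rename(self, src, dest):
        self.files[dest] = self.files.pop(src)

vfs = MemVFS()
renamer = aftertrans([(vfs, 'journal', 'undo')])
renamer()
assert 'undo' in vfs.files and 'journal' not in vfs.files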
2224 2226 def undoname(fn):
2225 2227 base, name = os.path.split(fn)
2226 2228 assert name.startswith('journal')
2227 2229 return os.path.join(base, name.replace('journal', 'undo', 1))
2228 2230
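# Editorial sketch: undoname() maps a journal file to its undo counterpart.
print(undoname('.hg/store/journal.phaseroots'))  # .hg/store/undo.phaseroots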
2229 2231 def instance(ui, path, create):
2230 2232 return localrepository(ui, util.urllocalpath(path), create)
2231 2233
2232 2234 def islocal(path):
2233 2235 return True
2234 2236
2235 2237 def newreporequirements(repo):
2236 2238 """Determine the set of requirements for a new local repository.
2237 2239
2238 2240 Extensions can wrap this function to specify custom requirements for
2239 2241 new repositories.
2240 2242 """
2241 2243 ui = repo.ui
2242 2244 requirements = {'revlogv1'}
2243 2245 if ui.configbool('format', 'usestore'):
2244 2246 requirements.add('store')
2245 2247 if ui.configbool('format', 'usefncache'):
2246 2248 requirements.add('fncache')
2247 2249 if ui.configbool('format', 'dotencode'):
2248 2250 requirements.add('dotencode')
2249 2251
2250 2252 compengine = ui.config('experimental', 'format.compression')
2251 2253 if compengine not in util.compengines:
2252 2254 raise error.Abort(_('compression engine %s defined by '
2253 2255 'experimental.format.compression not available') %
2254 2256 compengine,
2255 2257 hint=_('run "hg debuginstall" to list available '
2256 2258 'compression engines'))
2257 2259
2258 2260 # zlib is the historical default and doesn't need an explicit requirement.
2259 2261 if compengine != 'zlib':
2260 2262 requirements.add('exp-compression-%s' % compengine)
2261 2263
2262 2264 if scmutil.gdinitconfig(ui):
2263 2265 requirements.add('generaldelta')
2264 2266 if ui.configbool('experimental', 'treemanifest'):
2265 2267 requirements.add('treemanifest')
2266 2268 if ui.configbool('experimental', 'manifestv2'):
2267 2269 requirements.add('manifestv2')
2268 2270
2269 2271 revlogv2 = ui.config('experimental', 'revlogv2')
2270 2272 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2271 2273 requirements.remove('revlogv1')
2272 2274 # generaldelta is implied by revlogv2.
2273 2275 requirements.discard('generaldelta')
2274 2276 requirements.add(REVLOGV2_REQUIREMENT)
2275 2277
2276 2278 return requirements
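# Editorial sketch: with stock settings (usestore, usefncache and dotencode
# enabled, zlib compression, generaldelta on, no experimental formats),
# newreporequirements() should yield roughly this set:
expected = {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}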