localrepo: add some comment about role of various vfs object...
Pierre-Yves David
r31144:afcc4b4a default
@@ -1,2052 +1,2057 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 )
63 63
64 64 release = lockmod.release
65 65 urlerr = util.urlerr
66 66 urlreq = util.urlreq
67 67
68 68 class repofilecache(scmutil.filecache):
69 69 """All filecache usage on repo are done for logic that should be unfiltered
70 70 """
71 71
72 72 def __get__(self, repo, type=None):
73 73 if repo is None:
74 74 return self
75 75 return super(repofilecache, self).__get__(repo.unfiltered(), type)
76 76 def __set__(self, repo, value):
77 77 return super(repofilecache, self).__set__(repo.unfiltered(), value)
78 78 def __delete__(self, repo):
79 79 return super(repofilecache, self).__delete__(repo.unfiltered())
80 80
81 81 class storecache(repofilecache):
82 82 """filecache for files in the store"""
83 83 def join(self, obj, fname):
84 84 return obj.sjoin(fname)
85 85
86 86 class unfilteredpropertycache(util.propertycache):
87 87 """propertycache that apply to unfiltered repo only"""
88 88
89 89 def __get__(self, repo, type=None):
90 90 unfi = repo.unfiltered()
91 91 if unfi is repo:
92 92 return super(unfilteredpropertycache, self).__get__(unfi)
93 93 return getattr(unfi, self.name)
94 94
95 95 class filteredpropertycache(util.propertycache):
96 96 """propertycache that must take filtering in account"""
97 97
98 98 def cachevalue(self, obj, value):
99 99 object.__setattr__(obj, self.name, value)
100 100
101 101
102 102 def hasunfilteredcache(repo, name):
103 103 """check if a repo has an unfilteredpropertycache value for <name>"""
104 104 return name in vars(repo.unfiltered())
105 105
106 106 def unfilteredmethod(orig):
107 107 """decorate method that always need to be run on unfiltered version"""
108 108 def wrapper(repo, *args, **kwargs):
109 109 return orig(repo.unfiltered(), *args, **kwargs)
110 110 return wrapper
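# Editorial sketch (not part of this changeset): how the caching decorators
# above interact with repo filtering. Values cached via repofilecache or
# unfilteredpropertycache live on the *unfiltered* repository, so every
# filtered view shares them. The path and repo below are hypothetical.
#
#     from mercurial import hg, ui as uimod
#     repo = hg.repository(uimod.ui(), '/tmp/repo')
#     served = repo.filtered('served')
#     # both views resolve to the same cached bmstore instance:
#     assert served._bookmarks is repo.unfiltered()._bookmarks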
111 111
112 112 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
113 113 'unbundle'))
114 114 legacycaps = moderncaps.union(set(['changegroupsubset']))
115 115
116 116 class localpeer(peer.peerrepository):
117 117 '''peer for a local repo; reflects only the most recent API'''
118 118
119 119 def __init__(self, repo, caps=moderncaps):
120 120 peer.peerrepository.__init__(self)
121 121 self._repo = repo.filtered('served')
122 122 self.ui = repo.ui
123 123 self._caps = repo._restrictcapabilities(caps)
124 124 self.requirements = repo.requirements
125 125 self.supportedformats = repo.supportedformats
126 126
127 127 def close(self):
128 128 self._repo.close()
129 129
130 130 def _capabilities(self):
131 131 return self._caps
132 132
133 133 def local(self):
134 134 return self._repo
135 135
136 136 def canpush(self):
137 137 return True
138 138
139 139 def url(self):
140 140 return self._repo.url()
141 141
142 142 def lookup(self, key):
143 143 return self._repo.lookup(key)
144 144
145 145 def branchmap(self):
146 146 return self._repo.branchmap()
147 147
148 148 def heads(self):
149 149 return self._repo.heads()
150 150
151 151 def known(self, nodes):
152 152 return self._repo.known(nodes)
153 153
154 154 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
155 155 **kwargs):
156 156 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
157 157 common=common, bundlecaps=bundlecaps,
158 158 **kwargs)
159 159 cb = util.chunkbuffer(chunks)
160 160
161 161 if bundlecaps is not None and 'HG20' in bundlecaps:
162 162 # When requesting a bundle2, getbundle returns a stream to make the
163 163 # wire-level function happier. We need to build a proper object
164 164 # from it in the local peer.
165 165 return bundle2.getunbundler(self.ui, cb)
166 166 else:
167 167 return changegroup.getunbundler('01', cb, None)
168 168
169 169 # TODO We might want to move the next two calls into legacypeer and add
170 170 # unbundle instead.
171 171
172 172 def unbundle(self, cg, heads, url):
173 173 """apply a bundle on a repo
174 174
175 175 This function handles the repo locking itself."""
176 176 try:
177 177 try:
178 178 cg = exchange.readbundle(self.ui, cg, None)
179 179 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
180 180 if util.safehasattr(ret, 'getchunks'):
181 181 # This is a bundle20 object, turn it into an unbundler.
182 182 # This little dance should be dropped eventually when the
183 183 # API is finally improved.
184 184 stream = util.chunkbuffer(ret.getchunks())
185 185 ret = bundle2.getunbundler(self.ui, stream)
186 186 return ret
187 187 except Exception as exc:
188 188 # If the exception contains output salvaged from a bundle2
189 189 # reply, we need to make sure it is printed before continuing
190 190 # to fail. So we build a bundle2 with such output and consume
191 191 # it directly.
192 192 #
193 193 # This is not very elegant but allows a "simple" solution for
194 194 # issue4594
195 195 output = getattr(exc, '_bundle2salvagedoutput', ())
196 196 if output:
197 197 bundler = bundle2.bundle20(self._repo.ui)
198 198 for out in output:
199 199 bundler.addpart(out)
200 200 stream = util.chunkbuffer(bundler.getchunks())
201 201 b = bundle2.getunbundler(self.ui, stream)
202 202 bundle2.processbundle(self._repo, b)
203 203 raise
204 204 except error.PushRaced as exc:
205 205 raise error.ResponseError(_('push failed:'), str(exc))
206 206
207 207 def lock(self):
208 208 return self._repo.lock()
209 209
210 210 def addchangegroup(self, cg, source, url):
211 211 return cg.apply(self._repo, source, url)
212 212
213 213 def pushkey(self, namespace, key, old, new):
214 214 return self._repo.pushkey(namespace, key, old, new)
215 215
216 216 def listkeys(self, namespace):
217 217 return self._repo.listkeys(namespace)
218 218
219 219 def debugwireargs(self, one, two, three=None, four=None, five=None):
220 220 '''used to test argument passing over the wire'''
221 221 return "%s %s %s %s %s" % (one, two, three, four, five)
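# Editorial sketch (not part of this changeset): localpeer is what
# repo.peer() (defined on localrepository below) returns; it exposes a
# wire-protocol-like API backed directly by a 'served' view of the local
# repository. The path is hypothetical.
#
#     from mercurial import hg, ui as uimod
#     repo = hg.repository(uimod.ui(), '/tmp/repo')
#     peer = repo.peer()            # a localpeer instance
#     peer.lookup('tip')            # node of the tip changeset
#     peer.capable('unbundle')      # True: 'unbundle' is in moderncaps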
222 222
223 223 class locallegacypeer(localpeer):
224 224 '''peer extension which implements legacy methods too; used for tests with
225 225 restricted capabilities'''
226 226
227 227 def __init__(self, repo):
228 228 localpeer.__init__(self, repo, caps=legacycaps)
229 229
230 230 def branches(self, nodes):
231 231 return self._repo.branches(nodes)
232 232
233 233 def between(self, pairs):
234 234 return self._repo.between(pairs)
235 235
236 236 def changegroup(self, basenodes, source):
237 237 return changegroup.changegroup(self._repo, basenodes, source)
238 238
239 239 def changegroupsubset(self, bases, heads, source):
240 240 return changegroup.changegroupsubset(self._repo, bases, heads, source)
241 241
242 242 class localrepository(object):
243 243
244 244 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
245 245 'manifestv2'))
246 246 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
247 247 'relshared', 'dotencode'))
248 248 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
249 249 filtername = None
250 250
251 251 # a list of (ui, featureset) functions.
252 252 # only functions defined in module of enabled extensions are invoked
253 253 featuresetupfuncs = set()
254 254
255 255 def __init__(self, baseui, path, create=False):
256 256 self.requirements = set()
257 # vfs to access the working copy
257 258 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
259 # vfs to access the content of the repository
260 self.vfs = None
261 # vfs to access the store part of the repository
262 self.svfs = None
258 263 self.wopener = self.wvfs
259 264 self.root = self.wvfs.base
260 265 self.path = self.wvfs.join(".hg")
261 266 self.origroot = path
262 267 self.auditor = pathutil.pathauditor(self.root, self._checknested)
263 268 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
264 269 realfs=False)
265 270 self.vfs = scmutil.vfs(self.path)
266 271 self.opener = self.vfs
267 272 self.baseui = baseui
268 273 self.ui = baseui.copy()
269 274 self.ui.copy = baseui.copy # prevent copying repo configuration
270 275 # A list of callbacks to shape the phase if no data were found.
271 276 # Callbacks are in the form: func(repo, roots) --> processed root.
272 277 # This list is to be filled by extensions during repo setup.
273 278 self._phasedefaults = []
274 279 try:
275 280 self.ui.readconfig(self.join("hgrc"), self.root)
276 281 self._loadextensions()
277 282 except IOError:
278 283 pass
279 284
280 285 if self.featuresetupfuncs:
281 286 self.supported = set(self._basesupported) # use private copy
282 287 extmods = set(m.__name__ for n, m
283 288 in extensions.extensions(self.ui))
284 289 for setupfunc in self.featuresetupfuncs:
285 290 if setupfunc.__module__ in extmods:
286 291 setupfunc(self.ui, self.supported)
287 292 else:
288 293 self.supported = self._basesupported
289 294 color.setup(self.ui)
290 295
291 296 # Add compression engines.
292 297 for name in util.compengines:
293 298 engine = util.compengines[name]
294 299 if engine.revlogheader():
295 300 self.supported.add('exp-compression-%s' % name)
296 301
297 302 if not self.vfs.isdir():
298 303 if create:
299 304 self.requirements = newreporequirements(self)
300 305
301 306 if not self.wvfs.exists():
302 307 self.wvfs.makedirs()
303 308 self.vfs.makedir(notindexed=True)
304 309
305 310 if 'store' in self.requirements:
306 311 self.vfs.mkdir("store")
307 312
308 313 # create an invalid changelog
309 314 self.vfs.append(
310 315 "00changelog.i",
311 316 '\0\0\0\2' # represents revlogv2
312 317 ' dummy changelog to prevent using the old repo layout'
313 318 )
314 319 else:
315 320 raise error.RepoError(_("repository %s not found") % path)
316 321 elif create:
317 322 raise error.RepoError(_("repository %s already exists") % path)
318 323 else:
319 324 try:
320 325 self.requirements = scmutil.readrequires(
321 326 self.vfs, self.supported)
322 327 except IOError as inst:
323 328 if inst.errno != errno.ENOENT:
324 329 raise
325 330
326 331 self.sharedpath = self.path
327 332 try:
328 333 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
329 334 if 'relshared' in self.requirements:
330 335 sharedpath = self.vfs.join(sharedpath)
331 336 vfs = scmutil.vfs(sharedpath, realpath=True)
332 337
333 338 s = vfs.base
334 339 if not vfs.exists():
335 340 raise error.RepoError(
336 341 _('.hg/sharedpath points to nonexistent directory %s') % s)
337 342 self.sharedpath = s
338 343 except IOError as inst:
339 344 if inst.errno != errno.ENOENT:
340 345 raise
341 346
342 347 self.store = store.store(
343 348 self.requirements, self.sharedpath, scmutil.vfs)
344 349 self.spath = self.store.path
345 350 self.svfs = self.store.vfs
346 351 self.sjoin = self.store.join
347 352 self.vfs.createmode = self.store.createmode
348 353 self._applyopenerreqs()
349 354 if create:
350 355 self._writerequirements()
351 356
352 357 self._dirstatevalidatewarned = False
353 358
354 359 self._branchcaches = {}
355 360 self._revbranchcache = None
356 361 self.filterpats = {}
357 362 self._datafilters = {}
358 363 self._transref = self._lockref = self._wlockref = None
359 364
360 365 # A cache for various files under .hg/ that tracks file changes,
361 366 # (used by the filecache decorator)
362 367 #
363 368 # Maps a property name to its util.filecacheentry
364 369 self._filecache = {}
365 370
366 371 # hold sets of revision to be filtered
367 372 # should be cleared when something might have changed the filter value:
368 373 # - new changesets,
369 374 # - phase change,
370 375 # - new obsolescence marker,
371 376 # - working directory parent change,
372 377 # - bookmark changes
373 378 self.filteredrevcache = {}
374 379
375 380 # generic mapping between names and nodes
376 381 self.names = namespaces.namespaces()
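# Editorial sketch (not part of this changeset): how the three vfs objects
# documented in __init__ map onto the on-disk layout, assuming a repository
# at the hypothetical path /tmp/repo with the 'store' requirement:
#
#     from mercurial import hg, ui as uimod
#     repo = hg.repository(uimod.ui(), '/tmp/repo')
#     repo.wvfs.base   # '/tmp/repo'           -- working copy files
#     repo.vfs.base    # '/tmp/repo/.hg'       -- repository metadata
#     repo.svfs.base   # '/tmp/repo/.hg/store' -- revlogs and store data
#
# For shared repositories ('shared'/'relshared' requirements), the store
# vfs instead points inside the .hg/sharedpath target, as set up above.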
377 382
378 383 def close(self):
379 384 self._writecaches()
380 385
381 386 def _loadextensions(self):
382 387 extensions.loadall(self.ui)
383 388
384 389 def _writecaches(self):
385 390 if self._revbranchcache:
386 391 self._revbranchcache.write()
387 392
388 393 def _restrictcapabilities(self, caps):
389 394 if self.ui.configbool('experimental', 'bundle2-advertise', True):
390 395 caps = set(caps)
391 396 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
392 397 caps.add('bundle2=' + urlreq.quote(capsblob))
393 398 return caps
394 399
395 400 def _applyopenerreqs(self):
396 401 self.svfs.options = dict((r, 1) for r in self.requirements
397 402 if r in self.openerreqs)
398 403 # experimental config: format.chunkcachesize
399 404 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
400 405 if chunkcachesize is not None:
401 406 self.svfs.options['chunkcachesize'] = chunkcachesize
402 407 # experimental config: format.maxchainlen
403 408 maxchainlen = self.ui.configint('format', 'maxchainlen')
404 409 if maxchainlen is not None:
405 410 self.svfs.options['maxchainlen'] = maxchainlen
406 411 # experimental config: format.manifestcachesize
407 412 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
408 413 if manifestcachesize is not None:
409 414 self.svfs.options['manifestcachesize'] = manifestcachesize
410 415 # experimental config: format.aggressivemergedeltas
411 416 aggressivemergedeltas = self.ui.configbool('format',
412 417 'aggressivemergedeltas', False)
413 418 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
414 419 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
415 420
416 421 for r in self.requirements:
417 422 if r.startswith('exp-compression-'):
418 423 self.svfs.options['compengine'] = r[len('exp-compression-'):]
419 424
420 425 def _writerequirements(self):
421 426 scmutil.writerequires(self.vfs, self.requirements)
422 427
423 428 def _checknested(self, path):
424 429 """Determine if path is a legal nested repository."""
425 430 if not path.startswith(self.root):
426 431 return False
427 432 subpath = path[len(self.root) + 1:]
428 433 normsubpath = util.pconvert(subpath)
429 434
430 435 # XXX: Checking against the current working copy is wrong in
431 436 # the sense that it can reject things like
432 437 #
433 438 # $ hg cat -r 10 sub/x.txt
434 439 #
435 440 # if sub/ is no longer a subrepository in the working copy
436 441 # parent revision.
437 442 #
438 443 # However, it can of course also allow things that would have
439 444 # been rejected before, such as the above cat command if sub/
440 445 # is a subrepository now, but was a normal directory before.
441 446 # The old path auditor would have rejected by mistake since it
442 447 # panics when it sees sub/.hg/.
443 448 #
444 449 # All in all, checking against the working copy seems sensible
445 450 # since we want to prevent access to nested repositories on
446 451 # the filesystem *now*.
447 452 ctx = self[None]
448 453 parts = util.splitpath(subpath)
449 454 while parts:
450 455 prefix = '/'.join(parts)
451 456 if prefix in ctx.substate:
452 457 if prefix == normsubpath:
453 458 return True
454 459 else:
455 460 sub = ctx.sub(prefix)
456 461 return sub.checknested(subpath[len(prefix) + 1:])
457 462 else:
458 463 parts.pop()
459 464 return False
460 465
461 466 def peer(self):
462 467 return localpeer(self) # not cached to avoid reference cycle
463 468
464 469 def unfiltered(self):
465 470 """Return unfiltered version of the repository
466 471
467 472 Intended to be overwritten by filtered repo."""
468 473 return self
469 474
470 475 def filtered(self, name):
471 476 """Return a filtered version of a repository"""
472 477 # build a new class with the mixin and the current class
473 478 # (possibly subclass of the repo)
474 479 class proxycls(repoview.repoview, self.unfiltered().__class__):
475 480 pass
476 481 return proxycls(self, name)
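# Editorial sketch (not part of this changeset): filtered() builds a
# repoview proxy class on the fly, so one repository can be seen through
# several filters while unfiltered() always returns the bare object. The
# filter names below are the standard ones from repoview:
#
#     visible = repo.filtered('visible')   # hides hidden (e.g. obsolete) csets
#     served = repo.filtered('served')     # additionally hides secret csets
#     assert visible.unfiltered() is repo.unfiltered()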
477 482
478 483 @repofilecache('bookmarks', 'bookmarks.current')
479 484 def _bookmarks(self):
480 485 return bookmarks.bmstore(self)
481 486
482 487 @property
483 488 def _activebookmark(self):
484 489 return self._bookmarks.active
485 490
486 491 def bookmarkheads(self, bookmark):
487 492 name = bookmark.split('@', 1)[0]
488 493 heads = []
489 494 for mark, n in self._bookmarks.iteritems():
490 495 if mark.split('@', 1)[0] == name:
491 496 heads.append(n)
492 497 return heads
493 498
494 499 # _phaserevs and _phasesets depend on the changelog. What we need is to
495 500 # call _phasecache.invalidate() if '00changelog.i' was changed, but that
496 501 # can't be easily expressed in the filecache mechanism.
497 502 @storecache('phaseroots', '00changelog.i')
498 503 def _phasecache(self):
499 504 return phases.phasecache(self, self._phasedefaults)
500 505
501 506 @storecache('obsstore')
502 507 def obsstore(self):
503 508 # read default format for new obsstore.
504 509 # developer config: format.obsstore-version
505 510 defaultformat = self.ui.configint('format', 'obsstore-version', None)
506 511 # rely on obsstore class default when possible.
507 512 kwargs = {}
508 513 if defaultformat is not None:
509 514 kwargs['defaultformat'] = defaultformat
510 515 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
511 516 store = obsolete.obsstore(self.svfs, readonly=readonly,
512 517 **kwargs)
513 518 if store and readonly:
514 519 self.ui.warn(
515 520 _('obsolete feature not enabled but %i markers found!\n')
516 521 % len(list(store)))
517 522 return store
518 523
519 524 @storecache('00changelog.i')
520 525 def changelog(self):
521 526 c = changelog.changelog(self.svfs)
522 527 if txnutil.mayhavepending(self.root):
523 528 c.readpending('00changelog.i.a')
524 529 return c
525 530
526 531 def _constructmanifest(self):
527 532 # This is a temporary function while we migrate from manifest to
528 533 # manifestlog. It allows bundlerepo and unionrepo to intercept the
529 534 # manifest creation.
530 535 return manifest.manifestrevlog(self.svfs)
531 536
532 537 @storecache('00manifest.i')
533 538 def manifestlog(self):
534 539 return manifest.manifestlog(self.svfs, self)
535 540
536 541 @repofilecache('dirstate')
537 542 def dirstate(self):
538 543 return dirstate.dirstate(self.vfs, self.ui, self.root,
539 544 self._dirstatevalidate)
540 545
541 546 def _dirstatevalidate(self, node):
542 547 try:
543 548 self.changelog.rev(node)
544 549 return node
545 550 except error.LookupError:
546 551 if not self._dirstatevalidatewarned:
547 552 self._dirstatevalidatewarned = True
548 553 self.ui.warn(_("warning: ignoring unknown"
549 554 " working parent %s!\n") % short(node))
550 555 return nullid
551 556
552 557 def __getitem__(self, changeid):
553 558 if changeid is None or changeid == wdirrev:
554 559 return context.workingctx(self)
555 560 if isinstance(changeid, slice):
556 561 return [context.changectx(self, i)
557 562 for i in xrange(*changeid.indices(len(self)))
558 563 if i not in self.changelog.filteredrevs]
559 564 return context.changectx(self, changeid)
560 565
561 566 def __contains__(self, changeid):
562 567 try:
563 568 self[changeid]
564 569 return True
565 570 except error.RepoLookupError:
566 571 return False
567 572
568 573 def __nonzero__(self):
569 574 return True
570 575
571 576 def __len__(self):
572 577 return len(self.changelog)
573 578
574 579 def __iter__(self):
575 580 return iter(self.changelog)
576 581
577 582 def revs(self, expr, *args):
578 583 '''Find revisions matching a revset.
579 584
580 585 The revset is specified as a string ``expr`` that may contain
581 586 %-formatting to escape certain types. See ``revsetlang.formatspec``.
582 587
583 588 Revset aliases from the configuration are not expanded. To expand
584 589 user aliases, consider calling ``scmutil.revrange()`` or
585 590 ``repo.anyrevs([expr], user=True)``.
586 591
587 592 Returns a revset.abstractsmartset, which is a list-like interface
588 593 that contains integer revisions.
589 594 '''
590 595 expr = revsetlang.formatspec(expr, *args)
591 596 m = revset.match(None, expr)
592 597 return m(self)
593 598
594 599 def set(self, expr, *args):
595 600 '''Find revisions matching a revset and emit changectx instances.
596 601
597 602 This is a convenience wrapper around ``revs()`` that iterates the
598 603 result and is a generator of changectx instances.
599 604
600 605 Revset aliases from the configuration are not expanded. To expand
601 606 user aliases, consider calling ``scmutil.revrange()``.
602 607 '''
603 608 for r in self.revs(expr, *args):
604 609 yield self[r]
605 610
606 611 def anyrevs(self, specs, user=False):
607 612 '''Find revisions matching one of the given revsets.
608 613
609 614 Revset aliases from the configuration are not expanded by default. To
610 615 expand user aliases, specify ``user=True``.
611 616 '''
612 617 if user:
613 618 m = revset.matchany(self.ui, specs, repo=self)
614 619 else:
615 620 m = revset.matchany(None, specs)
616 621 return m(self)
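# Editorial sketch (not part of this changeset): revs() and set() take the
# %-formatting described in the revs() docstring, which keeps caller data
# out of the revset grammar. The arguments below are hypothetical:
#
#     repo.revs('ancestors(%d)', 42)           # %d: an integer revision
#     repo.revs('%ld and public()', [1, 2])    # %ld: a list of revisions
#     for ctx in repo.set('branch(%s) and head()', 'default'):
#         print(ctx.hex())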
617 622
618 623 def url(self):
619 624 return 'file:' + self.root
620 625
621 626 def hook(self, name, throw=False, **args):
622 627 """Call a hook, passing this repo instance.
623 628
624 629 This a convenience method to aid invoking hooks. Extensions likely
625 630 won't call this unless they have registered a custom hook or are
626 631 replacing code that is expected to call a hook.
627 632 """
628 633 return hook.hook(self.ui, self, name, throw, **args)
629 634
630 635 @unfilteredmethod
631 636 def _tag(self, names, node, message, local, user, date, extra=None,
632 637 editor=False):
633 638 if isinstance(names, str):
634 639 names = (names,)
635 640
636 641 branches = self.branchmap()
637 642 for name in names:
638 643 self.hook('pretag', throw=True, node=hex(node), tag=name,
639 644 local=local)
640 645 if name in branches:
641 646 self.ui.warn(_("warning: tag %s conflicts with existing"
642 647 " branch name\n") % name)
643 648
644 649 def writetags(fp, names, munge, prevtags):
645 650 fp.seek(0, 2)
646 651 if prevtags and prevtags[-1] != '\n':
647 652 fp.write('\n')
648 653 for name in names:
649 654 if munge:
650 655 m = munge(name)
651 656 else:
652 657 m = name
653 658
654 659 if (self._tagscache.tagtypes and
655 660 name in self._tagscache.tagtypes):
656 661 old = self.tags().get(name, nullid)
657 662 fp.write('%s %s\n' % (hex(old), m))
658 663 fp.write('%s %s\n' % (hex(node), m))
659 664 fp.close()
660 665
661 666 prevtags = ''
662 667 if local:
663 668 try:
664 669 fp = self.vfs('localtags', 'r+')
665 670 except IOError:
666 671 fp = self.vfs('localtags', 'a')
667 672 else:
668 673 prevtags = fp.read()
669 674
670 675 # local tags are stored in the current charset
671 676 writetags(fp, names, None, prevtags)
672 677 for name in names:
673 678 self.hook('tag', node=hex(node), tag=name, local=local)
674 679 return
675 680
676 681 try:
677 682 fp = self.wfile('.hgtags', 'rb+')
678 683 except IOError as e:
679 684 if e.errno != errno.ENOENT:
680 685 raise
681 686 fp = self.wfile('.hgtags', 'ab')
682 687 else:
683 688 prevtags = fp.read()
684 689
685 690 # committed tags are stored in UTF-8
686 691 writetags(fp, names, encoding.fromlocal, prevtags)
687 692
688 693 fp.close()
689 694
690 695 self.invalidatecaches()
691 696
692 697 if '.hgtags' not in self.dirstate:
693 698 self[None].add(['.hgtags'])
694 699
695 700 m = matchmod.exact(self.root, '', ['.hgtags'])
696 701 tagnode = self.commit(message, user, date, extra=extra, match=m,
697 702 editor=editor)
698 703
699 704 for name in names:
700 705 self.hook('tag', node=hex(node), tag=name, local=local)
701 706
702 707 return tagnode
703 708
704 709 def tag(self, names, node, message, local, user, date, editor=False):
705 710 '''tag a revision with one or more symbolic names.
706 711
707 712 names is a list of strings or, when adding a single tag, names may be a
708 713 string.
709 714
710 715 if local is True, the tags are stored in a per-repository file.
711 716 otherwise, they are stored in the .hgtags file, and a new
712 717 changeset is committed with the change.
713 718
714 719 keyword arguments:
715 720
716 721 local: whether to store tags in non-version-controlled file
717 722 (default False)
718 723
719 724 message: commit message to use if committing
720 725
721 726 user: name of user to use if committing
722 727
723 728 date: date tuple to use if committing'''
724 729
725 730 if not local:
726 731 m = matchmod.exact(self.root, '', ['.hgtags'])
727 732 if any(self.status(match=m, unknown=True, ignored=True)):
728 733 raise error.Abort(_('working copy of .hgtags is changed'),
729 734 hint=_('please commit .hgtags manually'))
730 735
731 736 self.tags() # instantiate the cache
732 737 self._tag(names, node, message, local, user, date, editor=editor)
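# Editorial sketch (not part of this changeset): tag() usage matching its
# docstring; the tag name, message, and user below are hypothetical.
#
#     node = repo['tip'].node()
#     repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
#              user='editor <editor@example.com>', date=None)
#     # local=True would record the tag in .hg/localtags instead of
#     # committing a change to .hgtags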
733 738
734 739 @filteredpropertycache
735 740 def _tagscache(self):
736 741 '''Returns a tagscache object that contains various tags-related
737 742 caches.'''
738 743
739 744 # This simplifies its cache management by having one decorated
740 745 # function (this one) and the rest simply fetch things from it.
741 746 class tagscache(object):
742 747 def __init__(self):
743 748 # These two define the set of tags for this repository. tags
744 749 # maps tag name to node; tagtypes maps tag name to 'global' or
745 750 # 'local'. (Global tags are defined by .hgtags across all
746 751 # heads, and local tags are defined in .hg/localtags.)
747 752 # They constitute the in-memory cache of tags.
748 753 self.tags = self.tagtypes = None
749 754
750 755 self.nodetagscache = self.tagslist = None
751 756
752 757 cache = tagscache()
753 758 cache.tags, cache.tagtypes = self._findtags()
754 759
755 760 return cache
756 761
757 762 def tags(self):
758 763 '''return a mapping of tag to node'''
759 764 t = {}
760 765 if self.changelog.filteredrevs:
761 766 tags, tt = self._findtags()
762 767 else:
763 768 tags = self._tagscache.tags
764 769 for k, v in tags.iteritems():
765 770 try:
766 771 # ignore tags to unknown nodes
767 772 self.changelog.rev(v)
768 773 t[k] = v
769 774 except (error.LookupError, ValueError):
770 775 pass
771 776 return t
772 777
773 778 def _findtags(self):
774 779 '''Do the hard work of finding tags. Return a pair of dicts
775 780 (tags, tagtypes) where tags maps tag name to node, and tagtypes
776 781 maps tag name to a string like \'global\' or \'local\'.
777 782 Subclasses or extensions are free to add their own tags, but
778 783 should be aware that the returned dicts will be retained for the
779 784 duration of the localrepo object.'''
780 785
781 786 # XXX what tagtype should subclasses/extensions use? Currently
782 787 # mq and bookmarks add tags, but do not set the tagtype at all.
783 788 # Should each extension invent its own tag type? Should there
784 789 # be one tagtype for all such "virtual" tags? Or is the status
785 790 # quo fine?
786 791
787 792 alltags = {} # map tag name to (node, hist)
788 793 tagtypes = {}
789 794
790 795 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
791 796 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
792 797
793 798 # Build the return dicts. Have to re-encode tag names because
794 799 # the tags module always uses UTF-8 (in order not to lose info
795 800 # writing to the cache), but the rest of Mercurial wants them in
796 801 # local encoding.
797 802 tags = {}
798 803 for (name, (node, hist)) in alltags.iteritems():
799 804 if node != nullid:
800 805 tags[encoding.tolocal(name)] = node
801 806 tags['tip'] = self.changelog.tip()
802 807 tagtypes = dict([(encoding.tolocal(name), value)
803 808 for (name, value) in tagtypes.iteritems()])
804 809 return (tags, tagtypes)
805 810
806 811 def tagtype(self, tagname):
807 812 '''
808 813 return the type of the given tag. result can be:
809 814
810 815 'local' : a local tag
811 816 'global' : a global tag
812 817 None : tag does not exist
813 818 '''
814 819
815 820 return self._tagscache.tagtypes.get(tagname)
816 821
817 822 def tagslist(self):
818 823 '''return a list of tags ordered by revision'''
819 824 if not self._tagscache.tagslist:
820 825 l = []
821 826 for t, n in self.tags().iteritems():
822 827 l.append((self.changelog.rev(n), t, n))
823 828 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
824 829
825 830 return self._tagscache.tagslist
826 831
827 832 def nodetags(self, node):
828 833 '''return the tags associated with a node'''
829 834 if not self._tagscache.nodetagscache:
830 835 nodetagscache = {}
831 836 for t, n in self._tagscache.tags.iteritems():
832 837 nodetagscache.setdefault(n, []).append(t)
833 838 for tags in nodetagscache.itervalues():
834 839 tags.sort()
835 840 self._tagscache.nodetagscache = nodetagscache
836 841 return self._tagscache.nodetagscache.get(node, [])
837 842
838 843 def nodebookmarks(self, node):
839 844 """return the list of bookmarks pointing to the specified node"""
840 845 marks = []
841 846 for bookmark, n in self._bookmarks.iteritems():
842 847 if n == node:
843 848 marks.append(bookmark)
844 849 return sorted(marks)
845 850
846 851 def branchmap(self):
847 852 '''returns a dictionary {branch: [branchheads]} with branchheads
848 853 ordered by increasing revision number'''
849 854 branchmap.updatecache(self)
850 855 return self._branchcaches[self.filtername]
851 856
852 857 @unfilteredmethod
853 858 def revbranchcache(self):
854 859 if not self._revbranchcache:
855 860 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
856 861 return self._revbranchcache
857 862
858 863 def branchtip(self, branch, ignoremissing=False):
859 864 '''return the tip node for a given branch
860 865
861 866 If ignoremissing is True, then this method will not raise an error.
862 867 This is helpful for callers that only expect None for a missing branch
863 868 (e.g. namespace).
864 869
865 870 '''
866 871 try:
867 872 return self.branchmap().branchtip(branch)
868 873 except KeyError:
869 874 if not ignoremissing:
870 875 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
871 876 else:
872 877 pass
873 878
874 879 def lookup(self, key):
875 880 return self[key].node()
876 881
877 882 def lookupbranch(self, key, remote=None):
878 883 repo = remote or self
879 884 if key in repo.branchmap():
880 885 return key
881 886
882 887 repo = (remote and remote.local()) and remote or self
883 888 return repo[key].branch()
884 889
885 890 def known(self, nodes):
886 891 cl = self.changelog
887 892 nm = cl.nodemap
888 893 filtered = cl.filteredrevs
889 894 result = []
890 895 for n in nodes:
891 896 r = nm.get(n)
892 897 resp = not (r is None or r in filtered)
893 898 result.append(resp)
894 899 return result
895 900
896 901 def local(self):
897 902 return self
898 903
899 904 def publishing(self):
900 905 # it's safe (and desirable) to trust the publish flag unconditionally
901 906 # so that we don't finalize changes shared between users via ssh or nfs
902 907 return self.ui.configbool('phases', 'publish', True, untrusted=True)
903 908
904 909 def cancopy(self):
905 910 # so statichttprepo's override of local() works
906 911 if not self.local():
907 912 return False
908 913 if not self.publishing():
909 914 return True
910 915 # if publishing we can't copy if there is filtered content
911 916 return not self.filtered('visible').changelog.filteredrevs
912 917
913 918 def shared(self):
914 919 '''the type of shared repository (None if not shared)'''
915 920 if self.sharedpath != self.path:
916 921 return 'store'
917 922 return None
918 923
919 924 def join(self, f, *insidef):
920 925 return self.vfs.join(os.path.join(f, *insidef))
921 926
922 927 def wjoin(self, f, *insidef):
923 928 return self.vfs.reljoin(self.root, f, *insidef)
924 929
925 930 def file(self, f):
926 931 if f[0] == '/':
927 932 f = f[1:]
928 933 return filelog.filelog(self.svfs, f)
929 934
930 935 def changectx(self, changeid):
931 936 return self[changeid]
932 937
933 938 def setparents(self, p1, p2=nullid):
934 939 self.dirstate.beginparentchange()
935 940 copies = self.dirstate.setparents(p1, p2)
936 941 pctx = self[p1]
937 942 if copies:
938 943 # Adjust copy records, the dirstate cannot do it, it
939 944 # requires access to parents manifests. Preserve them
940 945 # only for entries added to first parent.
941 946 for f in copies:
942 947 if f not in pctx and copies[f] in pctx:
943 948 self.dirstate.copy(copies[f], f)
944 949 if p2 == nullid:
945 950 for f, s in sorted(self.dirstate.copies().items()):
946 951 if f not in pctx and s not in pctx:
947 952 self.dirstate.copy(None, f)
948 953 self.dirstate.endparentchange()
949 954
950 955 def filectx(self, path, changeid=None, fileid=None):
951 956 """changeid can be a changeset revision, node, or tag.
952 957 fileid can be a file revision or node."""
953 958 return context.filectx(self, path, changeid, fileid)
954 959
955 960 def getcwd(self):
956 961 return self.dirstate.getcwd()
957 962
958 963 def pathto(self, f, cwd=None):
959 964 return self.dirstate.pathto(f, cwd)
960 965
961 966 def wfile(self, f, mode='r'):
962 967 return self.wvfs(f, mode)
963 968
964 969 def _link(self, f):
965 970 return self.wvfs.islink(f)
966 971
967 972 def _loadfilter(self, filter):
968 973 if filter not in self.filterpats:
969 974 l = []
970 975 for pat, cmd in self.ui.configitems(filter):
971 976 if cmd == '!':
972 977 continue
973 978 mf = matchmod.match(self.root, '', [pat])
974 979 fn = None
975 980 params = cmd
976 981 for name, filterfn in self._datafilters.iteritems():
977 982 if cmd.startswith(name):
978 983 fn = filterfn
979 984 params = cmd[len(name):].lstrip()
980 985 break
981 986 if not fn:
982 987 fn = lambda s, c, **kwargs: util.filter(s, c)
983 988 # Wrap old filters not supporting keyword arguments
984 989 if not inspect.getargspec(fn)[2]:
985 990 oldfn = fn
986 991 fn = lambda s, c, **kwargs: oldfn(s, c)
987 992 l.append((mf, fn, params))
988 993 self.filterpats[filter] = l
989 994 return self.filterpats[filter]
990 995
991 996 def _filter(self, filterpats, filename, data):
992 997 for mf, fn, cmd in filterpats:
993 998 if mf(filename):
994 999 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
995 1000 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
996 1001 break
997 1002
998 1003 return data
999 1004
1000 1005 @unfilteredpropertycache
1001 1006 def _encodefilterpats(self):
1002 1007 return self._loadfilter('encode')
1003 1008
1004 1009 @unfilteredpropertycache
1005 1010 def _decodefilterpats(self):
1006 1011 return self._loadfilter('decode')
1007 1012
1008 1013 def adddatafilter(self, name, filter):
1009 1014 self._datafilters[name] = filter
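# Editorial sketch (not part of this changeset): the filter machinery above
# is driven by the [encode]/[decode] hgrc sections; adddatafilter() lets
# extensions register named filter functions. The filter name and body
# below are hypothetical.
#
#     # hgrc:
#     #   [encode]
#     #   **.txt = mydos2unix:
#
#     def mydos2unix(s, params, **kwargs):
#         return s.replace('\r\n', '\n')
#     repo.adddatafilter('mydos2unix:', mydos2unix)
#     # wread() on matching files now applies mydos2unix via _loadfilter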
1010 1015
1011 1016 def wread(self, filename):
1012 1017 if self._link(filename):
1013 1018 data = self.wvfs.readlink(filename)
1014 1019 else:
1015 1020 data = self.wvfs.read(filename)
1016 1021 return self._filter(self._encodefilterpats, filename, data)
1017 1022
1018 1023 def wwrite(self, filename, data, flags, backgroundclose=False):
1019 1024 """write ``data`` into ``filename`` in the working directory
1020 1025
1021 1026 This returns the length of the written (possibly decoded) data.
1022 1027 """
1023 1028 data = self._filter(self._decodefilterpats, filename, data)
1024 1029 if 'l' in flags:
1025 1030 self.wvfs.symlink(data, filename)
1026 1031 else:
1027 1032 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1028 1033 if 'x' in flags:
1029 1034 self.wvfs.setflags(filename, False, True)
1030 1035 return len(data)
1031 1036
1032 1037 def wwritedata(self, filename, data):
1033 1038 return self._filter(self._decodefilterpats, filename, data)
1034 1039
1035 1040 def currenttransaction(self):
1036 1041 """return the current transaction or None if non exists"""
1037 1042 if self._transref:
1038 1043 tr = self._transref()
1039 1044 else:
1040 1045 tr = None
1041 1046
1042 1047 if tr and tr.running():
1043 1048 return tr
1044 1049 return None
1045 1050
1046 1051 def transaction(self, desc, report=None):
1047 1052 if (self.ui.configbool('devel', 'all-warnings')
1048 1053 or self.ui.configbool('devel', 'check-locks')):
1049 1054 if self._currentlock(self._lockref) is None:
1050 1055 raise error.ProgrammingError('transaction requires locking')
1051 1056 tr = self.currenttransaction()
1052 1057 if tr is not None:
1053 1058 return tr.nest()
1054 1059
1055 1060 # abort here if the journal already exists
1056 1061 if self.svfs.exists("journal"):
1057 1062 raise error.RepoError(
1058 1063 _("abandoned transaction found"),
1059 1064 hint=_("run 'hg recover' to clean up transaction"))
1060 1065
1061 1066 idbase = "%.40f#%f" % (random.random(), time.time())
1062 1067 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1063 1068 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1064 1069
1065 1070 self._writejournal(desc)
1066 1071 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1067 1072 if report:
1068 1073 rp = report
1069 1074 else:
1070 1075 rp = self.ui.warn
1071 1076 vfsmap = {'plain': self.vfs} # root of .hg/
1072 1077 # we must avoid cyclic reference between repo and transaction.
1073 1078 reporef = weakref.ref(self)
1074 1079 def validate(tr):
1075 1080 """will run pre-closing hooks"""
1076 1081 reporef().hook('pretxnclose', throw=True,
1077 1082 txnname=desc, **tr.hookargs)
1078 1083 def releasefn(tr, success):
1079 1084 repo = reporef()
1080 1085 if success:
1081 1086 # this should be explicitly invoked here, because
1082 1087 # in-memory changes aren't written out at closing
1083 1088 # transaction, if tr.addfilegenerator (via
1084 1089 # dirstate.write or so) isn't invoked while
1085 1090 # transaction running
1086 1091 repo.dirstate.write(None)
1087 1092 else:
1088 1093 # discard all changes (including ones already written
1089 1094 # out) in this transaction
1090 1095 repo.dirstate.restorebackup(None, prefix='journal.')
1091 1096
1092 1097 repo.invalidate(clearfilecache=True)
1093 1098
1094 1099 tr = transaction.transaction(rp, self.svfs, vfsmap,
1095 1100 "journal",
1096 1101 "undo",
1097 1102 aftertrans(renames),
1098 1103 self.store.createmode,
1099 1104 validator=validate,
1100 1105 releasefn=releasefn)
1101 1106
1102 1107 tr.hookargs['txnid'] = txnid
1103 1108 # note: writing the fncache only during finalize means that the file is
1104 1109 # outdated when running hooks. As fncache is used for streaming clones,
1105 1110 # this is not expected to break anything that happens during the hooks.
1106 1111 tr.addfinalize('flush-fncache', self.store.write)
1107 1112 def txnclosehook(tr2):
1108 1113 """To be run if transaction is successful, will schedule a hook run
1109 1114 """
1110 1115 # Don't reference tr2 in hook() so we don't hold a reference.
1111 1116 # This reduces memory consumption when there are multiple
1112 1117 # transactions per lock. This can likely go away if issue5045
1113 1118 # fixes the function accumulation.
1114 1119 hookargs = tr2.hookargs
1115 1120
1116 1121 def hook():
1117 1122 reporef().hook('txnclose', throw=False, txnname=desc,
1118 1123 **hookargs)
1119 1124 reporef()._afterlock(hook)
1120 1125 tr.addfinalize('txnclose-hook', txnclosehook)
1121 1126 def txnaborthook(tr2):
1122 1127 """To be run if transaction is aborted
1123 1128 """
1124 1129 reporef().hook('txnabort', throw=False, txnname=desc,
1125 1130 **tr2.hookargs)
1126 1131 tr.addabort('txnabort-hook', txnaborthook)
1127 1132 # avoid eager cache invalidation. in-memory data should be identical
1128 1133 # to stored data if transaction has no error.
1129 1134 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1130 1135 self._transref = weakref.ref(tr)
1131 1136 return tr
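# Editorial sketch (not part of this changeset): the canonical calling
# pattern for transaction(), honouring the devel check above that a lock
# must already be held; 'my-operation' is a hypothetical transaction name.
#
#     with repo.lock():
#         tr = repo.transaction('my-operation')
#         try:
#             # ... write to the store through tr ...
#             tr.close()    # runs pretxnclose/txnclose hooks and finalizers
#         finally:
#             tr.release()  # triggers txnabort if close() was never reached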
1132 1137
1133 1138 def _journalfiles(self):
1134 1139 return ((self.svfs, 'journal'),
1135 1140 (self.vfs, 'journal.dirstate'),
1136 1141 (self.vfs, 'journal.branch'),
1137 1142 (self.vfs, 'journal.desc'),
1138 1143 (self.vfs, 'journal.bookmarks'),
1139 1144 (self.svfs, 'journal.phaseroots'))
1140 1145
1141 1146 def undofiles(self):
1142 1147 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1143 1148
1144 1149 def _writejournal(self, desc):
1145 1150 self.dirstate.savebackup(None, prefix='journal.')
1146 1151 self.vfs.write("journal.branch",
1147 1152 encoding.fromlocal(self.dirstate.branch()))
1148 1153 self.vfs.write("journal.desc",
1149 1154 "%d\n%s\n" % (len(self), desc))
1150 1155 self.vfs.write("journal.bookmarks",
1151 1156 self.vfs.tryread("bookmarks"))
1152 1157 self.svfs.write("journal.phaseroots",
1153 1158 self.svfs.tryread("phaseroots"))
1154 1159
1155 1160 def recover(self):
1156 1161 with self.lock():
1157 1162 if self.svfs.exists("journal"):
1158 1163 self.ui.status(_("rolling back interrupted transaction\n"))
1159 1164 vfsmap = {'': self.svfs,
1160 1165 'plain': self.vfs,}
1161 1166 transaction.rollback(self.svfs, vfsmap, "journal",
1162 1167 self.ui.warn)
1163 1168 self.invalidate()
1164 1169 return True
1165 1170 else:
1166 1171 self.ui.warn(_("no interrupted transaction available\n"))
1167 1172 return False
1168 1173
1169 1174 def rollback(self, dryrun=False, force=False):
1170 1175 wlock = lock = dsguard = None
1171 1176 try:
1172 1177 wlock = self.wlock()
1173 1178 lock = self.lock()
1174 1179 if self.svfs.exists("undo"):
1175 1180 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1176 1181
1177 1182 return self._rollback(dryrun, force, dsguard)
1178 1183 else:
1179 1184 self.ui.warn(_("no rollback information available\n"))
1180 1185 return 1
1181 1186 finally:
1182 1187 release(dsguard, lock, wlock)
1183 1188
1184 1189 @unfilteredmethod # Until we get smarter cache management
1185 1190 def _rollback(self, dryrun, force, dsguard):
1186 1191 ui = self.ui
1187 1192 try:
1188 1193 args = self.vfs.read('undo.desc').splitlines()
1189 1194 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1190 1195 if len(args) >= 3:
1191 1196 detail = args[2]
1192 1197 oldtip = oldlen - 1
1193 1198
1194 1199 if detail and ui.verbose:
1195 1200 msg = (_('repository tip rolled back to revision %s'
1196 1201 ' (undo %s: %s)\n')
1197 1202 % (oldtip, desc, detail))
1198 1203 else:
1199 1204 msg = (_('repository tip rolled back to revision %s'
1200 1205 ' (undo %s)\n')
1201 1206 % (oldtip, desc))
1202 1207 except IOError:
1203 1208 msg = _('rolling back unknown transaction\n')
1204 1209 desc = None
1205 1210
1206 1211 if not force and self['.'] != self['tip'] and desc == 'commit':
1207 1212 raise error.Abort(
1208 1213 _('rollback of last commit while not checked out '
1209 1214 'may lose data'), hint=_('use -f to force'))
1210 1215
1211 1216 ui.status(msg)
1212 1217 if dryrun:
1213 1218 return 0
1214 1219
1215 1220 parents = self.dirstate.parents()
1216 1221 self.destroying()
1217 1222 vfsmap = {'plain': self.vfs, '': self.svfs}
1218 1223 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1219 1224 if self.vfs.exists('undo.bookmarks'):
1220 1225 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1221 1226 if self.svfs.exists('undo.phaseroots'):
1222 1227 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1223 1228 self.invalidate()
1224 1229
1225 1230 parentgone = (parents[0] not in self.changelog.nodemap or
1226 1231 parents[1] not in self.changelog.nodemap)
1227 1232 if parentgone:
1228 1233 # prevent dirstateguard from overwriting already restored one
1229 1234 dsguard.close()
1230 1235
1231 1236 self.dirstate.restorebackup(None, prefix='undo.')
1232 1237 try:
1233 1238 branch = self.vfs.read('undo.branch')
1234 1239 self.dirstate.setbranch(encoding.tolocal(branch))
1235 1240 except IOError:
1236 1241 ui.warn(_('named branch could not be reset: '
1237 1242 'current branch is still \'%s\'\n')
1238 1243 % self.dirstate.branch())
1239 1244
1240 1245 parents = tuple([p.rev() for p in self[None].parents()])
1241 1246 if len(parents) > 1:
1242 1247 ui.status(_('working directory now based on '
1243 1248 'revisions %d and %d\n') % parents)
1244 1249 else:
1245 1250 ui.status(_('working directory now based on '
1246 1251 'revision %d\n') % parents)
1247 1252 mergemod.mergestate.clean(self, self['.'].node())
1248 1253
1249 1254 # TODO: if we know which new heads may result from this rollback, pass
1250 1255 # them to destroy(), which will prevent the branchhead cache from being
1251 1256 # invalidated.
1252 1257 self.destroyed()
1253 1258 return 0
1254 1259
1255 1260 def invalidatecaches(self):
1256 1261
1257 1262 if '_tagscache' in vars(self):
1258 1263 # can't use delattr on proxy
1259 1264 del self.__dict__['_tagscache']
1260 1265
1261 1266 self.unfiltered()._branchcaches.clear()
1262 1267 self.invalidatevolatilesets()
1263 1268
1264 1269 def invalidatevolatilesets(self):
1265 1270 self.filteredrevcache.clear()
1266 1271 obsolete.clearobscaches(self)
1267 1272
1268 1273 def invalidatedirstate(self):
1269 1274 '''Invalidates the dirstate, causing the next call to dirstate
1270 1275 to check if it was modified since the last time it was read,
1271 1276 rereading it if it has.
1272 1277
1273 1278 This differs from dirstate.invalidate() in that it doesn't always
1274 1279 reread the dirstate. Use dirstate.invalidate() if you want to
1275 1280 explicitly read the dirstate again (i.e. restoring it to a previous
1276 1281 known good state).'''
1277 1282 if hasunfilteredcache(self, 'dirstate'):
1278 1283 for k in self.dirstate._filecache:
1279 1284 try:
1280 1285 delattr(self.dirstate, k)
1281 1286 except AttributeError:
1282 1287 pass
1283 1288 delattr(self.unfiltered(), 'dirstate')
1284 1289
1285 1290 def invalidate(self, clearfilecache=False):
1286 1291 '''Invalidates both store and non-store parts other than dirstate
1287 1292
1288 1293 If a transaction is running, invalidation of store is omitted,
1289 1294 because discarding in-memory changes might cause inconsistency
1290 1295 (e.g. incomplete fncache causes unintentional failure, but
1291 1296 redundant one doesn't).
1292 1297 '''
1293 1298 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1294 1299 for k in self._filecache.keys():
1295 1300 # dirstate is invalidated separately in invalidatedirstate()
1296 1301 if k == 'dirstate':
1297 1302 continue
1298 1303
1299 1304 if clearfilecache:
1300 1305 del self._filecache[k]
1301 1306 try:
1302 1307 delattr(unfiltered, k)
1303 1308 except AttributeError:
1304 1309 pass
1305 1310 self.invalidatecaches()
1306 1311 if not self.currenttransaction():
1307 1312 # TODO: Changing contents of store outside transaction
1308 1313 # causes inconsistency. We should make in-memory store
1309 1314 # changes detectable, and abort if changed.
1310 1315 self.store.invalidatecaches()
1311 1316
1312 1317 def invalidateall(self):
1313 1318 '''Fully invalidates both store and non-store parts, causing the
1314 1319 subsequent operation to reread any outside changes.'''
1315 1320 # extension should hook this to invalidate its caches
1316 1321 self.invalidate()
1317 1322 self.invalidatedirstate()
1318 1323
1319 1324 @unfilteredmethod
1320 1325 def _refreshfilecachestats(self, tr):
1321 1326 """Reload stats of cached files so that they are flagged as valid"""
1322 1327 for k, ce in self._filecache.items():
1323 1328 if k == 'dirstate' or k not in self.__dict__:
1324 1329 continue
1325 1330 ce.refresh()
1326 1331
1327 1332 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1328 1333 inheritchecker=None, parentenvvar=None):
1329 1334 parentlock = None
1330 1335 # the contents of parentenvvar are used by the underlying lock to
1331 1336 # determine whether it can be inherited
1332 1337 if parentenvvar is not None:
1333 1338 parentlock = encoding.environ.get(parentenvvar)
1334 1339 try:
1335 1340 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1336 1341 acquirefn=acquirefn, desc=desc,
1337 1342 inheritchecker=inheritchecker,
1338 1343 parentlock=parentlock)
1339 1344 except error.LockHeld as inst:
1340 1345 if not wait:
1341 1346 raise
1342 1347 # show more details for new-style locks
1343 1348 if ':' in inst.locker:
1344 1349 host, pid = inst.locker.split(":", 1)
1345 1350 self.ui.warn(
1346 1351 _("waiting for lock on %s held by process %r "
1347 1352 "on host %r\n") % (desc, pid, host))
1348 1353 else:
1349 1354 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1350 1355 (desc, inst.locker))
1351 1356 # default to 600 seconds timeout
1352 1357 l = lockmod.lock(vfs, lockname,
1353 1358 int(self.ui.config("ui", "timeout", "600")),
1354 1359 releasefn=releasefn, acquirefn=acquirefn,
1355 1360 desc=desc)
1356 1361 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1357 1362 return l
1358 1363
1359 1364 def _afterlock(self, callback):
1360 1365 """add a callback to be run when the repository is fully unlocked
1361 1366
1362 1367 The callback will be executed when the outermost lock is released
1363 1368 (with wlock being higher level than 'lock')."""
1364 1369 for ref in (self._wlockref, self._lockref):
1365 1370 l = ref and ref()
1366 1371 if l and l.held:
1367 1372 l.postrelease.append(callback)
1368 1373 break
1369 1374 else: # no lock has been found.
1370 1375 callback()
1371 1376
1372 1377 def lock(self, wait=True):
1373 1378 '''Lock the repository store (.hg/store) and return a weak reference
1374 1379 to the lock. Use this before modifying the store (e.g. committing or
1375 1380 stripping). If you are opening a transaction, get a lock as well.
1376 1381
1377 1382 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1378 1383 'wlock' first to avoid a deadlock hazard.'''
1379 1384 l = self._currentlock(self._lockref)
1380 1385 if l is not None:
1381 1386 l.lock()
1382 1387 return l
1383 1388
1384 1389 l = self._lock(self.svfs, "lock", wait, None,
1385 1390 self.invalidate, _('repository %s') % self.origroot)
1386 1391 self._lockref = weakref.ref(l)
1387 1392 return l
1388 1393
1389 1394 def _wlockchecktransaction(self):
1390 1395 if self.currenttransaction() is not None:
1391 1396 raise error.LockInheritanceContractViolation(
1392 1397 'wlock cannot be inherited in the middle of a transaction')
1393 1398
1394 1399 def wlock(self, wait=True):
1395 1400 '''Lock the non-store parts of the repository (everything under
1396 1401 .hg except .hg/store) and return a weak reference to the lock.
1397 1402
1398 1403 Use this before modifying files in .hg.
1399 1404
1400 1405 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1401 1406 'wlock' first to avoid a deadlock hazard.'''
1402 1407 l = self._wlockref and self._wlockref()
1403 1408 if l is not None and l.held:
1404 1409 l.lock()
1405 1410 return l
1406 1411
1407 1412 # We do not need to check for non-waiting lock acquisition. Such
1408 1413 # acquisition would not cause a deadlock as it would just fail.
1409 1414 if wait and (self.ui.configbool('devel', 'all-warnings')
1410 1415 or self.ui.configbool('devel', 'check-locks')):
1411 1416 if self._currentlock(self._lockref) is not None:
1412 1417 self.ui.develwarn('"wlock" acquired after "lock"')
1413 1418
1414 1419 def unlock():
1415 1420 if self.dirstate.pendingparentchange():
1416 1421 self.dirstate.invalidate()
1417 1422 else:
1418 1423 self.dirstate.write(None)
1419 1424
1420 1425 self._filecache['dirstate'].refresh()
1421 1426
1422 1427 l = self._lock(self.vfs, "wlock", wait, unlock,
1423 1428 self.invalidatedirstate, _('working directory of %s') %
1424 1429 self.origroot,
1425 1430 inheritchecker=self._wlockchecktransaction,
1426 1431 parentenvvar='HG_WLOCK_LOCKER')
1427 1432 self._wlockref = weakref.ref(l)
1428 1433 return l
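# Editorial sketch (not part of this changeset): the acquisition order that
# both lock docstrings above insist on -- always 'wlock' before 'lock':
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()   # non-store parts of .hg
#         lock = repo.lock()     # .hg/store; acquired after wlock
#         # ... modify the repository ...
#     finally:
#         release(lock, wlock)   # lockmod.release, aliased at module top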
1429 1434
1430 1435 def _currentlock(self, lockref):
1431 1436 """Returns the lock if it's held, or None if it's not."""
1432 1437 if lockref is None:
1433 1438 return None
1434 1439 l = lockref()
1435 1440 if l is None or not l.held:
1436 1441 return None
1437 1442 return l
1438 1443
1439 1444 def currentwlock(self):
1440 1445 """Returns the wlock if it's held, or None if it's not."""
1441 1446 return self._currentlock(self._wlockref)
1442 1447
1443 1448 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1444 1449 """
1445 1450 commit an individual file as part of a larger transaction
1446 1451 """
1447 1452
1448 1453 fname = fctx.path()
1449 1454 fparent1 = manifest1.get(fname, nullid)
1450 1455 fparent2 = manifest2.get(fname, nullid)
1451 1456 if isinstance(fctx, context.filectx):
1452 1457 node = fctx.filenode()
1453 1458 if node in [fparent1, fparent2]:
1454 1459 self.ui.debug('reusing %s filelog entry\n' % fname)
1455 1460 if manifest1.flags(fname) != fctx.flags():
1456 1461 changelist.append(fname)
1457 1462 return node
1458 1463
1459 1464 flog = self.file(fname)
1460 1465 meta = {}
1461 1466 copy = fctx.renamed()
1462 1467 if copy and copy[0] != fname:
1463 1468 # Mark the new revision of this file as a copy of another
1464 1469 # file. This copy data will effectively act as a parent
1465 1470 # of this new revision. If this is a merge, the first
1466 1471 # parent will be the nullid (meaning "look up the copy data")
1467 1472 # and the second one will be the other parent. For example:
1468 1473 #
1469 1474 # 0 --- 1 --- 3 rev1 changes file foo
1470 1475 # \ / rev2 renames foo to bar and changes it
1471 1476 # \- 2 -/ rev3 should have bar with all changes and
1472 1477 # should record that bar descends from
1473 1478 # bar in rev2 and foo in rev1
1474 1479 #
1475 1480 # this allows this merge to succeed:
1476 1481 #
1477 1482 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1478 1483 # \ / merging rev3 and rev4 should use bar@rev2
1479 1484 # \- 2 --- 4 as the merge base
1480 1485 #
1481 1486
1482 1487 cfname = copy[0]
1483 1488 crev = manifest1.get(cfname)
1484 1489 newfparent = fparent2
1485 1490
1486 1491 if manifest2: # branch merge
1487 1492 if fparent2 == nullid or crev is None: # copied on remote side
1488 1493 if cfname in manifest2:
1489 1494 crev = manifest2[cfname]
1490 1495 newfparent = fparent1
1491 1496
1492 1497 # Here, we used to search backwards through history to try to find
1493 1498 # where the file copy came from if the source of a copy was not in
1494 1499 # the parent directory. However, this doesn't actually make sense to
1495 1500 # do (what does a copy from something not in your working copy even
1496 1501 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1497 1502 # the user that copy information was dropped, so if they didn't
1498 1503 # expect this outcome it can be fixed, but this is the correct
1499 1504 # behavior in this circumstance.
1500 1505
1501 1506 if crev:
1502 1507 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1503 1508 meta["copy"] = cfname
1504 1509 meta["copyrev"] = hex(crev)
1505 1510 fparent1, fparent2 = nullid, newfparent
1506 1511 else:
1507 1512 self.ui.warn(_("warning: can't find ancestor for '%s' "
1508 1513 "copied from '%s'!\n") % (fname, cfname))
1509 1514
1510 1515 elif fparent1 == nullid:
1511 1516 fparent1, fparent2 = fparent2, nullid
1512 1517 elif fparent2 != nullid:
1513 1518 # is one parent an ancestor of the other?
1514 1519 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1515 1520 if fparent1 in fparentancestors:
1516 1521 fparent1, fparent2 = fparent2, nullid
1517 1522 elif fparent2 in fparentancestors:
1518 1523 fparent2 = nullid
1519 1524
1520 1525 # is the file changed?
1521 1526 text = fctx.data()
1522 1527 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1523 1528 changelist.append(fname)
1524 1529 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1525 1530 # are just the flags changed during merge?
1526 1531 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1527 1532 changelist.append(fname)
1528 1533
1529 1534 return fparent1
1530 1535
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

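    # For example (hypothetical file name):
    #
    #   $ hg commit untracked.txt
    #   abort: untracked.txt: file not tracked!
    #
    # reaches the final 'elif' above and aborts through fail().
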
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the time
            # the hook runs after the lock is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

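    # A minimal sketch of driving commit() from the API (path, user and
    # message are hypothetical; assumes the ui.load() constructor):
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui.load(), '/path/to/repo')
    #   node = repo.commit(text='fix frobnication', user='alice')
    #   # node is the new changeset id, or None if nothing was committed
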
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # put the new commit in the proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent already has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

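    # commitctx() can also be driven without a working directory, via an
    # in-memory context. A rough sketch (signatures recalled from memory,
    # so treat as an approximation of mercurial.context.memctx):
    #
    #   def getfctx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #   mctx = context.memctx(repo, (p1node, p2node), 'message',
    #                         ['a.txt'], getfctx, user='alice')
    #   node = repo.commitctx(mctx)
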
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes either to stay in memory (waiting for the next unlock) or to
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

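    # Usage sketch (pattern and revision are hypothetical):
    #
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m, node='tip'):
    #       repo.ui.write('%s\n' % f)
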
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

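    # For example, with two anonymous heads on the 'default' branch,
    # branchheads('default') returns both head nodes, newest first;
    # passing closed=True additionally includes heads created by
    # 'hg commit --close-branch'.
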
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

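    # between() samples the first-parent chain from top toward bottom at
    # exponentially growing distances. For a hypothetical chain
    # n10 -> n9 -> ... -> n0, between([(n10, n0)]) returns [[n9, n8, n6, n2]]:
    # the nodes 1, 2, 4 and 8 steps below top. The old 'between' wire command
    # relies on this sampling to binary-search for a common ancestor.
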
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose registered functions are called
        with a pushop (carrying repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

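    # Sketch of an extension registering a pre-push check (all names are
    # hypothetical; util.hooks.add(source, hook) is the registration call):
    #
    #   def _limitpush(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           raise error.Abort(_('refusing to push over 100 changesets'))
    #
    #   def reposetup(ui, repo):
    #       if repo.local():
    #           repo.prepushoutgoinghooks.add('limitpush', _limitpush)
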
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

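    # Usage sketch: moving a bookmark through the pushkey protocol (node
    # values are hypothetical 40-character hex strings):
    #
    #   ok = repo.pushkey('bookmarks', 'featureX', oldhex, newhex)
    #
    # ok is True when the compare-and-swap from oldhex to newhex succeeded;
    # listkeys('bookmarks') returns the resulting name-to-node mapping.
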
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

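# undoname() maps a transaction journal file to the undo counterpart kept
# after the transaction closes, for example:
#
#   undoname('.hg/store/journal')            -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
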
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
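
# Sketch of an extension wrapping newreporequirements to add its own
# requirement (the requirement and config names are hypothetical):
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'enabled'):
#           reqs.add('exp-myext-format')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)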