localrepo: rename proxycls to filteredrepo...
Jun Wu
r31279:052bc876 default
@@ -1,2073 +1,2073 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 vfs as vfsmod,
63 63 )
64 64
65 65 release = lockmod.release
66 66 urlerr = util.urlerr
67 67 urlreq = util.urlreq
68 68
69 69 class repofilecache(scmutil.filecache):
70 70 """All filecache usage on repo are done for logic that should be unfiltered
71 71 """
72 72
73 73 def __get__(self, repo, type=None):
74 74 if repo is None:
75 75 return self
76 76 return super(repofilecache, self).__get__(repo.unfiltered(), type)
77 77 def __set__(self, repo, value):
78 78 return super(repofilecache, self).__set__(repo.unfiltered(), value)
79 79 def __delete__(self, repo):
80 80 return super(repofilecache, self).__delete__(repo.unfiltered())
81 81
82 82 class storecache(repofilecache):
83 83 """filecache for files in the store"""
84 84 def join(self, obj, fname):
85 85 return obj.sjoin(fname)
86 86
87 87 class unfilteredpropertycache(util.propertycache):
88 88 """propertycache that apply to unfiltered repo only"""
89 89
90 90 def __get__(self, repo, type=None):
91 91 unfi = repo.unfiltered()
92 92 if unfi is repo:
93 93 return super(unfilteredpropertycache, self).__get__(unfi)
94 94 return getattr(unfi, self.name)
95 95
96 96 class filteredpropertycache(util.propertycache):
97 97 """propertycache that must take filtering in account"""
98 98
99 99 def cachevalue(self, obj, value):
100 100 object.__setattr__(obj, self.name, value)
101 101
102 102
103 103 def hasunfilteredcache(repo, name):
104 104 """check if a repo has an unfilteredpropertycache value for <name>"""
105 105 return name in vars(repo.unfiltered())
106 106
107 107 def unfilteredmethod(orig):
108 108 """decorate method that always need to be run on unfiltered version"""
109 109 def wrapper(repo, *args, **kwargs):
110 110 return orig(repo.unfiltered(), *args, **kwargs)
111 111 return wrapper
112 112
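A minimal sketch of how these cache helpers are meant to be combined on a repo class (illustrative only; `myrepo`, `myfile`, and `rebuild` are hypothetical names, not part of this module):

    class myrepo(localrepository):
        @repofilecache('myfile')
        def _mydata(self):
            # re-read only when .hg/myfile changes on disk
            return self.vfs.read('myfile')

        @unfilteredmethod
        def rebuild(self):
            # body always runs against repo.unfiltered()
            return len(self)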
113 113 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
114 114 'unbundle'))
115 115 legacycaps = moderncaps.union(set(['changegroupsubset']))
116 116
117 117 class localpeer(peer.peerrepository):
118 118 '''peer for a local repo; reflects only the most recent API'''
119 119
120 120 def __init__(self, repo, caps=moderncaps):
121 121 peer.peerrepository.__init__(self)
122 122 self._repo = repo.filtered('served')
123 123 self.ui = repo.ui
124 124 self._caps = repo._restrictcapabilities(caps)
125 125 self.requirements = repo.requirements
126 126 self.supportedformats = repo.supportedformats
127 127
128 128 def close(self):
129 129 self._repo.close()
130 130
131 131 def _capabilities(self):
132 132 return self._caps
133 133
134 134 def local(self):
135 135 return self._repo
136 136
137 137 def canpush(self):
138 138 return True
139 139
140 140 def url(self):
141 141 return self._repo.url()
142 142
143 143 def lookup(self, key):
144 144 return self._repo.lookup(key)
145 145
146 146 def branchmap(self):
147 147 return self._repo.branchmap()
148 148
149 149 def heads(self):
150 150 return self._repo.heads()
151 151
152 152 def known(self, nodes):
153 153 return self._repo.known(nodes)
154 154
155 155 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
156 156 **kwargs):
157 157 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
158 158 common=common, bundlecaps=bundlecaps,
159 159 **kwargs)
160 160 cb = util.chunkbuffer(chunks)
161 161
162 162 if bundlecaps is not None and 'HG20' in bundlecaps:
163 163 # When requesting a bundle2, getbundle returns a stream to make the
164 164 # wire-level function happier. We need to build a proper object
165 165 # from it in the local peer.
166 166 return bundle2.getunbundler(self.ui, cb)
167 167 else:
168 168 return changegroup.getunbundler('01', cb, None)
169 169
170 170 # TODO We might want to move the next two calls into legacypeer and add
171 171 # unbundle instead.
172 172
173 173 def unbundle(self, cg, heads, url):
174 174 """apply a bundle on a repo
175 175
176 176 This function handles the repo locking itself."""
177 177 try:
178 178 try:
179 179 cg = exchange.readbundle(self.ui, cg, None)
180 180 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
181 181 if util.safehasattr(ret, 'getchunks'):
182 182 # This is a bundle20 object, turn it into an unbundler.
183 183 # This little dance should be dropped eventually when the
184 184 # API is finally improved.
185 185 stream = util.chunkbuffer(ret.getchunks())
186 186 ret = bundle2.getunbundler(self.ui, stream)
187 187 return ret
188 188 except Exception as exc:
189 189 # If the exception contains output salvaged from a bundle2
190 190 # reply, we need to make sure it is printed before continuing
191 191 # to fail. So we build a bundle2 with such output and consume
192 192 # it directly.
193 193 #
194 194 # This is not very elegant but allows a "simple" solution for
195 195 # issue4594
196 196 output = getattr(exc, '_bundle2salvagedoutput', ())
197 197 if output:
198 198 bundler = bundle2.bundle20(self._repo.ui)
199 199 for out in output:
200 200 bundler.addpart(out)
201 201 stream = util.chunkbuffer(bundler.getchunks())
202 202 b = bundle2.getunbundler(self.ui, stream)
203 203 bundle2.processbundle(self._repo, b)
204 204 raise
205 205 except error.PushRaced as exc:
206 206 raise error.ResponseError(_('push failed:'), str(exc))
207 207
208 208 def lock(self):
209 209 return self._repo.lock()
210 210
211 211 def addchangegroup(self, cg, source, url):
212 212 return cg.apply(self._repo, source, url)
213 213
214 214 def pushkey(self, namespace, key, old, new):
215 215 return self._repo.pushkey(namespace, key, old, new)
216 216
217 217 def listkeys(self, namespace):
218 218 return self._repo.listkeys(namespace)
219 219
220 220 def debugwireargs(self, one, two, three=None, four=None, five=None):
221 221 '''used to test argument passing over the wire'''
222 222 return "%s %s %s %s %s" % (one, two, three, four, five)
223 223
224 224 class locallegacypeer(localpeer):
225 225 '''peer extension which implements legacy methods too; used for tests with
226 226 restricted capabilities'''
227 227
228 228 def __init__(self, repo):
229 229 localpeer.__init__(self, repo, caps=legacycaps)
230 230
231 231 def branches(self, nodes):
232 232 return self._repo.branches(nodes)
233 233
234 234 def between(self, pairs):
235 235 return self._repo.between(pairs)
236 236
237 237 def changegroup(self, basenodes, source):
238 238 return changegroup.changegroup(self._repo, basenodes, source)
239 239
240 240 def changegroupsubset(self, bases, heads, source):
241 241 return changegroup.changegroupsubset(self._repo, bases, heads, source)
242 242
243 243 class localrepository(object):
244 244
245 245 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
246 246 'manifestv2'))
247 247 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
248 248 'relshared', 'dotencode'))
249 249 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
250 250 filtername = None
251 251
252 252 # a list of (ui, featureset) functions.
253 253 # only functions defined in modules of enabled extensions are invoked
254 254 featuresetupfuncs = set()
255 255
256 256 def __init__(self, baseui, path, create=False):
257 257 self.requirements = set()
258 258 # vfs to access the working copy
259 259 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
260 260 # vfs to access the content of the repository
261 261 self.vfs = None
262 262 # vfs to access the store part of the repository
263 263 self.svfs = None
264 264 self.root = self.wvfs.base
265 265 self.path = self.wvfs.join(".hg")
266 266 self.origroot = path
267 267 self.auditor = pathutil.pathauditor(self.root, self._checknested)
268 268 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
269 269 realfs=False)
270 270 self.vfs = vfsmod.vfs(self.path)
271 271 self.baseui = baseui
272 272 self.ui = baseui.copy()
273 273 self.ui.copy = baseui.copy # prevent copying repo configuration
274 274 # A list of callbacks to shape the phases if no data were found.
275 275 # Callbacks are in the form: func(repo, roots) --> processed roots.
276 276 # This list is to be filled by extensions during repo setup
277 277 self._phasedefaults = []
278 278 try:
279 279 self.ui.readconfig(self.join("hgrc"), self.root)
280 280 self._loadextensions()
281 281 except IOError:
282 282 pass
283 283
284 284 if self.featuresetupfuncs:
285 285 self.supported = set(self._basesupported) # use private copy
286 286 extmods = set(m.__name__ for n, m
287 287 in extensions.extensions(self.ui))
288 288 for setupfunc in self.featuresetupfuncs:
289 289 if setupfunc.__module__ in extmods:
290 290 setupfunc(self.ui, self.supported)
291 291 else:
292 292 self.supported = self._basesupported
293 293 color.setup(self.ui)
294 294
295 295 # Add compression engines.
296 296 for name in util.compengines:
297 297 engine = util.compengines[name]
298 298 if engine.revlogheader():
299 299 self.supported.add('exp-compression-%s' % name)
300 300
301 301 if not self.vfs.isdir():
302 302 if create:
303 303 self.requirements = newreporequirements(self)
304 304
305 305 if not self.wvfs.exists():
306 306 self.wvfs.makedirs()
307 307 self.vfs.makedir(notindexed=True)
308 308
309 309 if 'store' in self.requirements:
310 310 self.vfs.mkdir("store")
311 311
312 312 # create an invalid changelog
313 313 self.vfs.append(
314 314 "00changelog.i",
315 315 '\0\0\0\2' # represents revlogv2
316 316 ' dummy changelog to prevent using the old repo layout'
317 317 )
318 318 else:
319 319 raise error.RepoError(_("repository %s not found") % path)
320 320 elif create:
321 321 raise error.RepoError(_("repository %s already exists") % path)
322 322 else:
323 323 try:
324 324 self.requirements = scmutil.readrequires(
325 325 self.vfs, self.supported)
326 326 except IOError as inst:
327 327 if inst.errno != errno.ENOENT:
328 328 raise
329 329
330 330 self.sharedpath = self.path
331 331 try:
332 332 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
333 333 if 'relshared' in self.requirements:
334 334 sharedpath = self.vfs.join(sharedpath)
335 335 vfs = vfsmod.vfs(sharedpath, realpath=True)
336 336 s = vfs.base
337 337 if not vfs.exists():
338 338 raise error.RepoError(
339 339 _('.hg/sharedpath points to nonexistent directory %s') % s)
340 340 self.sharedpath = s
341 341 except IOError as inst:
342 342 if inst.errno != errno.ENOENT:
343 343 raise
344 344
345 345 self.store = store.store(
346 346 self.requirements, self.sharedpath, vfsmod.vfs)
347 347 self.spath = self.store.path
348 348 self.svfs = self.store.vfs
349 349 self.sjoin = self.store.join
350 350 self.vfs.createmode = self.store.createmode
351 351 self._applyopenerreqs()
352 352 if create:
353 353 self._writerequirements()
354 354
355 355 self._dirstatevalidatewarned = False
356 356
357 357 self._branchcaches = {}
358 358 self._revbranchcache = None
359 359 self.filterpats = {}
360 360 self._datafilters = {}
361 361 self._transref = self._lockref = self._wlockref = None
362 362
363 363 # A cache for various files under .hg/ that tracks file changes
364 364 # (used by the filecache decorator)
365 365 #
366 366 # Maps a property name to its util.filecacheentry
367 367 self._filecache = {}
368 368
369 369 # holds sets of revisions to be filtered
370 370 # should be cleared when something might have changed the filter value:
371 371 # - new changesets,
372 372 # - phase change,
373 373 # - new obsolescence marker,
374 374 # - working directory parent change,
375 375 # - bookmark changes
376 376 self.filteredrevcache = {}
377 377
378 378 # generic mapping between names and nodes
379 379 self.names = namespaces.namespaces()
380 380
381 381 @property
382 382 def wopener(self):
383 383 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
384 384 return self.wvfs
385 385
386 386 @property
387 387 def opener(self):
388 388 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
389 389 return self.vfs
390 390
391 391 def close(self):
392 392 self._writecaches()
393 393
394 394 def _loadextensions(self):
395 395 extensions.loadall(self.ui)
396 396
397 397 def _writecaches(self):
398 398 if self._revbranchcache:
399 399 self._revbranchcache.write()
400 400
401 401 def _restrictcapabilities(self, caps):
402 402 if self.ui.configbool('experimental', 'bundle2-advertise', True):
403 403 caps = set(caps)
404 404 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
405 405 caps.add('bundle2=' + urlreq.quote(capsblob))
406 406 return caps
407 407
408 408 def _applyopenerreqs(self):
409 409 self.svfs.options = dict((r, 1) for r in self.requirements
410 410 if r in self.openerreqs)
411 411 # experimental config: format.chunkcachesize
412 412 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
413 413 if chunkcachesize is not None:
414 414 self.svfs.options['chunkcachesize'] = chunkcachesize
415 415 # experimental config: format.maxchainlen
416 416 maxchainlen = self.ui.configint('format', 'maxchainlen')
417 417 if maxchainlen is not None:
418 418 self.svfs.options['maxchainlen'] = maxchainlen
419 419 # experimental config: format.manifestcachesize
420 420 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
421 421 if manifestcachesize is not None:
422 422 self.svfs.options['manifestcachesize'] = manifestcachesize
423 423 # experimental config: format.aggressivemergedeltas
424 424 aggressivemergedeltas = self.ui.configbool('format',
425 425 'aggressivemergedeltas', False)
426 426 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
427 427 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
428 428
429 429 for r in self.requirements:
430 430 if r.startswith('exp-compression-'):
431 431 self.svfs.options['compengine'] = r[len('exp-compression-'):]
432 432
433 433 def _writerequirements(self):
434 434 scmutil.writerequires(self.vfs, self.requirements)
435 435
436 436 def _checknested(self, path):
437 437 """Determine if path is a legal nested repository."""
438 438 if not path.startswith(self.root):
439 439 return False
440 440 subpath = path[len(self.root) + 1:]
441 441 normsubpath = util.pconvert(subpath)
442 442
443 443 # XXX: Checking against the current working copy is wrong in
444 444 # the sense that it can reject things like
445 445 #
446 446 # $ hg cat -r 10 sub/x.txt
447 447 #
448 448 # if sub/ is no longer a subrepository in the working copy
449 449 # parent revision.
450 450 #
451 451 # However, it can of course also allow things that would have
452 452 # been rejected before, such as the above cat command if sub/
453 453 # is a subrepository now, but was a normal directory before.
454 454 # The old path auditor would have rejected it by mistake since it
455 455 # panics when it sees sub/.hg/.
456 456 #
457 457 # All in all, checking against the working copy seems sensible
458 458 # since we want to prevent access to nested repositories on
459 459 # the filesystem *now*.
460 460 ctx = self[None]
461 461 parts = util.splitpath(subpath)
462 462 while parts:
463 463 prefix = '/'.join(parts)
464 464 if prefix in ctx.substate:
465 465 if prefix == normsubpath:
466 466 return True
467 467 else:
468 468 sub = ctx.sub(prefix)
469 469 return sub.checknested(subpath[len(prefix) + 1:])
470 470 else:
471 471 parts.pop()
472 472 return False
473 473
474 474 def peer(self):
475 475 return localpeer(self) # not cached to avoid reference cycle
476 476
477 477 def unfiltered(self):
478 478 """Return unfiltered version of the repository
479 479
480 480 Intended to be overridden by filtered repos."""
481 481 return self
482 482
483 483 def filtered(self, name):
484 484 """Return a filtered version of a repository"""
485 485 # build a new class with the mixin and the current class
486 486 # (possibly subclass of the repo)
487 class proxycls(repoview.repoview, self.unfiltered().__class__):
487 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
488 488 pass
489 return proxycls(self, name)
489 return filteredrepo(self, name)
490 490
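The change above is a pure rename: the dynamically built subclass that mixes `repoview.repoview` into the concrete repo class is now called `filteredrepo` instead of `proxycls`. A sketch of the observable effect (the repository path is hypothetical):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')
    served = repo.filtered('served')          # view without secret/hidden csets
    assert served.unfiltered() is repo.unfiltered()
    assert type(served).__name__ == 'filteredrepo'   # was 'proxycls' before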
491 491 @repofilecache('bookmarks', 'bookmarks.current')
492 492 def _bookmarks(self):
493 493 return bookmarks.bmstore(self)
494 494
495 495 @property
496 496 def _activebookmark(self):
497 497 return self._bookmarks.active
498 498
499 499 def bookmarkheads(self, bookmark):
500 500 name = bookmark.split('@', 1)[0]
501 501 heads = []
502 502 for mark, n in self._bookmarks.iteritems():
503 503 if mark.split('@', 1)[0] == name:
504 504 heads.append(n)
505 505 return heads
506 506
507 507 # _phaserevs and _phasesets depend on changelog. what we need is to
508 508 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
509 509 # can't be easily expressed in filecache mechanism.
510 510 @storecache('phaseroots', '00changelog.i')
511 511 def _phasecache(self):
512 512 return phases.phasecache(self, self._phasedefaults)
513 513
514 514 @storecache('obsstore')
515 515 def obsstore(self):
516 516 # read default format for new obsstore.
517 517 # developer config: format.obsstore-version
518 518 defaultformat = self.ui.configint('format', 'obsstore-version', None)
519 519 # rely on obsstore class default when possible.
520 520 kwargs = {}
521 521 if defaultformat is not None:
522 522 kwargs['defaultformat'] = defaultformat
523 523 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
524 524 store = obsolete.obsstore(self.svfs, readonly=readonly,
525 525 **kwargs)
526 526 if store and readonly:
527 527 self.ui.warn(
528 528 _('obsolete feature not enabled but %i markers found!\n')
529 529 % len(list(store)))
530 530 return store
531 531
532 532 @storecache('00changelog.i')
533 533 def changelog(self):
534 534 c = changelog.changelog(self.svfs)
535 535 if txnutil.mayhavepending(self.root):
536 536 c.readpending('00changelog.i.a')
537 537 return c
538 538
539 539 def _constructmanifest(self):
540 540 # This is a temporary function while we migrate from manifest to
541 541 # manifestlog. It allows bundlerepo and unionrepo to intercept the
542 542 # manifest creation.
543 543 return manifest.manifestrevlog(self.svfs)
544 544
545 545 @storecache('00manifest.i')
546 546 def manifestlog(self):
547 547 return manifest.manifestlog(self.svfs, self)
548 548
549 549 @repofilecache('dirstate')
550 550 def dirstate(self):
551 551 return dirstate.dirstate(self.vfs, self.ui, self.root,
552 552 self._dirstatevalidate)
553 553
554 554 def _dirstatevalidate(self, node):
555 555 try:
556 556 self.changelog.rev(node)
557 557 return node
558 558 except error.LookupError:
559 559 if not self._dirstatevalidatewarned:
560 560 self._dirstatevalidatewarned = True
561 561 self.ui.warn(_("warning: ignoring unknown"
562 562 " working parent %s!\n") % short(node))
563 563 return nullid
564 564
565 565 def __getitem__(self, changeid):
566 566 if changeid is None or changeid == wdirrev:
567 567 return context.workingctx(self)
568 568 if isinstance(changeid, slice):
569 569 return [context.changectx(self, i)
570 570 for i in xrange(*changeid.indices(len(self)))
571 571 if i not in self.changelog.filteredrevs]
572 572 return context.changectx(self, changeid)
573 573
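`__getitem__` accepts several kinds of keys; a few representative lookups (sketch, assuming `repo` is an open repository):

    repo[0]          # changectx by revision number
    repo['tip']      # by symbolic name, tag, bookmark, or hash
    repo[None]       # the working directory context
    repo[0:3]        # a slice yields a list of changectx, skipping filtered revs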
574 574 def __contains__(self, changeid):
575 575 try:
576 576 self[changeid]
577 577 return True
578 578 except error.RepoLookupError:
579 579 return False
580 580
581 581 def __nonzero__(self):
582 582 return True
583 583
584 584 def __len__(self):
585 585 return len(self.changelog)
586 586
587 587 def __iter__(self):
588 588 return iter(self.changelog)
589 589
590 590 def revs(self, expr, *args):
591 591 '''Find revisions matching a revset.
592 592
593 593 The revset is specified as a string ``expr`` that may contain
594 594 %-formatting to escape certain types. See ``revsetlang.formatspec``.
595 595
596 596 Revset aliases from the configuration are not expanded. To expand
597 597 user aliases, consider calling ``scmutil.revrange()`` or
598 598 ``repo.anyrevs([expr], user=True)``.
599 599
600 600 Returns a revset.abstractsmartset, which is a list-like interface
601 601 that contains integer revisions.
602 602 '''
603 603 expr = revsetlang.formatspec(expr, *args)
604 604 m = revset.match(None, expr)
605 605 return m(self)
606 606
607 607 def set(self, expr, *args):
608 608 '''Find revisions matching a revset and emit changectx instances.
609 609
610 610 This is a convenience wrapper around ``revs()`` that iterates the
611 611 result and is a generator of changectx instances.
612 612
613 613 Revset aliases from the configuration are not expanded. To expand
614 614 user aliases, consider calling ``scmutil.revrange()``.
615 615 '''
616 616 for r in self.revs(expr, *args):
617 617 yield self[r]
618 618
619 619 def anyrevs(self, specs, user=False):
620 620 '''Find revisions matching one of the given revsets.
621 621
622 622 Revset aliases from the configuration are not expanded by default. To
623 623 expand user aliases, specify ``user=True``.
624 624 '''
625 625 if user:
626 626 m = revset.matchany(self.ui, specs, repo=self)
627 627 else:
628 628 m = revset.matchany(None, specs)
629 629 return m(self)
630 630
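The three revset entry points above differ in return type and alias handling; a short sketch (the revset strings are examples only):

    for rev in repo.revs('branch(%s) and not obsolete()', 'default'):
        print rev                      # integer revisions (a smartset)

    for ctx in repo.set('limit(sort(all(), -rev), %d)', 5):
        print ctx.hex()                # changectx objects, newest five

    repo.anyrevs(['.', 'draft()'], user=True)   # user revset aliases expanded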
631 631 def url(self):
632 632 return 'file:' + self.root
633 633
634 634 def hook(self, name, throw=False, **args):
635 635 """Call a hook, passing this repo instance.
636 636
637 637 This is a convenience method to aid invoking hooks. Extensions likely
638 638 won't call this unless they have registered a custom hook or are
639 639 replacing code that is expected to call a hook.
640 640 """
641 641 return hook.hook(self.ui, self, name, throw, **args)
642 642
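A sketch of firing a hook from an extension (`prewidget` is a hypothetical hook name; keyword arguments surface as `HG_*` environment variables to shell hooks):

    # raises error.HookAbort if a configured prewidget hook fails
    repo.hook('prewidget', throw=True, source='example')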
643 643 @unfilteredmethod
644 644 def _tag(self, names, node, message, local, user, date, extra=None,
645 645 editor=False):
646 646 if isinstance(names, str):
647 647 names = (names,)
648 648
649 649 branches = self.branchmap()
650 650 for name in names:
651 651 self.hook('pretag', throw=True, node=hex(node), tag=name,
652 652 local=local)
653 653 if name in branches:
654 654 self.ui.warn(_("warning: tag %s conflicts with existing"
655 655 " branch name\n") % name)
656 656
657 657 def writetags(fp, names, munge, prevtags):
658 658 fp.seek(0, 2)
659 659 if prevtags and prevtags[-1] != '\n':
660 660 fp.write('\n')
661 661 for name in names:
662 662 if munge:
663 663 m = munge(name)
664 664 else:
665 665 m = name
666 666
667 667 if (self._tagscache.tagtypes and
668 668 name in self._tagscache.tagtypes):
669 669 old = self.tags().get(name, nullid)
670 670 fp.write('%s %s\n' % (hex(old), m))
671 671 fp.write('%s %s\n' % (hex(node), m))
672 672 fp.close()
673 673
674 674 prevtags = ''
675 675 if local:
676 676 try:
677 677 fp = self.vfs('localtags', 'r+')
678 678 except IOError:
679 679 fp = self.vfs('localtags', 'a')
680 680 else:
681 681 prevtags = fp.read()
682 682
683 683 # local tags are stored in the current charset
684 684 writetags(fp, names, None, prevtags)
685 685 for name in names:
686 686 self.hook('tag', node=hex(node), tag=name, local=local)
687 687 return
688 688
689 689 try:
690 690 fp = self.wfile('.hgtags', 'rb+')
691 691 except IOError as e:
692 692 if e.errno != errno.ENOENT:
693 693 raise
694 694 fp = self.wfile('.hgtags', 'ab')
695 695 else:
696 696 prevtags = fp.read()
697 697
698 698 # committed tags are stored in UTF-8
699 699 writetags(fp, names, encoding.fromlocal, prevtags)
700 700
701 701 fp.close()
702 702
703 703 self.invalidatecaches()
704 704
705 705 if '.hgtags' not in self.dirstate:
706 706 self[None].add(['.hgtags'])
707 707
708 708 m = matchmod.exact(self.root, '', ['.hgtags'])
709 709 tagnode = self.commit(message, user, date, extra=extra, match=m,
710 710 editor=editor)
711 711
712 712 for name in names:
713 713 self.hook('tag', node=hex(node), tag=name, local=local)
714 714
715 715 return tagnode
716 716
717 717 def tag(self, names, node, message, local, user, date, editor=False):
718 718 '''tag a revision with one or more symbolic names.
719 719
720 720 names is a list of strings or, when adding a single tag, names may be a
721 721 string.
722 722
723 723 if local is True, the tags are stored in a per-repository file.
724 724 otherwise, they are stored in the .hgtags file, and a new
725 725 changeset is committed with the change.
726 726
727 727 keyword arguments:
728 728
729 729 local: whether to store tags in non-version-controlled file
730 730 (default False)
731 731
732 732 message: commit message to use if committing
733 733
734 734 user: name of user to use if committing
735 735
736 736 date: date tuple to use if committing'''
737 737
738 738 if not local:
739 739 m = matchmod.exact(self.root, '', ['.hgtags'])
740 740 if any(self.status(match=m, unknown=True, ignored=True)):
741 741 raise error.Abort(_('working copy of .hgtags is changed'),
742 742 hint=_('please commit .hgtags manually'))
743 743
744 744 self.tags() # instantiate the cache
745 745 self._tag(names, node, message, local, user, date, editor=editor)
746 746
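A sketch of calling the public entry point above (the tag name and message are examples; passing `None` for user and date falls back to the current user and date):

    with repo.wlock(), repo.lock():
        repo.tag(['v1.0'], repo['.'].node(), 'Added tag v1.0', False, None, None)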
747 747 @filteredpropertycache
748 748 def _tagscache(self):
749 749 '''Returns a tagscache object that contains various tags-related
750 750 caches.'''
751 751
752 752 # This simplifies its cache management by having one decorated
753 753 # function (this one) and the rest simply fetch things from it.
754 754 class tagscache(object):
755 755 def __init__(self):
756 756 # These two define the set of tags for this repository. tags
757 757 # maps tag name to node; tagtypes maps tag name to 'global' or
758 758 # 'local'. (Global tags are defined by .hgtags across all
759 759 # heads, and local tags are defined in .hg/localtags.)
760 760 # They constitute the in-memory cache of tags.
761 761 self.tags = self.tagtypes = None
762 762
763 763 self.nodetagscache = self.tagslist = None
764 764
765 765 cache = tagscache()
766 766 cache.tags, cache.tagtypes = self._findtags()
767 767
768 768 return cache
769 769
770 770 def tags(self):
771 771 '''return a mapping of tag to node'''
772 772 t = {}
773 773 if self.changelog.filteredrevs:
774 774 tags, tt = self._findtags()
775 775 else:
776 776 tags = self._tagscache.tags
777 777 for k, v in tags.iteritems():
778 778 try:
779 779 # ignore tags to unknown nodes
780 780 self.changelog.rev(v)
781 781 t[k] = v
782 782 except (error.LookupError, ValueError):
783 783 pass
784 784 return t
785 785
786 786 def _findtags(self):
787 787 '''Do the hard work of finding tags. Return a pair of dicts
788 788 (tags, tagtypes) where tags maps tag name to node, and tagtypes
789 789 maps tag name to a string like \'global\' or \'local\'.
790 790 Subclasses or extensions are free to add their own tags, but
791 791 should be aware that the returned dicts will be retained for the
792 792 duration of the localrepo object.'''
793 793
794 794 # XXX what tagtype should subclasses/extensions use? Currently
795 795 # mq and bookmarks add tags, but do not set the tagtype at all.
796 796 # Should each extension invent its own tag type? Should there
797 797 # be one tagtype for all such "virtual" tags? Or is the status
798 798 # quo fine?
799 799
800 800 alltags = {} # map tag name to (node, hist)
801 801 tagtypes = {}
802 802
803 803 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
804 804 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
805 805
806 806 # Build the return dicts. Have to re-encode tag names because
807 807 # the tags module always uses UTF-8 (in order not to lose info
808 808 # writing to the cache), but the rest of Mercurial wants them in
809 809 # local encoding.
810 810 tags = {}
811 811 for (name, (node, hist)) in alltags.iteritems():
812 812 if node != nullid:
813 813 tags[encoding.tolocal(name)] = node
814 814 tags['tip'] = self.changelog.tip()
815 815 tagtypes = dict([(encoding.tolocal(name), value)
816 816 for (name, value) in tagtypes.iteritems()])
817 817 return (tags, tagtypes)
818 818
819 819 def tagtype(self, tagname):
820 820 '''
821 821 return the type of the given tag. result can be:
822 822
823 823 'local' : a local tag
824 824 'global' : a global tag
825 825 None : tag does not exist
826 826 '''
827 827
828 828 return self._tagscache.tagtypes.get(tagname)
829 829
830 830 def tagslist(self):
831 831 '''return a list of tags ordered by revision'''
832 832 if not self._tagscache.tagslist:
833 833 l = []
834 834 for t, n in self.tags().iteritems():
835 835 l.append((self.changelog.rev(n), t, n))
836 836 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
837 837
838 838 return self._tagscache.tagslist
839 839
840 840 def nodetags(self, node):
841 841 '''return the tags associated with a node'''
842 842 if not self._tagscache.nodetagscache:
843 843 nodetagscache = {}
844 844 for t, n in self._tagscache.tags.iteritems():
845 845 nodetagscache.setdefault(n, []).append(t)
846 846 for tags in nodetagscache.itervalues():
847 847 tags.sort()
848 848 self._tagscache.nodetagscache = nodetagscache
849 849 return self._tagscache.nodetagscache.get(node, [])
850 850
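The read-side tag API in one sketch (assuming `repo` is an open repository):

    repo.tags()['tip']                   # binary node of the repository tip
    repo.tagslist()                      # [(name, node)] ordered oldest-first
    repo.nodetags(repo['.'].node())      # sorted tag names on the working parent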
851 851 def nodebookmarks(self, node):
852 852 """return the list of bookmarks pointing to the specified node"""
853 853 marks = []
854 854 for bookmark, n in self._bookmarks.iteritems():
855 855 if n == node:
856 856 marks.append(bookmark)
857 857 return sorted(marks)
858 858
859 859 def branchmap(self):
860 860 '''returns a dictionary {branch: [branchheads]} with branchheads
861 861 ordered by increasing revision number'''
862 862 branchmap.updatecache(self)
863 863 return self._branchcaches[self.filtername]
864 864
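A sketch of consuming the branch map (Python 2 dict iteration, as in the rest of this file):

    from mercurial.node import short

    for branch, heads in repo.branchmap().iteritems():
        # heads are binary nodes, ordered by increasing revision number
        print branch, [short(n) for n in heads]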
865 865 @unfilteredmethod
866 866 def revbranchcache(self):
867 867 if not self._revbranchcache:
868 868 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
869 869 return self._revbranchcache
870 870
871 871 def branchtip(self, branch, ignoremissing=False):
872 872 '''return the tip node for a given branch
873 873
874 874 If ignoremissing is True, then this method will not raise an error.
875 875 This is helpful for callers that only expect None for a missing branch
876 876 (e.g. namespace).
877 877
878 878 '''
879 879 try:
880 880 return self.branchmap().branchtip(branch)
881 881 except KeyError:
882 882 if not ignoremissing:
883 883 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
884 884 else:
885 885 pass
886 886
887 887 def lookup(self, key):
888 888 return self[key].node()
889 889
890 890 def lookupbranch(self, key, remote=None):
891 891 repo = remote or self
892 892 if key in repo.branchmap():
893 893 return key
894 894
895 895 repo = (remote and remote.local()) and remote or self
896 896 return repo[key].branch()
897 897
898 898 def known(self, nodes):
899 899 cl = self.changelog
900 900 nm = cl.nodemap
901 901 filtered = cl.filteredrevs
902 902 result = []
903 903 for n in nodes:
904 904 r = nm.get(n)
905 905 resp = not (r is None or r in filtered)
906 906 result.append(resp)
907 907 return result
908 908
909 909 def local(self):
910 910 return self
911 911
912 912 def publishing(self):
913 913 # it's safe (and desirable) to trust the publish flag unconditionally
914 914 # so that we don't finalize changes shared between users via ssh or nfs
915 915 return self.ui.configbool('phases', 'publish', True, untrusted=True)
916 916
917 917 def cancopy(self):
918 918 # so statichttprepo's override of local() works
919 919 if not self.local():
920 920 return False
921 921 if not self.publishing():
922 922 return True
923 923 # if publishing we can't copy if there is filtered content
924 924 return not self.filtered('visible').changelog.filteredrevs
925 925
926 926 def shared(self):
927 927 '''the type of shared repository (None if not shared)'''
928 928 if self.sharedpath != self.path:
929 929 return 'store'
930 930 return None
931 931
932 932 def join(self, f, *insidef):
933 933 return self.vfs.join(os.path.join(f, *insidef))
934 934
935 935 def wjoin(self, f, *insidef):
936 936 return self.vfs.reljoin(self.root, f, *insidef)
937 937
938 938 def file(self, f):
939 939 if f[0] == '/':
940 940 f = f[1:]
941 941 return filelog.filelog(self.svfs, f)
942 942
943 943 def changectx(self, changeid):
944 944 return self[changeid]
945 945
946 946 def setparents(self, p1, p2=nullid):
947 947 self.dirstate.beginparentchange()
948 948 copies = self.dirstate.setparents(p1, p2)
949 949 pctx = self[p1]
950 950 if copies:
951 951 # Adjust copy records, the dirstate cannot do it, it
952 952 # requires access to parents manifests. Preserve them
953 953 # only for entries added to first parent.
954 954 for f in copies:
955 955 if f not in pctx and copies[f] in pctx:
956 956 self.dirstate.copy(copies[f], f)
957 957 if p2 == nullid:
958 958 for f, s in sorted(self.dirstate.copies().items()):
959 959 if f not in pctx and s not in pctx:
960 960 self.dirstate.copy(None, f)
961 961 self.dirstate.endparentchange()
962 962
963 963 def filectx(self, path, changeid=None, fileid=None):
964 964 """changeid can be a changeset revision, node, or tag.
965 965 fileid can be a file revision or node."""
966 966 return context.filectx(self, path, changeid, fileid)
967 967
968 968 def getcwd(self):
969 969 return self.dirstate.getcwd()
970 970
971 971 def pathto(self, f, cwd=None):
972 972 return self.dirstate.pathto(f, cwd)
973 973
974 974 def wfile(self, f, mode='r'):
975 975 return self.wvfs(f, mode)
976 976
977 977 def _link(self, f):
978 978 return self.wvfs.islink(f)
979 979
980 980 def _loadfilter(self, filter):
981 981 if filter not in self.filterpats:
982 982 l = []
983 983 for pat, cmd in self.ui.configitems(filter):
984 984 if cmd == '!':
985 985 continue
986 986 mf = matchmod.match(self.root, '', [pat])
987 987 fn = None
988 988 params = cmd
989 989 for name, filterfn in self._datafilters.iteritems():
990 990 if cmd.startswith(name):
991 991 fn = filterfn
992 992 params = cmd[len(name):].lstrip()
993 993 break
994 994 if not fn:
995 995 fn = lambda s, c, **kwargs: util.filter(s, c)
996 996 # Wrap old filters not supporting keyword arguments
997 997 if not inspect.getargspec(fn)[2]:
998 998 oldfn = fn
999 999 fn = lambda s, c, **kwargs: oldfn(s, c)
1000 1000 l.append((mf, fn, params))
1001 1001 self.filterpats[filter] = l
1002 1002 return self.filterpats[filter]
1003 1003
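A sketch of registering a data filter that the loop above would pick up (`upper:` is a hypothetical filter name; the matching `[encode]` hgrc entry is shown in the comment):

    # hgrc:  [encode]
    #        **.txt = upper:
    def upper(s, params, **kwargs):      # kwargs receives ui, repo, filename
        return s.upper()

    repo.adddatafilter('upper:', upper)  # wread() now upcases matching files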
1004 1004 def _filter(self, filterpats, filename, data):
1005 1005 for mf, fn, cmd in filterpats:
1006 1006 if mf(filename):
1007 1007 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1008 1008 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1009 1009 break
1010 1010
1011 1011 return data
1012 1012
1013 1013 @unfilteredpropertycache
1014 1014 def _encodefilterpats(self):
1015 1015 return self._loadfilter('encode')
1016 1016
1017 1017 @unfilteredpropertycache
1018 1018 def _decodefilterpats(self):
1019 1019 return self._loadfilter('decode')
1020 1020
1021 1021 def adddatafilter(self, name, filter):
1022 1022 self._datafilters[name] = filter
1023 1023
1024 1024 def wread(self, filename):
1025 1025 if self._link(filename):
1026 1026 data = self.wvfs.readlink(filename)
1027 1027 else:
1028 1028 data = self.wvfs.read(filename)
1029 1029 return self._filter(self._encodefilterpats, filename, data)
1030 1030
1031 1031 def wwrite(self, filename, data, flags, backgroundclose=False):
1032 1032 """write ``data`` into ``filename`` in the working directory
1033 1033
1034 1034 This returns the length of the written (possibly decoded) data.
1035 1035 """
1036 1036 data = self._filter(self._decodefilterpats, filename, data)
1037 1037 if 'l' in flags:
1038 1038 self.wvfs.symlink(data, filename)
1039 1039 else:
1040 1040 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1041 1041 if 'x' in flags:
1042 1042 self.wvfs.setflags(filename, False, True)
1043 1043 return len(data)
1044 1044
1045 1045 def wwritedata(self, filename, data):
1046 1046 return self._filter(self._decodefilterpats, filename, data)
1047 1047
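A sketch of the working-directory I/O pair defined above (the filename and flags are examples):

    data = repo.wread('foo.txt')             # through the [encode] filters
    n = repo.wwrite('foo.txt', data, '')     # through [decode]; '' = regular file
    # flags: 'l' writes a symlink, 'x' sets the executable bit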
1048 1048 def currenttransaction(self):
1049 1049 """return the current transaction or None if non exists"""
1050 1050 if self._transref:
1051 1051 tr = self._transref()
1052 1052 else:
1053 1053 tr = None
1054 1054
1055 1055 if tr and tr.running():
1056 1056 return tr
1057 1057 return None
1058 1058
1059 1059 def transaction(self, desc, report=None):
1060 1060 if (self.ui.configbool('devel', 'all-warnings')
1061 1061 or self.ui.configbool('devel', 'check-locks')):
1062 1062 if self._currentlock(self._lockref) is None:
1063 1063 raise error.ProgrammingError('transaction requires locking')
1064 1064 tr = self.currenttransaction()
1065 1065 if tr is not None:
1066 1066 return tr.nest()
1067 1067
1068 1068 # abort here if the journal already exists
1069 1069 if self.svfs.exists("journal"):
1070 1070 raise error.RepoError(
1071 1071 _("abandoned transaction found"),
1072 1072 hint=_("run 'hg recover' to clean up transaction"))
1073 1073
1074 1074 idbase = "%.40f#%f" % (random.random(), time.time())
1075 1075 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1076 1076 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1077 1077
1078 1078 self._writejournal(desc)
1079 1079 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1080 1080 if report:
1081 1081 rp = report
1082 1082 else:
1083 1083 rp = self.ui.warn
1084 1084 vfsmap = {'plain': self.vfs} # root of .hg/
1085 1085 # we must avoid a cyclic reference between repo and transaction.
1086 1086 reporef = weakref.ref(self)
1087 1087 def validate(tr):
1088 1088 """will run pre-closing hooks"""
1089 1089 reporef().hook('pretxnclose', throw=True,
1090 1090 txnname=desc, **tr.hookargs)
1091 1091 def releasefn(tr, success):
1092 1092 repo = reporef()
1093 1093 if success:
1094 1094 # this should be explicitly invoked here, because
1095 1095 # in-memory changes aren't written out at closing
1096 1096 # transaction, if tr.addfilegenerator (via
1097 1097 # dirstate.write or so) isn't invoked while
1098 1098 # transaction running
1099 1099 repo.dirstate.write(None)
1100 1100 else:
1101 1101 # discard all changes (including ones already written
1102 1102 # out) in this transaction
1103 1103 repo.dirstate.restorebackup(None, prefix='journal.')
1104 1104
1105 1105 repo.invalidate(clearfilecache=True)
1106 1106
1107 1107 tr = transaction.transaction(rp, self.svfs, vfsmap,
1108 1108 "journal",
1109 1109 "undo",
1110 1110 aftertrans(renames),
1111 1111 self.store.createmode,
1112 1112 validator=validate,
1113 1113 releasefn=releasefn)
1114 1114
1115 1115 tr.hookargs['txnid'] = txnid
1116 1116 # note: writing the fncache only during finalize means that the file is
1117 1117 # outdated when running hooks. As the fncache is used for streaming clones,
1118 1118 # this is not expected to break anything that happens during the hooks.
1119 1119 tr.addfinalize('flush-fncache', self.store.write)
1120 1120 def txnclosehook(tr2):
1121 1121 """To be run if transaction is successful, will schedule a hook run
1122 1122 """
1123 1123 # Don't reference tr2 in hook() so we don't hold a reference.
1124 1124 # This reduces memory consumption when there are multiple
1125 1125 # transactions per lock. This can likely go away if issue5045
1126 1126 # fixes the function accumulation.
1127 1127 hookargs = tr2.hookargs
1128 1128
1129 1129 def hook():
1130 1130 reporef().hook('txnclose', throw=False, txnname=desc,
1131 1131 **hookargs)
1132 1132 reporef()._afterlock(hook)
1133 1133 tr.addfinalize('txnclose-hook', txnclosehook)
1134 1134 def txnaborthook(tr2):
1135 1135 """To be run if transaction is aborted
1136 1136 """
1137 1137 reporef().hook('txnabort', throw=False, txnname=desc,
1138 1138 **tr2.hookargs)
1139 1139 tr.addabort('txnabort-hook', txnaborthook)
1140 1140 # avoid eager cache invalidation. in-memory data should be identical
1141 1141 # to stored data if transaction has no error.
1142 1142 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1143 1143 self._transref = weakref.ref(tr)
1144 1144 return tr
1145 1145
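The canonical calling pattern for the method above, under the store lock it now insists on (sketch; the description string is arbitrary):

    with repo.lock():
        tr = repo.transaction('my-operation')
        try:
            # ... append to revlogs, move bookmarks, etc. via tr ...
            tr.close()     # runs pretxnclose/txnclose hooks and finalizers
        finally:
            tr.release()   # aborts (and runs txnabort hooks) if not closed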
1146 1146 def _journalfiles(self):
1147 1147 return ((self.svfs, 'journal'),
1148 1148 (self.vfs, 'journal.dirstate'),
1149 1149 (self.vfs, 'journal.branch'),
1150 1150 (self.vfs, 'journal.desc'),
1151 1151 (self.vfs, 'journal.bookmarks'),
1152 1152 (self.svfs, 'journal.phaseroots'))
1153 1153
1154 1154 def undofiles(self):
1155 1155 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1156 1156
1157 1157 def _writejournal(self, desc):
1158 1158 self.dirstate.savebackup(None, prefix='journal.')
1159 1159 self.vfs.write("journal.branch",
1160 1160 encoding.fromlocal(self.dirstate.branch()))
1161 1161 self.vfs.write("journal.desc",
1162 1162 "%d\n%s\n" % (len(self), desc))
1163 1163 self.vfs.write("journal.bookmarks",
1164 1164 self.vfs.tryread("bookmarks"))
1165 1165 self.svfs.write("journal.phaseroots",
1166 1166 self.svfs.tryread("phaseroots"))
1167 1167
1168 1168 def recover(self):
1169 1169 with self.lock():
1170 1170 if self.svfs.exists("journal"):
1171 1171 self.ui.status(_("rolling back interrupted transaction\n"))
1172 1172 vfsmap = {'': self.svfs,
1173 1173 'plain': self.vfs,}
1174 1174 transaction.rollback(self.svfs, vfsmap, "journal",
1175 1175 self.ui.warn)
1176 1176 self.invalidate()
1177 1177 return True
1178 1178 else:
1179 1179 self.ui.warn(_("no interrupted transaction available\n"))
1180 1180 return False
1181 1181
1182 1182 def rollback(self, dryrun=False, force=False):
1183 1183 wlock = lock = dsguard = None
1184 1184 try:
1185 1185 wlock = self.wlock()
1186 1186 lock = self.lock()
1187 1187 if self.svfs.exists("undo"):
1188 1188 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1189 1189
1190 1190 return self._rollback(dryrun, force, dsguard)
1191 1191 else:
1192 1192 self.ui.warn(_("no rollback information available\n"))
1193 1193 return 1
1194 1194 finally:
1195 1195 release(dsguard, lock, wlock)
1196 1196
1197 1197 @unfilteredmethod # Until we get smarter cache management
1198 1198 def _rollback(self, dryrun, force, dsguard):
1199 1199 ui = self.ui
1200 1200 try:
1201 1201 args = self.vfs.read('undo.desc').splitlines()
1202 1202 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1203 1203 if len(args) >= 3:
1204 1204 detail = args[2]
1205 1205 oldtip = oldlen - 1
1206 1206
1207 1207 if detail and ui.verbose:
1208 1208 msg = (_('repository tip rolled back to revision %s'
1209 1209 ' (undo %s: %s)\n')
1210 1210 % (oldtip, desc, detail))
1211 1211 else:
1212 1212 msg = (_('repository tip rolled back to revision %s'
1213 1213 ' (undo %s)\n')
1214 1214 % (oldtip, desc))
1215 1215 except IOError:
1216 1216 msg = _('rolling back unknown transaction\n')
1217 1217 desc = None
1218 1218
1219 1219 if not force and self['.'] != self['tip'] and desc == 'commit':
1220 1220 raise error.Abort(
1221 1221 _('rollback of last commit while not checked out '
1222 1222 'may lose data'), hint=_('use -f to force'))
1223 1223
1224 1224 ui.status(msg)
1225 1225 if dryrun:
1226 1226 return 0
1227 1227
1228 1228 parents = self.dirstate.parents()
1229 1229 self.destroying()
1230 1230 vfsmap = {'plain': self.vfs, '': self.svfs}
1231 1231 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1232 1232 if self.vfs.exists('undo.bookmarks'):
1233 1233 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1234 1234 if self.svfs.exists('undo.phaseroots'):
1235 1235 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1236 1236 self.invalidate()
1237 1237
1238 1238 parentgone = (parents[0] not in self.changelog.nodemap or
1239 1239 parents[1] not in self.changelog.nodemap)
1240 1240 if parentgone:
1241 1241 # prevent dirstateguard from overwriting already restored one
1242 1242 dsguard.close()
1243 1243
1244 1244 self.dirstate.restorebackup(None, prefix='undo.')
1245 1245 try:
1246 1246 branch = self.vfs.read('undo.branch')
1247 1247 self.dirstate.setbranch(encoding.tolocal(branch))
1248 1248 except IOError:
1249 1249 ui.warn(_('named branch could not be reset: '
1250 1250 'current branch is still \'%s\'\n')
1251 1251 % self.dirstate.branch())
1252 1252
1253 1253 parents = tuple([p.rev() for p in self[None].parents()])
1254 1254 if len(parents) > 1:
1255 1255 ui.status(_('working directory now based on '
1256 1256 'revisions %d and %d\n') % parents)
1257 1257 else:
1258 1258 ui.status(_('working directory now based on '
1259 1259 'revision %d\n') % parents)
1260 1260 mergemod.mergestate.clean(self, self['.'].node())
1261 1261
1262 1262 # TODO: if we know which new heads may result from this rollback, pass
1263 1263 # them to destroy(), which will prevent the branchhead cache from being
1264 1264 # invalidated.
1265 1265 self.destroyed()
1266 1266 return 0
1267 1267
1268 1268 def invalidatecaches(self):
1269 1269
1270 1270 if '_tagscache' in vars(self):
1271 1271 # can't use delattr on proxy
1272 1272 del self.__dict__['_tagscache']
1273 1273
1274 1274 self.unfiltered()._branchcaches.clear()
1275 1275 self.invalidatevolatilesets()
1276 1276
1277 1277 def invalidatevolatilesets(self):
1278 1278 self.filteredrevcache.clear()
1279 1279 obsolete.clearobscaches(self)
1280 1280
1281 1281 def invalidatedirstate(self):
1282 1282 '''Invalidates the dirstate, causing the next call to dirstate
1283 1283 to check if it was modified since the last time it was read,
1284 1284 rereading it if it has.
1285 1285
1286 1286 This differs from dirstate.invalidate() in that it doesn't always
1287 1287 reread the dirstate. Use dirstate.invalidate() if you want to
1288 1288 explicitly read the dirstate again (i.e. restore it to a previously
1289 1289 known good state).'''
1290 1290 if hasunfilteredcache(self, 'dirstate'):
1291 1291 for k in self.dirstate._filecache:
1292 1292 try:
1293 1293 delattr(self.dirstate, k)
1294 1294 except AttributeError:
1295 1295 pass
1296 1296 delattr(self.unfiltered(), 'dirstate')
1297 1297
1298 1298 def invalidate(self, clearfilecache=False):
1299 1299 '''Invalidates both store and non-store parts other than dirstate
1300 1300
1301 1301 If a transaction is running, invalidation of store is omitted,
1302 1302 because discarding in-memory changes might cause inconsistency
1303 1303 (e.g. incomplete fncache causes unintentional failure, but
1304 1304 redundant one doesn't).
1305 1305 '''
1306 1306 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1307 1307 for k in self._filecache.keys():
1308 1308 # dirstate is invalidated separately in invalidatedirstate()
1309 1309 if k == 'dirstate':
1310 1310 continue
1311 1311
1312 1312 if clearfilecache:
1313 1313 del self._filecache[k]
1314 1314 try:
1315 1315 delattr(unfiltered, k)
1316 1316 except AttributeError:
1317 1317 pass
1318 1318 self.invalidatecaches()
1319 1319 if not self.currenttransaction():
1320 1320 # TODO: Changing contents of store outside transaction
1321 1321 # causes inconsistency. We should make in-memory store
1322 1322 # changes detectable, and abort if changed.
1323 1323 self.store.invalidatecaches()
1324 1324
1325 1325 def invalidateall(self):
1326 1326 '''Fully invalidates both store and non-store parts, causing the
1327 1327 subsequent operation to reread any outside changes.'''
1328 1328 # extension should hook this to invalidate its caches
1329 1329 self.invalidate()
1330 1330 self.invalidatedirstate()
1331 1331
1332 1332 @unfilteredmethod
1333 1333 def _refreshfilecachestats(self, tr):
1334 1334 """Reload stats of cached files so that they are flagged as valid"""
1335 1335 for k, ce in self._filecache.items():
1336 1336 if k == 'dirstate' or k not in self.__dict__:
1337 1337 continue
1338 1338 ce.refresh()
1339 1339
1340 1340 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1341 1341 inheritchecker=None, parentenvvar=None):
1342 1342 parentlock = None
1343 1343 # the contents of parentenvvar are used by the underlying lock to
1344 1344 # determine whether it can be inherited
1345 1345 if parentenvvar is not None:
1346 1346 parentlock = encoding.environ.get(parentenvvar)
1347 1347 try:
1348 1348 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1349 1349 acquirefn=acquirefn, desc=desc,
1350 1350 inheritchecker=inheritchecker,
1351 1351 parentlock=parentlock)
1352 1352 except error.LockHeld as inst:
1353 1353 if not wait:
1354 1354 raise
1355 1355 # show more details for new-style locks
1356 1356 if ':' in inst.locker:
1357 1357 host, pid = inst.locker.split(":", 1)
1358 1358 self.ui.warn(
1359 1359 _("waiting for lock on %s held by process %r "
1360 1360 "on host %r\n") % (desc, pid, host))
1361 1361 else:
1362 1362 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1363 1363 (desc, inst.locker))
1364 1364 # default to 600 seconds timeout
1365 1365 l = lockmod.lock(vfs, lockname,
1366 1366 int(self.ui.config("ui", "timeout", "600")),
1367 1367 releasefn=releasefn, acquirefn=acquirefn,
1368 1368 desc=desc)
1369 1369 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1370 1370 return l
1371 1371
1372 1372 def _afterlock(self, callback):
1373 1373 """add a callback to be run when the repository is fully unlocked
1374 1374
1375 1375 The callback will be executed when the outermost lock is released
1376 1376 (with wlock being higher level than 'lock')."""
1377 1377 for ref in (self._wlockref, self._lockref):
1378 1378 l = ref and ref()
1379 1379 if l and l.held:
1380 1380 l.postrelease.append(callback)
1381 1381 break
1382 1382 else: # no lock has been found.
1383 1383 callback()
1384 1384
1385 1385 def lock(self, wait=True):
1386 1386 '''Lock the repository store (.hg/store) and return a weak reference
1387 1387 to the lock. Use this before modifying the store (e.g. committing or
1388 1388 stripping). If you are opening a transaction, get a lock as well.
1389 1389
1390 1390 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1391 1391 'wlock' first to avoid a deadlock hazard.'''
1392 1392 l = self._currentlock(self._lockref)
1393 1393 if l is not None:
1394 1394 l.lock()
1395 1395 return l
1396 1396
1397 1397 l = self._lock(self.svfs, "lock", wait, None,
1398 1398 self.invalidate, _('repository %s') % self.origroot)
1399 1399 self._lockref = weakref.ref(l)
1400 1400 return l
1401 1401
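The ordering rule from the docstring, spelled out (sketch):

    wlock = repo.wlock()       # always take wlock first...
    try:
        lock = repo.lock()     # ...then the store lock
        try:
            pass               # modify store and working copy here
        finally:
            lock.release()
    finally:
        wlock.release()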
1402 1402 def _wlockchecktransaction(self):
1403 1403 if self.currenttransaction() is not None:
1404 1404 raise error.LockInheritanceContractViolation(
1405 1405 'wlock cannot be inherited in the middle of a transaction')
1406 1406
1407 1407 def wlock(self, wait=True):
1408 1408 '''Lock the non-store parts of the repository (everything under
1409 1409 .hg except .hg/store) and return a weak reference to the lock.
1410 1410
1411 1411 Use this before modifying files in .hg.
1412 1412
1413 1413 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1414 1414 'wlock' first to avoid a deadlock hazard.'''
1415 1415 l = self._wlockref and self._wlockref()
1416 1416 if l is not None and l.held:
1417 1417 l.lock()
1418 1418 return l
1419 1419
1420 1420 # We do not need to check for non-waiting lock acquisition. Such
1421 1421 # acquisition would not cause a deadlock as it would just fail.
1422 1422 if wait and (self.ui.configbool('devel', 'all-warnings')
1423 1423 or self.ui.configbool('devel', 'check-locks')):
1424 1424 if self._currentlock(self._lockref) is not None:
1425 1425 self.ui.develwarn('"wlock" acquired after "lock"')
1426 1426
1427 1427 def unlock():
1428 1428 if self.dirstate.pendingparentchange():
1429 1429 self.dirstate.invalidate()
1430 1430 else:
1431 1431 self.dirstate.write(None)
1432 1432
1433 1433 self._filecache['dirstate'].refresh()
1434 1434
1435 1435 l = self._lock(self.vfs, "wlock", wait, unlock,
1436 1436 self.invalidatedirstate, _('working directory of %s') %
1437 1437 self.origroot,
1438 1438 inheritchecker=self._wlockchecktransaction,
1439 1439 parentenvvar='HG_WLOCK_LOCKER')
1440 1440 self._wlockref = weakref.ref(l)
1441 1441 return l
1442 1442
1443 1443 def _currentlock(self, lockref):
1444 1444 """Returns the lock if it's held, or None if it's not."""
1445 1445 if lockref is None:
1446 1446 return None
1447 1447 l = lockref()
1448 1448 if l is None or not l.held:
1449 1449 return None
1450 1450 return l
1451 1451
1452 1452 def currentwlock(self):
1453 1453 """Returns the wlock if it's held, or None if it's not."""
1454 1454 return self._currentlock(self._wlockref)
1455 1455
1456 1456 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1457 1457 """
1458 1458 commit an individual file as part of a larger transaction
1459 1459 """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
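        # Only exact file names and directory prefix patterns are checked
        # here; glob and regexp patterns are allowed to match nothing.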
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped when the
            # hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """
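        # Three cases follow: reuse an existing manifest revision when the
        # context already carries one, check in files and write a new
        # manifest when the context reports changed files, or reuse p1's
        # manifest for a changelog-only commit.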

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit to its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent already has a higher phase, the resulting
                # phase will still be compliant
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
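        # return repository head nodes, sorted newest first; with 'start',
        # only heads that are descendants of 'start'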
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
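        # For each starting node, follow first parents until a merge or the
        # root is reached, collecting (tipmost, end, p1-of-end, p2-of-end)
        # tuples; used by the legacy discovery protocol.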
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
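        # For each (top, bottom) pair, walk first parents from top towards
        # bottom and collect the nodes seen at exponentially growing
        # distances (1, 2, 4, ...); used by the legacy discovery protocol.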
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of callables, which are invoked with
        a pushop (carrying repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
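        # Run the prepushkey hook first (an abort vetoes the change), then
        # set the key and schedule the pushkey hook to fire once the
        # current lock is released.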
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
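        # Write the message to .hg/last-message.txt and return a printable
        # path to it, so callers can point the user at the saved message.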
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                # if src and dest refer to the same file, vfs.rename is a
                # no-op, leaving both src and dest on disk. delete dest to
                # make sure the rename cannot be such a no-op.
                vfs.unlink(dest)
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

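# map a journal file name (written while a transaction is running) to the
# matching 'undo' file name that rollback reads afterwards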
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
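    # With stock configuration this typically yields {'revlogv1', 'store',
    # 'fncache', 'dotencode', 'generaldelta'}.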
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements