localrepo: cache self.changelog in local variable...
Stanislau Hlebik
r30905:6037caa6 default
@@ -1,2030 +1,2031 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
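    # Example (illustrative): a localpeer is normally obtained via
    # localrepository.peer() rather than constructed directly; 'repo' is an
    # assumed localrepository instance.
    #
    #     peer = repo.peer()           # wraps the 'served' view of the repo
    #     node = peer.lookup('tip')    # same API a remote peer exposes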

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas',
                                                   False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in encoding.environ:
            p = encoding.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
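    # Example (illustrative): 'changelog' is a filecache property, so every
    # attribute access pays a validation cost. Hot loops, as in the commit
    # this page documents, cache it in a local variable first:
    #
    #     cl = repo.changelog
    #     revs = [cl.rev(n) for n in nodes]   # 'nodes' is an assumed list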

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)
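    # Example (illustrative): the methods above let a repository act as a
    # container of revisions; 'repo' and 'node' are assumed names.
    #
    #     n = len(repo)            # number of changesets
    #     ctx = repo['tip']        # changectx by rev, node, tag, ...
    #     wctx = repo[None]        # working directory context
    #     known = node in repo     # membership via __contains__
    #     for rev in repo:         # iterates revision numbers
    #         ...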

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
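    # Example (illustrative): %-formatting escapes user-supplied values so
    # they are not parsed as revset syntax; 'repo', 'somebranch' and 'alice'
    # are assumed names.
    #
    #     for rev in repo.revs('branch(%s) and not public()', 'somebranch'):
    #         ...
    #     for ctx in repo.set('author(%s)', 'alice'):
    #         ...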

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
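    # Example (illustrative): this is how transaction() below fires its
    # opening hook; 'desc' and 'txnid' are its local variables.
    #
    #     self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)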

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
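    # Example (illustrative): tagging the working directory parent with a
    # committed (non-local) tag; the tag name, message, user and date values
    # here are assumptions matching the docstring above.
    #
    #     repo.tag(['v1.0'], repo['.'].node(), 'Added tag v1.0', False,
    #              'user@example.com', None)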

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
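    # Example (illustrative): reading branch data through the cached
    # branchmap; 'repo' is an assumed localrepository instance.
    #
    #     heads = repo.branchmap()['default']   # heads, oldest rev first
    #     tip = repo.branchtip('default', ignoremissing=True)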

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
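    # Example (illustrative): _loadfilter reads pattern/command pairs from a
    # ui section; an assumed hgrc fragment in the style of the win32text
    # extension might look like:
    #
    #     [encode]
    #     ** = cleverencode:
    #
    # 'cleverencode:' matches a filter registered via adddatafilter(); other
    # commands fall back to util.filter (a shell pipe).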

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
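    # Example (illustrative): the expected calling pattern, honoring the lock
    # ordering documented at lock()/wlock() below; 'repo' is assumed.
    #
    #     with repo.wlock(), repo.lock():
    #         tr = repo.transaction('example')
    #         try:
    #             ...                  # mutate the store
    #             tr.close()
    #         finally:
    #             tr.release()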

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock, as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
1634 1634 or self.ui.configbool('ui', 'allowemptycommit'))
1635 1635 if not allowemptycommit:
1636 1636 return None
1637 1637
1638 1638 if merge and cctx.deleted():
1639 1639 raise error.Abort(_("cannot commit merge with missing files"))
1640 1640
1641 1641 ms = mergemod.mergestate.read(self)
1642 1642 mergeutil.checkunresolved(ms)
1643 1643
1644 1644 if editor:
1645 1645 cctx._text = editor(self, cctx, subs)
1646 1646 edited = (text != cctx._text)
1647 1647
1648 1648 # Save commit message in case this transaction gets rolled back
1649 1649 # (e.g. by a pretxncommit hook). Leave the content alone on
1650 1650 # the assumption that the user will use the same editor again.
1651 1651 msgfn = self.savecommitmessage(cctx._text)
1652 1652
1653 1653 # commit subs and write new state
1654 1654 if subs:
1655 1655 for s in sorted(commitsubs):
1656 1656 sub = wctx.sub(s)
1657 1657 self.ui.status(_('committing subrepository %s\n') %
1658 1658 subrepo.subrelpath(sub))
1659 1659 sr = sub.commit(cctx._text, user, date)
1660 1660 newstate[s] = (newstate[s][0], sr)
1661 1661 subrepo.writestate(self, newstate)
1662 1662
1663 1663 p1, p2 = self.dirstate.parents()
1664 1664 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1665 1665 try:
1666 1666 self.hook("precommit", throw=True, parent1=hookp1,
1667 1667 parent2=hookp2)
1668 1668 tr = self.transaction('commit')
1669 1669 ret = self.commitctx(cctx, True)
1670 1670 except: # re-raises
1671 1671 if edited:
1672 1672 self.ui.write(
1673 1673 _('note: commit message saved in %s\n') % msgfn)
1674 1674 raise
1675 1675 # update bookmarks, dirstate and mergestate
1676 1676 bookmarks.update(self, [p1, p2], ret)
1677 1677 cctx.markcommitted(ret)
1678 1678 ms.reset()
1679 1679 tr.close()
1680 1680
1681 1681 finally:
1682 1682 lockmod.release(tr, lock, wlock)
1683 1683
1684 1684 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1685 1685 # hack for commands that use a temporary commit (e.g. histedit):
1686 1686 # the temporary commit may already be stripped when the hook runs
1687 1687 if self.changelog.hasnode(ret):
1688 1688 self.hook("commit", node=node, parent1=parent1,
1689 1689 parent2=parent2)
1690 1690 self._afterlock(commithook)
1691 1691 return ret
1692 1692
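[editor's note] A hedged usage sketch of the commit() entry point above; the repository path, user, and message are hypothetical:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')   # hypothetical path
node = repo.commit(text='example message', user='alice <alice@example.com>')
if node is None:
    print('nothing to commit')  # commit() returns None for disallowed empty commits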
1693 1693 @unfilteredmethod
1694 1694 def commitctx(self, ctx, error=False):
1695 1695 """Add a new revision to current repository.
1696 1696 Revision information is passed via the context argument.
1697 1697 """
1698 1698
1699 1699 tr = None
1700 1700 p1, p2 = ctx.p1(), ctx.p2()
1701 1701 user = ctx.user()
1702 1702
1703 1703 lock = self.lock()
1704 1704 try:
1705 1705 tr = self.transaction("commit")
1706 1706 trp = weakref.proxy(tr)
1707 1707
1708 1708 if ctx.manifestnode():
1709 1709 # reuse an existing manifest revision
1710 1710 mn = ctx.manifestnode()
1711 1711 files = ctx.files()
1712 1712 elif ctx.files():
1713 1713 m1ctx = p1.manifestctx()
1714 1714 m2ctx = p2.manifestctx()
1715 1715 mctx = m1ctx.copy()
1716 1716
1717 1717 m = mctx.read()
1718 1718 m1 = m1ctx.read()
1719 1719 m2 = m2ctx.read()
1720 1720
1721 1721 # check in files
1722 1722 added = []
1723 1723 changed = []
1724 1724 removed = list(ctx.removed())
1725 1725 linkrev = len(self)
1726 1726 self.ui.note(_("committing files:\n"))
1727 1727 for f in sorted(ctx.modified() + ctx.added()):
1728 1728 self.ui.note(f + "\n")
1729 1729 try:
1730 1730 fctx = ctx[f]
1731 1731 if fctx is None:
1732 1732 removed.append(f)
1733 1733 else:
1734 1734 added.append(f)
1735 1735 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1736 1736 trp, changed)
1737 1737 m.setflag(f, fctx.flags())
1738 1738 except OSError as inst:
1739 1739 self.ui.warn(_("trouble committing %s!\n") % f)
1740 1740 raise
1741 1741 except IOError as inst:
1742 1742 errcode = getattr(inst, 'errno', errno.ENOENT)
1743 1743 if error or errcode and errcode != errno.ENOENT:
1744 1744 self.ui.warn(_("trouble committing %s!\n") % f)
1745 1745 raise
1746 1746
1747 1747 # update manifest
1748 1748 self.ui.note(_("committing manifest\n"))
1749 1749 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1750 1750 drop = [f for f in removed if f in m]
1751 1751 for f in drop:
1752 1752 del m[f]
1753 1753 mn = mctx.write(trp, linkrev,
1754 1754 p1.manifestnode(), p2.manifestnode(),
1755 1755 added, drop)
1756 1756 files = changed + removed
1757 1757 else:
1758 1758 mn = p1.manifestnode()
1759 1759 files = []
1760 1760
1761 1761 # update changelog
1762 1762 self.ui.note(_("committing changelog\n"))
1763 1763 self.changelog.delayupdate(tr)
1764 1764 n = self.changelog.add(mn, files, ctx.description(),
1765 1765 trp, p1.node(), p2.node(),
1766 1766 user, ctx.date(), ctx.extra().copy())
1767 1767 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1768 1768 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1769 1769 parent2=xp2)
1770 1770 # set the new commit in its proper phase
1771 1771 targetphase = subrepo.newcommitphase(self.ui, ctx)
1772 1772 if targetphase:
1773 1773 # retracting the boundary does not alter parent changesets.
1774 1774 # if a parent has a higher phase, the resulting phase will
1775 1775 # be compliant anyway
1776 1776 #
1777 1777 # if the minimal phase was 0 we don't need to retract anything
1778 1778 phases.retractboundary(self, tr, targetphase, [n])
1779 1779 tr.close()
1780 1780 branchmap.updatecache(self.filtered('served'))
1781 1781 return n
1782 1782 finally:
1783 1783 if tr:
1784 1784 tr.release()
1785 1785 lock.release()
1786 1786
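[editor's note] The pretxncommit hook fired in commitctx() above can veto the whole transaction. A sketch, under the assumption that an in-process callable is registered via ui.setconfig (the hook name and repository path are hypothetical):

from mercurial import hg, ui as uimod

def nonempty(ui, repo, node=None, **kwargs):
    # reject commits whose message is blank; a truthy return aborts
    if not repo[node].description().strip():
        ui.warn('commit message must not be empty\n')
        return True

u = uimod.ui()
u.setconfig('hooks', 'pretxncommit.nonempty', nonempty)  # hypothetical hook name
repo = hg.repository(u, '/path/to/repo')                 # hypothetical path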
1787 1787 @unfilteredmethod
1788 1788 def destroying(self):
1789 1789 '''Inform the repository that nodes are about to be destroyed.
1790 1790 Intended for use by strip and rollback, so there's a common
1791 1791 place for anything that has to be done before destroying history.
1792 1792
1793 1793 This is mostly useful for saving state that is in memory and waiting
1794 1794 to be flushed when the current lock is released. Because a call to
1795 1795 destroyed is imminent, the repo will be invalidated causing those
1796 1796 changes to stay in memory (waiting for the next unlock), or vanish
1797 1797 completely.
1798 1798 '''
1799 1799 # When using the same lock to commit and strip, the phasecache is left
1800 1800 # dirty after committing. Then when we strip, the repo is invalidated,
1801 1801 # causing those changes to disappear.
1802 1802 if '_phasecache' in vars(self):
1803 1803 self._phasecache.write()
1804 1804
1805 1805 @unfilteredmethod
1806 1806 def destroyed(self):
1807 1807 '''Inform the repository that nodes have been destroyed.
1808 1808 Intended for use by strip and rollback, so there's a common
1809 1809 place for anything that has to be done after destroying history.
1810 1810 '''
1811 1811 # When one tries to:
1812 1812 # 1) destroy nodes thus calling this method (e.g. strip)
1813 1813 # 2) use phasecache somewhere (e.g. commit)
1814 1814 #
1815 1815 # then 2) will fail because the phasecache contains nodes that were
1816 1816 # removed. We can either remove phasecache from the filecache,
1817 1817 # causing it to reload next time it is accessed, or simply filter
1818 1818 # the removed nodes now and write the updated cache.
1819 1819 self._phasecache.filterunknown(self)
1820 1820 self._phasecache.write()
1821 1821
1822 1822 # update the 'served' branch cache to help read-only server processes
1823 1823 # Thanks to branchcache collaboration this is done from the nearest
1824 1824 # filtered subset and it is expected to be fast.
1825 1825 branchmap.updatecache(self.filtered('served'))
1826 1826
1827 1827 # Ensure the persistent tag cache is updated. Doing it now
1828 1828 # means that the tag cache only has to worry about destroyed
1829 1829 # heads immediately after a strip/rollback. That in turn
1830 1830 # guarantees that "cachetip == currenttip" (comparing both rev
1831 1831 # and node) always means no nodes have been added or destroyed.
1832 1832
1833 1833 # XXX this is suboptimal when qrefresh'ing: we strip the current
1834 1834 # head, refresh the tag cache, then immediately add a new head.
1835 1835 # But I think doing it this way is necessary for the "instant
1836 1836 # tag cache retrieval" case to work.
1837 1837 self.invalidate()
1838 1838
1839 1839 def walk(self, match, node=None):
1840 1840 '''
1841 1841 walk recursively through the directory tree or a given
1842 1842 changeset, finding all files matched by the match
1843 1843 function
1844 1844 '''
1845 1845 return self[node].walk(match)
1846 1846
1847 1847 def status(self, node1='.', node2=None, match=None,
1848 1848 ignored=False, clean=False, unknown=False,
1849 1849 listsubrepos=False):
1850 1850 '''a convenience method that calls node1.status(node2)'''
1851 1851 return self[node1].status(node2, match, ignored, clean, unknown,
1852 1852 listsubrepos)
1853 1853
1854 1854 def heads(self, start=None):
1855 1855 if start is None:
1856 headrevs = sorted(self.changelog.headrevs(), reverse=True)
1857 return [self.changelog.node(rev) for rev in headrevs]
1856 cl = self.changelog
1857 headrevs = sorted(cl.headrevs(), reverse=True)
1858 return [cl.node(rev) for rev in headrevs]
1858 1859
1859 1860 heads = self.changelog.heads(start)
1860 1861 # sort the output in rev descending order
1861 1862 return sorted(heads, key=self.changelog.rev, reverse=True)
1862 1863
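[editor's note] The two replaced lines in heads() above bind self.changelog to the local cl once, so the property lookup is paid a single time rather than on every iteration of the comprehension. The general idiom, with illustrative names:

def nodesfor(repo, revs):
    cl = repo.changelog               # one lookup instead of len(revs) lookups
    return [cl.node(r) for r in revs]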
1863 1864 def branchheads(self, branch=None, start=None, closed=False):
1864 1865 '''return a (possibly filtered) list of heads for the given branch
1865 1866
1866 1867 Heads are returned in topological order, from newest to oldest.
1867 1868 If branch is None, use the dirstate branch.
1868 1869 If start is not None, return only heads reachable from start.
1869 1870 If closed is True, return heads that are marked as closed as well.
1870 1871 '''
1871 1872 if branch is None:
1872 1873 branch = self[None].branch()
1873 1874 branches = self.branchmap()
1874 1875 if branch not in branches:
1875 1876 return []
1876 1877 # the cache returns heads ordered lowest to highest
1877 1878 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1878 1879 if start is not None:
1879 1880 # filter out the heads that cannot be reached from startrev
1880 1881 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1881 1882 bheads = [h for h in bheads if h in fbheads]
1882 1883 return bheads
1883 1884
1884 1885 def branches(self, nodes):
1885 1886 if not nodes:
1886 1887 nodes = [self.changelog.tip()]
1887 1888 b = []
1888 1889 for n in nodes:
1889 1890 t = n
1890 1891 while True:
1891 1892 p = self.changelog.parents(n)
1892 1893 if p[1] != nullid or p[0] == nullid:
1893 1894 b.append((t, n, p[0], p[1]))
1894 1895 break
1895 1896 n = p[0]
1896 1897 return b
1897 1898
1898 1899 def between(self, pairs):
1899 1900 r = []
1900 1901
1901 1902 for top, bottom in pairs:
1902 1903 n, l, i = top, [], 0
1903 1904 f = 1
1904 1905
1905 1906 while n != bottom and n != nullid:
1906 1907 p = self.changelog.parents(n)[0]
1907 1908 if i == f:
1908 1909 l.append(n)
1909 1910 f = f * 2
1910 1911 n = p
1911 1912 i += 1
1912 1913
1913 1914 r.append(l)
1914 1915
1915 1916 return r
1916 1917
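[editor's note] between() walks the first-parent chain from top toward bottom and keeps the nodes whose distance from top is a power of two (it records when i == f, then doubles f), giving exponentially spaced samples for older discovery code to bisect over. A standalone model with integers standing in for nodes, purely illustrative:

def sample(chain):
    l, f = [], 1
    for i, n in enumerate(chain[1:], 1):  # i = distance walked from the top
        if i == f:
            l.append(n)
            f *= 2
    return l

print(sample(list(range(10, 0, -1))))     # -> [9, 8, 6, 2]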
1917 1918 def checkpush(self, pushop):
1918 1919 """Extensions can override this function if additional checks have
1919 1920 to be performed before pushing, or call it if they override push
1920 1921 command.
1921 1922 """
1922 1923 pass
1923 1924
1924 1925 @unfilteredpropertycache
1925 1926 def prepushoutgoinghooks(self):
1926 1927 """Return util.hooks consists of a pushop with repo, remote, outgoing
1927 1928 methods, which are called before pushing changesets.
1928 1929 """
1929 1930 return util.hooks()
1930 1931
1931 1932 def pushkey(self, namespace, key, old, new):
1932 1933 try:
1933 1934 tr = self.currenttransaction()
1934 1935 hookargs = {}
1935 1936 if tr is not None:
1936 1937 hookargs.update(tr.hookargs)
1937 1938 hookargs['namespace'] = namespace
1938 1939 hookargs['key'] = key
1939 1940 hookargs['old'] = old
1940 1941 hookargs['new'] = new
1941 1942 self.hook('prepushkey', throw=True, **hookargs)
1942 1943 except error.HookAbort as exc:
1943 1944 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1944 1945 if exc.hint:
1945 1946 self.ui.write_err(_("(%s)\n") % exc.hint)
1946 1947 return False
1947 1948 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1948 1949 ret = pushkey.push(self, namespace, key, old, new)
1949 1950 def runhook():
1950 1951 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1951 1952 ret=ret)
1952 1953 self._afterlock(runhook)
1953 1954 return ret
1954 1955
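[editor's note] A hedged sketch of driving the pushkey()/listkeys() pair above through the 'bookmarks' namespace; the repository path and bookmark name are hypothetical:

from mercurial import hg, ui as uimod
from mercurial.node import hex

repo = hg.repository(uimod.ui(), '/path/to/repo')    # hypothetical path
marks = repo.listkeys('bookmarks')                   # {name: hex node}
new = hex(repo['tip'].node())
# old value '' means the bookmark did not exist yet
ok = repo.pushkey('bookmarks', 'feature', marks.get('feature', ''), new)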
1955 1956 def listkeys(self, namespace):
1956 1957 self.hook('prelistkeys', throw=True, namespace=namespace)
1957 1958 self.ui.debug('listing keys for "%s"\n' % namespace)
1958 1959 values = pushkey.list(self, namespace)
1959 1960 self.hook('listkeys', namespace=namespace, values=values)
1960 1961 return values
1961 1962
1962 1963 def debugwireargs(self, one, two, three=None, four=None, five=None):
1963 1964 '''used to test argument passing over the wire'''
1964 1965 return "%s %s %s %s %s" % (one, two, three, four, five)
1965 1966
1966 1967 def savecommitmessage(self, text):
1967 1968 fp = self.vfs('last-message.txt', 'wb')
1968 1969 try:
1969 1970 fp.write(text)
1970 1971 finally:
1971 1972 fp.close()
1972 1973 return self.pathto(fp.name[len(self.root) + 1:])
1973 1974
1974 1975 # used to avoid circular references so destructors work
1975 1976 def aftertrans(files):
1976 1977 renamefiles = [tuple(t) for t in files]
1977 1978 def a():
1978 1979 for vfs, src, dest in renamefiles:
1979 1980 try:
1980 1981 vfs.rename(src, dest)
1981 1982 except OSError: # journal file does not yet exist
1982 1983 pass
1983 1984 return a
1984 1985
1985 1986 def undoname(fn):
1986 1987 base, name = os.path.split(fn)
1987 1988 assert name.startswith('journal')
1988 1989 return os.path.join(base, name.replace('journal', 'undo', 1))
1989 1990
1990 1991 def instance(ui, path, create):
1991 1992 return localrepository(ui, util.urllocalpath(path), create)
1992 1993
1993 1994 def islocal(path):
1994 1995 return True
1995 1996
1996 1997 def newreporequirements(repo):
1997 1998 """Determine the set of requirements for a new local repository.
1998 1999
1999 2000 Extensions can wrap this function to specify custom requirements for
2000 2001 new repositories.
2001 2002 """
2002 2003 ui = repo.ui
2003 2004 requirements = set(['revlogv1'])
2004 2005 if ui.configbool('format', 'usestore', True):
2005 2006 requirements.add('store')
2006 2007 if ui.configbool('format', 'usefncache', True):
2007 2008 requirements.add('fncache')
2008 2009 if ui.configbool('format', 'dotencode', True):
2009 2010 requirements.add('dotencode')
2010 2011
2011 2012 compengine = ui.config('experimental', 'format.compression', 'zlib')
2012 2013 if compengine not in util.compengines:
2013 2014 raise error.Abort(_('compression engine %s defined by '
2014 2015 'experimental.format.compression not available') %
2015 2016 compengine,
2016 2017 hint=_('run "hg debuginstall" to list available '
2017 2018 'compression engines'))
2018 2019
2019 2020 # zlib is the historical default and doesn't need an explicit requirement.
2020 2021 if compengine != 'zlib':
2021 2022 requirements.add('exp-compression-%s' % compengine)
2022 2023
2023 2024 if scmutil.gdinitconfig(ui):
2024 2025 requirements.add('generaldelta')
2025 2026 if ui.configbool('experimental', 'treemanifest', False):
2026 2027 requirements.add('treemanifest')
2027 2028 if ui.configbool('experimental', 'manifestv2', False):
2028 2029 requirements.add('manifestv2')
2029 2030
2030 2031 return requirements
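
[editor's note] As the docstring above says, extensions can wrap newreporequirements(); a sketch of such a wrapper, where the extension name, config knob, and requirement string are all hypothetical:

# myext.py - hypothetical extension adding a requirement to new repositories
from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    requirements = orig(repo)
    if repo.ui.configbool('myext', 'enabled', False):  # hypothetical knob
        requirements.add('exp-myext-feature')          # hypothetical requirement
    return requirements

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)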