localrepo: make invalidate avoid invalidating store inside transaction (API)...
FUJIWARA Katsunori
r29918:d9c49138 default
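
This change makes localrepo.invalidate() skip invalidating store caches while a transaction is running: discarding in-memory store state mid-transaction (for example a partially written fncache) could leave the store inconsistent. A minimal sketch of the resulting behavior, assuming a repo opened with mercurial.hg.repository (the transaction description 'example' is made up):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')
    with repo.wlock(), repo.lock():
        tr = repo.transaction('example')
        try:
            repo.invalidate()   # store cache invalidation is now skipped
            tr.close()
        finally:
            tr.release()
    repo.invalidate()           # no transaction: store caches are dropped
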
@@ -1,1984 +1,1995 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 urlerr = util.urlerr
62 62 urlreq = util.urlreq
63 63
64 64 class repofilecache(scmutil.filecache):
65 65 """All filecache usage on repo are done for logic that should be unfiltered
66 66 """
67 67
68 68 def __get__(self, repo, type=None):
69 69 if repo is None:
70 70 return self
71 71 return super(repofilecache, self).__get__(repo.unfiltered(), type)
72 72 def __set__(self, repo, value):
73 73 return super(repofilecache, self).__set__(repo.unfiltered(), value)
74 74 def __delete__(self, repo):
75 75 return super(repofilecache, self).__delete__(repo.unfiltered())
76 76
77 77 class storecache(repofilecache):
78 78 """filecache for files in the store"""
79 79 def join(self, obj, fname):
80 80 return obj.sjoin(fname)
81 81
82 82 class unfilteredpropertycache(util.propertycache):
83 83 """propertycache that apply to unfiltered repo only"""
84 84
85 85 def __get__(self, repo, type=None):
86 86 unfi = repo.unfiltered()
87 87 if unfi is repo:
88 88 return super(unfilteredpropertycache, self).__get__(unfi)
89 89 return getattr(unfi, self.name)
90 90
91 91 class filteredpropertycache(util.propertycache):
92 92 """propertycache that must take filtering in account"""
93 93
94 94 def cachevalue(self, obj, value):
95 95 object.__setattr__(obj, self.name, value)
96 96
97 97
98 98 def hasunfilteredcache(repo, name):
99 99 """check if a repo has an unfilteredpropertycache value for <name>"""
100 100 return name in vars(repo.unfiltered())
101 101
102 102 def unfilteredmethod(orig):
103 103 """decorate method that always need to be run on unfiltered version"""
104 104 def wrapper(repo, *args, **kwargs):
105 105 return orig(repo.unfiltered(), *args, **kwargs)
106 106 return wrapper
107 107
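
The three cache classes and the decorator above all funnel attribute access through repo.unfiltered(), so cached values are shared by every repoview. A hedged sketch of how an extension might apply the decorator (the subclass and method here are hypothetical):

    class myrepo(localrepository):
        @unfilteredmethod
        def countallrevs(self):
            # runs against the unfiltered repo even when called on a view
            return len(self.changelog)
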
108 108 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
109 109 'unbundle'))
110 110 legacycaps = moderncaps.union(set(['changegroupsubset']))
111 111
112 112 class localpeer(peer.peerrepository):
113 113 '''peer for a local repo; reflects only the most recent API'''
114 114
115 115 def __init__(self, repo, caps=moderncaps):
116 116 peer.peerrepository.__init__(self)
117 117 self._repo = repo.filtered('served')
118 118 self.ui = repo.ui
119 119 self._caps = repo._restrictcapabilities(caps)
120 120 self.requirements = repo.requirements
121 121 self.supportedformats = repo.supportedformats
122 122
123 123 def close(self):
124 124 self._repo.close()
125 125
126 126 def _capabilities(self):
127 127 return self._caps
128 128
129 129 def local(self):
130 130 return self._repo
131 131
132 132 def canpush(self):
133 133 return True
134 134
135 135 def url(self):
136 136 return self._repo.url()
137 137
138 138 def lookup(self, key):
139 139 return self._repo.lookup(key)
140 140
141 141 def branchmap(self):
142 142 return self._repo.branchmap()
143 143
144 144 def heads(self):
145 145 return self._repo.heads()
146 146
147 147 def known(self, nodes):
148 148 return self._repo.known(nodes)
149 149
150 150 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
151 151 **kwargs):
152 152 cg = exchange.getbundle(self._repo, source, heads=heads,
153 153 common=common, bundlecaps=bundlecaps, **kwargs)
154 154 if bundlecaps is not None and 'HG20' in bundlecaps:
155 155 # When requesting a bundle2, getbundle returns a stream to make the
156 156 # wire level function happier. We need to build a proper object
157 157 # from it in the local peer.
158 158 cg = bundle2.getunbundler(self.ui, cg)
159 159 return cg
160 160
161 161 # TODO We might want to move the next two calls into legacypeer and add
162 162 # unbundle instead.
163 163
164 164 def unbundle(self, cg, heads, url):
165 165 """apply a bundle on a repo
166 166
167 167 This function handles the repo locking itself."""
168 168 try:
169 169 try:
170 170 cg = exchange.readbundle(self.ui, cg, None)
171 171 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
172 172 if util.safehasattr(ret, 'getchunks'):
173 173 # This is a bundle20 object, turn it into an unbundler.
174 174 # This little dance should be dropped eventually when the
175 175 # API is finally improved.
176 176 stream = util.chunkbuffer(ret.getchunks())
177 177 ret = bundle2.getunbundler(self.ui, stream)
178 178 return ret
179 179 except Exception as exc:
180 180 # If the exception contains output salvaged from a bundle2
181 181 # reply, we need to make sure it is printed before continuing
182 182 # to fail. So we build a bundle2 with such output and consume
183 183 # it directly.
184 184 #
185 185 # This is not very elegant but allows a "simple" solution for
186 186 # issue4594
187 187 output = getattr(exc, '_bundle2salvagedoutput', ())
188 188 if output:
189 189 bundler = bundle2.bundle20(self._repo.ui)
190 190 for out in output:
191 191 bundler.addpart(out)
192 192 stream = util.chunkbuffer(bundler.getchunks())
193 193 b = bundle2.getunbundler(self.ui, stream)
194 194 bundle2.processbundle(self._repo, b)
195 195 raise
196 196 except error.PushRaced as exc:
197 197 raise error.ResponseError(_('push failed:'), str(exc))
198 198
199 199 def lock(self):
200 200 return self._repo.lock()
201 201
202 202 def addchangegroup(self, cg, source, url):
203 203 return cg.apply(self._repo, source, url)
204 204
205 205 def pushkey(self, namespace, key, old, new):
206 206 return self._repo.pushkey(namespace, key, old, new)
207 207
208 208 def listkeys(self, namespace):
209 209 return self._repo.listkeys(namespace)
210 210
211 211 def debugwireargs(self, one, two, three=None, four=None, five=None):
212 212 '''used to test argument passing over the wire'''
213 213 return "%s %s %s %s %s" % (one, two, three, four, five)
214 214
215 215 class locallegacypeer(localpeer):
216 216 '''peer extension which implements legacy methods too; used for tests with
217 217 restricted capabilities'''
218 218
219 219 def __init__(self, repo):
220 220 localpeer.__init__(self, repo, caps=legacycaps)
221 221
222 222 def branches(self, nodes):
223 223 return self._repo.branches(nodes)
224 224
225 225 def between(self, pairs):
226 226 return self._repo.between(pairs)
227 227
228 228 def changegroup(self, basenodes, source):
229 229 return changegroup.changegroup(self._repo, basenodes, source)
230 230
231 231 def changegroupsubset(self, bases, heads, source):
232 232 return changegroup.changegroupsubset(self._repo, bases, heads, source)
233 233
234 234 class localrepository(object):
235 235
236 236 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
237 237 'manifestv2'))
238 238 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
239 239 'dotencode'))
240 240 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
241 241 filtername = None
242 242
243 243 # a list of (ui, featureset) functions.
244 244 # only functions defined in module of enabled extensions are invoked
245 245 featuresetupfuncs = set()
246 246
247 247 def __init__(self, baseui, path=None, create=False):
248 248 self.requirements = set()
249 249 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
250 250 self.wopener = self.wvfs
251 251 self.root = self.wvfs.base
252 252 self.path = self.wvfs.join(".hg")
253 253 self.origroot = path
254 254 self.auditor = pathutil.pathauditor(self.root, self._checknested)
255 255 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
256 256 realfs=False)
257 257 self.vfs = scmutil.vfs(self.path)
258 258 self.opener = self.vfs
259 259 self.baseui = baseui
260 260 self.ui = baseui.copy()
261 261 self.ui.copy = baseui.copy # prevent copying repo configuration
262 262 # A list of callbacks to shape the phase if no data were found.
263 263 # Callbacks are in the form: func(repo, roots) --> processed root.
264 264 # This list is to be filled by extensions during repo setup
265 265 self._phasedefaults = []
266 266 try:
267 267 self.ui.readconfig(self.join("hgrc"), self.root)
268 268 extensions.loadall(self.ui)
269 269 except IOError:
270 270 pass
271 271
272 272 if self.featuresetupfuncs:
273 273 self.supported = set(self._basesupported) # use private copy
274 274 extmods = set(m.__name__ for n, m
275 275 in extensions.extensions(self.ui))
276 276 for setupfunc in self.featuresetupfuncs:
277 277 if setupfunc.__module__ in extmods:
278 278 setupfunc(self.ui, self.supported)
279 279 else:
280 280 self.supported = self._basesupported
281 281
282 282 if not self.vfs.isdir():
283 283 if create:
284 284 self.requirements = newreporequirements(self)
285 285
286 286 if not self.wvfs.exists():
287 287 self.wvfs.makedirs()
288 288 self.vfs.makedir(notindexed=True)
289 289
290 290 if 'store' in self.requirements:
291 291 self.vfs.mkdir("store")
292 292
293 293 # create an invalid changelog
294 294 self.vfs.append(
295 295 "00changelog.i",
296 296 '\0\0\0\2' # represents revlogv2
297 297 ' dummy changelog to prevent using the old repo layout'
298 298 )
299 299 else:
300 300 raise error.RepoError(_("repository %s not found") % path)
301 301 elif create:
302 302 raise error.RepoError(_("repository %s already exists") % path)
303 303 else:
304 304 try:
305 305 self.requirements = scmutil.readrequires(
306 306 self.vfs, self.supported)
307 307 except IOError as inst:
308 308 if inst.errno != errno.ENOENT:
309 309 raise
310 310
311 311 self.sharedpath = self.path
312 312 try:
313 313 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
314 314 realpath=True)
315 315 s = vfs.base
316 316 if not vfs.exists():
317 317 raise error.RepoError(
318 318 _('.hg/sharedpath points to nonexistent directory %s') % s)
319 319 self.sharedpath = s
320 320 except IOError as inst:
321 321 if inst.errno != errno.ENOENT:
322 322 raise
323 323
324 324 self.store = store.store(
325 325 self.requirements, self.sharedpath, scmutil.vfs)
326 326 self.spath = self.store.path
327 327 self.svfs = self.store.vfs
328 328 self.sjoin = self.store.join
329 329 self.vfs.createmode = self.store.createmode
330 330 self._applyopenerreqs()
331 331 if create:
332 332 self._writerequirements()
333 333
334 334 self._dirstatevalidatewarned = False
335 335
336 336 self._branchcaches = {}
337 337 self._revbranchcache = None
338 338 self.filterpats = {}
339 339 self._datafilters = {}
340 340 self._transref = self._lockref = self._wlockref = None
341 341
342 342 # A cache for various files under .hg/ that tracks file changes,
343 343 # (used by the filecache decorator)
344 344 #
345 345 # Maps a property name to its util.filecacheentry
346 346 self._filecache = {}
347 347
348 348 # hold sets of revisions to be filtered
349 349 # should be cleared when something might have changed the filter value:
350 350 # - new changesets,
351 351 # - phase change,
352 352 # - new obsolescence marker,
353 353 # - working directory parent change,
354 354 # - bookmark changes
355 355 self.filteredrevcache = {}
356 356
357 357 # generic mapping between names and nodes
358 358 self.names = namespaces.namespaces()
359 359
360 360 def close(self):
361 361 self._writecaches()
362 362
363 363 def _writecaches(self):
364 364 if self._revbranchcache:
365 365 self._revbranchcache.write()
366 366
367 367 def _restrictcapabilities(self, caps):
368 368 if self.ui.configbool('experimental', 'bundle2-advertise', True):
369 369 caps = set(caps)
370 370 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
371 371 caps.add('bundle2=' + urlreq.quote(capsblob))
372 372 return caps
373 373
374 374 def _applyopenerreqs(self):
375 375 self.svfs.options = dict((r, 1) for r in self.requirements
376 376 if r in self.openerreqs)
377 377 # experimental config: format.chunkcachesize
378 378 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
379 379 if chunkcachesize is not None:
380 380 self.svfs.options['chunkcachesize'] = chunkcachesize
381 381 # experimental config: format.maxchainlen
382 382 maxchainlen = self.ui.configint('format', 'maxchainlen')
383 383 if maxchainlen is not None:
384 384 self.svfs.options['maxchainlen'] = maxchainlen
385 385 # experimental config: format.manifestcachesize
386 386 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
387 387 if manifestcachesize is not None:
388 388 self.svfs.options['manifestcachesize'] = manifestcachesize
389 389 # experimental config: format.aggressivemergedeltas
390 390 aggressivemergedeltas = self.ui.configbool('format',
391 391 'aggressivemergedeltas', False)
392 392 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
393 393 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
394 394
395 395 def _writerequirements(self):
396 396 scmutil.writerequires(self.vfs, self.requirements)
397 397
398 398 def _checknested(self, path):
399 399 """Determine if path is a legal nested repository."""
400 400 if not path.startswith(self.root):
401 401 return False
402 402 subpath = path[len(self.root) + 1:]
403 403 normsubpath = util.pconvert(subpath)
404 404
405 405 # XXX: Checking against the current working copy is wrong in
406 406 # the sense that it can reject things like
407 407 #
408 408 # $ hg cat -r 10 sub/x.txt
409 409 #
410 410 # if sub/ is no longer a subrepository in the working copy
411 411 # parent revision.
412 412 #
413 413 # However, it can of course also allow things that would have
414 414 # been rejected before, such as the above cat command if sub/
415 415 # is a subrepository now, but was a normal directory before.
416 416 # The old path auditor would have rejected by mistake since it
417 417 # panics when it sees sub/.hg/.
418 418 #
419 419 # All in all, checking against the working copy seems sensible
420 420 # since we want to prevent access to nested repositories on
421 421 # the filesystem *now*.
422 422 ctx = self[None]
423 423 parts = util.splitpath(subpath)
424 424 while parts:
425 425 prefix = '/'.join(parts)
426 426 if prefix in ctx.substate:
427 427 if prefix == normsubpath:
428 428 return True
429 429 else:
430 430 sub = ctx.sub(prefix)
431 431 return sub.checknested(subpath[len(prefix) + 1:])
432 432 else:
433 433 parts.pop()
434 434 return False
435 435
436 436 def peer(self):
437 437 return localpeer(self) # not cached to avoid reference cycle
438 438
439 439 def unfiltered(self):
440 440 """Return unfiltered version of the repository
441 441
442 442 Intended to be overwritten by filtered repo."""
443 443 return self
444 444
445 445 def filtered(self, name):
446 446 """Return a filtered version of a repository"""
447 447 # build a new class with the mixin and the current class
448 448 # (possibly subclass of the repo)
449 449 class proxycls(repoview.repoview, self.unfiltered().__class__):
450 450 pass
451 451 return proxycls(self, name)
452 452
453 453 @repofilecache('bookmarks', 'bookmarks.current')
454 454 def _bookmarks(self):
455 455 return bookmarks.bmstore(self)
456 456
457 457 @property
458 458 def _activebookmark(self):
459 459 return self._bookmarks.active
460 460
461 461 def bookmarkheads(self, bookmark):
462 462 name = bookmark.split('@', 1)[0]
463 463 heads = []
464 464 for mark, n in self._bookmarks.iteritems():
465 465 if mark.split('@', 1)[0] == name:
466 466 heads.append(n)
467 467 return heads
468 468
469 469 # _phaserevs and _phasesets depend on changelog. what we need is to
470 470 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
471 471 # can't be easily expressed in filecache mechanism.
472 472 @storecache('phaseroots', '00changelog.i')
473 473 def _phasecache(self):
474 474 return phases.phasecache(self, self._phasedefaults)
475 475
476 476 @storecache('obsstore')
477 477 def obsstore(self):
478 478 # read default format for new obsstore.
479 479 # developer config: format.obsstore-version
480 480 defaultformat = self.ui.configint('format', 'obsstore-version', None)
481 481 # rely on obsstore class default when possible.
482 482 kwargs = {}
483 483 if defaultformat is not None:
484 484 kwargs['defaultformat'] = defaultformat
485 485 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
486 486 store = obsolete.obsstore(self.svfs, readonly=readonly,
487 487 **kwargs)
488 488 if store and readonly:
489 489 self.ui.warn(
490 490 _('obsolete feature not enabled but %i markers found!\n')
491 491 % len(list(store)))
492 492 return store
493 493
494 494 @storecache('00changelog.i')
495 495 def changelog(self):
496 496 c = changelog.changelog(self.svfs)
497 497 if 'HG_PENDING' in os.environ:
498 498 p = os.environ['HG_PENDING']
499 499 if p.startswith(self.root):
500 500 c.readpending('00changelog.i.a')
501 501 return c
502 502
503 503 @storecache('00manifest.i')
504 504 def manifest(self):
505 505 return manifest.manifest(self.svfs)
506 506
507 507 @property
508 508 def manifestlog(self):
509 509 return manifest.manifestlog(self.svfs, self)
510 510
511 511 @repofilecache('dirstate')
512 512 def dirstate(self):
513 513 return dirstate.dirstate(self.vfs, self.ui, self.root,
514 514 self._dirstatevalidate)
515 515
516 516 def _dirstatevalidate(self, node):
517 517 try:
518 518 self.changelog.rev(node)
519 519 return node
520 520 except error.LookupError:
521 521 if not self._dirstatevalidatewarned:
522 522 self._dirstatevalidatewarned = True
523 523 self.ui.warn(_("warning: ignoring unknown"
524 524 " working parent %s!\n") % short(node))
525 525 return nullid
526 526
527 527 def __getitem__(self, changeid):
528 528 if changeid is None or changeid == wdirrev:
529 529 return context.workingctx(self)
530 530 if isinstance(changeid, slice):
531 531 return [context.changectx(self, i)
532 532 for i in xrange(*changeid.indices(len(self)))
533 533 if i not in self.changelog.filteredrevs]
534 534 return context.changectx(self, changeid)
535 535
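
__getitem__ above gives the repo a dict/sequence-like API: None (or wdirrev) maps to the working context, and slices skip filtered revisions. A small usage sketch, assuming an open repo object:

    wctx = repo[None]      # workingctx for the working directory
    tipctx = repo['tip']   # changectx looked up by tag/rev/node
    ctxs = repo[0:5]       # list of changectx, filtered revs excluded
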
536 536 def __contains__(self, changeid):
537 537 try:
538 538 self[changeid]
539 539 return True
540 540 except error.RepoLookupError:
541 541 return False
542 542
543 543 def __nonzero__(self):
544 544 return True
545 545
546 546 def __len__(self):
547 547 return len(self.changelog)
548 548
549 549 def __iter__(self):
550 550 return iter(self.changelog)
551 551
552 552 def revs(self, expr, *args):
553 553 '''Find revisions matching a revset.
554 554
555 555 The revset is specified as a string ``expr`` that may contain
556 556 %-formatting to escape certain types. See ``revset.formatspec``.
557 557
558 558 Revset aliases from the configuration are not expanded. To expand
559 559 user aliases, consider calling ``scmutil.revrange()``.
560 560
561 561 Returns a revset.abstractsmartset, which is a list-like interface
562 562 that contains integer revisions.
563 563 '''
564 564 expr = revset.formatspec(expr, *args)
565 565 m = revset.match(None, expr)
566 566 return m(self)
567 567
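
revs() and set() are the two revset entry points: the former yields integer revisions, the latter changectx objects, with %-codes escaped per revset.formatspec. A sketch (the revset strings are arbitrary examples):

    for r in repo.revs('ancestors(%s)', 'tip'):
        pass                  # r is an integer revision
    for ctx in repo.set('heads(all())'):
        pass                  # ctx is a changectx instance
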
568 568 def set(self, expr, *args):
569 569 '''Find revisions matching a revset and emit changectx instances.
570 570
571 571 This is a convenience wrapper around ``revs()`` that iterates the
572 572 result and is a generator of changectx instances.
573 573
574 574 Revset aliases from the configuration are not expanded. To expand
575 575 user aliases, consider calling ``scmutil.revrange()``.
576 576 '''
577 577 for r in self.revs(expr, *args):
578 578 yield self[r]
579 579
580 580 def url(self):
581 581 return 'file:' + self.root
582 582
583 583 def hook(self, name, throw=False, **args):
584 584 """Call a hook, passing this repo instance.
585 585
586 586 This is a convenience method to aid invoking hooks. Extensions likely
587 587 won't call this unless they have registered a custom hook or are
588 588 replacing code that is expected to call a hook.
589 589 """
590 590 return hook.hook(self.ui, self, name, throw, **args)
591 591
592 592 @unfilteredmethod
593 593 def _tag(self, names, node, message, local, user, date, extra=None,
594 594 editor=False):
595 595 if isinstance(names, str):
596 596 names = (names,)
597 597
598 598 branches = self.branchmap()
599 599 for name in names:
600 600 self.hook('pretag', throw=True, node=hex(node), tag=name,
601 601 local=local)
602 602 if name in branches:
603 603 self.ui.warn(_("warning: tag %s conflicts with existing"
604 604 " branch name\n") % name)
605 605
606 606 def writetags(fp, names, munge, prevtags):
607 607 fp.seek(0, 2)
608 608 if prevtags and prevtags[-1] != '\n':
609 609 fp.write('\n')
610 610 for name in names:
611 611 if munge:
612 612 m = munge(name)
613 613 else:
614 614 m = name
615 615
616 616 if (self._tagscache.tagtypes and
617 617 name in self._tagscache.tagtypes):
618 618 old = self.tags().get(name, nullid)
619 619 fp.write('%s %s\n' % (hex(old), m))
620 620 fp.write('%s %s\n' % (hex(node), m))
621 621 fp.close()
622 622
623 623 prevtags = ''
624 624 if local:
625 625 try:
626 626 fp = self.vfs('localtags', 'r+')
627 627 except IOError:
628 628 fp = self.vfs('localtags', 'a')
629 629 else:
630 630 prevtags = fp.read()
631 631
632 632 # local tags are stored in the current charset
633 633 writetags(fp, names, None, prevtags)
634 634 for name in names:
635 635 self.hook('tag', node=hex(node), tag=name, local=local)
636 636 return
637 637
638 638 try:
639 639 fp = self.wfile('.hgtags', 'rb+')
640 640 except IOError as e:
641 641 if e.errno != errno.ENOENT:
642 642 raise
643 643 fp = self.wfile('.hgtags', 'ab')
644 644 else:
645 645 prevtags = fp.read()
646 646
647 647 # committed tags are stored in UTF-8
648 648 writetags(fp, names, encoding.fromlocal, prevtags)
649 649
650 650 fp.close()
651 651
652 652 self.invalidatecaches()
653 653
654 654 if '.hgtags' not in self.dirstate:
655 655 self[None].add(['.hgtags'])
656 656
657 657 m = matchmod.exact(self.root, '', ['.hgtags'])
658 658 tagnode = self.commit(message, user, date, extra=extra, match=m,
659 659 editor=editor)
660 660
661 661 for name in names:
662 662 self.hook('tag', node=hex(node), tag=name, local=local)
663 663
664 664 return tagnode
665 665
666 666 def tag(self, names, node, message, local, user, date, editor=False):
667 667 '''tag a revision with one or more symbolic names.
668 668
669 669 names is a list of strings or, when adding a single tag, names may be a
670 670 string.
671 671
672 672 if local is True, the tags are stored in a per-repository file.
673 673 otherwise, they are stored in the .hgtags file, and a new
674 674 changeset is committed with the change.
675 675
676 676 keyword arguments:
677 677
678 678 local: whether to store tags in non-version-controlled file
679 679 (default False)
680 680
681 681 message: commit message to use if committing
682 682
683 683 user: name of user to use if committing
684 684
685 685 date: date tuple to use if committing'''
686 686
687 687 if not local:
688 688 m = matchmod.exact(self.root, '', ['.hgtags'])
689 689 if any(self.status(match=m, unknown=True, ignored=True)):
690 690 raise error.Abort(_('working copy of .hgtags is changed'),
691 691 hint=_('please commit .hgtags manually'))
692 692
693 693 self.tags() # instantiate the cache
694 694 self._tag(names, node, message, local, user, date, editor=editor)
695 695
696 696 @filteredpropertycache
697 697 def _tagscache(self):
698 698 '''Returns a tagscache object that contains various tags related
699 699 caches.'''
700 700
701 701 # This simplifies its cache management by having one decorated
702 702 # function (this one) and the rest simply fetch things from it.
703 703 class tagscache(object):
704 704 def __init__(self):
705 705 # These two define the set of tags for this repository. tags
706 706 # maps tag name to node; tagtypes maps tag name to 'global' or
707 707 # 'local'. (Global tags are defined by .hgtags across all
708 708 # heads, and local tags are defined in .hg/localtags.)
709 709 # They constitute the in-memory cache of tags.
710 710 self.tags = self.tagtypes = None
711 711
712 712 self.nodetagscache = self.tagslist = None
713 713
714 714 cache = tagscache()
715 715 cache.tags, cache.tagtypes = self._findtags()
716 716
717 717 return cache
718 718
719 719 def tags(self):
720 720 '''return a mapping of tag to node'''
721 721 t = {}
722 722 if self.changelog.filteredrevs:
723 723 tags, tt = self._findtags()
724 724 else:
725 725 tags = self._tagscache.tags
726 726 for k, v in tags.iteritems():
727 727 try:
728 728 # ignore tags to unknown nodes
729 729 self.changelog.rev(v)
730 730 t[k] = v
731 731 except (error.LookupError, ValueError):
732 732 pass
733 733 return t
734 734
735 735 def _findtags(self):
736 736 '''Do the hard work of finding tags. Return a pair of dicts
737 737 (tags, tagtypes) where tags maps tag name to node, and tagtypes
738 738 maps tag name to a string like \'global\' or \'local\'.
739 739 Subclasses or extensions are free to add their own tags, but
740 740 should be aware that the returned dicts will be retained for the
741 741 duration of the localrepo object.'''
742 742
743 743 # XXX what tagtype should subclasses/extensions use? Currently
744 744 # mq and bookmarks add tags, but do not set the tagtype at all.
745 745 # Should each extension invent its own tag type? Should there
746 746 # be one tagtype for all such "virtual" tags? Or is the status
747 747 # quo fine?
748 748
749 749 alltags = {} # map tag name to (node, hist)
750 750 tagtypes = {}
751 751
752 752 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
753 753 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
754 754
755 755 # Build the return dicts. Have to re-encode tag names because
756 756 # the tags module always uses UTF-8 (in order not to lose info
757 757 # writing to the cache), but the rest of Mercurial wants them in
758 758 # local encoding.
759 759 tags = {}
760 760 for (name, (node, hist)) in alltags.iteritems():
761 761 if node != nullid:
762 762 tags[encoding.tolocal(name)] = node
763 763 tags['tip'] = self.changelog.tip()
764 764 tagtypes = dict([(encoding.tolocal(name), value)
765 765 for (name, value) in tagtypes.iteritems()])
766 766 return (tags, tagtypes)
767 767
768 768 def tagtype(self, tagname):
769 769 '''
770 770 return the type of the given tag. result can be:
771 771
772 772 'local' : a local tag
773 773 'global' : a global tag
774 774 None : tag does not exist
775 775 '''
776 776
777 777 return self._tagscache.tagtypes.get(tagname)
778 778
779 779 def tagslist(self):
780 780 '''return a list of tags ordered by revision'''
781 781 if not self._tagscache.tagslist:
782 782 l = []
783 783 for t, n in self.tags().iteritems():
784 784 l.append((self.changelog.rev(n), t, n))
785 785 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
786 786
787 787 return self._tagscache.tagslist
788 788
789 789 def nodetags(self, node):
790 790 '''return the tags associated with a node'''
791 791 if not self._tagscache.nodetagscache:
792 792 nodetagscache = {}
793 793 for t, n in self._tagscache.tags.iteritems():
794 794 nodetagscache.setdefault(n, []).append(t)
795 795 for tags in nodetagscache.itervalues():
796 796 tags.sort()
797 797 self._tagscache.nodetagscache = nodetagscache
798 798 return self._tagscache.nodetagscache.get(node, [])
799 799
800 800 def nodebookmarks(self, node):
801 801 """return the list of bookmarks pointing to the specified node"""
802 802 marks = []
803 803 for bookmark, n in self._bookmarks.iteritems():
804 804 if n == node:
805 805 marks.append(bookmark)
806 806 return sorted(marks)
807 807
808 808 def branchmap(self):
809 809 '''returns a dictionary {branch: [branchheads]} with branchheads
810 810 ordered by increasing revision number'''
811 811 branchmap.updatecache(self)
812 812 return self._branchcaches[self.filtername]
813 813
814 814 @unfilteredmethod
815 815 def revbranchcache(self):
816 816 if not self._revbranchcache:
817 817 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
818 818 return self._revbranchcache
819 819
820 820 def branchtip(self, branch, ignoremissing=False):
821 821 '''return the tip node for a given branch
822 822
823 823 If ignoremissing is True, then this method will not raise an error.
824 824 This is helpful for callers that only expect None for a missing branch
825 825 (e.g. namespace).
826 826
827 827 '''
828 828 try:
829 829 return self.branchmap().branchtip(branch)
830 830 except KeyError:
831 831 if not ignoremissing:
832 832 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
833 833 else:
834 834 pass
835 835
836 836 def lookup(self, key):
837 837 return self[key].node()
838 838
839 839 def lookupbranch(self, key, remote=None):
840 840 repo = remote or self
841 841 if key in repo.branchmap():
842 842 return key
843 843
844 844 repo = (remote and remote.local()) and remote or self
845 845 return repo[key].branch()
846 846
847 847 def known(self, nodes):
848 848 cl = self.changelog
849 849 nm = cl.nodemap
850 850 filtered = cl.filteredrevs
851 851 result = []
852 852 for n in nodes:
853 853 r = nm.get(n)
854 854 resp = not (r is None or r in filtered)
855 855 result.append(resp)
856 856 return result
857 857
858 858 def local(self):
859 859 return self
860 860
861 861 def publishing(self):
862 862 # it's safe (and desirable) to trust the publish flag unconditionally
863 863 # so that we don't finalize changes shared between users via ssh or nfs
864 864 return self.ui.configbool('phases', 'publish', True, untrusted=True)
865 865
866 866 def cancopy(self):
867 867 # so statichttprepo's override of local() works
868 868 if not self.local():
869 869 return False
870 870 if not self.publishing():
871 871 return True
872 872 # if publishing we can't copy if there is filtered content
873 873 return not self.filtered('visible').changelog.filteredrevs
874 874
875 875 def shared(self):
876 876 '''the type of shared repository (None if not shared)'''
877 877 if self.sharedpath != self.path:
878 878 return 'store'
879 879 return None
880 880
881 881 def join(self, f, *insidef):
882 882 return self.vfs.join(os.path.join(f, *insidef))
883 883
884 884 def wjoin(self, f, *insidef):
885 885 return self.vfs.reljoin(self.root, f, *insidef)
886 886
887 887 def file(self, f):
888 888 if f[0] == '/':
889 889 f = f[1:]
890 890 return filelog.filelog(self.svfs, f)
891 891
892 892 def changectx(self, changeid):
893 893 return self[changeid]
894 894
895 895 def setparents(self, p1, p2=nullid):
896 896 self.dirstate.beginparentchange()
897 897 copies = self.dirstate.setparents(p1, p2)
898 898 pctx = self[p1]
899 899 if copies:
900 900 # Adjust copy records, the dirstate cannot do it, it
901 901 # requires access to parents manifests. Preserve them
902 902 # only for entries added to first parent.
903 903 for f in copies:
904 904 if f not in pctx and copies[f] in pctx:
905 905 self.dirstate.copy(copies[f], f)
906 906 if p2 == nullid:
907 907 for f, s in sorted(self.dirstate.copies().items()):
908 908 if f not in pctx and s not in pctx:
909 909 self.dirstate.copy(None, f)
910 910 self.dirstate.endparentchange()
911 911
912 912 def filectx(self, path, changeid=None, fileid=None):
913 913 """changeid can be a changeset revision, node, or tag.
914 914 fileid can be a file revision or node."""
915 915 return context.filectx(self, path, changeid, fileid)
916 916
917 917 def getcwd(self):
918 918 return self.dirstate.getcwd()
919 919
920 920 def pathto(self, f, cwd=None):
921 921 return self.dirstate.pathto(f, cwd)
922 922
923 923 def wfile(self, f, mode='r'):
924 924 return self.wvfs(f, mode)
925 925
926 926 def _link(self, f):
927 927 return self.wvfs.islink(f)
928 928
929 929 def _loadfilter(self, filter):
930 930 if filter not in self.filterpats:
931 931 l = []
932 932 for pat, cmd in self.ui.configitems(filter):
933 933 if cmd == '!':
934 934 continue
935 935 mf = matchmod.match(self.root, '', [pat])
936 936 fn = None
937 937 params = cmd
938 938 for name, filterfn in self._datafilters.iteritems():
939 939 if cmd.startswith(name):
940 940 fn = filterfn
941 941 params = cmd[len(name):].lstrip()
942 942 break
943 943 if not fn:
944 944 fn = lambda s, c, **kwargs: util.filter(s, c)
945 945 # Wrap old filters not supporting keyword arguments
946 946 if not inspect.getargspec(fn)[2]:
947 947 oldfn = fn
948 948 fn = lambda s, c, **kwargs: oldfn(s, c)
949 949 l.append((mf, fn, params))
950 950 self.filterpats[filter] = l
951 951 return self.filterpats[filter]
952 952
953 953 def _filter(self, filterpats, filename, data):
954 954 for mf, fn, cmd in filterpats:
955 955 if mf(filename):
956 956 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
957 957 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
958 958 break
959 959
960 960 return data
961 961
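
_loadfilter() reads the [encode]/[decode] hgrc sections and consults the filters registered through adddatafilter() below. A hedged sketch of registering one (the 'upper:' name and function are made up):

    def upperfilter(s, params, **kwargs):
        # invoked by _filter() with ui/repo/filename keyword arguments
        return s.upper()

    repo.adddatafilter('upper:', upperfilter)
    # with "[encode] *.txt = upper:" in hgrc, wread() applies it to *.txt
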
962 962 @unfilteredpropertycache
963 963 def _encodefilterpats(self):
964 964 return self._loadfilter('encode')
965 965
966 966 @unfilteredpropertycache
967 967 def _decodefilterpats(self):
968 968 return self._loadfilter('decode')
969 969
970 970 def adddatafilter(self, name, filter):
971 971 self._datafilters[name] = filter
972 972
973 973 def wread(self, filename):
974 974 if self._link(filename):
975 975 data = self.wvfs.readlink(filename)
976 976 else:
977 977 data = self.wvfs.read(filename)
978 978 return self._filter(self._encodefilterpats, filename, data)
979 979
980 980 def wwrite(self, filename, data, flags, backgroundclose=False):
981 981 """write ``data`` into ``filename`` in the working directory
982 982
983 983 This returns the length of the written (possibly decoded) data.
984 984 """
985 985 data = self._filter(self._decodefilterpats, filename, data)
986 986 if 'l' in flags:
987 987 self.wvfs.symlink(data, filename)
988 988 else:
989 989 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
990 990 if 'x' in flags:
991 991 self.wvfs.setflags(filename, False, True)
992 992 return len(data)
993 993
994 994 def wwritedata(self, filename, data):
995 995 return self._filter(self._decodefilterpats, filename, data)
996 996
997 997 def currenttransaction(self):
998 998 """return the current transaction or None if non exists"""
999 999 if self._transref:
1000 1000 tr = self._transref()
1001 1001 else:
1002 1002 tr = None
1003 1003
1004 1004 if tr and tr.running():
1005 1005 return tr
1006 1006 return None
1007 1007
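
currenttransaction() is the hook this patch builds on: callers can ask whether a transaction is live before touching state the transaction owns. A sketch of the guard pattern the new invalidate() adopts below:

    if repo.currenttransaction() is None:
        # safe: no in-memory store state can be lost
        repo.store.invalidatecaches()
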
1008 1008 def transaction(self, desc, report=None):
1009 1009 if (self.ui.configbool('devel', 'all-warnings')
1010 1010 or self.ui.configbool('devel', 'check-locks')):
1011 1011 if self._currentlock(self._lockref) is None:
1012 1012 raise RuntimeError('programming error: transaction requires '
1013 1013 'locking')
1014 1014 tr = self.currenttransaction()
1015 1015 if tr is not None:
1016 1016 return tr.nest()
1017 1017
1018 1018 # abort here if the journal already exists
1019 1019 if self.svfs.exists("journal"):
1020 1020 raise error.RepoError(
1021 1021 _("abandoned transaction found"),
1022 1022 hint=_("run 'hg recover' to clean up transaction"))
1023 1023
1024 1024 idbase = "%.40f#%f" % (random.random(), time.time())
1025 1025 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1026 1026 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1027 1027
1028 1028 self._writejournal(desc)
1029 1029 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1030 1030 if report:
1031 1031 rp = report
1032 1032 else:
1033 1033 rp = self.ui.warn
1034 1034 vfsmap = {'plain': self.vfs} # root of .hg/
1035 1035 # we must avoid cyclic reference between repo and transaction.
1036 1036 reporef = weakref.ref(self)
1037 1037 def validate(tr):
1038 1038 """will run pre-closing hooks"""
1039 1039 reporef().hook('pretxnclose', throw=True,
1040 1040 txnname=desc, **tr.hookargs)
1041 1041 def releasefn(tr, success):
1042 1042 repo = reporef()
1043 1043 if success:
1044 1044 # this should be invoked explicitly here, because
1045 1045 # in-memory changes aren't written out when the
1046 1046 # transaction closes if tr.addfilegenerator (via
1047 1047 # dirstate.write or so) wasn't invoked while the
1048 1048 # transaction was running
1049 1049 repo.dirstate.write(None)
1050 1050 else:
1051 1051 # discard all changes (including ones already written
1052 1052 # out) in this transaction
1053 1053 repo.dirstate.restorebackup(None, prefix='journal.')
1054 1054
1055 1055 repo.invalidate(clearfilecache=True)
1056 1056
1057 1057 tr = transaction.transaction(rp, self.svfs, vfsmap,
1058 1058 "journal",
1059 1059 "undo",
1060 1060 aftertrans(renames),
1061 1061 self.store.createmode,
1062 1062 validator=validate,
1063 1063 releasefn=releasefn)
1064 1064
1065 1065 tr.hookargs['txnid'] = txnid
1066 1066 # note: writing the fncache only during finalize means that the file is
1067 1067 # outdated when running hooks. As fncache is used for streaming clone,
1068 1068 # this is not expected to break anything that happens during the hooks.
1069 1069 tr.addfinalize('flush-fncache', self.store.write)
1070 1070 def txnclosehook(tr2):
1071 1071 """To be run if transaction is successful, will schedule a hook run
1072 1072 """
1073 1073 # Don't reference tr2 in hook() so we don't hold a reference.
1074 1074 # This reduces memory consumption when there are multiple
1075 1075 # transactions per lock. This can likely go away if issue5045
1076 1076 # fixes the function accumulation.
1077 1077 hookargs = tr2.hookargs
1078 1078
1079 1079 def hook():
1080 1080 reporef().hook('txnclose', throw=False, txnname=desc,
1081 1081 **hookargs)
1082 1082 reporef()._afterlock(hook)
1083 1083 tr.addfinalize('txnclose-hook', txnclosehook)
1084 1084 def txnaborthook(tr2):
1085 1085 """To be run if transaction is aborted
1086 1086 """
1087 1087 reporef().hook('txnabort', throw=False, txnname=desc,
1088 1088 **tr2.hookargs)
1089 1089 tr.addabort('txnabort-hook', txnaborthook)
1090 1090 # avoid eager cache invalidation. in-memory data should be identical
1091 1091 # to stored data if transaction has no error.
1092 1092 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1093 1093 self._transref = weakref.ref(tr)
1094 1094 return tr
1095 1095
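
transaction() returns (or nests) a transaction whose releasefn restores the dirstate and, with this change, fully invalidates file caches on failure. The canonical caller contract, sketched under the usual lock-then-transaction rules (the description string is arbitrary):

    with repo.lock():
        tr = repo.transaction('my-operation')
        try:
            # ... append store data, registering files with tr ...
            tr.close()     # success: run finalizers and close hooks
        finally:
            tr.release()   # abort and roll back if close() wasn't reached
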
1096 1096 def _journalfiles(self):
1097 1097 return ((self.svfs, 'journal'),
1098 1098 (self.vfs, 'journal.dirstate'),
1099 1099 (self.vfs, 'journal.branch'),
1100 1100 (self.vfs, 'journal.desc'),
1101 1101 (self.vfs, 'journal.bookmarks'),
1102 1102 (self.svfs, 'journal.phaseroots'))
1103 1103
1104 1104 def undofiles(self):
1105 1105 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1106 1106
1107 1107 def _writejournal(self, desc):
1108 1108 self.dirstate.savebackup(None, prefix='journal.')
1109 1109 self.vfs.write("journal.branch",
1110 1110 encoding.fromlocal(self.dirstate.branch()))
1111 1111 self.vfs.write("journal.desc",
1112 1112 "%d\n%s\n" % (len(self), desc))
1113 1113 self.vfs.write("journal.bookmarks",
1114 1114 self.vfs.tryread("bookmarks"))
1115 1115 self.svfs.write("journal.phaseroots",
1116 1116 self.svfs.tryread("phaseroots"))
1117 1117
1118 1118 def recover(self):
1119 1119 with self.lock():
1120 1120 if self.svfs.exists("journal"):
1121 1121 self.ui.status(_("rolling back interrupted transaction\n"))
1122 1122 vfsmap = {'': self.svfs,
1123 1123 'plain': self.vfs,}
1124 1124 transaction.rollback(self.svfs, vfsmap, "journal",
1125 1125 self.ui.warn)
1126 1126 self.invalidate()
1127 1127 return True
1128 1128 else:
1129 1129 self.ui.warn(_("no interrupted transaction available\n"))
1130 1130 return False
1131 1131
1132 1132 def rollback(self, dryrun=False, force=False):
1133 1133 wlock = lock = dsguard = None
1134 1134 try:
1135 1135 wlock = self.wlock()
1136 1136 lock = self.lock()
1137 1137 if self.svfs.exists("undo"):
1138 1138 dsguard = cmdutil.dirstateguard(self, 'rollback')
1139 1139
1140 1140 return self._rollback(dryrun, force, dsguard)
1141 1141 else:
1142 1142 self.ui.warn(_("no rollback information available\n"))
1143 1143 return 1
1144 1144 finally:
1145 1145 release(dsguard, lock, wlock)
1146 1146
1147 1147 @unfilteredmethod # Until we get smarter cache management
1148 1148 def _rollback(self, dryrun, force, dsguard):
1149 1149 ui = self.ui
1150 1150 try:
1151 1151 args = self.vfs.read('undo.desc').splitlines()
1152 1152 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1153 1153 if len(args) >= 3:
1154 1154 detail = args[2]
1155 1155 oldtip = oldlen - 1
1156 1156
1157 1157 if detail and ui.verbose:
1158 1158 msg = (_('repository tip rolled back to revision %s'
1159 1159 ' (undo %s: %s)\n')
1160 1160 % (oldtip, desc, detail))
1161 1161 else:
1162 1162 msg = (_('repository tip rolled back to revision %s'
1163 1163 ' (undo %s)\n')
1164 1164 % (oldtip, desc))
1165 1165 except IOError:
1166 1166 msg = _('rolling back unknown transaction\n')
1167 1167 desc = None
1168 1168
1169 1169 if not force and self['.'] != self['tip'] and desc == 'commit':
1170 1170 raise error.Abort(
1171 1171 _('rollback of last commit while not checked out '
1172 1172 'may lose data'), hint=_('use -f to force'))
1173 1173
1174 1174 ui.status(msg)
1175 1175 if dryrun:
1176 1176 return 0
1177 1177
1178 1178 parents = self.dirstate.parents()
1179 1179 self.destroying()
1180 1180 vfsmap = {'plain': self.vfs, '': self.svfs}
1181 1181 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1182 1182 if self.vfs.exists('undo.bookmarks'):
1183 1183 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1184 1184 if self.svfs.exists('undo.phaseroots'):
1185 1185 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1186 1186 self.invalidate()
1187 1187
1188 1188 parentgone = (parents[0] not in self.changelog.nodemap or
1189 1189 parents[1] not in self.changelog.nodemap)
1190 1190 if parentgone:
1191 1191 # prevent dirstateguard from overwriting already restored one
1192 1192 dsguard.close()
1193 1193
1194 1194 self.dirstate.restorebackup(None, prefix='undo.')
1195 1195 try:
1196 1196 branch = self.vfs.read('undo.branch')
1197 1197 self.dirstate.setbranch(encoding.tolocal(branch))
1198 1198 except IOError:
1199 1199 ui.warn(_('named branch could not be reset: '
1200 1200 'current branch is still \'%s\'\n')
1201 1201 % self.dirstate.branch())
1202 1202
1203 1203 parents = tuple([p.rev() for p in self[None].parents()])
1204 1204 if len(parents) > 1:
1205 1205 ui.status(_('working directory now based on '
1206 1206 'revisions %d and %d\n') % parents)
1207 1207 else:
1208 1208 ui.status(_('working directory now based on '
1209 1209 'revision %d\n') % parents)
1210 1210 mergemod.mergestate.clean(self, self['.'].node())
1211 1211
1212 1212 # TODO: if we know which new heads may result from this rollback, pass
1213 1213 # them to destroy(), which will prevent the branchhead cache from being
1214 1214 # invalidated.
1215 1215 self.destroyed()
1216 1216 return 0
1217 1217
1218 1218 def invalidatecaches(self):
1219 1219
1220 1220 if '_tagscache' in vars(self):
1221 1221 # can't use delattr on proxy
1222 1222 del self.__dict__['_tagscache']
1223 1223
1224 1224 self.unfiltered()._branchcaches.clear()
1225 1225 self.invalidatevolatilesets()
1226 1226
1227 1227 def invalidatevolatilesets(self):
1228 1228 self.filteredrevcache.clear()
1229 1229 obsolete.clearobscaches(self)
1230 1230
1231 1231 def invalidatedirstate(self):
1232 1232 '''Invalidates the dirstate, causing the next call to dirstate
1233 1233 to check if it was modified since the last time it was read,
1234 1234 rereading it if it has.
1235 1235
1236 1236 This is different from dirstate.invalidate() in that it doesn't
1237 1237 always reread the dirstate. Use dirstate.invalidate() if you want to
1238 1238 explicitly read the dirstate again (i.e. restoring it to a previous
1239 1239 known good state).'''
1240 1240 if hasunfilteredcache(self, 'dirstate'):
1241 1241 for k in self.dirstate._filecache:
1242 1242 try:
1243 1243 delattr(self.dirstate, k)
1244 1244 except AttributeError:
1245 1245 pass
1246 1246 delattr(self.unfiltered(), 'dirstate')
1247 1247
1248 1248 def invalidate(self, clearfilecache=False):
1249 '''Invalidates both store and non-store parts other than dirstate
1250
1251 If a transaction is running, invalidation of store is omitted,
1252 because discarding in-memory changes might cause inconsistency
1253 (e.g. an incomplete fncache causes an unintentional failure, but
1254 a redundant one doesn't).
1255 '''
1249 1256 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1250 1257 for k in self._filecache.keys():
1251 1258 # dirstate is invalidated separately in invalidatedirstate()
1252 1259 if k == 'dirstate':
1253 1260 continue
1254 1261
1255 1262 if clearfilecache:
1256 1263 del self._filecache[k]
1257 1264 try:
1258 1265 delattr(unfiltered, k)
1259 1266 except AttributeError:
1260 1267 pass
1261 1268 self.invalidatecaches()
1262 self.store.invalidatecaches()
1269 if not self.currenttransaction():
1270 # TODO: Changing contents of store outside transaction
1271 # causes inconsistency. We should make in-memory store
1272 # changes detectable, and abort if changed.
1273 self.store.invalidatecaches()
1263 1274
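
The net effect of the new invalidate(): file caches are always dropped, while store caches survive as long as a transaction runs. For a forced drop of filecache entries, as releasefn above does after a failed transaction, a caller can pass clearfilecache=True (sketch):

    repo.invalidate(clearfilecache=True)   # also discards filecache entries
    # store.invalidatecaches() still runs only outside a transaction
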
1264 1275 def invalidateall(self):
1265 1276 '''Fully invalidates both store and non-store parts, causing the
1266 1277 subsequent operation to reread any outside changes.'''
1267 1278 # extension should hook this to invalidate its caches
1268 1279 self.invalidate()
1269 1280 self.invalidatedirstate()
1270 1281
1271 1282 def _refreshfilecachestats(self, tr):
1272 1283 """Reload stats of cached files so that they are flagged as valid"""
1273 1284 for k, ce in self._filecache.items():
1274 1285 if k == 'dirstate' or k not in self.__dict__:
1275 1286 continue
1276 1287 ce.refresh()
1277 1288
1278 1289 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1279 1290 inheritchecker=None, parentenvvar=None):
1280 1291 parentlock = None
1281 1292 # the contents of parentenvvar are used by the underlying lock to
1282 1293 # determine whether it can be inherited
1283 1294 if parentenvvar is not None:
1284 1295 parentlock = os.environ.get(parentenvvar)
1285 1296 try:
1286 1297 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1287 1298 acquirefn=acquirefn, desc=desc,
1288 1299 inheritchecker=inheritchecker,
1289 1300 parentlock=parentlock)
1290 1301 except error.LockHeld as inst:
1291 1302 if not wait:
1292 1303 raise
1293 1304 # show more details for new-style locks
1294 1305 if ':' in inst.locker:
1295 1306 host, pid = inst.locker.split(":", 1)
1296 1307 self.ui.warn(
1297 1308 _("waiting for lock on %s held by process %r "
1298 1309 "on host %r\n") % (desc, pid, host))
1299 1310 else:
1300 1311 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1301 1312 (desc, inst.locker))
1302 1313 # default to 600 seconds timeout
1303 1314 l = lockmod.lock(vfs, lockname,
1304 1315 int(self.ui.config("ui", "timeout", "600")),
1305 1316 releasefn=releasefn, acquirefn=acquirefn,
1306 1317 desc=desc)
1307 1318 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1308 1319 return l
1309 1320
1310 1321 def _afterlock(self, callback):
1311 1322 """add a callback to be run when the repository is fully unlocked
1312 1323
1313 1324 The callback will be executed when the outermost lock is released
1314 1325 (with wlock being higher level than 'lock')."""
1315 1326 for ref in (self._wlockref, self._lockref):
1316 1327 l = ref and ref()
1317 1328 if l and l.held:
1318 1329 l.postrelease.append(callback)
1319 1330 break
1320 1331 else: # no lock has been found.
1321 1332 callback()
1322 1333
1323 1334 def lock(self, wait=True):
1324 1335 '''Lock the repository store (.hg/store) and return a weak reference
1325 1336 to the lock. Use this before modifying the store (e.g. committing or
1326 1337 stripping). If you are opening a transaction, get a lock as well.
1327 1338
1328 1339 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1329 1340 'wlock' first to avoid a dead-lock hazard.'''
1330 1341 l = self._currentlock(self._lockref)
1331 1342 if l is not None:
1332 1343 l.lock()
1333 1344 return l
1334 1345
1335 1346 l = self._lock(self.svfs, "lock", wait, None,
1336 1347 self.invalidate, _('repository %s') % self.origroot)
1337 1348 self._lockref = weakref.ref(l)
1338 1349 return l
1339 1350
1340 1351 def _wlockchecktransaction(self):
1341 1352 if self.currenttransaction() is not None:
1342 1353 raise error.LockInheritanceContractViolation(
1343 1354 'wlock cannot be inherited in the middle of a transaction')
1344 1355
1345 1356 def wlock(self, wait=True):
1346 1357 '''Lock the non-store parts of the repository (everything under
1347 1358 .hg except .hg/store) and return a weak reference to the lock.
1348 1359
1349 1360 Use this before modifying files in .hg.
1350 1361
1351 1362 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1352 1363 'wlock' first to avoid a dead-lock hazard.'''
1353 1364 l = self._wlockref and self._wlockref()
1354 1365 if l is not None and l.held:
1355 1366 l.lock()
1356 1367 return l
1357 1368
1358 1369 # We do not need to check for non-waiting lock acquisition. Such
1359 1370 # acquisition would not cause dead-lock as they would just fail.
1360 1371 if wait and (self.ui.configbool('devel', 'all-warnings')
1361 1372 or self.ui.configbool('devel', 'check-locks')):
1362 1373 if self._currentlock(self._lockref) is not None:
1363 1374 self.ui.develwarn('"wlock" acquired after "lock"')
1364 1375
1365 1376 def unlock():
1366 1377 if self.dirstate.pendingparentchange():
1367 1378 self.dirstate.invalidate()
1368 1379 else:
1369 1380 self.dirstate.write(None)
1370 1381
1371 1382 self._filecache['dirstate'].refresh()
1372 1383
1373 1384 l = self._lock(self.vfs, "wlock", wait, unlock,
1374 1385 self.invalidatedirstate, _('working directory of %s') %
1375 1386 self.origroot,
1376 1387 inheritchecker=self._wlockchecktransaction,
1377 1388 parentenvvar='HG_WLOCK_LOCKER')
1378 1389 self._wlockref = weakref.ref(l)
1379 1390 return l
1380 1391
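
lock() and wlock() above document a strict ordering; taking the store lock first risks deadlock against another process doing the opposite. A sketch of the correct order, using the locks as context managers as recover() does above:

    with repo.wlock():        # working-copy lock first...
        with repo.lock():     # ...then the store lock
            pass              # safe to modify both .hg and .hg/store
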
1381 1392 def _currentlock(self, lockref):
1382 1393 """Returns the lock if it's held, or None if it's not."""
1383 1394 if lockref is None:
1384 1395 return None
1385 1396 l = lockref()
1386 1397 if l is None or not l.held:
1387 1398 return None
1388 1399 return l
1389 1400
1390 1401 def currentwlock(self):
1391 1402 """Returns the wlock if it's held, or None if it's not."""
1392 1403 return self._currentlock(self._wlockref)
1393 1404
1394 1405 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1395 1406 """
1396 1407 commit an individual file as part of a larger transaction
1397 1408 """
1398 1409
1399 1410 fname = fctx.path()
1400 1411 fparent1 = manifest1.get(fname, nullid)
1401 1412 fparent2 = manifest2.get(fname, nullid)
1402 1413 if isinstance(fctx, context.filectx):
1403 1414 node = fctx.filenode()
1404 1415 if node in [fparent1, fparent2]:
1405 1416 self.ui.debug('reusing %s filelog entry\n' % fname)
1406 1417 if manifest1.flags(fname) != fctx.flags():
1407 1418 changelist.append(fname)
1408 1419 return node
1409 1420
1410 1421 flog = self.file(fname)
1411 1422 meta = {}
1412 1423 copy = fctx.renamed()
1413 1424 if copy and copy[0] != fname:
1414 1425 # Mark the new revision of this file as a copy of another
1415 1426 # file. This copy data will effectively act as a parent
1416 1427 # of this new revision. If this is a merge, the first
1417 1428 # parent will be the nullid (meaning "look up the copy data")
1418 1429 # and the second one will be the other parent. For example:
1419 1430 #
1420 1431 # 0 --- 1 --- 3 rev1 changes file foo
1421 1432 # \ / rev2 renames foo to bar and changes it
1422 1433 # \- 2 -/ rev3 should have bar with all changes and
1423 1434 # should record that bar descends from
1424 1435 # bar in rev2 and foo in rev1
1425 1436 #
1426 1437 # this allows this merge to succeed:
1427 1438 #
1428 1439 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1429 1440 # \ / merging rev3 and rev4 should use bar@rev2
1430 1441 # \- 2 --- 4 as the merge base
1431 1442 #
1432 1443
1433 1444 cfname = copy[0]
1434 1445 crev = manifest1.get(cfname)
1435 1446 newfparent = fparent2
1436 1447
1437 1448 if manifest2: # branch merge
1438 1449 if fparent2 == nullid or crev is None: # copied on remote side
1439 1450 if cfname in manifest2:
1440 1451 crev = manifest2[cfname]
1441 1452 newfparent = fparent1
1442 1453
1443 1454 # Here, we used to search backwards through history to try to find
1444 1455 # where the file copy came from if the source of a copy was not in
1445 1456 # the parent directory. However, this doesn't actually make sense to
1446 1457 # do (what does a copy from something not in your working copy even
1447 1458 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1448 1459 # the user that copy information was dropped, so if they didn't
1449 1460 # expect this outcome it can be fixed, but this is the correct
1450 1461 # behavior in this circumstance.
1451 1462
1452 1463 if crev:
1453 1464 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1454 1465 meta["copy"] = cfname
1455 1466 meta["copyrev"] = hex(crev)
1456 1467 fparent1, fparent2 = nullid, newfparent
1457 1468 else:
1458 1469 self.ui.warn(_("warning: can't find ancestor for '%s' "
1459 1470 "copied from '%s'!\n") % (fname, cfname))
1460 1471
1461 1472 elif fparent1 == nullid:
1462 1473 fparent1, fparent2 = fparent2, nullid
1463 1474 elif fparent2 != nullid:
1464 1475 # is one parent an ancestor of the other?
1465 1476 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1466 1477 if fparent1 in fparentancestors:
1467 1478 fparent1, fparent2 = fparent2, nullid
1468 1479 elif fparent2 in fparentancestors:
1469 1480 fparent2 = nullid
1470 1481
1471 1482 # is the file changed?
1472 1483 text = fctx.data()
1473 1484 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1474 1485 changelist.append(fname)
1475 1486 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1476 1487 # are just the flags changed during merge?
1477 1488 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1478 1489 changelist.append(fname)
1479 1490
1480 1491 return fparent1
1481 1492
1482 1493 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1483 1494 """check for commit arguments that aren't commitable"""
1484 1495 if match.isexact() or match.prefix():
1485 1496 matched = set(status.modified + status.added + status.removed)
1486 1497
1487 1498 for f in match.files():
1488 1499 f = self.dirstate.normalize(f)
1489 1500 if f == '.' or f in matched or f in wctx.substate:
1490 1501 continue
1491 1502 if f in status.deleted:
1492 1503 fail(f, _('file not found!'))
1493 1504 if f in vdirs: # visited directory
1494 1505 d = f + '/'
1495 1506 for mf in matched:
1496 1507 if mf.startswith(d):
1497 1508 break
1498 1509 else:
1499 1510 fail(f, _("no match under directory!"))
1500 1511 elif f not in self.dirstate:
1501 1512 fail(f, _("file not tracked!"))
1502 1513
1503 1514 @unfilteredmethod
1504 1515 def commit(self, text="", user=None, date=None, match=None, force=False,
1505 1516 editor=False, extra=None):
1506 1517 """Add a new revision to current repository.
1507 1518
1508 1519 Revision information is gathered from the working directory,
1509 1520 match can be used to filter the committed files. If editor is
1510 1521 supplied, it is called to get a commit message.
1511 1522 """
1512 1523 if extra is None:
1513 1524 extra = {}
1514 1525
1515 1526 def fail(f, msg):
1516 1527 raise error.Abort('%s: %s' % (f, msg))
1517 1528
1518 1529 if not match:
1519 1530 match = matchmod.always(self.root, '')
1520 1531
1521 1532 if not force:
1522 1533 vdirs = []
1523 1534 match.explicitdir = vdirs.append
1524 1535 match.bad = fail
1525 1536
1526 1537 wlock = lock = tr = None
1527 1538 try:
1528 1539 wlock = self.wlock()
1529 1540 lock = self.lock() # for recent changelog (see issue4368)
1530 1541
1531 1542 wctx = self[None]
1532 1543 merge = len(wctx.parents()) > 1
1533 1544
1534 1545 if not force and merge and match.ispartial():
1535 1546 raise error.Abort(_('cannot partially commit a merge '
1536 1547 '(do not specify files or patterns)'))
1537 1548
1538 1549 status = self.status(match=match, clean=force)
1539 1550 if force:
1540 1551 status.modified.extend(status.clean) # mq may commit clean files
1541 1552
1542 1553 # check subrepos
1543 1554 subs = []
1544 1555 commitsubs = set()
1545 1556 newstate = wctx.substate.copy()
1546 1557 # only manage subrepos and .hgsubstate if .hgsub is present
1547 1558 if '.hgsub' in wctx:
1548 1559 # we'll decide whether to track this ourselves, thanks
1549 1560 for c in status.modified, status.added, status.removed:
1550 1561 if '.hgsubstate' in c:
1551 1562 c.remove('.hgsubstate')
1552 1563
1553 1564 # compare current state to last committed state
1554 1565 # build new substate based on last committed state
1555 1566 oldstate = wctx.p1().substate
1556 1567 for s in sorted(newstate.keys()):
1557 1568 if not match(s):
1558 1569 # ignore working copy, use old state if present
1559 1570 if s in oldstate:
1560 1571 newstate[s] = oldstate[s]
1561 1572 continue
1562 1573 if not force:
1563 1574 raise error.Abort(
1564 1575 _("commit with new subrepo %s excluded") % s)
1565 1576 dirtyreason = wctx.sub(s).dirtyreason(True)
1566 1577 if dirtyreason:
1567 1578 if not self.ui.configbool('ui', 'commitsubrepos'):
1568 1579 raise error.Abort(dirtyreason,
1569 1580 hint=_("use --subrepos for recursive commit"))
1570 1581 subs.append(s)
1571 1582 commitsubs.add(s)
1572 1583 else:
1573 1584 bs = wctx.sub(s).basestate()
1574 1585 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1575 1586 if oldstate.get(s, (None, None, None))[1] != bs:
1576 1587 subs.append(s)
1577 1588
1578 1589 # check for removed subrepos
1579 1590 for p in wctx.parents():
1580 1591 r = [s for s in p.substate if s not in newstate]
1581 1592 subs += [s for s in r if match(s)]
1582 1593 if subs:
1583 1594 if (not match('.hgsub') and
1584 1595 '.hgsub' in (wctx.modified() + wctx.added())):
1585 1596 raise error.Abort(
1586 1597 _("can't commit subrepos without .hgsub"))
1587 1598 status.modified.insert(0, '.hgsubstate')
1588 1599
1589 1600 elif '.hgsub' in status.removed:
1590 1601 # clean up .hgsubstate when .hgsub is removed
1591 1602 if ('.hgsubstate' in wctx and
1592 1603 '.hgsubstate' not in (status.modified + status.added +
1593 1604 status.removed)):
1594 1605 status.removed.insert(0, '.hgsubstate')
1595 1606
1596 1607 # make sure all explicit patterns are matched
1597 1608 if not force:
1598 1609 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1599 1610
1600 1611 cctx = context.workingcommitctx(self, status,
1601 1612 text, user, date, extra)
1602 1613
1603 1614 # internal config: ui.allowemptycommit
1604 1615 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1605 1616 or extra.get('close') or merge or cctx.files()
1606 1617 or self.ui.configbool('ui', 'allowemptycommit'))
1607 1618 if not allowemptycommit:
1608 1619 return None
1609 1620
1610 1621 if merge and cctx.deleted():
1611 1622 raise error.Abort(_("cannot commit merge with missing files"))
1612 1623
1613 1624 ms = mergemod.mergestate.read(self)
1614 1625
1615 1626 if list(ms.unresolved()):
1616 1627 raise error.Abort(_('unresolved merge conflicts '
1617 1628 '(see "hg help resolve")'))
1618 1629 if ms.mdstate() != 's' or list(ms.driverresolved()):
1619 1630 raise error.Abort(_('driver-resolved merge conflicts'),
1620 1631 hint=_('run "hg resolve --all" to resolve'))
1621 1632
1622 1633 if editor:
1623 1634 cctx._text = editor(self, cctx, subs)
1624 1635 edited = (text != cctx._text)
1625 1636
1626 1637 # Save commit message in case this transaction gets rolled back
1627 1638 # (e.g. by a pretxncommit hook). Leave the content alone on
1628 1639 # the assumption that the user will use the same editor again.
1629 1640 msgfn = self.savecommitmessage(cctx._text)
1630 1641
1631 1642 # commit subs and write new state
1632 1643 if subs:
1633 1644 for s in sorted(commitsubs):
1634 1645 sub = wctx.sub(s)
1635 1646 self.ui.status(_('committing subrepository %s\n') %
1636 1647 subrepo.subrelpath(sub))
1637 1648 sr = sub.commit(cctx._text, user, date)
1638 1649 newstate[s] = (newstate[s][0], sr)
1639 1650 subrepo.writestate(self, newstate)
1640 1651
1641 1652 p1, p2 = self.dirstate.parents()
1642 1653 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1643 1654 try:
1644 1655 self.hook("precommit", throw=True, parent1=hookp1,
1645 1656 parent2=hookp2)
1646 1657 tr = self.transaction('commit')
1647 1658 ret = self.commitctx(cctx, True)
1648 1659 except: # re-raises
1649 1660 if edited:
1650 1661 self.ui.write(
1651 1662 _('note: commit message saved in %s\n') % msgfn)
1652 1663 raise
1653 1664 # update bookmarks, dirstate and mergestate
1654 1665 bookmarks.update(self, [p1, p2], ret)
1655 1666 cctx.markcommitted(ret)
1656 1667 ms.reset()
1657 1668 tr.close()
1658 1669
1659 1670 finally:
1660 1671 lockmod.release(tr, lock, wlock)
1661 1672
1662 1673 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1663 1674 # hack for commands that use a temporary commit (e.g. histedit):
1664 1675 # the temporary commit may already be stripped by the time the hook runs
1665 1676 if self.changelog.hasnode(ret):
1666 1677 self.hook("commit", node=node, parent1=parent1,
1667 1678 parent2=parent2)
1668 1679 self._afterlock(commithook)
1669 1680 return ret
1670 1681
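# Illustrative sketch, not part of this module: one way a script might drive
# the commit() method defined above; the repository path and user are
# hypothetical placeholders.
#
#   from mercurial import hg, ui as uimod
#   repo = hg.repository(uimod.ui(), '/path/to/repo')
#   node = repo.commit(text='example commit',
#                      user='Alice <alice@example.com>')
#   # 'node' is the binary id of the new changeset, or None when there was
#   # nothing to commit and empty commits are not allowed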
1671 1682 @unfilteredmethod
1672 1683 def commitctx(self, ctx, error=False):
1673 1684 """Add a new revision to current repository.
1674 1685 Revision information is passed via the context argument.
1675 1686 """
1676 1687
1677 1688 tr = None
1678 1689 p1, p2 = ctx.p1(), ctx.p2()
1679 1690 user = ctx.user()
1680 1691
1681 1692 lock = self.lock()
1682 1693 try:
1683 1694 tr = self.transaction("commit")
1684 1695 trp = weakref.proxy(tr)
1685 1696
1686 1697 if ctx.files():
1687 1698 m1 = p1.manifest()
1688 1699 m2 = p2.manifest()
1689 1700 m = m1.copy()
1690 1701
1691 1702 # check in files
1692 1703 added = []
1693 1704 changed = []
1694 1705 removed = list(ctx.removed())
1695 1706 linkrev = len(self)
1696 1707 self.ui.note(_("committing files:\n"))
1697 1708 for f in sorted(ctx.modified() + ctx.added()):
1698 1709 self.ui.note(f + "\n")
1699 1710 try:
1700 1711 fctx = ctx[f]
1701 1712 if fctx is None:
1702 1713 removed.append(f)
1703 1714 else:
1704 1715 added.append(f)
1705 1716 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1706 1717 trp, changed)
1707 1718 m.setflag(f, fctx.flags())
1708 1719 except OSError as inst:
1709 1720 self.ui.warn(_("trouble committing %s!\n") % f)
1710 1721 raise
1711 1722 except IOError as inst:
1712 1723 errcode = getattr(inst, 'errno', errno.ENOENT)
1713 1724 if error or errcode and errcode != errno.ENOENT:
1714 1725 self.ui.warn(_("trouble committing %s!\n") % f)
1715 1726 raise
1716 1727
1717 1728 # update manifest
1718 1729 self.ui.note(_("committing manifest\n"))
1719 1730 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1720 1731 drop = [f for f in removed if f in m]
1721 1732 for f in drop:
1722 1733 del m[f]
1723 1734 mn = self.manifest.add(m, trp, linkrev,
1724 1735 p1.manifestnode(), p2.manifestnode(),
1725 1736 added, drop)
1726 1737 files = changed + removed
1727 1738 else:
1728 1739 mn = p1.manifestnode()
1729 1740 files = []
1730 1741
1731 1742 # update changelog
1732 1743 self.ui.note(_("committing changelog\n"))
1733 1744 self.changelog.delayupdate(tr)
1734 1745 n = self.changelog.add(mn, files, ctx.description(),
1735 1746 trp, p1.node(), p2.node(),
1736 1747 user, ctx.date(), ctx.extra().copy())
1737 1748 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1738 1749 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1739 1750 parent2=xp2)
1740 1751 # set the new commit in its proper phase
1741 1752 targetphase = subrepo.newcommitphase(self.ui, ctx)
1742 1753 if targetphase:
1743 1754 # retracting the boundary does not alter parent changesets.
1744 1755 # if a parent has a higher phase, the resulting phase will
1745 1756 # be compliant anyway
1746 1757 #
1747 1758 # if the minimal phase was 0 we don't need to retract anything
1748 1759 phases.retractboundary(self, tr, targetphase, [n])
1749 1760 tr.close()
1750 1761 branchmap.updatecache(self.filtered('served'))
1751 1762 return n
1752 1763 finally:
1753 1764 if tr:
1754 1765 tr.release()
1755 1766 lock.release()
1756 1767
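# Illustrative sketch, not part of this module: commitctx() is also the entry
# point when a changeset is built in memory with context.memctx; the file
# name and content are hypothetical and argument details are abbreviated.
#
#   from mercurial import context
#   from mercurial.node import nullid
#   def getfilectx(repo, memctx, path):
#       return context.memfilectx(repo, path, 'example content\n')
#   mctx = context.memctx(repo, (repo['.'].node(), nullid),
#                         'example message', ['example.txt'], getfilectx,
#                         user='Alice <alice@example.com>')
#   node = repo.commitctx(mctx)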
1757 1768 @unfilteredmethod
1758 1769 def destroying(self):
1759 1770 '''Inform the repository that nodes are about to be destroyed.
1760 1771 Intended for use by strip and rollback, so there's a common
1761 1772 place for anything that has to be done before destroying history.
1762 1773
1763 1774 This is mostly useful for saving state that is in memory and waiting
1764 1775 to be flushed when the current lock is released. Because a call to
1765 1776 destroyed is imminent, the repo will be invalidated causing those
1766 1777 changes to stay in memory (waiting for the next unlock), or vanish
1767 1778 completely.
1768 1779 '''
1769 1780 # When using the same lock to commit and strip, the phasecache is left
1770 1781 # dirty after committing. Then when we strip, the repo is invalidated,
1771 1782 # causing those changes to disappear.
1772 1783 if '_phasecache' in vars(self):
1773 1784 self._phasecache.write()
1774 1785
1775 1786 @unfilteredmethod
1776 1787 def destroyed(self):
1777 1788 '''Inform the repository that nodes have been destroyed.
1778 1789 Intended for use by strip and rollback, so there's a common
1779 1790 place for anything that has to be done after destroying history.
1780 1791 '''
1781 1792 # When one tries to:
1782 1793 # 1) destroy nodes thus calling this method (e.g. strip)
1783 1794 # 2) use phasecache somewhere (e.g. commit)
1784 1795 #
1785 1796 # then 2) will fail because the phasecache contains nodes that were
1786 1797 # removed. We can either remove phasecache from the filecache,
1787 1798 # causing it to reload next time it is accessed, or simply filter
1788 1799 # the removed nodes now and write the updated cache.
1789 1800 self._phasecache.filterunknown(self)
1790 1801 self._phasecache.write()
1791 1802
1792 1803 # update the 'served' branch cache to help read-only server processes.
1793 1804 # Thanks to branchcache collaboration this is done from the nearest
1794 1805 # filtered subset and it is expected to be fast.
1795 1806 branchmap.updatecache(self.filtered('served'))
1796 1807
1797 1808 # Ensure the persistent tag cache is updated. Doing it now
1798 1809 # means that the tag cache only has to worry about destroyed
1799 1810 # heads immediately after a strip/rollback. That in turn
1800 1811 # guarantees that "cachetip == currenttip" (comparing both rev
1801 1812 # and node) always means no nodes have been added or destroyed.
1802 1813
1803 1814 # XXX this is suboptimal when qrefresh'ing: we strip the current
1804 1815 # head, refresh the tag cache, then immediately add a new head.
1805 1816 # But I think doing it this way is necessary for the "instant
1806 1817 # tag cache retrieval" case to work.
1807 1818 self.invalidate()
1808 1819
1809 1820 def walk(self, match, node=None):
1810 1821 '''
1811 1822 walk recursively through the directory tree or a given
1812 1823 changeset, finding all files matched by the match
1813 1824 function
1814 1825 '''
1815 1826 return self[node].walk(match)
1816 1827
1817 1828 def status(self, node1='.', node2=None, match=None,
1818 1829 ignored=False, clean=False, unknown=False,
1819 1830 listsubrepos=False):
1820 1831 '''a convenience method that calls node1.status(node2)'''
1821 1832 return self[node1].status(node2, match, ignored, clean, unknown,
1822 1833 listsubrepos)
1823 1834
1824 1835 def heads(self, start=None):
1825 1836 heads = self.changelog.heads(start)
1826 1837 # sort the output in rev descending order
1827 1838 return sorted(heads, key=self.changelog.rev, reverse=True)
1828 1839
1829 1840 def branchheads(self, branch=None, start=None, closed=False):
1830 1841 '''return a (possibly filtered) list of heads for the given branch
1831 1842
1832 1843 Heads are returned in topological order, from newest to oldest.
1833 1844 If branch is None, use the dirstate branch.
1834 1845 If start is not None, return only heads reachable from start.
1835 1846 If closed is True, return heads that are marked as closed as well.
1836 1847 '''
1837 1848 if branch is None:
1838 1849 branch = self[None].branch()
1839 1850 branches = self.branchmap()
1840 1851 if branch not in branches:
1841 1852 return []
1842 1853 # the cache returns heads ordered lowest to highest
1843 1854 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1844 1855 if start is not None:
1845 1856 # filter out the heads that cannot be reached from startrev
1846 1857 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1847 1858 bheads = [h for h in bheads if h in fbheads]
1848 1859 return bheads
1849 1860
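# Illustrative sketch, not part of this module: typical use of branchheads(),
# assuming a branch named 'default' exists in the repository.
#
#   heads = repo.branchheads('default', closed=True)
#   # list of binary head nodes for the branch, newest first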
1850 1861 def branches(self, nodes):
1851 1862 if not nodes:
1852 1863 nodes = [self.changelog.tip()]
1853 1864 b = []
1854 1865 for n in nodes:
1855 1866 t = n
1856 1867 while True:
1857 1868 p = self.changelog.parents(n)
1858 1869 if p[1] != nullid or p[0] == nullid:
1859 1870 b.append((t, n, p[0], p[1]))
1860 1871 break
1861 1872 n = p[0]
1862 1873 return b
1863 1874
1864 1875 def between(self, pairs):
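# (descriptive note, inferred from the loop below: for each (top, bottom)
# pair, walk first parents from 'top' towards 'bottom' and collect the nodes
# that are 1, 2, 4, 8, ... steps below 'top'; this sampling appears to back
# the legacy 'between' wire protocol command used by old discovery)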
1865 1876 r = []
1866 1877
1867 1878 for top, bottom in pairs:
1868 1879 n, l, i = top, [], 0
1869 1880 f = 1
1870 1881
1871 1882 while n != bottom and n != nullid:
1872 1883 p = self.changelog.parents(n)[0]
1873 1884 if i == f:
1874 1885 l.append(n)
1875 1886 f = f * 2
1876 1887 n = p
1877 1888 i += 1
1878 1889
1879 1890 r.append(l)
1880 1891
1881 1892 return r
1882 1893
1883 1894 def checkpush(self, pushop):
1884 1895 """Extensions can override this function if additional checks have
1885 1896 to be performed before pushing, or call it if they override push
1886 1897 command.
1887 1898 """
1888 1899 pass
1889 1900
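# Illustrative sketch, not part of this module: an extension overriding
# checkpush() via the usual reposetup() subclassing pattern; the class name
# and validation are hypothetical.
#
#   def reposetup(ui, repo):
#       class validatingrepo(repo.__class__):
#           def checkpush(self, pushop):
#               super(validatingrepo, self).checkpush(pushop)
#               # additional validation of pushop.outgoing could go here
#       repo.__class__ = validatingrepo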
1890 1901 @unfilteredpropertycache
1891 1902 def prepushoutgoinghooks(self):
1892 1903 """Return util.hooks consisting of a pushop with repo, remote, outgoing
1893 1904 methods, which are called before pushing changesets.
1894 1905 """
1895 1906 return util.hooks()
1896 1907
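# Illustrative sketch, not part of this module: registering a pre-push hook
# on the util.hooks object above; the source name and check are hypothetical.
#
#   def _checkoutgoing(pushop):
#       for node in pushop.outgoing.missing:
#           pass  # inspect changesets that are about to be pushed
#   repo.prepushoutgoinghooks.add('myextension', _checkoutgoing)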
1897 1908 def pushkey(self, namespace, key, old, new):
1898 1909 try:
1899 1910 tr = self.currenttransaction()
1900 1911 hookargs = {}
1901 1912 if tr is not None:
1902 1913 hookargs.update(tr.hookargs)
1903 1914 hookargs['namespace'] = namespace
1904 1915 hookargs['key'] = key
1905 1916 hookargs['old'] = old
1906 1917 hookargs['new'] = new
1907 1918 self.hook('prepushkey', throw=True, **hookargs)
1908 1919 except error.HookAbort as exc:
1909 1920 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1910 1921 if exc.hint:
1911 1922 self.ui.write_err(_("(%s)\n") % exc.hint)
1912 1923 return False
1913 1924 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1914 1925 ret = pushkey.push(self, namespace, key, old, new)
1915 1926 def runhook():
1916 1927 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1917 1928 ret=ret)
1918 1929 self._afterlock(runhook)
1919 1930 return ret
1920 1931
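# Illustrative sketch, not part of this module: pushkey() drives the generic
# key/value namespaces (bookmarks, phases, ...); the bookmark name and node
# below are hypothetical, and the old/new value semantics are only roughly
# indicated.
#
#   from mercurial.node import hex
#   repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
#   # an empty old value creates the bookmark; a non-empty one moves it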
1921 1932 def listkeys(self, namespace):
1922 1933 self.hook('prelistkeys', throw=True, namespace=namespace)
1923 1934 self.ui.debug('listing keys for "%s"\n' % namespace)
1924 1935 values = pushkey.list(self, namespace)
1925 1936 self.hook('listkeys', namespace=namespace, values=values)
1926 1937 return values
1927 1938
1928 1939 def debugwireargs(self, one, two, three=None, four=None, five=None):
1929 1940 '''used to test argument passing over the wire'''
1930 1941 return "%s %s %s %s %s" % (one, two, three, four, five)
1931 1942
1932 1943 def savecommitmessage(self, text):
1933 1944 fp = self.vfs('last-message.txt', 'wb')
1934 1945 try:
1935 1946 fp.write(text)
1936 1947 finally:
1937 1948 fp.close()
1938 1949 return self.pathto(fp.name[len(self.root) + 1:])
1939 1950
1940 1951 # used to avoid circular references so destructors work
1941 1952 def aftertrans(files):
1942 1953 renamefiles = [tuple(t) for t in files]
1943 1954 def a():
1944 1955 for vfs, src, dest in renamefiles:
1945 1956 try:
1946 1957 vfs.rename(src, dest)
1947 1958 except OSError: # journal file does not yet exist
1948 1959 pass
1949 1960 return a
1950 1961
1951 1962 def undoname(fn):
1952 1963 base, name = os.path.split(fn)
1953 1964 assert name.startswith('journal')
1954 1965 return os.path.join(base, name.replace('journal', 'undo', 1))
1955 1966
1956 1967 def instance(ui, path, create):
1957 1968 return localrepository(ui, util.urllocalpath(path), create)
1958 1969
1959 1970 def islocal(path):
1960 1971 return True
1961 1972
1962 1973 def newreporequirements(repo):
1963 1974 """Determine the set of requirements for a new local repository.
1964 1975
1965 1976 Extensions can wrap this function to specify custom requirements for
1966 1977 new repositories.
1967 1978 """
1968 1979 ui = repo.ui
1969 1980 requirements = set(['revlogv1'])
1970 1981 if ui.configbool('format', 'usestore', True):
1971 1982 requirements.add('store')
1972 1983 if ui.configbool('format', 'usefncache', True):
1973 1984 requirements.add('fncache')
1974 1985 if ui.configbool('format', 'dotencode', True):
1975 1986 requirements.add('dotencode')
1976 1987
1977 1988 if scmutil.gdinitconfig(ui):
1978 1989 requirements.add('generaldelta')
1979 1990 if ui.configbool('experimental', 'treemanifest', False):
1980 1991 requirements.add('treemanifest')
1981 1992 if ui.configbool('experimental', 'manifestv2', False):
1982 1993 requirements.add('manifestv2')
1983 1994
1984 1995 return requirements
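# Illustrative sketch, not part of this module: an extension adding a custom
# requirement by wrapping newreporequirements(); the config knob and
# requirement name are hypothetical.
#
#   from mercurial import extensions, localrepo
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'myformat'):
#           reqs.add('exp-myext-format')
#       return reqs
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)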