##// END OF EJS Templates
localrepo: isolate requirements determination from side effects...
Gregory Szorc -
r28163:5d3495e3 default
parent child Browse files
Show More
@@ -1,1966 +1,1971 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import inspect
12 12 import os
13 13 import random
14 14 import time
15 15 import urllib
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 propertycache = util.propertycache
62 62 filecache = scmutil.filecache
63 63
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    # Each accessor redirects to the unfiltered repo so that the cached value
    # is shared by all filtered views of the same repository.
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
74 74
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve fname relative to .hg/store rather than .hg/
        return obj.sjoin(fname)
79 79
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # accessed on the unfiltered repo itself: compute and cache there
            return super(unfilteredpropertycache, self).__get__(unfi)
        # accessed through a filtered view: delegate to the unfiltered repo's
        # attribute so only one cached copy ever exists
        return getattr(unfi, self.name)
88 88
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store directly on the (possibly filtered) instance so each filtered
        # view keeps its own cached value
        object.__setattr__(obj, self.name, value)
94 94
95 95
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
99 99
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def inner(repo, *args, **kwargs):
        # swap the receiver for its unfiltered view before delegating
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return inner
105 105
# capabilities advertised by a modern local peer
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
# modern capabilities plus the legacy changegroupsubset protocol command
legacycaps = moderncaps.union(set(['changegroupsubset']))
109 109
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # serve from the 'served' filtered view, like a remote server would
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer exposes the underlying repository
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # translate a push race into the wire-protocol response error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
212 212
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (includes changegroupsubset)
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
231 231
class localrepository(object):

    # requirements that influence the on-disk store format; exchanged with
    # peers via the capability protocol
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # full set of requirements this client can open
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements forwarded to the store opener as revlog options
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # name of the active repoview filter; None on the unfiltered repo
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
244 244
    def _baserequirements(self, create):
        # minimal requirements for any newly created repository
        return ['revlogv1']
247 247
    def __init__(self, baseui, path=None, create=False):
        # NOTE(review): reconstructed from the post-change side of the diff
        # (r28163: requirements determination isolated from side effects).
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # load per-repo config and any extensions it enables; a missing
            # hgrc is not an error
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run featuresetup functions from enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                # First compute the full requirements set from config ...
                requirements = set(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.add('dotencode')

                if scmutil.gdinitconfig(self.ui):
                    requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    requirements.add("manifestv2")

                self.requirements = requirements

                # ... then perform the filesystem side effects
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                # existing repo: read its requirements from disk
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            # honor share-extension indirection when .hg/sharedpath exists
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
370 375
    def close(self):
        # flush any caches that are written lazily
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()
377 382
    def _restrictcapabilities(self, caps):
        # advertise bundle2 support (with our capabilities blob) unless
        # disabled via experimental.bundle2-advertise
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps
384 389
    def _applyopenerreqs(self):
        """Translate requirements and config into store-vfs revlog options."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
405 410
    def _writerequirements(self):
        # persist self.requirements to .hg/requires
        scmutil.writerequires(self.vfs, self.requirements)
408 413
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is itself a declared subrepo
                    return True
                else:
                    # path is nested deeper; delegate to the subrepo
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk up one path component and retry
                parts.pop()
        return False
446 451
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
463 468
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # bookmark store, invalidated when either bookmarks file changes
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        """Return nodes of all divergent variants of *bookmark* (name@suffix)."""
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads
479 484
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist but the feature is off: warn, keep read-only
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # a transaction is in progress in this repo: also read the
            # pending (not yet finalized) changelog data
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        # map dirstate parents not present in the changelog to nullid,
        # warning only once per repo instance
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
536 541
    def __getitem__(self, changeid):
        # None / wdirrev address the working directory
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # slice of revisions, skipping filtered ones
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)
561 566
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Return a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
595 600
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        """Internal tag worker: write tag entries and (if not local) commit.

        Returns the node of the tag commit, or None for local tags.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines, making sure the previous content ends with \n
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # record the old value first so history is preserved
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
669 674
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit over uncommitted .hgtags changes
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
699 704
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # filtered view: recompute rather than trust the shared cache
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
771 776
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            # build lazily; cached on the tagscache object afterwards
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # lazily invert the tag->node map into node->[tags]
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)
811 816
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
839 844
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        # prefer the branch name if key names an existing branch ...
        repo = remote or self
        if key in repo.branchmap():
            return key

        # ... otherwise resolve key as a changeset and return its branch
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        """Return a list of booleans: is each node known and visible here?"""
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None
884 889
    def join(self, f, *insidef):
        # path relative to .hg/
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        # path relative to the working directory root
        return self.wvfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        """Return the filelog for repository path *f* (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
        self.ui.deprecwarn(msg, '3.7')
        return self[changeid].parents()

    def changectx(self, changeid):
        return self[changeid]
904 909
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents to (p1, p2), fixing up copy records.

        Copy information is preserved only for entries added relative to
        the first parent; when p2 is null, copy records whose source and
        destination both left the first parent are dropped.
        """
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # sorted() keeps the iteration order deterministic while we
            # mutate copy records
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
921 926
    def filectx(self, path, changeid=None, fileid=None):
        """Return a filectx for ``path``.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
926 931
927 932 def getcwd(self):
928 933 return self.dirstate.getcwd()
929 934
930 935 def pathto(self, f, cwd=None):
931 936 return self.dirstate.pathto(f, cwd)
932 937
933 938 def wfile(self, f, mode='r'):
934 939 return self.wvfs(f, mode)
935 940
936 941 def _link(self, f):
937 942 return self.wvfs.islink(f)
938 943
939 944 def _loadfilter(self, filter):
940 945 if filter not in self.filterpats:
941 946 l = []
942 947 for pat, cmd in self.ui.configitems(filter):
943 948 if cmd == '!':
944 949 continue
945 950 mf = matchmod.match(self.root, '', [pat])
946 951 fn = None
947 952 params = cmd
948 953 for name, filterfn in self._datafilters.iteritems():
949 954 if cmd.startswith(name):
950 955 fn = filterfn
951 956 params = cmd[len(name):].lstrip()
952 957 break
953 958 if not fn:
954 959 fn = lambda s, c, **kwargs: util.filter(s, c)
955 960 # Wrap old filters not supporting keyword arguments
956 961 if not inspect.getargspec(fn)[2]:
957 962 oldfn = fn
958 963 fn = lambda s, c, **kwargs: oldfn(s, c)
959 964 l.append((mf, fn, params))
960 965 self.filterpats[filter] = l
961 966 return self.filterpats[filter]
962 967
963 968 def _filter(self, filterpats, filename, data):
964 969 for mf, fn, cmd in filterpats:
965 970 if mf(filename):
966 971 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
967 972 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
968 973 break
969 974
970 975 return data
971 976
    @unfilteredpropertycache
    def _encodefilterpats(self):
        """Cached ``[encode]`` filter patterns (see _loadfilter)."""
        return self._loadfilter('encode')
975 980
    @unfilteredpropertycache
    def _decodefilterpats(self):
        """Cached ``[decode]`` filter patterns (see _loadfilter)."""
        return self._loadfilter('decode')
979 984
980 985 def adddatafilter(self, name, filter):
981 986 self._datafilters[name] = filter
982 987
983 988 def wread(self, filename):
984 989 if self._link(filename):
985 990 data = self.wvfs.readlink(filename)
986 991 else:
987 992 data = self.wvfs.read(filename)
988 993 return self._filter(self._encodefilterpats, filename, data)
989 994
990 995 def wwrite(self, filename, data, flags):
991 996 """write ``data`` into ``filename`` in the working directory
992 997
993 998 This returns length of written (maybe decoded) data.
994 999 """
995 1000 data = self._filter(self._decodefilterpats, filename, data)
996 1001 if 'l' in flags:
997 1002 self.wvfs.symlink(data, filename)
998 1003 else:
999 1004 self.wvfs.write(filename, data)
1000 1005 if 'x' in flags:
1001 1006 self.wvfs.setflags(filename, False, True)
1002 1007 return len(data)
1003 1008
1004 1009 def wwritedata(self, filename, data):
1005 1010 return self._filter(self._decodefilterpats, filename, data)
1006 1011
1007 1012 def currenttransaction(self):
1008 1013 """return the current transaction or None if non exists"""
1009 1014 if self._transref:
1010 1015 tr = self._transref()
1011 1016 else:
1012 1017 tr = None
1013 1018
1014 1019 if tr and tr.running():
1015 1020 return tr
1016 1021 return None
1017 1022
    def transaction(self, desc, report=None):
        """Open a new transaction (or nest into the running one).

        ``desc`` names the transaction for hooks and journal metadata;
        ``report`` overrides the error-reporting callable (defaults to
        ui.warn). Returns the transaction object; callers must close or
        release it.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            # a transaction is already running; nest into it
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            # run when the transaction is released, on success or failure
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1112 1117
1113 1118 def _journalfiles(self):
1114 1119 return ((self.svfs, 'journal'),
1115 1120 (self.vfs, 'journal.dirstate'),
1116 1121 (self.vfs, 'journal.branch'),
1117 1122 (self.vfs, 'journal.desc'),
1118 1123 (self.vfs, 'journal.bookmarks'),
1119 1124 (self.svfs, 'journal.phaseroots'))
1120 1125
1121 1126 def undofiles(self):
1122 1127 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1123 1128
    def _writejournal(self, desc):
        # Snapshot the non-store state files so the transaction can be
        # rolled back: dirstate, branch, a description record (repo
        # length + desc), bookmarks and phaseroots. tryread() yields an
        # empty snapshot for files that do not exist yet.
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1135 1140
    def recover(self):
        """Recover from an interrupted transaction.

        Rolls the store back from the on-disk journal under the store
        lock. Returns True when a journal was found and rolled back,
        False otherwise.
        """
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
1149 1154
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction if undo information is available.

        Returns 0 on success (delegating to _rollback) and 1 when there
        is nothing to roll back. The dirstateguard protects the dirstate
        while _rollback rewrites it.
        """
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            # release() tolerates None entries for locks never acquired
            release(dsguard, lock, wlock)
1164 1169
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Perform the actual rollback of the last transaction.

        Reads undo.desc to describe what is being undone, refuses (unless
        ``force``) to roll back a commit when the working directory is not
        on tip, then replays the undo files and restores dirstate/branch/
        bookmarks/phaseroots as needed. Returns 0.
        """
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc: we can still roll back, just with less detail
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # did the rollback remove a working-directory parent?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1236 1241
1237 1242 def invalidatecaches(self):
1238 1243
1239 1244 if '_tagscache' in vars(self):
1240 1245 # can't use delattr on proxy
1241 1246 del self.__dict__['_tagscache']
1242 1247
1243 1248 self.unfiltered()._branchcaches.clear()
1244 1249 self.invalidatevolatilesets()
1245 1250
    def invalidatevolatilesets(self):
        """Forget volatile caches: filtered-revision sets and obsolescence
        caches."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1249 1254
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the dirstate's own cached properties so they are
            # reloaded lazily on next access
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            # finally drop the cached dirstate object itself (stored on
            # the unfiltered repo, like all filecache entries)
            delattr(self.unfiltered(), 'dirstate')
1266 1271
    def invalidate(self, clearfilecache=False):
        """Drop cached store data so it is reread from disk on next access.

        With ``clearfilecache`` the filecache entries themselves are also
        discarded, not just the cached attribute values.
        """
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        # NOTE(review): .keys() is a list copy on Python 2, which is needed
        # because entries may be deleted below; on Python 3 this would need
        # list(self._filecache) — confirm before porting.
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()
1282 1287
1283 1288 def invalidateall(self):
1284 1289 '''Fully invalidates both store and non-store parts, causing the
1285 1290 subsequent operation to reread any outside changes.'''
1286 1291 # extension should hook this to invalidate its caches
1287 1292 self.invalidate()
1288 1293 self.invalidatedirstate()
1289 1294
1290 1295 def _refreshfilecachestats(self, tr):
1291 1296 """Reload stats of cached files so that they are flagged as valid"""
1292 1297 for k, ce in self._filecache.items():
1293 1298 if k == 'dirstate' or k not in self.__dict__:
1294 1299 continue
1295 1300 ce.refresh()
1296 1301
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire lock ``lockname`` on ``vfs``.

        First tries a non-blocking acquisition; when that fails and
        ``wait`` is true, retries with a (configurable) timeout after
        warning the user. Returns the lock object.
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1321 1326
1322 1327 def _afterlock(self, callback):
1323 1328 """add a callback to be run when the repository is fully unlocked
1324 1329
1325 1330 The callback will be executed when the outermost lock is released
1326 1331 (with wlock being higher level than 'lock')."""
1327 1332 for ref in (self._wlockref, self._lockref):
1328 1333 l = ref and ref()
1329 1334 if l and l.held:
1330 1335 l.postrelease.append(callback)
1331 1336 break
1332 1337 else: # no lock have been found.
1333 1338 callback()
1334 1339
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # reentrant acquisition: bump the held lock's refcount
            l.lock()
            return l

        # acquiring the store lock invalidates cached store data (acquirefn)
        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1351 1356
1352 1357 def _wlockchecktransaction(self):
1353 1358 if self.currenttransaction() is not None:
1354 1359 raise error.LockInheritanceContractViolation(
1355 1360 'wlock cannot be inherited in the middle of a transaction')
1356 1361
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # reentrant acquisition: bump the held lock's refcount
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # on release: persist dirstate unless a parent change is
            # pending (in which case in-memory changes are discarded)
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1393 1398
1394 1399 def _currentlock(self, lockref):
1395 1400 """Returns the lock if it's held, or None if it's not."""
1396 1401 if lockref is None:
1397 1402 return None
1398 1403 l = lockref()
1399 1404 if l is None or not l.held:
1400 1405 return None
1401 1406 return l
1402 1407
1403 1408 def currentwlock(self):
1404 1409 """Returns the wlock if it's held, or None if it's not."""
1405 1410 return self._currentlock(self._wlockref)
1406 1411
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the filectx being committed; manifest1/manifest2 are the
        parents' manifests; linkrev is the changelog revision this filelog
        entry will link to. Appends the filename to ``changelist`` when the
        file actually changed and returns the resulting filelog node.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the file already has a filelog entry; reuse it when it matches
            # one of the parents
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1492 1497
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # track explicitly-matched directories so we can verify
            # explicit patterns below
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        # default arguments capture the current values so the deferred
        # hook sees this commit even if locals change later
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1676 1681
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With ``error`` set, IOErrors while committing individual files are
        re-raised even for missing files. Returns the new changelog node.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1762 1767
1763 1768 @unfilteredmethod
1764 1769 def destroying(self):
1765 1770 '''Inform the repository that nodes are about to be destroyed.
1766 1771 Intended for use by strip and rollback, so there's a common
1767 1772 place for anything that has to be done before destroying history.
1768 1773
1769 1774 This is mostly useful for saving state that is in memory and waiting
1770 1775 to be flushed when the current lock is released. Because a call to
1771 1776 destroyed is imminent, the repo will be invalidated causing those
1772 1777 changes to stay in memory (waiting for the next unlock), or vanish
1773 1778 completely.
1774 1779 '''
1775 1780 # When using the same lock to commit and strip, the phasecache is left
1776 1781 # dirty after committing. Then when we strip, the repo is invalidated,
1777 1782 # causing those changes to disappear.
1778 1783 if '_phasecache' in vars(self):
1779 1784 self._phasecache.write()
1780 1785
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        Refreshes the phase, branch and tag caches, then invalidates the
        repository's in-memory state.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1814 1819
1815 1820 def walk(self, match, node=None):
1816 1821 '''
1817 1822 walk recursively through the directory tree or a given
1818 1823 changeset, finding all files matched by the match
1819 1824 function
1820 1825 '''
1821 1826 return self[node].walk(match)
1822 1827
1823 1828 def status(self, node1='.', node2=None, match=None,
1824 1829 ignored=False, clean=False, unknown=False,
1825 1830 listsubrepos=False):
1826 1831 '''a convenience method that calls node1.status(node2)'''
1827 1832 return self[node1].status(node2, match, ignored, clean, unknown,
1828 1833 listsubrepos)
1829 1834
1830 1835 def heads(self, start=None):
1831 1836 heads = self.changelog.heads(start)
1832 1837 # sort the output in rev descending order
1833 1838 return sorted(heads, key=self.changelog.rev, reverse=True)
1834 1839
1835 1840 def branchheads(self, branch=None, start=None, closed=False):
1836 1841 '''return a (possibly filtered) list of heads for the given branch
1837 1842
1838 1843 Heads are returned in topological order, from newest to oldest.
1839 1844 If branch is None, use the dirstate branch.
1840 1845 If start is not None, return only heads reachable from start.
1841 1846 If closed is True, return heads that are marked as closed as well.
1842 1847 '''
1843 1848 if branch is None:
1844 1849 branch = self[None].branch()
1845 1850 branches = self.branchmap()
1846 1851 if branch not in branches:
1847 1852 return []
1848 1853 # the cache returns heads ordered lowest to highest
1849 1854 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1850 1855 if start is not None:
1851 1856 # filter out the heads that cannot be reached from startrev
1852 1857 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1853 1858 bheads = [h for h in bheads if h in fbheads]
1854 1859 return bheads
1855 1860
1856 1861 def branches(self, nodes):
1857 1862 if not nodes:
1858 1863 nodes = [self.changelog.tip()]
1859 1864 b = []
1860 1865 for n in nodes:
1861 1866 t = n
1862 1867 while True:
1863 1868 p = self.changelog.parents(n)
1864 1869 if p[1] != nullid or p[0] == nullid:
1865 1870 b.append((t, n, p[0], p[1]))
1866 1871 break
1867 1872 n = p[0]
1868 1873 return b
1869 1874
1870 1875 def between(self, pairs):
1871 1876 r = []
1872 1877
1873 1878 for top, bottom in pairs:
1874 1879 n, l, i = top, [], 0
1875 1880 f = 1
1876 1881
1877 1882 while n != bottom and n != nullid:
1878 1883 p = self.changelog.parents(n)[0]
1879 1884 if i == f:
1880 1885 l.append(n)
1881 1886 f = f * 2
1882 1887 n = p
1883 1888 i += 1
1884 1889
1885 1890 r.append(l)
1886 1891
1887 1892 return r
1888 1893
1889 1894 def checkpush(self, pushop):
1890 1895 """Extensions can override this function if additional checks have
1891 1896 to be performed before pushing, or call it if they override push
1892 1897 command.
1893 1898 """
1894 1899 pass
1895 1900
1896 1901 @unfilteredpropertycache
1897 1902 def prepushoutgoinghooks(self):
1898 1903 """Return util.hooks consists of "(repo, remote, outgoing)"
1899 1904 functions, which are called before pushing changesets.
1900 1905 """
1901 1906 return util.hooks()
1902 1907
1903 1908 def pushkey(self, namespace, key, old, new):
1904 1909 try:
1905 1910 tr = self.currenttransaction()
1906 1911 hookargs = {}
1907 1912 if tr is not None:
1908 1913 hookargs.update(tr.hookargs)
1909 1914 hookargs['namespace'] = namespace
1910 1915 hookargs['key'] = key
1911 1916 hookargs['old'] = old
1912 1917 hookargs['new'] = new
1913 1918 self.hook('prepushkey', throw=True, **hookargs)
1914 1919 except error.HookAbort as exc:
1915 1920 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1916 1921 if exc.hint:
1917 1922 self.ui.write_err(_("(%s)\n") % exc.hint)
1918 1923 return False
1919 1924 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1920 1925 ret = pushkey.push(self, namespace, key, old, new)
1921 1926 def runhook():
1922 1927 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1923 1928 ret=ret)
1924 1929 self._afterlock(runhook)
1925 1930 return ret
1926 1931
1927 1932 def listkeys(self, namespace):
1928 1933 self.hook('prelistkeys', throw=True, namespace=namespace)
1929 1934 self.ui.debug('listing keys for "%s"\n' % namespace)
1930 1935 values = pushkey.list(self, namespace)
1931 1936 self.hook('listkeys', namespace=namespace, values=values)
1932 1937 return values
1933 1938
1934 1939 def debugwireargs(self, one, two, three=None, four=None, five=None):
1935 1940 '''used to test argument passing over the wire'''
1936 1941 return "%s %s %s %s %s" % (one, two, three, four, five)
1937 1942
1938 1943 def savecommitmessage(self, text):
1939 1944 fp = self.vfs('last-message.txt', 'wb')
1940 1945 try:
1941 1946 fp.write(text)
1942 1947 finally:
1943 1948 fp.close()
1944 1949 return self.pathto(fp.name[len(self.root) + 1:])
1945 1950
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the queued (vfs, src, dest) renames.

    Entries whose source file is missing are skipped silently -- the
    journal file may not have been created yet."""
    pending = [tuple(entry) for entry in files]
    def dorenames():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return dorenames
1956 1961
def undoname(fn):
    """Map a journal file path to its matching undo file path.

    E.g. ``.hg/journal.dirstate`` becomes ``.hg/undo.dirstate``.
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # swap the 'journal' prefix for 'undo'
    return os.path.join(directory, 'undo' + name[len('journal'):])
1961 1966
def instance(ui, path, create):
    """Repository factory: open (or create) the local repository at the
    given url/path."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
1964 1969
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now