localrepo: don't reference transaction from hook closure (issue5043)...
Gregory Szorc
r27907:e219dbfd default
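The gist of the change, visible in the hunk around new lines 1088-1096 below: txnclosehook previously defined a hook() closure that referenced the tr2 transaction object directly, so every callback queued via _afterlock kept the whole transaction (and everything it referenced) alive until the outermost lock was released. The fix copies tr2.hookargs into a local before defining the closure, so only the small args dict is captured; the existing reporef = weakref.ref(self) already serves the complementary purpose of avoiding a repo/transaction reference cycle. Below is a minimal, self-contained sketch of the pattern; Transaction, afterlockhooks, schedulehook_* and runhook are illustrative stand-ins, not Mercurial APIs.

class Transaction(object):
    """Stand-in for a heavyweight transaction object."""
    def __init__(self, hookargs):
        self.hookargs = hookargs           # small dict of hook arguments
        self.pending = [b'x' * (1 << 20)]  # simulated bulk transaction state

afterlockhooks = []  # callbacks run once the outermost lock is released

def runhook(**kwargs):
    pass  # placeholder for reporef().hook('txnclose', ...)

def schedulehook_leaky(tr):
    # Leaky variant: hook() closes over 'tr', so the entire Transaction
    # stays alive until afterlockhooks is drained (the issue5043 leak).
    def hook():
        runhook(**tr.hookargs)
    afterlockhooks.append(hook)

def schedulehook_fixed(tr):
    # Fixed variant: copy out only what the hook needs; the closure now
    # references the small args dict instead of the transaction itself.
    hookargs = tr.hookargs
    def hook():
        runhook(**hookargs)
    afterlockhooks.append(hook)

# Usage sketch: queue hooks for two transactions, then drain the queue.
for args in ({'txnid': 'TXN:1'}, {'txnid': 'TXN:2'}):
    schedulehook_fixed(Transaction(args))
for hook in afterlockhooks:
    hook()

With multiple transactions per lock, the leaky variant keeps one live Transaction per queued hook; the fixed variant holds only the args dicts, which is the memory reduction the inline comment describes (and which may become unnecessary once issue5045 fixes the function accumulation).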
@@ -1,1958 +1,1964 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import inspect
12 12 import os
13 13 import random
14 14 import time
15 15 import urllib
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 propertycache = util.propertycache
62 62 filecache = scmutil.filecache
63 63
64 64 class repofilecache(filecache):
65 65 """All filecache usage on repo are done for logic that should be unfiltered
66 66 """
67 67
68 68 def __get__(self, repo, type=None):
69 69 return super(repofilecache, self).__get__(repo.unfiltered(), type)
70 70 def __set__(self, repo, value):
71 71 return super(repofilecache, self).__set__(repo.unfiltered(), value)
72 72 def __delete__(self, repo):
73 73 return super(repofilecache, self).__delete__(repo.unfiltered())
74 74
75 75 class storecache(repofilecache):
76 76 """filecache for files in the store"""
77 77 def join(self, obj, fname):
78 78 return obj.sjoin(fname)
79 79
80 80 class unfilteredpropertycache(propertycache):
81 81 """propertycache that apply to unfiltered repo only"""
82 82
83 83 def __get__(self, repo, type=None):
84 84 unfi = repo.unfiltered()
85 85 if unfi is repo:
86 86 return super(unfilteredpropertycache, self).__get__(unfi)
87 87 return getattr(unfi, self.name)
88 88
89 89 class filteredpropertycache(propertycache):
90 90 """propertycache that must take filtering in account"""
91 91
92 92 def cachevalue(self, obj, value):
93 93 object.__setattr__(obj, self.name, value)
94 94
95 95
96 96 def hasunfilteredcache(repo, name):
97 97 """check if a repo has an unfilteredpropertycache value for <name>"""
98 98 return name in vars(repo.unfiltered())
99 99
100 100 def unfilteredmethod(orig):
101 101 """decorate method that always need to be run on unfiltered version"""
102 102 def wrapper(repo, *args, **kwargs):
103 103 return orig(repo.unfiltered(), *args, **kwargs)
104 104 return wrapper
105 105
106 106 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
107 107 'unbundle'))
108 108 legacycaps = moderncaps.union(set(['changegroupsubset']))
109 109
110 110 class localpeer(peer.peerrepository):
111 111 '''peer for a local repo; reflects only the most recent API'''
112 112
113 113 def __init__(self, repo, caps=moderncaps):
114 114 peer.peerrepository.__init__(self)
115 115 self._repo = repo.filtered('served')
116 116 self.ui = repo.ui
117 117 self._caps = repo._restrictcapabilities(caps)
118 118 self.requirements = repo.requirements
119 119 self.supportedformats = repo.supportedformats
120 120
121 121 def close(self):
122 122 self._repo.close()
123 123
124 124 def _capabilities(self):
125 125 return self._caps
126 126
127 127 def local(self):
128 128 return self._repo
129 129
130 130 def canpush(self):
131 131 return True
132 132
133 133 def url(self):
134 134 return self._repo.url()
135 135
136 136 def lookup(self, key):
137 137 return self._repo.lookup(key)
138 138
139 139 def branchmap(self):
140 140 return self._repo.branchmap()
141 141
142 142 def heads(self):
143 143 return self._repo.heads()
144 144
145 145 def known(self, nodes):
146 146 return self._repo.known(nodes)
147 147
148 148 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
149 149 **kwargs):
150 150 cg = exchange.getbundle(self._repo, source, heads=heads,
151 151 common=common, bundlecaps=bundlecaps, **kwargs)
152 152 if bundlecaps is not None and 'HG20' in bundlecaps:
153 153 # When requesting a bundle2, getbundle returns a stream to make the
154 154 # wire-level function happier. We need to build a proper object
155 155 # from it in the local peer.
156 156 cg = bundle2.getunbundler(self.ui, cg)
157 157 return cg
158 158
159 159 # TODO We might want to move the next two calls into legacypeer and add
160 160 # unbundle instead.
161 161
162 162 def unbundle(self, cg, heads, url):
163 163 """apply a bundle on a repo
164 164
165 165 This function handles the repo locking itself."""
166 166 try:
167 167 try:
168 168 cg = exchange.readbundle(self.ui, cg, None)
169 169 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
170 170 if util.safehasattr(ret, 'getchunks'):
171 171 # This is a bundle20 object, turn it into an unbundler.
172 172 # This little dance should be dropped eventually when the
173 173 # API is finally improved.
174 174 stream = util.chunkbuffer(ret.getchunks())
175 175 ret = bundle2.getunbundler(self.ui, stream)
176 176 return ret
177 177 except Exception as exc:
178 178 # If the exception contains output salvaged from a bundle2
179 179 # reply, we need to make sure it is printed before continuing
180 180 # to fail. So we build a bundle2 with such output and consume
181 181 # it directly.
182 182 #
183 183 # This is not very elegant but allows a "simple" solution for
184 184 # issue4594
185 185 output = getattr(exc, '_bundle2salvagedoutput', ())
186 186 if output:
187 187 bundler = bundle2.bundle20(self._repo.ui)
188 188 for out in output:
189 189 bundler.addpart(out)
190 190 stream = util.chunkbuffer(bundler.getchunks())
191 191 b = bundle2.getunbundler(self.ui, stream)
192 192 bundle2.processbundle(self._repo, b)
193 193 raise
194 194 except error.PushRaced as exc:
195 195 raise error.ResponseError(_('push failed:'), str(exc))
196 196
197 197 def lock(self):
198 198 return self._repo.lock()
199 199
200 200 def addchangegroup(self, cg, source, url):
201 201 return cg.apply(self._repo, source, url)
202 202
203 203 def pushkey(self, namespace, key, old, new):
204 204 return self._repo.pushkey(namespace, key, old, new)
205 205
206 206 def listkeys(self, namespace):
207 207 return self._repo.listkeys(namespace)
208 208
209 209 def debugwireargs(self, one, two, three=None, four=None, five=None):
210 210 '''used to test argument passing over the wire'''
211 211 return "%s %s %s %s %s" % (one, two, three, four, five)
212 212
213 213 class locallegacypeer(localpeer):
214 214 '''peer extension which implements legacy methods too; used for tests with
215 215 restricted capabilities'''
216 216
217 217 def __init__(self, repo):
218 218 localpeer.__init__(self, repo, caps=legacycaps)
219 219
220 220 def branches(self, nodes):
221 221 return self._repo.branches(nodes)
222 222
223 223 def between(self, pairs):
224 224 return self._repo.between(pairs)
225 225
226 226 def changegroup(self, basenodes, source):
227 227 return changegroup.changegroup(self._repo, basenodes, source)
228 228
229 229 def changegroupsubset(self, bases, heads, source):
230 230 return changegroup.changegroupsubset(self._repo, bases, heads, source)
231 231
232 232 class localrepository(object):
233 233
234 234 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
235 235 'manifestv2'))
236 236 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
237 237 'dotencode'))
238 238 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
239 239 filtername = None
240 240
241 241 # a list of (ui, featureset) functions.
242 242 # only functions defined in modules of enabled extensions are invoked
243 243 featuresetupfuncs = set()
244 244
245 245 def _baserequirements(self, create):
246 246 return ['revlogv1']
247 247
248 248 def __init__(self, baseui, path=None, create=False):
249 249 self.requirements = set()
250 250 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
251 251 self.wopener = self.wvfs
252 252 self.root = self.wvfs.base
253 253 self.path = self.wvfs.join(".hg")
254 254 self.origroot = path
255 255 self.auditor = pathutil.pathauditor(self.root, self._checknested)
256 256 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
257 257 realfs=False)
258 258 self.vfs = scmutil.vfs(self.path)
259 259 self.opener = self.vfs
260 260 self.baseui = baseui
261 261 self.ui = baseui.copy()
262 262 self.ui.copy = baseui.copy # prevent copying repo configuration
263 263 # A list of callbacks to shape the phase if no data were found.
264 264 # Callbacks are in the form: func(repo, roots) --> processed root.
265 265 # This list is to be filled by extensions during repo setup
266 266 self._phasedefaults = []
267 267 try:
268 268 self.ui.readconfig(self.join("hgrc"), self.root)
269 269 extensions.loadall(self.ui)
270 270 except IOError:
271 271 pass
272 272
273 273 if self.featuresetupfuncs:
274 274 self.supported = set(self._basesupported) # use private copy
275 275 extmods = set(m.__name__ for n, m
276 276 in extensions.extensions(self.ui))
277 277 for setupfunc in self.featuresetupfuncs:
278 278 if setupfunc.__module__ in extmods:
279 279 setupfunc(self.ui, self.supported)
280 280 else:
281 281 self.supported = self._basesupported
282 282
283 283 if not self.vfs.isdir():
284 284 if create:
285 285 if not self.wvfs.exists():
286 286 self.wvfs.makedirs()
287 287 self.vfs.makedir(notindexed=True)
288 288 self.requirements.update(self._baserequirements(create))
289 289 if self.ui.configbool('format', 'usestore', True):
290 290 self.vfs.mkdir("store")
291 291 self.requirements.add("store")
292 292 if self.ui.configbool('format', 'usefncache', True):
293 293 self.requirements.add("fncache")
294 294 if self.ui.configbool('format', 'dotencode', True):
295 295 self.requirements.add('dotencode')
296 296 # create an invalid changelog
297 297 self.vfs.append(
298 298 "00changelog.i",
299 299 '\0\0\0\2' # represents revlogv2
300 300 ' dummy changelog to prevent using the old repo layout'
301 301 )
302 302 if scmutil.gdinitconfig(self.ui):
303 303 self.requirements.add("generaldelta")
304 304 if self.ui.configbool('experimental', 'treemanifest', False):
305 305 self.requirements.add("treemanifest")
306 306 if self.ui.configbool('experimental', 'manifestv2', False):
307 307 self.requirements.add("manifestv2")
308 308 else:
309 309 raise error.RepoError(_("repository %s not found") % path)
310 310 elif create:
311 311 raise error.RepoError(_("repository %s already exists") % path)
312 312 else:
313 313 try:
314 314 self.requirements = scmutil.readrequires(
315 315 self.vfs, self.supported)
316 316 except IOError as inst:
317 317 if inst.errno != errno.ENOENT:
318 318 raise
319 319
320 320 self.sharedpath = self.path
321 321 try:
322 322 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
323 323 realpath=True)
324 324 s = vfs.base
325 325 if not vfs.exists():
326 326 raise error.RepoError(
327 327 _('.hg/sharedpath points to nonexistent directory %s') % s)
328 328 self.sharedpath = s
329 329 except IOError as inst:
330 330 if inst.errno != errno.ENOENT:
331 331 raise
332 332
333 333 self.store = store.store(
334 334 self.requirements, self.sharedpath, scmutil.vfs)
335 335 self.spath = self.store.path
336 336 self.svfs = self.store.vfs
337 337 self.sjoin = self.store.join
338 338 self.vfs.createmode = self.store.createmode
339 339 self._applyopenerreqs()
340 340 if create:
341 341 self._writerequirements()
342 342
343 343 self._dirstatevalidatewarned = False
344 344
345 345 self._branchcaches = {}
346 346 self._revbranchcache = None
347 347 self.filterpats = {}
348 348 self._datafilters = {}
349 349 self._transref = self._lockref = self._wlockref = None
350 350
351 351 # A cache for various files under .hg/ that tracks file changes
352 352 # (used by the filecache decorator)
353 353 #
354 354 # Maps a property name to its util.filecacheentry
355 355 self._filecache = {}
356 356
357 357 # hold sets of revisions to be filtered
358 358 # should be cleared when something might have changed the filter value:
359 359 # - new changesets,
360 360 # - phase change,
361 361 # - new obsolescence marker,
362 362 # - working directory parent change,
363 363 # - bookmark changes
364 364 self.filteredrevcache = {}
365 365
366 366 # generic mapping between names and nodes
367 367 self.names = namespaces.namespaces()
368 368
369 369 def close(self):
370 370 self._writecaches()
371 371
372 372 def _writecaches(self):
373 373 if self._revbranchcache:
374 374 self._revbranchcache.write()
375 375
376 376 def _restrictcapabilities(self, caps):
377 377 if self.ui.configbool('experimental', 'bundle2-advertise', True):
378 378 caps = set(caps)
379 379 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
380 380 caps.add('bundle2=' + urllib.quote(capsblob))
381 381 return caps
382 382
383 383 def _applyopenerreqs(self):
384 384 self.svfs.options = dict((r, 1) for r in self.requirements
385 385 if r in self.openerreqs)
386 386 # experimental config: format.chunkcachesize
387 387 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
388 388 if chunkcachesize is not None:
389 389 self.svfs.options['chunkcachesize'] = chunkcachesize
390 390 # experimental config: format.maxchainlen
391 391 maxchainlen = self.ui.configint('format', 'maxchainlen')
392 392 if maxchainlen is not None:
393 393 self.svfs.options['maxchainlen'] = maxchainlen
394 394 # experimental config: format.manifestcachesize
395 395 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
396 396 if manifestcachesize is not None:
397 397 self.svfs.options['manifestcachesize'] = manifestcachesize
398 398 # experimental config: format.aggressivemergedeltas
399 399 aggressivemergedeltas = self.ui.configbool('format',
400 400 'aggressivemergedeltas', False)
401 401 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
402 402 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
403 403
404 404 def _writerequirements(self):
405 405 scmutil.writerequires(self.vfs, self.requirements)
406 406
407 407 def _checknested(self, path):
408 408 """Determine if path is a legal nested repository."""
409 409 if not path.startswith(self.root):
410 410 return False
411 411 subpath = path[len(self.root) + 1:]
412 412 normsubpath = util.pconvert(subpath)
413 413
414 414 # XXX: Checking against the current working copy is wrong in
415 415 # the sense that it can reject things like
416 416 #
417 417 # $ hg cat -r 10 sub/x.txt
418 418 #
419 419 # if sub/ is no longer a subrepository in the working copy
420 420 # parent revision.
421 421 #
422 422 # However, it can of course also allow things that would have
423 423 # been rejected before, such as the above cat command if sub/
424 424 # is a subrepository now, but was a normal directory before.
425 425 # The old path auditor would have rejected by mistake since it
426 426 # panics when it sees sub/.hg/.
427 427 #
428 428 # All in all, checking against the working copy seems sensible
429 429 # since we want to prevent access to nested repositories on
430 430 # the filesystem *now*.
431 431 ctx = self[None]
432 432 parts = util.splitpath(subpath)
433 433 while parts:
434 434 prefix = '/'.join(parts)
435 435 if prefix in ctx.substate:
436 436 if prefix == normsubpath:
437 437 return True
438 438 else:
439 439 sub = ctx.sub(prefix)
440 440 return sub.checknested(subpath[len(prefix) + 1:])
441 441 else:
442 442 parts.pop()
443 443 return False
444 444
445 445 def peer(self):
446 446 return localpeer(self) # not cached to avoid reference cycle
447 447
448 448 def unfiltered(self):
449 449 """Return unfiltered version of the repository
450 450
451 451 Intended to be overwritten by filtered repo."""
452 452 return self
453 453
454 454 def filtered(self, name):
455 455 """Return a filtered version of a repository"""
456 456 # build a new class with the mixin and the current class
457 457 # (possibly subclass of the repo)
458 458 class proxycls(repoview.repoview, self.unfiltered().__class__):
459 459 pass
460 460 return proxycls(self, name)
461 461
462 462 @repofilecache('bookmarks', 'bookmarks.current')
463 463 def _bookmarks(self):
464 464 return bookmarks.bmstore(self)
465 465
466 466 @property
467 467 def _activebookmark(self):
468 468 return self._bookmarks.active
469 469
470 470 def bookmarkheads(self, bookmark):
471 471 name = bookmark.split('@', 1)[0]
472 472 heads = []
473 473 for mark, n in self._bookmarks.iteritems():
474 474 if mark.split('@', 1)[0] == name:
475 475 heads.append(n)
476 476 return heads
477 477
478 478 # _phaserevs and _phasesets depend on changelog. What we need is to
479 479 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
480 480 # can't be easily expressed in the filecache mechanism.
481 481 @storecache('phaseroots', '00changelog.i')
482 482 def _phasecache(self):
483 483 return phases.phasecache(self, self._phasedefaults)
484 484
485 485 @storecache('obsstore')
486 486 def obsstore(self):
487 487 # read default format for new obsstore.
488 488 # developer config: format.obsstore-version
489 489 defaultformat = self.ui.configint('format', 'obsstore-version', None)
490 490 # rely on obsstore class default when possible.
491 491 kwargs = {}
492 492 if defaultformat is not None:
493 493 kwargs['defaultformat'] = defaultformat
494 494 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
495 495 store = obsolete.obsstore(self.svfs, readonly=readonly,
496 496 **kwargs)
497 497 if store and readonly:
498 498 self.ui.warn(
499 499 _('obsolete feature not enabled but %i markers found!\n')
500 500 % len(list(store)))
501 501 return store
502 502
503 503 @storecache('00changelog.i')
504 504 def changelog(self):
505 505 c = changelog.changelog(self.svfs)
506 506 if 'HG_PENDING' in os.environ:
507 507 p = os.environ['HG_PENDING']
508 508 if p.startswith(self.root):
509 509 c.readpending('00changelog.i.a')
510 510 return c
511 511
512 512 @storecache('00manifest.i')
513 513 def manifest(self):
514 514 return manifest.manifest(self.svfs)
515 515
516 516 def dirlog(self, dir):
517 517 return self.manifest.dirlog(dir)
518 518
519 519 @repofilecache('dirstate')
520 520 def dirstate(self):
521 521 return dirstate.dirstate(self.vfs, self.ui, self.root,
522 522 self._dirstatevalidate)
523 523
524 524 def _dirstatevalidate(self, node):
525 525 try:
526 526 self.changelog.rev(node)
527 527 return node
528 528 except error.LookupError:
529 529 if not self._dirstatevalidatewarned:
530 530 self._dirstatevalidatewarned = True
531 531 self.ui.warn(_("warning: ignoring unknown"
532 532 " working parent %s!\n") % short(node))
533 533 return nullid
534 534
535 535 def __getitem__(self, changeid):
536 536 if changeid is None or changeid == wdirrev:
537 537 return context.workingctx(self)
538 538 if isinstance(changeid, slice):
539 539 return [context.changectx(self, i)
540 540 for i in xrange(*changeid.indices(len(self)))
541 541 if i not in self.changelog.filteredrevs]
542 542 return context.changectx(self, changeid)
543 543
544 544 def __contains__(self, changeid):
545 545 try:
546 546 self[changeid]
547 547 return True
548 548 except error.RepoLookupError:
549 549 return False
550 550
551 551 def __nonzero__(self):
552 552 return True
553 553
554 554 def __len__(self):
555 555 return len(self.changelog)
556 556
557 557 def __iter__(self):
558 558 return iter(self.changelog)
559 559
560 560 def revs(self, expr, *args):
561 561 '''Find revisions matching a revset.
562 562
563 563 The revset is specified as a string ``expr`` that may contain
564 564 %-formatting to escape certain types. See ``revset.formatspec``.
565 565
566 566 Return a revset.abstractsmartset, which is a list-like interface
567 567 that contains integer revisions.
568 568 '''
569 569 expr = revset.formatspec(expr, *args)
570 570 m = revset.match(None, expr)
571 571 return m(self)
572 572
573 573 def set(self, expr, *args):
574 574 '''Find revisions matching a revset and emit changectx instances.
575 575
576 576 This is a convenience wrapper around ``revs()`` that iterates the
577 577 result and is a generator of changectx instances.
578 578 '''
579 579 for r in self.revs(expr, *args):
580 580 yield self[r]
581 581
582 582 def url(self):
583 583 return 'file:' + self.root
584 584
585 585 def hook(self, name, throw=False, **args):
586 586 """Call a hook, passing this repo instance.
587 587
588 588 This is a convenience method to aid invoking hooks. Extensions likely
589 589 won't call this unless they have registered a custom hook or are
590 590 replacing code that is expected to call a hook.
591 591 """
592 592 return hook.hook(self.ui, self, name, throw, **args)
593 593
594 594 @unfilteredmethod
595 595 def _tag(self, names, node, message, local, user, date, extra=None,
596 596 editor=False):
597 597 if isinstance(names, str):
598 598 names = (names,)
599 599
600 600 branches = self.branchmap()
601 601 for name in names:
602 602 self.hook('pretag', throw=True, node=hex(node), tag=name,
603 603 local=local)
604 604 if name in branches:
605 605 self.ui.warn(_("warning: tag %s conflicts with existing"
606 606 " branch name\n") % name)
607 607
608 608 def writetags(fp, names, munge, prevtags):
609 609 fp.seek(0, 2)
610 610 if prevtags and prevtags[-1] != '\n':
611 611 fp.write('\n')
612 612 for name in names:
613 613 if munge:
614 614 m = munge(name)
615 615 else:
616 616 m = name
617 617
618 618 if (self._tagscache.tagtypes and
619 619 name in self._tagscache.tagtypes):
620 620 old = self.tags().get(name, nullid)
621 621 fp.write('%s %s\n' % (hex(old), m))
622 622 fp.write('%s %s\n' % (hex(node), m))
623 623 fp.close()
624 624
625 625 prevtags = ''
626 626 if local:
627 627 try:
628 628 fp = self.vfs('localtags', 'r+')
629 629 except IOError:
630 630 fp = self.vfs('localtags', 'a')
631 631 else:
632 632 prevtags = fp.read()
633 633
634 634 # local tags are stored in the current charset
635 635 writetags(fp, names, None, prevtags)
636 636 for name in names:
637 637 self.hook('tag', node=hex(node), tag=name, local=local)
638 638 return
639 639
640 640 try:
641 641 fp = self.wfile('.hgtags', 'rb+')
642 642 except IOError as e:
643 643 if e.errno != errno.ENOENT:
644 644 raise
645 645 fp = self.wfile('.hgtags', 'ab')
646 646 else:
647 647 prevtags = fp.read()
648 648
649 649 # committed tags are stored in UTF-8
650 650 writetags(fp, names, encoding.fromlocal, prevtags)
651 651
652 652 fp.close()
653 653
654 654 self.invalidatecaches()
655 655
656 656 if '.hgtags' not in self.dirstate:
657 657 self[None].add(['.hgtags'])
658 658
659 659 m = matchmod.exact(self.root, '', ['.hgtags'])
660 660 tagnode = self.commit(message, user, date, extra=extra, match=m,
661 661 editor=editor)
662 662
663 663 for name in names:
664 664 self.hook('tag', node=hex(node), tag=name, local=local)
665 665
666 666 return tagnode
667 667
668 668 def tag(self, names, node, message, local, user, date, editor=False):
669 669 '''tag a revision with one or more symbolic names.
670 670
671 671 names is a list of strings or, when adding a single tag, names may be a
672 672 string.
673 673
674 674 if local is True, the tags are stored in a per-repository file.
675 675 otherwise, they are stored in the .hgtags file, and a new
676 676 changeset is committed with the change.
677 677
678 678 keyword arguments:
679 679
680 680 local: whether to store tags in non-version-controlled file
681 681 (default False)
682 682
683 683 message: commit message to use if committing
684 684
685 685 user: name of user to use if committing
686 686
687 687 date: date tuple to use if committing'''
688 688
689 689 if not local:
690 690 m = matchmod.exact(self.root, '', ['.hgtags'])
691 691 if any(self.status(match=m, unknown=True, ignored=True)):
692 692 raise error.Abort(_('working copy of .hgtags is changed'),
693 693 hint=_('please commit .hgtags manually'))
694 694
695 695 self.tags() # instantiate the cache
696 696 self._tag(names, node, message, local, user, date, editor=editor)
697 697
698 698 @filteredpropertycache
699 699 def _tagscache(self):
700 700 '''Returns a tagscache object that contains various tags related
701 701 caches.'''
702 702
703 703 # This simplifies its cache management by having one decorated
704 704 # function (this one) and the rest simply fetch things from it.
705 705 class tagscache(object):
706 706 def __init__(self):
707 707 # These two define the set of tags for this repository. tags
708 708 # maps tag name to node; tagtypes maps tag name to 'global' or
709 709 # 'local'. (Global tags are defined by .hgtags across all
710 710 # heads, and local tags are defined in .hg/localtags.)
711 711 # They constitute the in-memory cache of tags.
712 712 self.tags = self.tagtypes = None
713 713
714 714 self.nodetagscache = self.tagslist = None
715 715
716 716 cache = tagscache()
717 717 cache.tags, cache.tagtypes = self._findtags()
718 718
719 719 return cache
720 720
721 721 def tags(self):
722 722 '''return a mapping of tag to node'''
723 723 t = {}
724 724 if self.changelog.filteredrevs:
725 725 tags, tt = self._findtags()
726 726 else:
727 727 tags = self._tagscache.tags
728 728 for k, v in tags.iteritems():
729 729 try:
730 730 # ignore tags to unknown nodes
731 731 self.changelog.rev(v)
732 732 t[k] = v
733 733 except (error.LookupError, ValueError):
734 734 pass
735 735 return t
736 736
737 737 def _findtags(self):
738 738 '''Do the hard work of finding tags. Return a pair of dicts
739 739 (tags, tagtypes) where tags maps tag name to node, and tagtypes
740 740 maps tag name to a string like \'global\' or \'local\'.
741 741 Subclasses or extensions are free to add their own tags, but
742 742 should be aware that the returned dicts will be retained for the
743 743 duration of the localrepo object.'''
744 744
745 745 # XXX what tagtype should subclasses/extensions use? Currently
746 746 # mq and bookmarks add tags, but do not set the tagtype at all.
747 747 # Should each extension invent its own tag type? Should there
748 748 # be one tagtype for all such "virtual" tags? Or is the status
749 749 # quo fine?
750 750
751 751 alltags = {} # map tag name to (node, hist)
752 752 tagtypes = {}
753 753
754 754 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
755 755 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
756 756
757 757 # Build the return dicts. Have to re-encode tag names because
758 758 # the tags module always uses UTF-8 (in order not to lose info
759 759 # writing to the cache), but the rest of Mercurial wants them in
760 760 # local encoding.
761 761 tags = {}
762 762 for (name, (node, hist)) in alltags.iteritems():
763 763 if node != nullid:
764 764 tags[encoding.tolocal(name)] = node
765 765 tags['tip'] = self.changelog.tip()
766 766 tagtypes = dict([(encoding.tolocal(name), value)
767 767 for (name, value) in tagtypes.iteritems()])
768 768 return (tags, tagtypes)
769 769
770 770 def tagtype(self, tagname):
771 771 '''
772 772 return the type of the given tag. result can be:
773 773
774 774 'local' : a local tag
775 775 'global' : a global tag
776 776 None : tag does not exist
777 777 '''
778 778
779 779 return self._tagscache.tagtypes.get(tagname)
780 780
781 781 def tagslist(self):
782 782 '''return a list of tags ordered by revision'''
783 783 if not self._tagscache.tagslist:
784 784 l = []
785 785 for t, n in self.tags().iteritems():
786 786 l.append((self.changelog.rev(n), t, n))
787 787 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
788 788
789 789 return self._tagscache.tagslist
790 790
791 791 def nodetags(self, node):
792 792 '''return the tags associated with a node'''
793 793 if not self._tagscache.nodetagscache:
794 794 nodetagscache = {}
795 795 for t, n in self._tagscache.tags.iteritems():
796 796 nodetagscache.setdefault(n, []).append(t)
797 797 for tags in nodetagscache.itervalues():
798 798 tags.sort()
799 799 self._tagscache.nodetagscache = nodetagscache
800 800 return self._tagscache.nodetagscache.get(node, [])
801 801
802 802 def nodebookmarks(self, node):
803 803 """return the list of bookmarks pointing to the specified node"""
804 804 marks = []
805 805 for bookmark, n in self._bookmarks.iteritems():
806 806 if n == node:
807 807 marks.append(bookmark)
808 808 return sorted(marks)
809 809
810 810 def branchmap(self):
811 811 '''returns a dictionary {branch: [branchheads]} with branchheads
812 812 ordered by increasing revision number'''
813 813 branchmap.updatecache(self)
814 814 return self._branchcaches[self.filtername]
815 815
816 816 @unfilteredmethod
817 817 def revbranchcache(self):
818 818 if not self._revbranchcache:
819 819 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
820 820 return self._revbranchcache
821 821
822 822 def branchtip(self, branch, ignoremissing=False):
823 823 '''return the tip node for a given branch
824 824
825 825 If ignoremissing is True, then this method will not raise an error.
826 826 This is helpful for callers that only expect None for a missing branch
827 827 (e.g. namespace).
828 828
829 829 '''
830 830 try:
831 831 return self.branchmap().branchtip(branch)
832 832 except KeyError:
833 833 if not ignoremissing:
834 834 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
835 835 else:
836 836 pass
837 837
838 838 def lookup(self, key):
839 839 return self[key].node()
840 840
841 841 def lookupbranch(self, key, remote=None):
842 842 repo = remote or self
843 843 if key in repo.branchmap():
844 844 return key
845 845
846 846 repo = (remote and remote.local()) and remote or self
847 847 return repo[key].branch()
848 848
849 849 def known(self, nodes):
850 850 cl = self.changelog
851 851 nm = cl.nodemap
852 852 filtered = cl.filteredrevs
853 853 result = []
854 854 for n in nodes:
855 855 r = nm.get(n)
856 856 resp = not (r is None or r in filtered)
857 857 result.append(resp)
858 858 return result
859 859
860 860 def local(self):
861 861 return self
862 862
863 863 def publishing(self):
864 864 # it's safe (and desirable) to trust the publish flag unconditionally
865 865 # so that we don't finalize changes shared between users via ssh or nfs
866 866 return self.ui.configbool('phases', 'publish', True, untrusted=True)
867 867
868 868 def cancopy(self):
869 869 # so statichttprepo's override of local() works
870 870 if not self.local():
871 871 return False
872 872 if not self.publishing():
873 873 return True
874 874 # if publishing we can't copy if there is filtered content
875 875 return not self.filtered('visible').changelog.filteredrevs
876 876
877 877 def shared(self):
878 878 '''the type of shared repository (None if not shared)'''
879 879 if self.sharedpath != self.path:
880 880 return 'store'
881 881 return None
882 882
883 883 def join(self, f, *insidef):
884 884 return self.vfs.join(os.path.join(f, *insidef))
885 885
886 886 def wjoin(self, f, *insidef):
887 887 return self.vfs.reljoin(self.root, f, *insidef)
888 888
889 889 def file(self, f):
890 890 if f[0] == '/':
891 891 f = f[1:]
892 892 return filelog.filelog(self.svfs, f)
893 893
894 894 def parents(self, changeid=None):
895 895 '''get list of changectxs for parents of changeid'''
896 896 msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
897 897 self.ui.deprecwarn(msg, '3.7')
898 898 return self[changeid].parents()
899 899
900 900 def changectx(self, changeid):
901 901 return self[changeid]
902 902
903 903 def setparents(self, p1, p2=nullid):
904 904 self.dirstate.beginparentchange()
905 905 copies = self.dirstate.setparents(p1, p2)
906 906 pctx = self[p1]
907 907 if copies:
908 908 # Adjust copy records: the dirstate cannot do it, as it
909 909 # requires access to the parents' manifests. Preserve them
910 910 # only for entries added to the first parent.
911 911 for f in copies:
912 912 if f not in pctx and copies[f] in pctx:
913 913 self.dirstate.copy(copies[f], f)
914 914 if p2 == nullid:
915 915 for f, s in sorted(self.dirstate.copies().items()):
916 916 if f not in pctx and s not in pctx:
917 917 self.dirstate.copy(None, f)
918 918 self.dirstate.endparentchange()
919 919
920 920 def filectx(self, path, changeid=None, fileid=None):
921 921 """changeid can be a changeset revision, node, or tag.
922 922 fileid can be a file revision or node."""
923 923 return context.filectx(self, path, changeid, fileid)
924 924
925 925 def getcwd(self):
926 926 return self.dirstate.getcwd()
927 927
928 928 def pathto(self, f, cwd=None):
929 929 return self.dirstate.pathto(f, cwd)
930 930
931 931 def wfile(self, f, mode='r'):
932 932 return self.wvfs(f, mode)
933 933
934 934 def _link(self, f):
935 935 return self.wvfs.islink(f)
936 936
937 937 def _loadfilter(self, filter):
938 938 if filter not in self.filterpats:
939 939 l = []
940 940 for pat, cmd in self.ui.configitems(filter):
941 941 if cmd == '!':
942 942 continue
943 943 mf = matchmod.match(self.root, '', [pat])
944 944 fn = None
945 945 params = cmd
946 946 for name, filterfn in self._datafilters.iteritems():
947 947 if cmd.startswith(name):
948 948 fn = filterfn
949 949 params = cmd[len(name):].lstrip()
950 950 break
951 951 if not fn:
952 952 fn = lambda s, c, **kwargs: util.filter(s, c)
953 953 # Wrap old filters not supporting keyword arguments
954 954 if not inspect.getargspec(fn)[2]:
955 955 oldfn = fn
956 956 fn = lambda s, c, **kwargs: oldfn(s, c)
957 957 l.append((mf, fn, params))
958 958 self.filterpats[filter] = l
959 959 return self.filterpats[filter]
960 960
961 961 def _filter(self, filterpats, filename, data):
962 962 for mf, fn, cmd in filterpats:
963 963 if mf(filename):
964 964 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
965 965 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
966 966 break
967 967
968 968 return data
969 969
970 970 @unfilteredpropertycache
971 971 def _encodefilterpats(self):
972 972 return self._loadfilter('encode')
973 973
974 974 @unfilteredpropertycache
975 975 def _decodefilterpats(self):
976 976 return self._loadfilter('decode')
977 977
978 978 def adddatafilter(self, name, filter):
979 979 self._datafilters[name] = filter
980 980
981 981 def wread(self, filename):
982 982 if self._link(filename):
983 983 data = self.wvfs.readlink(filename)
984 984 else:
985 985 data = self.wvfs.read(filename)
986 986 return self._filter(self._encodefilterpats, filename, data)
987 987
988 988 def wwrite(self, filename, data, flags):
989 989 """write ``data`` into ``filename`` in the working directory
990 990
991 991 This returns the length of the written (maybe decoded) data.
992 992 """
993 993 data = self._filter(self._decodefilterpats, filename, data)
994 994 if 'l' in flags:
995 995 self.wvfs.symlink(data, filename)
996 996 else:
997 997 self.wvfs.write(filename, data)
998 998 if 'x' in flags:
999 999 self.wvfs.setflags(filename, False, True)
1000 1000 return len(data)
1001 1001
1002 1002 def wwritedata(self, filename, data):
1003 1003 return self._filter(self._decodefilterpats, filename, data)
1004 1004
1005 1005 def currenttransaction(self):
1006 1006 """return the current transaction or None if non exists"""
1007 1007 if self._transref:
1008 1008 tr = self._transref()
1009 1009 else:
1010 1010 tr = None
1011 1011
1012 1012 if tr and tr.running():
1013 1013 return tr
1014 1014 return None
1015 1015
1016 1016 def transaction(self, desc, report=None):
1017 1017 if (self.ui.configbool('devel', 'all-warnings')
1018 1018 or self.ui.configbool('devel', 'check-locks')):
1019 1019 l = self._lockref and self._lockref()
1020 1020 if l is None or not l.held:
1021 1021 self.ui.develwarn('transaction with no lock')
1022 1022 tr = self.currenttransaction()
1023 1023 if tr is not None:
1024 1024 return tr.nest()
1025 1025
1026 1026 # abort here if the journal already exists
1027 1027 if self.svfs.exists("journal"):
1028 1028 raise error.RepoError(
1029 1029 _("abandoned transaction found"),
1030 1030 hint=_("run 'hg recover' to clean up transaction"))
1031 1031
1032 1032 # make journal.dirstate contain in-memory changes at this point
1033 1033 self.dirstate.write(None)
1034 1034
1035 1035 idbase = "%.40f#%f" % (random.random(), time.time())
1036 1036 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1037 1037 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1038 1038
1039 1039 self._writejournal(desc)
1040 1040 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1041 1041 if report:
1042 1042 rp = report
1043 1043 else:
1044 1044 rp = self.ui.warn
1045 1045 vfsmap = {'plain': self.vfs} # root of .hg/
1046 1046 # we must avoid a cyclic reference between repo and transaction.
1047 1047 reporef = weakref.ref(self)
1048 1048 def validate(tr):
1049 1049 """will run pre-closing hooks"""
1050 1050 reporef().hook('pretxnclose', throw=True,
1051 1051 txnname=desc, **tr.hookargs)
1052 1052 def releasefn(tr, success):
1053 1053 repo = reporef()
1054 1054 if success:
1055 1055 # This should be invoked explicitly here, because
1056 1056 # in-memory changes aren't written out when closing the
1057 1057 # transaction if tr.addfilegenerator (via
1058 1058 # dirstate.write or so) wasn't invoked while the
1059 1059 # transaction was running
1060 1060 repo.dirstate.write(None)
1061 1061 else:
1062 1062 # prevent in-memory changes from being written out at
1063 1063 # the end of outer wlock scope or so
1064 1064 repo.dirstate.invalidate()
1065 1065
1066 1066 # discard all changes (including ones already written
1067 1067 # out) in this transaction
1068 1068 repo.vfs.rename('journal.dirstate', 'dirstate')
1069 1069
1070 1070 repo.invalidate(clearfilecache=True)
1071 1071
1072 1072 tr = transaction.transaction(rp, self.svfs, vfsmap,
1073 1073 "journal",
1074 1074 "undo",
1075 1075 aftertrans(renames),
1076 1076 self.store.createmode,
1077 1077 validator=validate,
1078 1078 releasefn=releasefn)
1079 1079
1080 1080 tr.hookargs['txnid'] = txnid
1081 1081 # note: writing the fncache only during finalize means that the file is
1082 1082 # outdated when running hooks. As fncache is used for streaming clone,
1083 1083 # this is not expected to break anything that happens during the hooks.
1084 1084 tr.addfinalize('flush-fncache', self.store.write)
1085 1085 def txnclosehook(tr2):
1086 1086 """To be run if transaction is successful, will schedule a hook run
1087 1087 """
1088 # Don't reference tr2 in hook() so we don't hold a reference.
1089 # This reduces memory consumption when there are multiple
1090 # transactions per lock. This can likely go away if issue5045
1091 # fixes the function accumulation.
1092 hookargs = tr2.hookargs
1093
1088 1094 def hook():
1089 1095 reporef().hook('txnclose', throw=False, txnname=desc,
1090 **tr2.hookargs)
1096 **hookargs)
1091 1097 reporef()._afterlock(hook)
1092 1098 tr.addfinalize('txnclose-hook', txnclosehook)
1093 1099 def txnaborthook(tr2):
1094 1100 """To be run if transaction is aborted
1095 1101 """
1096 1102 reporef().hook('txnabort', throw=False, txnname=desc,
1097 1103 **tr2.hookargs)
1098 1104 tr.addabort('txnabort-hook', txnaborthook)
1099 1105 # avoid eager cache invalidation. In-memory data should be identical
1100 1106 # to stored data if the transaction has no error.
1101 1107 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1102 1108 self._transref = weakref.ref(tr)
1103 1109 return tr
1104 1110
1105 1111 def _journalfiles(self):
1106 1112 return ((self.svfs, 'journal'),
1107 1113 (self.vfs, 'journal.dirstate'),
1108 1114 (self.vfs, 'journal.branch'),
1109 1115 (self.vfs, 'journal.desc'),
1110 1116 (self.vfs, 'journal.bookmarks'),
1111 1117 (self.svfs, 'journal.phaseroots'))
1112 1118
1113 1119 def undofiles(self):
1114 1120 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1115 1121
1116 1122 def _writejournal(self, desc):
1117 1123 self.vfs.write("journal.dirstate",
1118 1124 self.vfs.tryread("dirstate"))
1119 1125 self.vfs.write("journal.branch",
1120 1126 encoding.fromlocal(self.dirstate.branch()))
1121 1127 self.vfs.write("journal.desc",
1122 1128 "%d\n%s\n" % (len(self), desc))
1123 1129 self.vfs.write("journal.bookmarks",
1124 1130 self.vfs.tryread("bookmarks"))
1125 1131 self.svfs.write("journal.phaseroots",
1126 1132 self.svfs.tryread("phaseroots"))
1127 1133
1128 1134 def recover(self):
1129 1135 with self.lock():
1130 1136 if self.svfs.exists("journal"):
1131 1137 self.ui.status(_("rolling back interrupted transaction\n"))
1132 1138 vfsmap = {'': self.svfs,
1133 1139 'plain': self.vfs,}
1134 1140 transaction.rollback(self.svfs, vfsmap, "journal",
1135 1141 self.ui.warn)
1136 1142 self.invalidate()
1137 1143 return True
1138 1144 else:
1139 1145 self.ui.warn(_("no interrupted transaction available\n"))
1140 1146 return False
1141 1147
1142 1148 def rollback(self, dryrun=False, force=False):
1143 1149 wlock = lock = dsguard = None
1144 1150 try:
1145 1151 wlock = self.wlock()
1146 1152 lock = self.lock()
1147 1153 if self.svfs.exists("undo"):
1148 1154 dsguard = cmdutil.dirstateguard(self, 'rollback')
1149 1155
1150 1156 return self._rollback(dryrun, force, dsguard)
1151 1157 else:
1152 1158 self.ui.warn(_("no rollback information available\n"))
1153 1159 return 1
1154 1160 finally:
1155 1161 release(dsguard, lock, wlock)
1156 1162
1157 1163 @unfilteredmethod # Until we get smarter cache management
1158 1164 def _rollback(self, dryrun, force, dsguard):
1159 1165 ui = self.ui
1160 1166 try:
1161 1167 args = self.vfs.read('undo.desc').splitlines()
1162 1168 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1163 1169 if len(args) >= 3:
1164 1170 detail = args[2]
1165 1171 oldtip = oldlen - 1
1166 1172
1167 1173 if detail and ui.verbose:
1168 1174 msg = (_('repository tip rolled back to revision %s'
1169 1175 ' (undo %s: %s)\n')
1170 1176 % (oldtip, desc, detail))
1171 1177 else:
1172 1178 msg = (_('repository tip rolled back to revision %s'
1173 1179 ' (undo %s)\n')
1174 1180 % (oldtip, desc))
1175 1181 except IOError:
1176 1182 msg = _('rolling back unknown transaction\n')
1177 1183 desc = None
1178 1184
1179 1185 if not force and self['.'] != self['tip'] and desc == 'commit':
1180 1186 raise error.Abort(
1181 1187 _('rollback of last commit while not checked out '
1182 1188 'may lose data'), hint=_('use -f to force'))
1183 1189
1184 1190 ui.status(msg)
1185 1191 if dryrun:
1186 1192 return 0
1187 1193
1188 1194 parents = self.dirstate.parents()
1189 1195 self.destroying()
1190 1196 vfsmap = {'plain': self.vfs, '': self.svfs}
1191 1197 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1192 1198 if self.vfs.exists('undo.bookmarks'):
1193 1199 self.vfs.rename('undo.bookmarks', 'bookmarks')
1194 1200 if self.svfs.exists('undo.phaseroots'):
1195 1201 self.svfs.rename('undo.phaseroots', 'phaseroots')
1196 1202 self.invalidate()
1197 1203
1198 1204 parentgone = (parents[0] not in self.changelog.nodemap or
1199 1205 parents[1] not in self.changelog.nodemap)
1200 1206 if parentgone:
1201 1207 # prevent dirstateguard from overwriting the already restored one
1202 1208 dsguard.close()
1203 1209
1204 1210 self.vfs.rename('undo.dirstate', 'dirstate')
1205 1211 try:
1206 1212 branch = self.vfs.read('undo.branch')
1207 1213 self.dirstate.setbranch(encoding.tolocal(branch))
1208 1214 except IOError:
1209 1215 ui.warn(_('named branch could not be reset: '
1210 1216 'current branch is still \'%s\'\n')
1211 1217 % self.dirstate.branch())
1212 1218
1213 1219 self.dirstate.invalidate()
1214 1220 parents = tuple([p.rev() for p in self[None].parents()])
1215 1221 if len(parents) > 1:
1216 1222 ui.status(_('working directory now based on '
1217 1223 'revisions %d and %d\n') % parents)
1218 1224 else:
1219 1225 ui.status(_('working directory now based on '
1220 1226 'revision %d\n') % parents)
1221 1227 mergemod.mergestate.clean(self, self['.'].node())
1222 1228
1223 1229 # TODO: if we know which new heads may result from this rollback, pass
1224 1230 # them to destroy(), which will prevent the branchhead cache from being
1225 1231 # invalidated.
1226 1232 self.destroyed()
1227 1233 return 0
1228 1234
1229 1235 def invalidatecaches(self):
1230 1236
1231 1237 if '_tagscache' in vars(self):
1232 1238 # can't use delattr on proxy
1233 1239 del self.__dict__['_tagscache']
1234 1240
1235 1241 self.unfiltered()._branchcaches.clear()
1236 1242 self.invalidatevolatilesets()
1237 1243
1238 1244 def invalidatevolatilesets(self):
1239 1245 self.filteredrevcache.clear()
1240 1246 obsolete.clearobscaches(self)
1241 1247
1242 1248 def invalidatedirstate(self):
1243 1249 '''Invalidates the dirstate, causing the next call to dirstate
1244 1250 to check if it was modified since the last time it was read,
1245 1251 rereading it if it has.
1246 1252
1247 1253 This is different from dirstate.invalidate() in that it doesn't always
1248 1254 reread the dirstate. Use dirstate.invalidate() if you want to
1249 1255 explicitly read the dirstate again (i.e. restoring it to a previous
1250 1256 known good state).'''
1251 1257 if hasunfilteredcache(self, 'dirstate'):
1252 1258 for k in self.dirstate._filecache:
1253 1259 try:
1254 1260 delattr(self.dirstate, k)
1255 1261 except AttributeError:
1256 1262 pass
1257 1263 delattr(self.unfiltered(), 'dirstate')
1258 1264
1259 1265 def invalidate(self, clearfilecache=False):
1260 1266 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1261 1267 for k in self._filecache.keys():
1262 1268 # dirstate is invalidated separately in invalidatedirstate()
1263 1269 if k == 'dirstate':
1264 1270 continue
1265 1271
1266 1272 if clearfilecache:
1267 1273 del self._filecache[k]
1268 1274 try:
1269 1275 delattr(unfiltered, k)
1270 1276 except AttributeError:
1271 1277 pass
1272 1278 self.invalidatecaches()
1273 1279 self.store.invalidatecaches()
1274 1280
1275 1281 def invalidateall(self):
1276 1282 '''Fully invalidates both store and non-store parts, causing the
1277 1283 subsequent operation to reread any outside changes.'''
1278 1284 # extensions should hook this to invalidate their caches
1279 1285 self.invalidate()
1280 1286 self.invalidatedirstate()
1281 1287
1282 1288 def _refreshfilecachestats(self, tr):
1283 1289 """Reload stats of cached files so that they are flagged as valid"""
1284 1290 for k, ce in self._filecache.items():
1285 1291 if k == 'dirstate' or k not in self.__dict__:
1286 1292 continue
1287 1293 ce.refresh()
1288 1294
1289 1295 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1290 1296 inheritchecker=None, parentenvvar=None):
1291 1297 parentlock = None
1292 1298 # the contents of parentenvvar are used by the underlying lock to
1293 1299 # determine whether it can be inherited
1294 1300 if parentenvvar is not None:
1295 1301 parentlock = os.environ.get(parentenvvar)
1296 1302 try:
1297 1303 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1298 1304 acquirefn=acquirefn, desc=desc,
1299 1305 inheritchecker=inheritchecker,
1300 1306 parentlock=parentlock)
1301 1307 except error.LockHeld as inst:
1302 1308 if not wait:
1303 1309 raise
1304 1310 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1305 1311 (desc, inst.locker))
1306 1312 # default to 600 seconds timeout
1307 1313 l = lockmod.lock(vfs, lockname,
1308 1314 int(self.ui.config("ui", "timeout", "600")),
1309 1315 releasefn=releasefn, acquirefn=acquirefn,
1310 1316 desc=desc)
1311 1317 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1312 1318 return l
1313 1319
1314 1320 def _afterlock(self, callback):
1315 1321 """add a callback to be run when the repository is fully unlocked
1316 1322
1317 1323 The callback will be executed when the outermost lock is released
1318 1324 (with wlock being higher level than 'lock')."""
1319 1325 for ref in (self._wlockref, self._lockref):
1320 1326 l = ref and ref()
1321 1327 if l and l.held:
1322 1328 l.postrelease.append(callback)
1323 1329 break
1324 1330 else: # no lock has been found.
1325 1331 callback()
1326 1332
1327 1333 def lock(self, wait=True):
1328 1334 '''Lock the repository store (.hg/store) and return a weak reference
1329 1335 to the lock. Use this before modifying the store (e.g. committing or
1330 1336 stripping). If you are opening a transaction, get a lock as well.
1331 1337 
1332 1338 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1333 1339 'wlock' first to avoid a deadlock hazard.'''
1334 1340 l = self._lockref and self._lockref()
1335 1341 if l is not None and l.held:
1336 1342 l.lock()
1337 1343 return l
1338 1344
1339 1345 l = self._lock(self.svfs, "lock", wait, None,
1340 1346 self.invalidate, _('repository %s') % self.origroot)
1341 1347 self._lockref = weakref.ref(l)
1342 1348 return l
1343 1349
1344 1350 def _wlockchecktransaction(self):
1345 1351 if self.currenttransaction() is not None:
1346 1352 raise error.LockInheritanceContractViolation(
1347 1353 'wlock cannot be inherited in the middle of a transaction')
1348 1354
1349 1355 def wlock(self, wait=True):
1350 1356 '''Lock the non-store parts of the repository (everything under
1351 1357 .hg except .hg/store) and return a weak reference to the lock.
1352 1358
1353 1359 Use this before modifying files in .hg.
1354 1360
1355 1361 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1356 1362 'wlock' first to avoid a deadlock hazard.'''
1357 1363 l = self._wlockref and self._wlockref()
1358 1364 if l is not None and l.held:
1359 1365 l.lock()
1360 1366 return l
1361 1367
1362 1368 # We do not need to check for non-waiting lock acquisition. Such
1363 1369 # an acquisition would not cause a deadlock as it would just fail.
1364 1370 if wait and (self.ui.configbool('devel', 'all-warnings')
1365 1371 or self.ui.configbool('devel', 'check-locks')):
1366 1372 l = self._lockref and self._lockref()
1367 1373 if l is not None and l.held:
1368 1374 self.ui.develwarn('"wlock" acquired after "lock"')
1369 1375
1370 1376 def unlock():
1371 1377 if self.dirstate.pendingparentchange():
1372 1378 self.dirstate.invalidate()
1373 1379 else:
1374 1380 self.dirstate.write(None)
1375 1381
1376 1382 self._filecache['dirstate'].refresh()
1377 1383
1378 1384 l = self._lock(self.vfs, "wlock", wait, unlock,
1379 1385 self.invalidatedirstate, _('working directory of %s') %
1380 1386 self.origroot,
1381 1387 inheritchecker=self._wlockchecktransaction,
1382 1388 parentenvvar='HG_WLOCK_LOCKER')
1383 1389 self._wlockref = weakref.ref(l)
1384 1390 return l
1385 1391
1386 1392 def _currentlock(self, lockref):
1387 1393 """Returns the lock if it's held, or None if it's not."""
1388 1394 if lockref is None:
1389 1395 return None
1390 1396 l = lockref()
1391 1397 if l is None or not l.held:
1392 1398 return None
1393 1399 return l
1394 1400
1395 1401 def currentwlock(self):
1396 1402 """Returns the wlock if it's held, or None if it's not."""
1397 1403 return self._currentlock(self._wlockref)
1398 1404
1399 1405 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1400 1406 """
1401 1407 commit an individual file as part of a larger transaction
1402 1408 """
1403 1409
1404 1410 fname = fctx.path()
1405 1411 fparent1 = manifest1.get(fname, nullid)
1406 1412 fparent2 = manifest2.get(fname, nullid)
1407 1413 if isinstance(fctx, context.filectx):
1408 1414 node = fctx.filenode()
1409 1415 if node in [fparent1, fparent2]:
1410 1416 self.ui.debug('reusing %s filelog entry\n' % fname)
1411 1417 return node
1412 1418
1413 1419 flog = self.file(fname)
1414 1420 meta = {}
1415 1421 copy = fctx.renamed()
1416 1422 if copy and copy[0] != fname:
1417 1423 # Mark the new revision of this file as a copy of another
1418 1424 # file. This copy data will effectively act as a parent
1419 1425 # of this new revision. If this is a merge, the first
1420 1426 # parent will be the nullid (meaning "look up the copy data")
1421 1427 # and the second one will be the other parent. For example:
1422 1428 #
1423 1429 # 0 --- 1 --- 3 rev1 changes file foo
1424 1430 # \ / rev2 renames foo to bar and changes it
1425 1431 # \- 2 -/ rev3 should have bar with all changes and
1426 1432 # should record that bar descends from
1427 1433 # bar in rev2 and foo in rev1
1428 1434 #
1429 1435 # this allows this merge to succeed:
1430 1436 #
1431 1437 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1432 1438 # \ / merging rev3 and rev4 should use bar@rev2
1433 1439 # \- 2 --- 4 as the merge base
1434 1440 #
1435 1441
1436 1442 cfname = copy[0]
1437 1443 crev = manifest1.get(cfname)
1438 1444 newfparent = fparent2
1439 1445
1440 1446 if manifest2: # branch merge
1441 1447 if fparent2 == nullid or crev is None: # copied on remote side
1442 1448 if cfname in manifest2:
1443 1449 crev = manifest2[cfname]
1444 1450 newfparent = fparent1
1445 1451
1446 1452 # Here, we used to search backwards through history to try to find
1447 1453 # where the file copy came from if the source of a copy was not in
1448 1454 # the parent directory. However, this doesn't actually make sense to
1449 1455 # do (what does a copy from something not in your working copy even
1450 1456 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1451 1457 # the user that copy information was dropped, so if they didn't
1452 1458 # expect this outcome it can be fixed, but this is the correct
1453 1459 # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook fires after the lock is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

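    # Editorial sketch (not part of the original source): how a caller might
    # drive commit() above. `repo` is assumed to be any localrepository
    # instance, e.g. one obtained via mercurial.hg.repository(ui, path):
    #
    #   node = repo.commit(text='fix frobnicator', user='alice')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
    #
    # commit() returns the new changeset node, or None when the commit would
    # be empty and ui.allowemptycommit is not set.
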
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or (errcode and errcode != errno.ENOENT):
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit to its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

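    # Editorial sketch (not part of the original source): commitctx() is the
    # lower-level entry point that commit() itself uses; callers can also
    # feed it an in-memory context. A hedged example using context.memctx
    # (`repo` and `p1node` are assumptions, not defined here):
    #
    #   from mercurial import context
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #   ctx = context.memctx(repo, (p1node, None), 'example message',
    #                        ['a.txt'], getfilectx, user='alice')
    #   node = repo.commitctx(ctx)
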
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

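    # Editorial sketch (not part of the original source): typical
    # branchheads() calls, assuming `repo` is a localrepository:
    #
    #   heads = repo.branchheads('default')            # open heads only
    #   allheads = repo.branchheads('stable', closed=True)
    #
    # An unknown branch name simply yields an empty list.
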
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

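    # Editorial note (not part of the original source): between() samples
    # first-parent ancestors at exponentially growing distances. For a
    # linear chain node10 <- node9 <- ... <- node0, a call like
    #
    #   repo.between([(node10, node0)])
    #
    # returns [[node9, node8, node6, node2]], i.e. the ancestors 1, 2, 4
    # and 8 first-parent steps below the top; the legacy discovery protocol
    # uses this spacing to bisect long ranges cheaply.
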
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

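    # Editorial sketch (not part of the original source): pushkey namespaces
    # include 'bookmarks' and 'phases'. Creating a bookmark through the
    # pushkey interface might look like this (`repo` and `newnode` are
    # assumptions):
    #
    #   ok = repo.pushkey('bookmarks', 'feature-bm', '', hex(newnode))
    #
    # The old value '' means the key does not exist yet; a false-ish return
    # signals that the prepushkey hook aborted or the update did not apply.
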
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

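    # Editorial note (not part of the original source): savecommitmessage()
    # writes the text to .hg/last-message.txt and returns its repo-relative
    # path, e.g.:
    #
    #   relpath = repo.savecommitmessage('WIP: draft message\n')
    #   repo.ui.status('saved to %s\n' % relpath)
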
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

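# Editorial sketch (not part of the original source): the transaction code
# passes aftertrans() as a post-close callback so journal files are renamed
# to their undo counterparts without the closure keeping the repository
# alive. Assuming `vfs` is the repository's .hg vfs:
#
#   onclose = aftertrans([(vfs, 'journal.dirstate', 'undo.dirstate')])
#   onclose()   # renames journal.dirstate to undo.dirstate, if present
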
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

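# Editorial note (not part of the original source): for example,
# undoname('.hg/journal.dirstate') returns '.hg/undo.dirstate'; the
# assertion guards against being handed a non-journal path.
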
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True