commit: make commit acquire store lock before processing for consistency...
FUJIWARA Katsunori
r27291:a18328aa default
@@ -1,1920 +1,1921 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, wdirrev, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset, cmdutil
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on a repo is done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
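# Illustration (added in review, not part of the original file): a minimal
# sketch of how these caching decorators are used on the repo class below.
# The decorated function runs once, and the cached value is refreshed when
# the backing file under .hg/ (or .hg/store/ for storecache) changes:
#
#     @repofilecache('bookmarks')    # tracks .hg/bookmarks
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
#     @storecache('00changelog.i')   # tracks .hg/store/00changelog.i
#     def changelog(self):
#         return changelog.changelog(self.svfs)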
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that applies to the unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering into account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate a method that always needs to be run on the unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
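# Illustration (added in review, not part of the original file): a sketch of
# unfilteredmethod; the wrapped method always runs against the unfiltered
# repo, as commit() does below ('mymethod' is a hypothetical name):
#
#     @unfilteredmethod
#     def mymethod(self, *args):
#         ...    # self here is repo.unfiltered()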
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.getunbundler(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 try:
129 129 cg = exchange.readbundle(self.ui, cg, None)
130 130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 131 if util.safehasattr(ret, 'getchunks'):
132 132 # This is a bundle20 object, turn it into an unbundler.
133 133 # This little dance should be dropped eventually when the
134 134 # API is finally improved.
135 135 stream = util.chunkbuffer(ret.getchunks())
136 136 ret = bundle2.getunbundler(self.ui, stream)
137 137 return ret
138 138 except Exception as exc:
139 139 # If the exception contains output salvaged from a bundle2
140 140 # reply, we need to make sure it is printed before continuing
141 141 # to fail. So we build a bundle2 with such output and consume
142 142 # it directly.
143 143 #
144 144 # This is not very elegant but allows a "simple" solution for
145 145 # issue4594
146 146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 147 if output:
148 148 bundler = bundle2.bundle20(self._repo.ui)
149 149 for out in output:
150 150 bundler.addpart(out)
151 151 stream = util.chunkbuffer(bundler.getchunks())
152 152 b = bundle2.getunbundler(self.ui, stream)
153 153 bundle2.processbundle(self._repo, b)
154 154 raise
155 155 except error.PushRaced as exc:
156 156 raise error.ResponseError(_('push failed:'), str(exc))
157 157
158 158 def lock(self):
159 159 return self._repo.lock()
160 160
161 161 def addchangegroup(self, cg, source, url):
162 162 return cg.apply(self._repo, source, url)
163 163
164 164 def pushkey(self, namespace, key, old, new):
165 165 return self._repo.pushkey(namespace, key, old, new)
166 166
167 167 def listkeys(self, namespace):
168 168 return self._repo.listkeys(namespace)
169 169
170 170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 171 '''used to test argument passing over the wire'''
172 172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
174 174 class locallegacypeer(localpeer):
175 175 '''peer extension which implements legacy methods too; used for tests with
176 176 restricted capabilities'''
177 177
178 178 def __init__(self, repo):
179 179 localpeer.__init__(self, repo, caps=legacycaps)
180 180
181 181 def branches(self, nodes):
182 182 return self._repo.branches(nodes)
183 183
184 184 def between(self, pairs):
185 185 return self._repo.between(pairs)
186 186
187 187 def changegroup(self, basenodes, source):
188 188 return changegroup.changegroup(self._repo, basenodes, source)
189 189
190 190 def changegroupsubset(self, bases, heads, source):
191 191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 196 'manifestv2'))
197 197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 198 'dotencode'))
199 199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 200 filtername = None
201 201
202 202 # a list of (ui, featureset) functions.
203 203 # only functions defined in modules of enabled extensions are invoked
204 204 featuresetupfuncs = set()
205 205
206 206 def _baserequirements(self, create):
207 207 return ['revlogv1']
208 208
209 209 def __init__(self, baseui, path=None, create=False):
210 210 self.requirements = set()
211 211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 212 self.wopener = self.wvfs
213 213 self.root = self.wvfs.base
214 214 self.path = self.wvfs.join(".hg")
215 215 self.origroot = path
216 216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 217 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
218 218 realfs=False)
219 219 self.vfs = scmutil.vfs(self.path)
220 220 self.opener = self.vfs
221 221 self.baseui = baseui
222 222 self.ui = baseui.copy()
223 223 self.ui.copy = baseui.copy # prevent copying repo configuration
224 224 # A list of callbacks to shape the phase if no data were found.
225 225 # Callbacks are in the form: func(repo, roots) --> processed root.
226 226 # This list is to be filled by extensions during repo setup
227 227 self._phasedefaults = []
228 228 try:
229 229 self.ui.readconfig(self.join("hgrc"), self.root)
230 230 extensions.loadall(self.ui)
231 231 except IOError:
232 232 pass
233 233
234 234 if self.featuresetupfuncs:
235 235 self.supported = set(self._basesupported) # use private copy
236 236 extmods = set(m.__name__ for n, m
237 237 in extensions.extensions(self.ui))
238 238 for setupfunc in self.featuresetupfuncs:
239 239 if setupfunc.__module__ in extmods:
240 240 setupfunc(self.ui, self.supported)
241 241 else:
242 242 self.supported = self._basesupported
243 243
244 244 if not self.vfs.isdir():
245 245 if create:
246 246 if not self.wvfs.exists():
247 247 self.wvfs.makedirs()
248 248 self.vfs.makedir(notindexed=True)
249 249 self.requirements.update(self._baserequirements(create))
250 250 if self.ui.configbool('format', 'usestore', True):
251 251 self.vfs.mkdir("store")
252 252 self.requirements.add("store")
253 253 if self.ui.configbool('format', 'usefncache', True):
254 254 self.requirements.add("fncache")
255 255 if self.ui.configbool('format', 'dotencode', True):
256 256 self.requirements.add('dotencode')
257 257 # create an invalid changelog
258 258 self.vfs.append(
259 259 "00changelog.i",
260 260 '\0\0\0\2' # represents revlogv2
261 261 ' dummy changelog to prevent using the old repo layout'
262 262 )
263 263 if scmutil.gdinitconfig(self.ui):
264 264 self.requirements.add("generaldelta")
265 265 if self.ui.configbool('experimental', 'treemanifest', False):
266 266 self.requirements.add("treemanifest")
267 267 if self.ui.configbool('experimental', 'manifestv2', False):
268 268 self.requirements.add("manifestv2")
269 269 else:
270 270 raise error.RepoError(_("repository %s not found") % path)
271 271 elif create:
272 272 raise error.RepoError(_("repository %s already exists") % path)
273 273 else:
274 274 try:
275 275 self.requirements = scmutil.readrequires(
276 276 self.vfs, self.supported)
277 277 except IOError as inst:
278 278 if inst.errno != errno.ENOENT:
279 279 raise
280 280
281 281 self.sharedpath = self.path
282 282 try:
283 283 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
284 284 realpath=True)
285 285 s = vfs.base
286 286 if not vfs.exists():
287 287 raise error.RepoError(
288 288 _('.hg/sharedpath points to nonexistent directory %s') % s)
289 289 self.sharedpath = s
290 290 except IOError as inst:
291 291 if inst.errno != errno.ENOENT:
292 292 raise
293 293
294 294 self.store = store.store(
295 295 self.requirements, self.sharedpath, scmutil.vfs)
296 296 self.spath = self.store.path
297 297 self.svfs = self.store.vfs
298 298 self.sjoin = self.store.join
299 299 self.vfs.createmode = self.store.createmode
300 300 self._applyopenerreqs()
301 301 if create:
302 302 self._writerequirements()
303 303
304 304 self._dirstatevalidatewarned = False
305 305
306 306 self._branchcaches = {}
307 307 self._revbranchcache = None
308 308 self.filterpats = {}
309 309 self._datafilters = {}
310 310 self._transref = self._lockref = self._wlockref = None
311 311
312 312 # A cache for various files under .hg/ that tracks file changes
313 313 # (used by the filecache decorator)
314 314 #
315 315 # Maps a property name to its util.filecacheentry
316 316 self._filecache = {}
317 317
318 318 # hold sets of revisions to be filtered
319 319 # should be cleared when something might have changed the filter value:
320 320 # - new changesets,
321 321 # - phase change,
322 322 # - new obsolescence marker,
323 323 # - working directory parent change,
324 324 # - bookmark changes
325 325 self.filteredrevcache = {}
326 326
327 327 # generic mapping between names and nodes
328 328 self.names = namespaces.namespaces()
329 329
330 330 def close(self):
331 331 self._writecaches()
332 332
333 333 def _writecaches(self):
334 334 if self._revbranchcache:
335 335 self._revbranchcache.write()
336 336
337 337 def _restrictcapabilities(self, caps):
338 338 if self.ui.configbool('experimental', 'bundle2-advertise', True):
339 339 caps = set(caps)
340 340 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
341 341 caps.add('bundle2=' + urllib.quote(capsblob))
342 342 return caps
343 343
344 344 def _applyopenerreqs(self):
345 345 self.svfs.options = dict((r, 1) for r in self.requirements
346 346 if r in self.openerreqs)
347 347 # experimental config: format.chunkcachesize
348 348 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
349 349 if chunkcachesize is not None:
350 350 self.svfs.options['chunkcachesize'] = chunkcachesize
351 351 # experimental config: format.maxchainlen
352 352 maxchainlen = self.ui.configint('format', 'maxchainlen')
353 353 if maxchainlen is not None:
354 354 self.svfs.options['maxchainlen'] = maxchainlen
355 355 # experimental config: format.manifestcachesize
356 356 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
357 357 if manifestcachesize is not None:
358 358 self.svfs.options['manifestcachesize'] = manifestcachesize
359 359 # experimental config: format.aggressivemergedeltas
360 360 aggressivemergedeltas = self.ui.configbool('format',
361 361 'aggressivemergedeltas', False)
362 362 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
363 363 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
364 364
365 365 def _writerequirements(self):
366 366 scmutil.writerequires(self.vfs, self.requirements)
367 367
368 368 def _checknested(self, path):
369 369 """Determine if path is a legal nested repository."""
370 370 if not path.startswith(self.root):
371 371 return False
372 372 subpath = path[len(self.root) + 1:]
373 373 normsubpath = util.pconvert(subpath)
374 374
375 375 # XXX: Checking against the current working copy is wrong in
376 376 # the sense that it can reject things like
377 377 #
378 378 # $ hg cat -r 10 sub/x.txt
379 379 #
380 380 # if sub/ is no longer a subrepository in the working copy
381 381 # parent revision.
382 382 #
383 383 # However, it can of course also allow things that would have
384 384 # been rejected before, such as the above cat command if sub/
385 385 # is a subrepository now, but was a normal directory before.
386 386 # The old path auditor would have rejected by mistake since it
387 387 # panics when it sees sub/.hg/.
388 388 #
389 389 # All in all, checking against the working copy seems sensible
390 390 # since we want to prevent access to nested repositories on
391 391 # the filesystem *now*.
392 392 ctx = self[None]
393 393 parts = util.splitpath(subpath)
394 394 while parts:
395 395 prefix = '/'.join(parts)
396 396 if prefix in ctx.substate:
397 397 if prefix == normsubpath:
398 398 return True
399 399 else:
400 400 sub = ctx.sub(prefix)
401 401 return sub.checknested(subpath[len(prefix) + 1:])
402 402 else:
403 403 parts.pop()
404 404 return False
405 405
406 406 def peer(self):
407 407 return localpeer(self) # not cached to avoid reference cycle
408 408
409 409 def unfiltered(self):
410 410 """Return unfiltered version of the repository
411 411
412 412 Intended to be overwritten by filtered repo."""
413 413 return self
414 414
415 415 def filtered(self, name):
416 416 """Return a filtered version of a repository"""
417 417 # build a new class with the mixin and the current class
418 418 # (possibly subclass of the repo)
419 419 class proxycls(repoview.repoview, self.unfiltered().__class__):
420 420 pass
421 421 return proxycls(self, name)
422 422
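# Illustration (added in review, not part of the original file): a hedged
# usage sketch. localpeer above serves the 'served' view; a filtered view
# proxies the same underlying repo, so unfiltered() gets back to it:
#
#     served = repo.filtered('served')
#     assert served.unfiltered() is repo.unfiltered()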
423 423 @repofilecache('bookmarks')
424 424 def _bookmarks(self):
425 425 return bookmarks.bmstore(self)
426 426
427 427 @repofilecache('bookmarks.current')
428 428 def _activebookmark(self):
429 429 return bookmarks.readactive(self)
430 430
431 431 def bookmarkheads(self, bookmark):
432 432 name = bookmark.split('@', 1)[0]
433 433 heads = []
434 434 for mark, n in self._bookmarks.iteritems():
435 435 if mark.split('@', 1)[0] == name:
436 436 heads.append(n)
437 437 return heads
438 438
439 439 # _phaserevs and _phasesets depend on the changelog. What we need is to
440 440 # call _phasecache.invalidate() if '00changelog.i' was changed, but that
441 441 # can't be easily expressed in the filecache mechanism.
442 442 @storecache('phaseroots', '00changelog.i')
443 443 def _phasecache(self):
444 444 return phases.phasecache(self, self._phasedefaults)
445 445
446 446 @storecache('obsstore')
447 447 def obsstore(self):
448 448 # read default format for new obsstore.
449 449 # developer config: format.obsstore-version
450 450 defaultformat = self.ui.configint('format', 'obsstore-version', None)
451 451 # rely on obsstore class default when possible.
452 452 kwargs = {}
453 453 if defaultformat is not None:
454 454 kwargs['defaultformat'] = defaultformat
455 455 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
456 456 store = obsolete.obsstore(self.svfs, readonly=readonly,
457 457 **kwargs)
458 458 if store and readonly:
459 459 self.ui.warn(
460 460 _('obsolete feature not enabled but %i markers found!\n')
461 461 % len(list(store)))
462 462 return store
463 463
464 464 @storecache('00changelog.i')
465 465 def changelog(self):
466 466 c = changelog.changelog(self.svfs)
467 467 if 'HG_PENDING' in os.environ:
468 468 p = os.environ['HG_PENDING']
469 469 if p.startswith(self.root):
470 470 c.readpending('00changelog.i.a')
471 471 return c
472 472
473 473 @storecache('00manifest.i')
474 474 def manifest(self):
475 475 return manifest.manifest(self.svfs)
476 476
477 477 def dirlog(self, dir):
478 478 return self.manifest.dirlog(dir)
479 479
480 480 @repofilecache('dirstate')
481 481 def dirstate(self):
482 482 return dirstate.dirstate(self.vfs, self.ui, self.root,
483 483 self._dirstatevalidate)
484 484
485 485 def _dirstatevalidate(self, node):
486 486 try:
487 487 self.changelog.rev(node)
488 488 return node
489 489 except error.LookupError:
490 490 if not self._dirstatevalidatewarned:
491 491 self._dirstatevalidatewarned = True
492 492 self.ui.warn(_("warning: ignoring unknown"
493 493 " working parent %s!\n") % short(node))
494 494 return nullid
495 495
496 496 def __getitem__(self, changeid):
497 497 if changeid is None or changeid == wdirrev:
498 498 return context.workingctx(self)
499 499 if isinstance(changeid, slice):
500 500 return [context.changectx(self, i)
501 501 for i in xrange(*changeid.indices(len(self)))
502 502 if i not in self.changelog.filteredrevs]
503 503 return context.changectx(self, changeid)
504 504
505 505 def __contains__(self, changeid):
506 506 try:
507 507 self[changeid]
508 508 return True
509 509 except error.RepoLookupError:
510 510 return False
511 511
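# Illustration (added in review, not part of the original file): a sketch of
# the lookup protocol implemented above; changeid may be None (working
# directory), a revision number, a node, a tag/bookmark name, or a slice:
#
#     ctx = repo['tip']       # a changectx
#     ctxs = repo[0:5]        # list of changectx, skipping filtered revs
#     known = node in repo    # __contains__ swallows RepoLookupError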
512 512 def __nonzero__(self):
513 513 return True
514 514
515 515 def __len__(self):
516 516 return len(self.changelog)
517 517
518 518 def __iter__(self):
519 519 return iter(self.changelog)
520 520
521 521 def revs(self, expr, *args):
522 522 '''Find revisions matching a revset.
523 523
524 524 The revset is specified as a string ``expr`` that may contain
525 525 %-formatting to escape certain types. See ``revset.formatspec``.
526 526
527 527 Return a revset.abstractsmartset, which is a list-like interface
528 528 that contains integer revisions.
529 529 '''
530 530 expr = revset.formatspec(expr, *args)
531 531 m = revset.match(None, expr)
532 532 return m(self)
533 533
534 534 def set(self, expr, *args):
535 535 '''Find revisions matching a revset and emit changectx instances.
536 536
537 537 This is a convenience wrapper around ``revs()`` that iterates the
538 538 result and is a generator of changectx instances.
539 539 '''
540 540 for r in self.revs(expr, *args):
541 541 yield self[r]
542 542
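# Illustration (added in review, not part of the original file): a minimal
# revset sketch using the %-formatting described above (revset.formatspec
# documents escapes such as %s for strings and %ld for lists of revisions):
#
#     for rev in repo.revs('branch(%s) and not merge()', 'default'):
#         ...                 # integer revisions
#     for ctx in repo.set('%ld and public()', revs):
#         ...                 # changectx instances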
543 543 def url(self):
544 544 return 'file:' + self.root
545 545
546 546 def hook(self, name, throw=False, **args):
547 547 """Call a hook, passing this repo instance.
548 548
549 549 This is a convenience method to aid invoking hooks. Extensions likely
550 550 won't call this unless they have registered a custom hook or are
551 551 replacing code that is expected to call a hook.
552 552 """
553 553 return hook.hook(self.ui, self, name, throw, **args)
554 554
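# Illustration (added in review, not part of the original file): a hedged
# sketch of an extension invoking a custom hook; 'myext-sync' is a
# hypothetical hook name, and keyword arguments are exposed to shell hooks
# as HG_* environment variables:
#
#     repo.hook('myext-sync', throw=False, node=hex(node))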
555 555 @unfilteredmethod
556 556 def _tag(self, names, node, message, local, user, date, extra=None,
557 557 editor=False):
558 558 if isinstance(names, str):
559 559 names = (names,)
560 560
561 561 branches = self.branchmap()
562 562 for name in names:
563 563 self.hook('pretag', throw=True, node=hex(node), tag=name,
564 564 local=local)
565 565 if name in branches:
566 566 self.ui.warn(_("warning: tag %s conflicts with existing"
567 567 " branch name\n") % name)
568 568
569 569 def writetags(fp, names, munge, prevtags):
570 570 fp.seek(0, 2)
571 571 if prevtags and prevtags[-1] != '\n':
572 572 fp.write('\n')
573 573 for name in names:
574 574 if munge:
575 575 m = munge(name)
576 576 else:
577 577 m = name
578 578
579 579 if (self._tagscache.tagtypes and
580 580 name in self._tagscache.tagtypes):
581 581 old = self.tags().get(name, nullid)
582 582 fp.write('%s %s\n' % (hex(old), m))
583 583 fp.write('%s %s\n' % (hex(node), m))
584 584 fp.close()
585 585
586 586 prevtags = ''
587 587 if local:
588 588 try:
589 589 fp = self.vfs('localtags', 'r+')
590 590 except IOError:
591 591 fp = self.vfs('localtags', 'a')
592 592 else:
593 593 prevtags = fp.read()
594 594
595 595 # local tags are stored in the current charset
596 596 writetags(fp, names, None, prevtags)
597 597 for name in names:
598 598 self.hook('tag', node=hex(node), tag=name, local=local)
599 599 return
600 600
601 601 try:
602 602 fp = self.wfile('.hgtags', 'rb+')
603 603 except IOError as e:
604 604 if e.errno != errno.ENOENT:
605 605 raise
606 606 fp = self.wfile('.hgtags', 'ab')
607 607 else:
608 608 prevtags = fp.read()
609 609
610 610 # committed tags are stored in UTF-8
611 611 writetags(fp, names, encoding.fromlocal, prevtags)
612 612
613 613 fp.close()
614 614
615 615 self.invalidatecaches()
616 616
617 617 if '.hgtags' not in self.dirstate:
618 618 self[None].add(['.hgtags'])
619 619
620 620 m = matchmod.exact(self.root, '', ['.hgtags'])
621 621 tagnode = self.commit(message, user, date, extra=extra, match=m,
622 622 editor=editor)
623 623
624 624 for name in names:
625 625 self.hook('tag', node=hex(node), tag=name, local=local)
626 626
627 627 return tagnode
628 628
629 629 def tag(self, names, node, message, local, user, date, editor=False):
630 630 '''tag a revision with one or more symbolic names.
631 631
632 632 names is a list of strings or, when adding a single tag, names may be a
633 633 string.
634 634
635 635 if local is True, the tags are stored in a per-repository file.
636 636 otherwise, they are stored in the .hgtags file, and a new
637 637 changeset is committed with the change.
638 638
639 639 keyword arguments:
640 640
641 641 local: whether to store tags in non-version-controlled file
642 642 (default False)
643 643
644 644 message: commit message to use if committing
645 645
646 646 user: name of user to use if committing
647 647
648 648 date: date tuple to use if committing'''
649 649
650 650 if not local:
651 651 m = matchmod.exact(self.root, '', ['.hgtags'])
652 652 if any(self.status(match=m, unknown=True, ignored=True)):
653 653 raise error.Abort(_('working copy of .hgtags is changed'),
654 654 hint=_('please commit .hgtags manually'))
655 655
656 656 self.tags() # instantiate the cache
657 657 self._tag(names, node, message, local, user, date, editor=editor)
658 658
659 659 @filteredpropertycache
660 660 def _tagscache(self):
661 661 '''Returns a tagscache object that contains various tag-related
662 662 caches.'''
663 663
664 664 # This simplifies its cache management by having one decorated
665 665 # function (this one) and the rest simply fetch things from it.
666 666 class tagscache(object):
667 667 def __init__(self):
668 668 # These two define the set of tags for this repository. tags
669 669 # maps tag name to node; tagtypes maps tag name to 'global' or
670 670 # 'local'. (Global tags are defined by .hgtags across all
671 671 # heads, and local tags are defined in .hg/localtags.)
672 672 # They constitute the in-memory cache of tags.
673 673 self.tags = self.tagtypes = None
674 674
675 675 self.nodetagscache = self.tagslist = None
676 676
677 677 cache = tagscache()
678 678 cache.tags, cache.tagtypes = self._findtags()
679 679
680 680 return cache
681 681
682 682 def tags(self):
683 683 '''return a mapping of tag to node'''
684 684 t = {}
685 685 if self.changelog.filteredrevs:
686 686 tags, tt = self._findtags()
687 687 else:
688 688 tags = self._tagscache.tags
689 689 for k, v in tags.iteritems():
690 690 try:
691 691 # ignore tags to unknown nodes
692 692 self.changelog.rev(v)
693 693 t[k] = v
694 694 except (error.LookupError, ValueError):
695 695 pass
696 696 return t
697 697
698 698 def _findtags(self):
699 699 '''Do the hard work of finding tags. Return a pair of dicts
700 700 (tags, tagtypes) where tags maps tag name to node, and tagtypes
701 701 maps tag name to a string like \'global\' or \'local\'.
702 702 Subclasses or extensions are free to add their own tags, but
703 703 should be aware that the returned dicts will be retained for the
704 704 duration of the localrepo object.'''
705 705
706 706 # XXX what tagtype should subclasses/extensions use? Currently
707 707 # mq and bookmarks add tags, but do not set the tagtype at all.
708 708 # Should each extension invent its own tag type? Should there
709 709 # be one tagtype for all such "virtual" tags? Or is the status
710 710 # quo fine?
711 711
712 712 alltags = {} # map tag name to (node, hist)
713 713 tagtypes = {}
714 714
715 715 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
716 716 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
717 717
718 718 # Build the return dicts. Have to re-encode tag names because
719 719 # the tags module always uses UTF-8 (in order not to lose info
720 720 # writing to the cache), but the rest of Mercurial wants them in
721 721 # local encoding.
722 722 tags = {}
723 723 for (name, (node, hist)) in alltags.iteritems():
724 724 if node != nullid:
725 725 tags[encoding.tolocal(name)] = node
726 726 tags['tip'] = self.changelog.tip()
727 727 tagtypes = dict([(encoding.tolocal(name), value)
728 728 for (name, value) in tagtypes.iteritems()])
729 729 return (tags, tagtypes)
730 730
731 731 def tagtype(self, tagname):
732 732 '''
733 733 return the type of the given tag. result can be:
734 734
735 735 'local' : a local tag
736 736 'global' : a global tag
737 737 None : tag does not exist
738 738 '''
739 739
740 740 return self._tagscache.tagtypes.get(tagname)
741 741
742 742 def tagslist(self):
743 743 '''return a list of tags ordered by revision'''
744 744 if not self._tagscache.tagslist:
745 745 l = []
746 746 for t, n in self.tags().iteritems():
747 747 l.append((self.changelog.rev(n), t, n))
748 748 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
749 749
750 750 return self._tagscache.tagslist
751 751
752 752 def nodetags(self, node):
753 753 '''return the tags associated with a node'''
754 754 if not self._tagscache.nodetagscache:
755 755 nodetagscache = {}
756 756 for t, n in self._tagscache.tags.iteritems():
757 757 nodetagscache.setdefault(n, []).append(t)
758 758 for tags in nodetagscache.itervalues():
759 759 tags.sort()
760 760 self._tagscache.nodetagscache = nodetagscache
761 761 return self._tagscache.nodetagscache.get(node, [])
762 762
763 763 def nodebookmarks(self, node):
764 764 """return the list of bookmarks pointing to the specified node"""
765 765 marks = []
766 766 for bookmark, n in self._bookmarks.iteritems():
767 767 if n == node:
768 768 marks.append(bookmark)
769 769 return sorted(marks)
770 770
771 771 def branchmap(self):
772 772 '''returns a dictionary {branch: [branchheads]} with branchheads
773 773 ordered by increasing revision number'''
774 774 branchmap.updatecache(self)
775 775 return self._branchcaches[self.filtername]
776 776
777 777 @unfilteredmethod
778 778 def revbranchcache(self):
779 779 if not self._revbranchcache:
780 780 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
781 781 return self._revbranchcache
782 782
783 783 def branchtip(self, branch, ignoremissing=False):
784 784 '''return the tip node for a given branch
785 785
786 786 If ignoremissing is True, then this method will not raise an error.
787 787 This is helpful for callers that only expect None for a missing branch
788 788 (e.g. namespace).
789 789
790 790 '''
791 791 try:
792 792 return self.branchmap().branchtip(branch)
793 793 except KeyError:
794 794 if not ignoremissing:
795 795 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
796 796 else:
797 797 pass
798 798
799 799 def lookup(self, key):
800 800 return self[key].node()
801 801
802 802 def lookupbranch(self, key, remote=None):
803 803 repo = remote or self
804 804 if key in repo.branchmap():
805 805 return key
806 806
807 807 repo = (remote and remote.local()) and remote or self
808 808 return repo[key].branch()
809 809
810 810 def known(self, nodes):
811 811 nm = self.changelog.nodemap
812 812 pc = self._phasecache
813 813 result = []
814 814 for n in nodes:
815 815 r = nm.get(n)
816 816 resp = not (r is None or pc.phase(self, r) >= phases.secret)
817 817 result.append(resp)
818 818 return result
819 819
820 820 def local(self):
821 821 return self
822 822
823 823 def publishing(self):
824 824 # it's safe (and desirable) to trust the publish flag unconditionally
825 825 # so that we don't finalize changes shared between users via ssh or nfs
826 826 return self.ui.configbool('phases', 'publish', True, untrusted=True)
827 827
828 828 def cancopy(self):
829 829 # so statichttprepo's override of local() works
830 830 if not self.local():
831 831 return False
832 832 if not self.publishing():
833 833 return True
834 834 # if publishing we can't copy if there is filtered content
835 835 return not self.filtered('visible').changelog.filteredrevs
836 836
837 837 def shared(self):
838 838 '''the type of shared repository (None if not shared)'''
839 839 if self.sharedpath != self.path:
840 840 return 'store'
841 841 return None
842 842
843 843 def join(self, f, *insidef):
844 844 return self.vfs.join(os.path.join(f, *insidef))
845 845
846 846 def wjoin(self, f, *insidef):
847 847 return self.vfs.reljoin(self.root, f, *insidef)
848 848
849 849 def file(self, f):
850 850 if f[0] == '/':
851 851 f = f[1:]
852 852 return filelog.filelog(self.svfs, f)
853 853
854 854 def parents(self, changeid=None):
855 855 '''get list of changectxs for parents of changeid'''
856 856 msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
857 857 self.ui.deprecwarn(msg, '3.7')
858 858 return self[changeid].parents()
859 859
860 860 def changectx(self, changeid):
861 861 return self[changeid]
862 862
863 863 def setparents(self, p1, p2=nullid):
864 864 self.dirstate.beginparentchange()
865 865 copies = self.dirstate.setparents(p1, p2)
866 866 pctx = self[p1]
867 867 if copies:
868 868 # Adjust copy records, the dirstate cannot do it, it
869 869 # requires access to parents manifests. Preserve them
870 870 # only for entries added to first parent.
871 871 for f in copies:
872 872 if f not in pctx and copies[f] in pctx:
873 873 self.dirstate.copy(copies[f], f)
874 874 if p2 == nullid:
875 875 for f, s in sorted(self.dirstate.copies().items()):
876 876 if f not in pctx and s not in pctx:
877 877 self.dirstate.copy(None, f)
878 878 self.dirstate.endparentchange()
879 879
880 880 def filectx(self, path, changeid=None, fileid=None):
881 881 """changeid can be a changeset revision, node, or tag.
882 882 fileid can be a file revision or node."""
883 883 return context.filectx(self, path, changeid, fileid)
884 884
885 885 def getcwd(self):
886 886 return self.dirstate.getcwd()
887 887
888 888 def pathto(self, f, cwd=None):
889 889 return self.dirstate.pathto(f, cwd)
890 890
891 891 def wfile(self, f, mode='r'):
892 892 return self.wvfs(f, mode)
893 893
894 894 def _link(self, f):
895 895 return self.wvfs.islink(f)
896 896
897 897 def _loadfilter(self, filter):
898 898 if filter not in self.filterpats:
899 899 l = []
900 900 for pat, cmd in self.ui.configitems(filter):
901 901 if cmd == '!':
902 902 continue
903 903 mf = matchmod.match(self.root, '', [pat])
904 904 fn = None
905 905 params = cmd
906 906 for name, filterfn in self._datafilters.iteritems():
907 907 if cmd.startswith(name):
908 908 fn = filterfn
909 909 params = cmd[len(name):].lstrip()
910 910 break
911 911 if not fn:
912 912 fn = lambda s, c, **kwargs: util.filter(s, c)
913 913 # Wrap old filters not supporting keyword arguments
914 914 if not inspect.getargspec(fn)[2]:
915 915 oldfn = fn
916 916 fn = lambda s, c, **kwargs: oldfn(s, c)
917 917 l.append((mf, fn, params))
918 918 self.filterpats[filter] = l
919 919 return self.filterpats[filter]
920 920
921 921 def _filter(self, filterpats, filename, data):
922 922 for mf, fn, cmd in filterpats:
923 923 if mf(filename):
924 924 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
925 925 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
926 926 break
927 927
928 928 return data
929 929
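# Illustration (added in review, not part of the original file): the filter
# patterns consumed by _loadfilter/_filter come from [encode] and [decode]
# hgrc sections; a sketch along the lines of the examples in
# 'hg help config', using the 'pipe:' command prefix:
#
#     [encode]
#     *.gz = pipe: gunzip        # store gzip files uncompressed
#     [decode]
#     *.gz = pipe: gzip          # recompress on checkout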
930 930 @unfilteredpropertycache
931 931 def _encodefilterpats(self):
932 932 return self._loadfilter('encode')
933 933
934 934 @unfilteredpropertycache
935 935 def _decodefilterpats(self):
936 936 return self._loadfilter('decode')
937 937
938 938 def adddatafilter(self, name, filter):
939 939 self._datafilters[name] = filter
940 940
941 941 def wread(self, filename):
942 942 if self._link(filename):
943 943 data = self.wvfs.readlink(filename)
944 944 else:
945 945 data = self.wvfs.read(filename)
946 946 return self._filter(self._encodefilterpats, filename, data)
947 947
948 948 def wwrite(self, filename, data, flags):
949 949 """write ``data`` into ``filename`` in the working directory
950 950
951 951 This returns the length of the written (maybe decoded) data.
952 952 """
953 953 data = self._filter(self._decodefilterpats, filename, data)
954 954 if 'l' in flags:
955 955 self.wvfs.symlink(data, filename)
956 956 else:
957 957 self.wvfs.write(filename, data)
958 958 if 'x' in flags:
959 959 self.wvfs.setflags(filename, False, True)
960 960 return len(data)
961 961
962 962 def wwritedata(self, filename, data):
963 963 return self._filter(self._decodefilterpats, filename, data)
964 964
965 965 def currenttransaction(self):
966 966 """return the current transaction or None if none exists"""
967 967 if self._transref:
968 968 tr = self._transref()
969 969 else:
970 970 tr = None
971 971
972 972 if tr and tr.running():
973 973 return tr
974 974 return None
975 975
976 976 def transaction(self, desc, report=None):
977 977 if (self.ui.configbool('devel', 'all-warnings')
978 978 or self.ui.configbool('devel', 'check-locks')):
979 979 l = self._lockref and self._lockref()
980 980 if l is None or not l.held:
981 981 self.ui.develwarn('transaction with no lock')
982 982 tr = self.currenttransaction()
983 983 if tr is not None:
984 984 return tr.nest()
985 985
986 986 # abort here if the journal already exists
987 987 if self.svfs.exists("journal"):
988 988 raise error.RepoError(
989 989 _("abandoned transaction found"),
990 990 hint=_("run 'hg recover' to clean up transaction"))
991 991
992 992 # make journal.dirstate contain in-memory changes at this point
993 993 self.dirstate.write(None)
994 994
995 995 idbase = "%.40f#%f" % (random.random(), time.time())
996 996 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
997 997 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
998 998
999 999 self._writejournal(desc)
1000 1000 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1001 1001 if report:
1002 1002 rp = report
1003 1003 else:
1004 1004 rp = self.ui.warn
1005 1005 vfsmap = {'plain': self.vfs} # root of .hg/
1006 1006 # we must avoid cyclic references between the repo and the transaction.
1007 1007 reporef = weakref.ref(self)
1008 1008 def validate(tr):
1009 1009 """will run pre-closing hooks"""
1010 1010 reporef().hook('pretxnclose', throw=True,
1011 1011 txnname=desc, **tr.hookargs)
1012 1012 def releasefn(tr, success):
1013 1013 repo = reporef()
1014 1014 if success:
1015 1015 # this should be explicitly invoked here, because
1016 1016 # in-memory changes aren't written out when the
1017 1017 # transaction closes, if tr.addfilegenerator (via
1018 1018 # dirstate.write or so) wasn't invoked while the
1019 1019 # transaction was running
1020 1020 repo.dirstate.write(None)
1021 1021 else:
1022 1022 # prevent in-memory changes from being written out at
1023 1023 # the end of outer wlock scope or so
1024 1024 repo.dirstate.invalidate()
1025 1025
1026 1026 # discard all changes (including ones already written
1027 1027 # out) in this transaction
1028 1028 repo.vfs.rename('journal.dirstate', 'dirstate')
1029 1029
1030 1030 repo.invalidate(clearfilecache=True)
1031 1031
1032 1032 tr = transaction.transaction(rp, self.svfs, vfsmap,
1033 1033 "journal",
1034 1034 "undo",
1035 1035 aftertrans(renames),
1036 1036 self.store.createmode,
1037 1037 validator=validate,
1038 1038 releasefn=releasefn)
1039 1039
1040 1040 tr.hookargs['txnid'] = txnid
1041 1041 # note: writing the fncache only during finalize means that the file is
1042 1042 # outdated when running hooks. As fncache is used for streaming clone,
1043 1043 # this is not expected to break anything that happens during the hooks.
1044 1044 tr.addfinalize('flush-fncache', self.store.write)
1045 1045 def txnclosehook(tr2):
1046 1046 """To be run if the transaction is successful; schedules a hook run
1047 1047 """
1048 1048 def hook():
1049 1049 reporef().hook('txnclose', throw=False, txnname=desc,
1050 1050 **tr2.hookargs)
1051 1051 reporef()._afterlock(hook)
1052 1052 tr.addfinalize('txnclose-hook', txnclosehook)
1053 1053 def txnaborthook(tr2):
1054 1054 """To be run if the transaction is aborted
1055 1055 """
1056 1056 reporef().hook('txnabort', throw=False, txnname=desc,
1057 1057 **tr2.hookargs)
1058 1058 tr.addabort('txnabort-hook', txnaborthook)
1059 1059 # avoid eager cache invalidation. in-memory data should be identical
1060 1060 # to stored data if transaction has no error.
1061 1061 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1062 1062 self._transref = weakref.ref(tr)
1063 1063 return tr
1064 1064
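# Illustration (added in review, not part of the original file): a hedged
# sketch of the expected calling sequence; the caller should already hold
# the store lock (see the devel warning above):
#
#     tr = repo.transaction('my-operation')    # hypothetical description
#     try:
#         ...                                  # write store data
#         tr.close()                           # commit the transaction
#     finally:
#         tr.release()                         # abort unless close() ran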
1065 1065 def _journalfiles(self):
1066 1066 return ((self.svfs, 'journal'),
1067 1067 (self.vfs, 'journal.dirstate'),
1068 1068 (self.vfs, 'journal.branch'),
1069 1069 (self.vfs, 'journal.desc'),
1070 1070 (self.vfs, 'journal.bookmarks'),
1071 1071 (self.svfs, 'journal.phaseroots'))
1072 1072
1073 1073 def undofiles(self):
1074 1074 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1075 1075
1076 1076 def _writejournal(self, desc):
1077 1077 self.vfs.write("journal.dirstate",
1078 1078 self.vfs.tryread("dirstate"))
1079 1079 self.vfs.write("journal.branch",
1080 1080 encoding.fromlocal(self.dirstate.branch()))
1081 1081 self.vfs.write("journal.desc",
1082 1082 "%d\n%s\n" % (len(self), desc))
1083 1083 self.vfs.write("journal.bookmarks",
1084 1084 self.vfs.tryread("bookmarks"))
1085 1085 self.svfs.write("journal.phaseroots",
1086 1086 self.svfs.tryread("phaseroots"))
1087 1087
1088 1088 def recover(self):
1089 1089 lock = self.lock()
1090 1090 try:
1091 1091 if self.svfs.exists("journal"):
1092 1092 self.ui.status(_("rolling back interrupted transaction\n"))
1093 1093 vfsmap = {'': self.svfs,
1094 1094 'plain': self.vfs,}
1095 1095 transaction.rollback(self.svfs, vfsmap, "journal",
1096 1096 self.ui.warn)
1097 1097 self.invalidate()
1098 1098 return True
1099 1099 else:
1100 1100 self.ui.warn(_("no interrupted transaction available\n"))
1101 1101 return False
1102 1102 finally:
1103 1103 lock.release()
1104 1104
1105 1105 def rollback(self, dryrun=False, force=False):
1106 1106 wlock = lock = dsguard = None
1107 1107 try:
1108 1108 wlock = self.wlock()
1109 1109 lock = self.lock()
1110 1110 if self.svfs.exists("undo"):
1111 1111 dsguard = cmdutil.dirstateguard(self, 'rollback')
1112 1112
1113 1113 return self._rollback(dryrun, force, dsguard)
1114 1114 else:
1115 1115 self.ui.warn(_("no rollback information available\n"))
1116 1116 return 1
1117 1117 finally:
1118 1118 release(dsguard, lock, wlock)
1119 1119
1120 1120 @unfilteredmethod # Until we get smarter cache management
1121 1121 def _rollback(self, dryrun, force, dsguard):
1122 1122 ui = self.ui
1123 1123 try:
1124 1124 args = self.vfs.read('undo.desc').splitlines()
1125 1125 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1126 1126 if len(args) >= 3:
1127 1127 detail = args[2]
1128 1128 oldtip = oldlen - 1
1129 1129
1130 1130 if detail and ui.verbose:
1131 1131 msg = (_('repository tip rolled back to revision %s'
1132 1132 ' (undo %s: %s)\n')
1133 1133 % (oldtip, desc, detail))
1134 1134 else:
1135 1135 msg = (_('repository tip rolled back to revision %s'
1136 1136 ' (undo %s)\n')
1137 1137 % (oldtip, desc))
1138 1138 except IOError:
1139 1139 msg = _('rolling back unknown transaction\n')
1140 1140 desc = None
1141 1141
1142 1142 if not force and self['.'] != self['tip'] and desc == 'commit':
1143 1143 raise error.Abort(
1144 1144 _('rollback of last commit while not checked out '
1145 1145 'may lose data'), hint=_('use -f to force'))
1146 1146
1147 1147 ui.status(msg)
1148 1148 if dryrun:
1149 1149 return 0
1150 1150
1151 1151 parents = self.dirstate.parents()
1152 1152 self.destroying()
1153 1153 vfsmap = {'plain': self.vfs, '': self.svfs}
1154 1154 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1155 1155 if self.vfs.exists('undo.bookmarks'):
1156 1156 self.vfs.rename('undo.bookmarks', 'bookmarks')
1157 1157 if self.svfs.exists('undo.phaseroots'):
1158 1158 self.svfs.rename('undo.phaseroots', 'phaseroots')
1159 1159 self.invalidate()
1160 1160
1161 1161 parentgone = (parents[0] not in self.changelog.nodemap or
1162 1162 parents[1] not in self.changelog.nodemap)
1163 1163 if parentgone:
1164 1164 # prevent dirstateguard from overwriting already restored one
1165 1165 dsguard.close()
1166 1166
1167 1167 self.vfs.rename('undo.dirstate', 'dirstate')
1168 1168 try:
1169 1169 branch = self.vfs.read('undo.branch')
1170 1170 self.dirstate.setbranch(encoding.tolocal(branch))
1171 1171 except IOError:
1172 1172 ui.warn(_('named branch could not be reset: '
1173 1173 'current branch is still \'%s\'\n')
1174 1174 % self.dirstate.branch())
1175 1175
1176 1176 self.dirstate.invalidate()
1177 1177 parents = tuple([p.rev() for p in self[None].parents()])
1178 1178 if len(parents) > 1:
1179 1179 ui.status(_('working directory now based on '
1180 1180 'revisions %d and %d\n') % parents)
1181 1181 else:
1182 1182 ui.status(_('working directory now based on '
1183 1183 'revision %d\n') % parents)
1184 1184 mergemod.mergestate.clean(self, self['.'].node())
1185 1185
1186 1186 # TODO: if we know which new heads may result from this rollback, pass
1187 1187 # them to destroy(), which will prevent the branchhead cache from being
1188 1188 # invalidated.
1189 1189 self.destroyed()
1190 1190 return 0
1191 1191
1192 1192 def invalidatecaches(self):
1193 1193
1194 1194 if '_tagscache' in vars(self):
1195 1195 # can't use delattr on proxy
1196 1196 del self.__dict__['_tagscache']
1197 1197
1198 1198 self.unfiltered()._branchcaches.clear()
1199 1199 self.invalidatevolatilesets()
1200 1200
1201 1201 def invalidatevolatilesets(self):
1202 1202 self.filteredrevcache.clear()
1203 1203 obsolete.clearobscaches(self)
1204 1204
1205 1205 def invalidatedirstate(self):
1206 1206 '''Invalidates the dirstate, causing the next call to dirstate
1207 1207 to check if it was modified since the last time it was read,
1208 1208 rereading it if it has.
1209 1209
1210 1210 This is different from dirstate.invalidate() in that it doesn't always
1211 1211 reread the dirstate. Use dirstate.invalidate() if you want to
1212 1212 explicitly read the dirstate again (i.e. restoring it to a previous
1213 1213 known good state).'''
1214 1214 if hasunfilteredcache(self, 'dirstate'):
1215 1215 for k in self.dirstate._filecache:
1216 1216 try:
1217 1217 delattr(self.dirstate, k)
1218 1218 except AttributeError:
1219 1219 pass
1220 1220 delattr(self.unfiltered(), 'dirstate')
1221 1221
1222 1222 def invalidate(self, clearfilecache=False):
1223 1223 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1224 1224 for k in self._filecache.keys():
1225 1225 # dirstate is invalidated separately in invalidatedirstate()
1226 1226 if k == 'dirstate':
1227 1227 continue
1228 1228
1229 1229 if clearfilecache:
1230 1230 del self._filecache[k]
1231 1231 try:
1232 1232 delattr(unfiltered, k)
1233 1233 except AttributeError:
1234 1234 pass
1235 1235 self.invalidatecaches()
1236 1236 self.store.invalidatecaches()
1237 1237
1238 1238 def invalidateall(self):
1239 1239 '''Fully invalidates both store and non-store parts, causing the
1240 1240 subsequent operation to reread any outside changes.'''
1241 1241 # extensions should hook this to invalidate their caches
1242 1242 self.invalidate()
1243 1243 self.invalidatedirstate()
1244 1244
1245 1245 def _refreshfilecachestats(self, tr):
1246 1246 """Reload stats of cached files so that they are flagged as valid"""
1247 1247 for k, ce in self._filecache.items():
1248 1248 if k == 'dirstate' or k not in self.__dict__:
1249 1249 continue
1250 1250 ce.refresh()
1251 1251
1252 1252 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1253 1253 inheritchecker=None, parentenvvar=None):
1254 1254 parentlock = None
1255 1255 # the contents of parentenvvar are used by the underlying lock to
1256 1256 # determine whether it can be inherited
1257 1257 if parentenvvar is not None:
1258 1258 parentlock = os.environ.get(parentenvvar)
1259 1259 try:
1260 1260 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1261 1261 acquirefn=acquirefn, desc=desc,
1262 1262 inheritchecker=inheritchecker,
1263 1263 parentlock=parentlock)
1264 1264 except error.LockHeld as inst:
1265 1265 if not wait:
1266 1266 raise
1267 1267 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1268 1268 (desc, inst.locker))
1269 1269 # default to 600 seconds timeout
1270 1270 l = lockmod.lock(vfs, lockname,
1271 1271 int(self.ui.config("ui", "timeout", "600")),
1272 1272 releasefn=releasefn, acquirefn=acquirefn,
1273 1273 desc=desc)
1274 1274 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1275 1275 return l
1276 1276
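# Illustration (added in review, not part of the original file): the wait
# timeout used above is configurable, e.g. in hgrc:
#
#     [ui]
#     timeout = 600    # seconds to wait for a contended lock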
1277 1277 def _afterlock(self, callback):
1278 1278 """add a callback to be run when the repository is fully unlocked
1279 1279
1280 1280 The callback will be executed when the outermost lock is released
1281 1281 (with wlock being higher level than 'lock')."""
1282 1282 for ref in (self._wlockref, self._lockref):
1283 1283 l = ref and ref()
1284 1284 if l and l.held:
1285 1285 l.postrelease.append(callback)
1286 1286 break
1287 1287 else: # no lock has been found.
1288 1288 callback()
1289 1289
1290 1290 def lock(self, wait=True):
1291 1291 '''Lock the repository store (.hg/store) and return a weak reference
1292 1292 to the lock. Use this before modifying the store (e.g. committing or
1293 1293 stripping). If you are opening a transaction, get a lock as well.
1294 1294
1295 1295 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1296 1296 'wlock' first to avoid a deadlock hazard.'''
1297 1297 l = self._lockref and self._lockref()
1298 1298 if l is not None and l.held:
1299 1299 l.lock()
1300 1300 return l
1301 1301
1302 1302 l = self._lock(self.svfs, "lock", wait, None,
1303 1303 self.invalidate, _('repository %s') % self.origroot)
1304 1304 self._lockref = weakref.ref(l)
1305 1305 return l
1306 1306
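# Illustration (added in review, not part of the original file): the
# acquisition order described above, and the one this commit enforces in
# commit() below; 'wlock' always comes before 'lock':
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()     # store lock taken before processing
#         ...
#     finally:
#         release(lock, wlock)   # lockmod.release, innermost lock first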
1307 1307 def _wlockchecktransaction(self):
1308 1308 if self.currenttransaction() is not None:
1309 1309 raise error.LockInheritanceContractViolation(
1310 1310 'wlock cannot be inherited in the middle of a transaction')
1311 1311
1312 1312 def wlock(self, wait=True):
1313 1313 '''Lock the non-store parts of the repository (everything under
1314 1314 .hg except .hg/store) and return a weak reference to the lock.
1315 1315
1316 1316 Use this before modifying files in .hg.
1317 1317
1318 1318 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1319 1319 'wlock' first to avoid a deadlock hazard.'''
1320 1320 l = self._wlockref and self._wlockref()
1321 1321 if l is not None and l.held:
1322 1322 l.lock()
1323 1323 return l
1324 1324
1325 1325 # We do not need to check for non-waiting lock acquisition. Such
1326 1326 # acquisition would not cause a deadlock, as it would just fail.
1327 1327 if wait and (self.ui.configbool('devel', 'all-warnings')
1328 1328 or self.ui.configbool('devel', 'check-locks')):
1329 1329 l = self._lockref and self._lockref()
1330 1330 if l is not None and l.held:
1331 1331 self.ui.develwarn('"wlock" acquired after "lock"')
1332 1332
1333 1333 def unlock():
1334 1334 if self.dirstate.pendingparentchange():
1335 1335 self.dirstate.invalidate()
1336 1336 else:
1337 1337 self.dirstate.write(None)
1338 1338
1339 1339 self._filecache['dirstate'].refresh()
1340 1340
1341 1341 l = self._lock(self.vfs, "wlock", wait, unlock,
1342 1342 self.invalidatedirstate, _('working directory of %s') %
1343 1343 self.origroot,
1344 1344 inheritchecker=self._wlockchecktransaction,
1345 1345 parentenvvar='HG_WLOCK_LOCKER')
1346 1346 self._wlockref = weakref.ref(l)
1347 1347 return l
1348 1348
1349 1349 def _currentlock(self, lockref):
1350 1350 """Returns the lock if it's held, or None if it's not."""
1351 1351 if lockref is None:
1352 1352 return None
1353 1353 l = lockref()
1354 1354 if l is None or not l.held:
1355 1355 return None
1356 1356 return l
1357 1357
1358 1358 def currentwlock(self):
1359 1359 """Returns the wlock if it's held, or None if it's not."""
1360 1360 return self._currentlock(self._wlockref)
1361 1361
1362 1362 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1363 1363 """
1364 1364 commit an individual file as part of a larger transaction
1365 1365 """
1366 1366
1367 1367 fname = fctx.path()
1368 1368 fparent1 = manifest1.get(fname, nullid)
1369 1369 fparent2 = manifest2.get(fname, nullid)
1370 1370 if isinstance(fctx, context.filectx):
1371 1371 node = fctx.filenode()
1372 1372 if node in [fparent1, fparent2]:
1373 1373 self.ui.debug('reusing %s filelog entry\n' % fname)
1374 1374 return node
1375 1375
1376 1376 flog = self.file(fname)
1377 1377 meta = {}
1378 1378 copy = fctx.renamed()
1379 1379 if copy and copy[0] != fname:
1380 1380 # Mark the new revision of this file as a copy of another
1381 1381 # file. This copy data will effectively act as a parent
1382 1382 # of this new revision. If this is a merge, the first
1383 1383 # parent will be the nullid (meaning "look up the copy data")
1384 1384 # and the second one will be the other parent. For example:
1385 1385 #
1386 1386 # 0 --- 1 --- 3 rev1 changes file foo
1387 1387 # \ / rev2 renames foo to bar and changes it
1388 1388 # \- 2 -/ rev3 should have bar with all changes and
1389 1389 # should record that bar descends from
1390 1390 # bar in rev2 and foo in rev1
1391 1391 #
1392 1392 # this allows this merge to succeed:
1393 1393 #
1394 1394 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1395 1395 # \ / merging rev3 and rev4 should use bar@rev2
1396 1396 # \- 2 --- 4 as the merge base
1397 1397 #
1398 1398
1399 1399 cfname = copy[0]
1400 1400 crev = manifest1.get(cfname)
1401 1401 newfparent = fparent2
1402 1402
1403 1403 if manifest2: # branch merge
1404 1404 if fparent2 == nullid or crev is None: # copied on remote side
1405 1405 if cfname in manifest2:
1406 1406 crev = manifest2[cfname]
1407 1407 newfparent = fparent1
1408 1408
1409 1409 # Here, we used to search backwards through history to try to find
1410 1410 # where the file copy came from if the source of a copy was not in
1411 1411 # the parent directory. However, this doesn't actually make sense to
1412 1412 # do (what does a copy from something not in your working copy even
1413 1413 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1414 1414 # the user that copy information was dropped, so if they didn't
1415 1415 # expect this outcome it can be fixed, but this is the correct
1416 1416 # behavior in this circumstance.
1417 1417
1418 1418 if crev:
1419 1419 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1420 1420 meta["copy"] = cfname
1421 1421 meta["copyrev"] = hex(crev)
1422 1422 fparent1, fparent2 = nullid, newfparent
1423 1423 else:
1424 1424 self.ui.warn(_("warning: can't find ancestor for '%s' "
1425 1425 "copied from '%s'!\n") % (fname, cfname))
1426 1426
1427 1427 elif fparent1 == nullid:
1428 1428 fparent1, fparent2 = fparent2, nullid
1429 1429 elif fparent2 != nullid:
1430 1430 # is one parent an ancestor of the other?
1431 1431 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1432 1432 if fparent1 in fparentancestors:
1433 1433 fparent1, fparent2 = fparent2, nullid
1434 1434 elif fparent2 in fparentancestors:
1435 1435 fparent2 = nullid
1436 1436
1437 1437 # is the file changed?
1438 1438 text = fctx.data()
1439 1439 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1440 1440 changelist.append(fname)
1441 1441 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1442 1442 # are just the flags changed during merge?
1443 1443 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1444 1444 changelist.append(fname)
1445 1445
1446 1446 return fparent1
1447 1447
1448 1448 @unfilteredmethod
1449 1449 def commit(self, text="", user=None, date=None, match=None, force=False,
1450 1450 editor=False, extra=None):
1451 1451 """Add a new revision to current repository.
1452 1452
1453 1453 Revision information is gathered from the working directory,
1454 1454 match can be used to filter the committed files. If editor is
1455 1455 supplied, it is called to get a commit message.
1456 1456 """
1457 1457 if extra is None:
1458 1458 extra = {}
1459 1459
1460 1460 def fail(f, msg):
1461 1461 raise error.Abort('%s: %s' % (f, msg))
1462 1462
1463 1463 if not match:
1464 1464 match = matchmod.always(self.root, '')
1465 1465
1466 1466 if not force:
1467 1467 vdirs = []
1468 1468 match.explicitdir = vdirs.append
1469 1469 match.bad = fail
1470 1470
1471 1471 wlock = lock = tr = None
1472 1472 try:
1473 1473 wlock = self.wlock()
1474 lock = self.lock() # for recent changelog (see issue4368)
1475
1474 1476 wctx = self[None]
1475 1477 merge = len(wctx.parents()) > 1
1476 1478
1477 1479 if not force and merge and match.ispartial():
1478 1480 raise error.Abort(_('cannot partially commit a merge '
1479 1481 '(do not specify files or patterns)'))
1480 1482
1481 1483 status = self.status(match=match, clean=force)
1482 1484 if force:
1483 1485 status.modified.extend(status.clean) # mq may commit clean files
1484 1486
1485 1487 # check subrepos
1486 1488 subs = []
1487 1489 commitsubs = set()
1488 1490 newstate = wctx.substate.copy()
1489 1491 # only manage subrepos and .hgsubstate if .hgsub is present
1490 1492 if '.hgsub' in wctx:
1491 1493 # we'll decide whether to track this ourselves, thanks
1492 1494 for c in status.modified, status.added, status.removed:
1493 1495 if '.hgsubstate' in c:
1494 1496 c.remove('.hgsubstate')
1495 1497
1496 1498 # compare current state to last committed state
1497 1499 # build new substate based on last committed state
1498 1500 oldstate = wctx.p1().substate
1499 1501 for s in sorted(newstate.keys()):
1500 1502 if not match(s):
1501 1503 # ignore working copy, use old state if present
1502 1504 if s in oldstate:
1503 1505 newstate[s] = oldstate[s]
1504 1506 continue
1505 1507 if not force:
1506 1508 raise error.Abort(
1507 1509 _("commit with new subrepo %s excluded") % s)
1508 1510 dirtyreason = wctx.sub(s).dirtyreason(True)
1509 1511 if dirtyreason:
1510 1512 if not self.ui.configbool('ui', 'commitsubrepos'):
1511 1513 raise error.Abort(dirtyreason,
1512 1514 hint=_("use --subrepos for recursive commit"))
1513 1515 subs.append(s)
1514 1516 commitsubs.add(s)
1515 1517 else:
1516 1518 bs = wctx.sub(s).basestate()
1517 1519 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1518 1520 if oldstate.get(s, (None, None, None))[1] != bs:
1519 1521 subs.append(s)
1520 1522
1521 1523 # check for removed subrepos
1522 1524 for p in wctx.parents():
1523 1525 r = [s for s in p.substate if s not in newstate]
1524 1526 subs += [s for s in r if match(s)]
1525 1527 if subs:
1526 1528 if (not match('.hgsub') and
1527 1529 '.hgsub' in (wctx.modified() + wctx.added())):
1528 1530 raise error.Abort(
1529 1531 _("can't commit subrepos without .hgsub"))
1530 1532 status.modified.insert(0, '.hgsubstate')
1531 1533
1532 1534 elif '.hgsub' in status.removed:
1533 1535 # clean up .hgsubstate when .hgsub is removed
1534 1536 if ('.hgsubstate' in wctx and
1535 1537 '.hgsubstate' not in (status.modified + status.added +
1536 1538 status.removed)):
1537 1539 status.removed.insert(0, '.hgsubstate')
1538 1540
1539 1541 # make sure all explicit patterns are matched
1540 1542 if not force and (match.isexact() or match.prefix()):
1541 1543 matched = set(status.modified + status.added + status.removed)
1542 1544
1543 1545 for f in match.files():
1544 1546 f = self.dirstate.normalize(f)
1545 1547 if f == '.' or f in matched or f in wctx.substate:
1546 1548 continue
1547 1549 if f in status.deleted:
1548 1550 fail(f, _('file not found!'))
1549 1551 if f in vdirs: # visited directory
1550 1552 d = f + '/'
1551 1553 for mf in matched:
1552 1554 if mf.startswith(d):
1553 1555 break
1554 1556 else:
1555 1557 fail(f, _("no match under directory!"))
1556 1558 elif f not in self.dirstate:
1557 1559 fail(f, _("file not tracked!"))
1558 1560
1559 1561 cctx = context.workingcommitctx(self, status,
1560 1562 text, user, date, extra)
1561 1563
1562 1564 # internal config: ui.allowemptycommit
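# an otherwise empty commit is allowed only when the branch changed,
# a branch head is being closed, this is a merge, or
# ui.allowemptycommit is set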
1563 1565 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1564 1566 or extra.get('close') or merge or cctx.files()
1565 1567 or self.ui.configbool('ui', 'allowemptycommit'))
1566 1568 if not allowemptycommit:
1567 1569 return None
1568 1570
1569 1571 if merge and cctx.deleted():
1570 1572 raise error.Abort(_("cannot commit merge with missing files"))
1571 1573
1572 1574 ms = mergemod.mergestate.read(self)
1573 1575
1574 1576 if list(ms.unresolved()):
1575 1577 raise error.Abort(_('unresolved merge conflicts '
1576 1578 '(see "hg help resolve")'))
1577 1579 if ms.mdstate() != 's' or list(ms.driverresolved()):
1578 1580 raise error.Abort(_('driver-resolved merge conflicts'),
1579 1581 hint=_('run "hg resolve --all" to resolve'))
1580 1582
1581 1583 if editor:
1582 1584 cctx._text = editor(self, cctx, subs)
1583 1585 edited = (text != cctx._text)
1584 1586
1585 1587 # Save commit message in case this transaction gets rolled back
1586 1588 # (e.g. by a pretxncommit hook). Leave the content alone on
1587 1589 # the assumption that the user will use the same editor again.
1588 1590 msgfn = self.savecommitmessage(cctx._text)
1589 1591
1590 1592 # commit subs and write new state
1591 1593 if subs:
1592 1594 for s in sorted(commitsubs):
1593 1595 sub = wctx.sub(s)
1594 1596 self.ui.status(_('committing subrepository %s\n') %
1595 1597 subrepo.subrelpath(sub))
1596 1598 sr = sub.commit(cctx._text, user, date)
1597 1599 newstate[s] = (newstate[s][0], sr)
1598 1600 subrepo.writestate(self, newstate)
1599 1601
1600 1602 p1, p2 = self.dirstate.parents()
1601 lock = self.lock()
1602 1603 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1603 1604 try:
1604 1605 self.hook("precommit", throw=True, parent1=hookp1,
1605 1606 parent2=hookp2)
1606 1607 tr = self.transaction('commit')
1607 1608 ret = self.commitctx(cctx, True)
1608 1609 except: # re-raises
1609 1610 if edited:
1610 1611 self.ui.write(
1611 1612 _('note: commit message saved in %s\n') % msgfn)
1612 1613 raise
1613 1614 # update bookmarks, dirstate and mergestate
1614 1615 bookmarks.update(self, [p1, p2], ret)
1615 1616 cctx.markcommitted(ret)
1616 1617 ms.reset()
1617 1618 tr.close()
1618 1619
1619 1620 finally:
1620 1621 lockmod.release(tr, lock, wlock)
1621 1622
1622 1623 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1623 1624 # hack for commands that use a temporary commit (e.g. histedit):
1624 1625 # the temporary commit may already be stripped when the hook runs
1625 1626 if self.changelog.hasnode(ret):
1626 1627 self.hook("commit", node=node, parent1=parent1,
1627 1628 parent2=parent2)
1628 1629 self._afterlock(commithook)
1629 1630 return ret
1630 1631
1631 1632 @unfilteredmethod
1632 1633 def commitctx(self, ctx, error=False):
1633 1634 """Add a new revision to current repository.
1634 1635 Revision information is passed via the context argument.
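Returns the node of the new changeset.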
1635 1636 """
1636 1637
1637 1638 tr = None
1638 1639 p1, p2 = ctx.p1(), ctx.p2()
1639 1640 user = ctx.user()
1640 1641
1641 1642 lock = self.lock()
1642 1643 try:
1643 1644 tr = self.transaction("commit")
1644 1645 trp = weakref.proxy(tr)
1645 1646
1646 1647 if ctx.files():
1647 1648 m1 = p1.manifest()
1648 1649 m2 = p2.manifest()
1649 1650 m = m1.copy()
1650 1651
1651 1652 # check in files
1652 1653 added = []
1653 1654 changed = []
1654 1655 removed = list(ctx.removed())
1655 1656 linkrev = len(self)
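# len(self) is the revision number the new changeset will receive,
# so each filelog entry added below records it as its linkrev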
1656 1657 self.ui.note(_("committing files:\n"))
1657 1658 for f in sorted(ctx.modified() + ctx.added()):
1658 1659 self.ui.note(f + "\n")
1659 1660 try:
1660 1661 fctx = ctx[f]
1661 1662 if fctx is None:
1662 1663 removed.append(f)
1663 1664 else:
1664 1665 added.append(f)
1665 1666 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1666 1667 trp, changed)
1667 1668 m.setflag(f, fctx.flags())
1668 1669 except OSError as inst:
1669 1670 self.ui.warn(_("trouble committing %s!\n") % f)
1670 1671 raise
1671 1672 except IOError as inst:
1672 1673 errcode = getattr(inst, 'errno', errno.ENOENT)
1673 1674 if error or errcode and errcode != errno.ENOENT:
1674 1675 self.ui.warn(_("trouble committing %s!\n") % f)
1675 1676 raise
1676 1677
1677 1678 # update manifest
1678 1679 self.ui.note(_("committing manifest\n"))
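# narrow 'removed' to files that actually existed in a parent
# manifest, then drop any of those still present in the new one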
1679 1680 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1680 1681 drop = [f for f in removed if f in m]
1681 1682 for f in drop:
1682 1683 del m[f]
1683 1684 mn = self.manifest.add(m, trp, linkrev,
1684 1685 p1.manifestnode(), p2.manifestnode(),
1685 1686 added, drop)
1686 1687 files = changed + removed
1687 1688 else:
1688 1689 mn = p1.manifestnode()
1689 1690 files = []
1690 1691
1691 1692 # update changelog
1692 1693 self.ui.note(_("committing changelog\n"))
1693 1694 self.changelog.delayupdate(tr)
1694 1695 n = self.changelog.add(mn, files, ctx.description(),
1695 1696 trp, p1.node(), p2.node(),
1696 1697 user, ctx.date(), ctx.extra().copy())
1697 1698 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1698 1699 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1699 1700 parent2=xp2)
1700 1701 # set the new commit in its proper phase
1701 1702 targetphase = subrepo.newcommitphase(self.ui, ctx)
1702 1703 if targetphase:
1703 1704 # retracting the boundary does not alter the parent changesets.
1704 1705 # if a parent has a higher phase, the resulting phase will
1705 1706 # be compliant anyway
1706 1707 #
1707 1708 # if the minimal phase is 0 we don't need to retract anything
1708 1709 phases.retractboundary(self, tr, targetphase, [n])
1709 1710 tr.close()
1710 1711 branchmap.updatecache(self.filtered('served'))
1711 1712 return n
1712 1713 finally:
1713 1714 if tr:
1714 1715 tr.release()
1715 1716 lock.release()
1716 1717
1717 1718 @unfilteredmethod
1718 1719 def destroying(self):
1719 1720 '''Inform the repository that nodes are about to be destroyed.
1720 1721 Intended for use by strip and rollback, so there's a common
1721 1722 place for anything that has to be done before destroying history.
1722 1723
1723 1724 This is mostly useful for saving state that is in memory and waiting
1724 1725 to be flushed when the current lock is released. Because a call to
1725 1726 destroyed is imminent, the repo will be invalidated causing those
1726 1727 changes to stay in memory (waiting for the next unlock), or vanish
1727 1728 completely.
1728 1729 '''
1729 1730 # When using the same lock to commit and strip, the phasecache is left
1730 1731 # dirty after committing. Then when we strip, the repo is invalidated,
1731 1732 # causing those changes to disappear.
1732 1733 if '_phasecache' in vars(self):
1733 1734 self._phasecache.write()
1734 1735
1735 1736 @unfilteredmethod
1736 1737 def destroyed(self):
1737 1738 '''Inform the repository that nodes have been destroyed.
1738 1739 Intended for use by strip and rollback, so there's a common
1739 1740 place for anything that has to be done after destroying history.
1740 1741 '''
1741 1742 # When one tries to:
1742 1743 # 1) destroy nodes thus calling this method (e.g. strip)
1743 1744 # 2) use phasecache somewhere (e.g. commit)
1744 1745 #
1745 1746 # then 2) will fail because the phasecache contains nodes that were
1746 1747 # removed. We can either remove phasecache from the filecache,
1747 1748 # causing it to reload next time it is accessed, or simply filter
1748 1749 # the removed nodes now and write the updated cache.
1749 1750 self._phasecache.filterunknown(self)
1750 1751 self._phasecache.write()
1751 1752
1752 1753 # update the 'served' branch cache to help read-only server processes
1753 1754 # Thanks to branchcache collaboration this is done from the nearest
1754 1755 # filtered subset and it is expected to be fast.
1755 1756 branchmap.updatecache(self.filtered('served'))
1756 1757
1757 1758 # Ensure the persistent tag cache is updated. Doing it now
1758 1759 # means that the tag cache only has to worry about destroyed
1759 1760 # heads immediately after a strip/rollback. That in turn
1760 1761 # guarantees that "cachetip == currenttip" (comparing both rev
1761 1762 # and node) always means no nodes have been added or destroyed.
1762 1763
1763 1764 # XXX this is suboptimal when qrefresh'ing: we strip the current
1764 1765 # head, refresh the tag cache, then immediately add a new head.
1765 1766 # But I think doing it this way is necessary for the "instant
1766 1767 # tag cache retrieval" case to work.
1767 1768 self.invalidate()
1768 1769
1769 1770 def walk(self, match, node=None):
1770 1771 '''
1771 1772 walk recursively through the directory tree or a given
1772 1773 changeset, finding all files matched by the match
1773 1774 function
1774 1775 '''
1775 1776 return self[node].walk(match)
1776 1777
1777 1778 def status(self, node1='.', node2=None, match=None,
1778 1779 ignored=False, clean=False, unknown=False,
1779 1780 listsubrepos=False):
1780 1781 '''a convenience method that calls node1.status(node2)'''
1781 1782 return self[node1].status(node2, match, ignored, clean, unknown,
1782 1783 listsubrepos)
1783 1784
1784 1785 def heads(self, start=None):
1785 1786 heads = self.changelog.heads(start)
1786 1787 # sort the output in rev descending order
1787 1788 return sorted(heads, key=self.changelog.rev, reverse=True)
1788 1789
1789 1790 def branchheads(self, branch=None, start=None, closed=False):
1790 1791 '''return a (possibly filtered) list of heads for the given branch
1791 1792
1792 1793 Heads are returned in topological order, from newest to oldest.
1793 1794 If branch is None, use the dirstate branch.
1794 1795 If start is not None, return only heads reachable from start.
1795 1796 If closed is True, return heads that are marked as closed as well.
1796 1797 '''
1797 1798 if branch is None:
1798 1799 branch = self[None].branch()
1799 1800 branches = self.branchmap()
1800 1801 if branch not in branches:
1801 1802 return []
1802 1803 # the cache returns heads ordered lowest to highest
1803 1804 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1804 1805 if start is not None:
1805 1806 # filter out the heads that cannot be reached from startrev
1806 1807 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1807 1808 bheads = [h for h in bheads if h in fbheads]
1808 1809 return bheads
1809 1810
1810 1811 def branches(self, nodes):
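# for each node, follow first parents until a merge or a root is
# reached, yielding (start, end, end's first parent, end's second
# parent) tuples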
1811 1812 if not nodes:
1812 1813 nodes = [self.changelog.tip()]
1813 1814 b = []
1814 1815 for n in nodes:
1815 1816 t = n
1816 1817 while True:
1817 1818 p = self.changelog.parents(n)
1818 1819 if p[1] != nullid or p[0] == nullid:
1819 1820 b.append((t, n, p[0], p[1]))
1820 1821 break
1821 1822 n = p[0]
1822 1823 return b
1823 1824
1824 1825 def between(self, pairs):
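# for each (top, bottom) pair, walk first parents from top towards
# bottom, sampling nodes at exponentially growing distances
# (1, 2, 4, ...); historically this backed the old wire protocol
# discovery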
1825 1826 r = []
1826 1827
1827 1828 for top, bottom in pairs:
1828 1829 n, l, i = top, [], 0
1829 1830 f = 1
1830 1831
1831 1832 while n != bottom and n != nullid:
1832 1833 p = self.changelog.parents(n)[0]
1833 1834 if i == f:
1834 1835 l.append(n)
1835 1836 f = f * 2
1836 1837 n = p
1837 1838 i += 1
1838 1839
1839 1840 r.append(l)
1840 1841
1841 1842 return r
1842 1843
1843 1844 def checkpush(self, pushop):
1844 1845 """Extensions can override this function if additional checks have
1845 1846 to be performed before pushing, or call it if they override the push
1846 1847 command.
1847 1848 """
1848 1849 pass
1849 1850
1850 1851 @unfilteredpropertycache
1851 1852 def prepushoutgoinghooks(self):
1852 1853 """Return util.hooks consists of "(repo, remote, outgoing)"
1853 1854 functions, which are called before pushing changesets.
1854 1855 """
1855 1856 return util.hooks()
1856 1857
1857 1858 def pushkey(self, namespace, key, old, new):
1858 1859 try:
1859 1860 tr = self.currenttransaction()
1860 1861 hookargs = {}
1861 1862 if tr is not None:
1862 1863 hookargs.update(tr.hookargs)
1863 1864 hookargs['namespace'] = namespace
1864 1865 hookargs['key'] = key
1865 1866 hookargs['old'] = old
1866 1867 hookargs['new'] = new
1867 1868 self.hook('prepushkey', throw=True, **hookargs)
1868 1869 except error.HookAbort as exc:
1869 1870 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1870 1871 if exc.hint:
1871 1872 self.ui.write_err(_("(%s)\n") % exc.hint)
1872 1873 return False
1873 1874 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1874 1875 ret = pushkey.push(self, namespace, key, old, new)
1875 1876 def runhook():
1876 1877 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1877 1878 ret=ret)
1878 1879 self._afterlock(runhook)
1879 1880 return ret
1880 1881
1881 1882 def listkeys(self, namespace):
1882 1883 self.hook('prelistkeys', throw=True, namespace=namespace)
1883 1884 self.ui.debug('listing keys for "%s"\n' % namespace)
1884 1885 values = pushkey.list(self, namespace)
1885 1886 self.hook('listkeys', namespace=namespace, values=values)
1886 1887 return values
1887 1888
1888 1889 def debugwireargs(self, one, two, three=None, four=None, five=None):
1889 1890 '''used to test argument passing over the wire'''
1890 1891 return "%s %s %s %s %s" % (one, two, three, four, five)
1891 1892
1892 1893 def savecommitmessage(self, text):
1893 1894 fp = self.vfs('last-message.txt', 'wb')
1894 1895 try:
1895 1896 fp.write(text)
1896 1897 finally:
1897 1898 fp.close()
1898 1899 return self.pathto(fp.name[len(self.root) + 1:])
1899 1900
1900 1901 # used to avoid circular references so destructors work
1901 1902 def aftertrans(files):
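# the returned closure captures only plain (vfs, src, dest) tuples,
# not the repo object, so transaction destructors cannot keep the
# repo alive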
1902 1903 renamefiles = [tuple(t) for t in files]
1903 1904 def a():
1904 1905 for vfs, src, dest in renamefiles:
1905 1906 try:
1906 1907 vfs.rename(src, dest)
1907 1908 except OSError: # journal file does not yet exist
1908 1909 pass
1909 1910 return a
1910 1911
1911 1912 def undoname(fn):
1912 1913 base, name = os.path.split(fn)
1913 1914 assert name.startswith('journal')
1914 1915 return os.path.join(base, name.replace('journal', 'undo', 1))
1915 1916
1916 1917 def instance(ui, path, create):
1917 1918 return localrepository(ui, util.urllocalpath(path), create)
1918 1919
1919 1920 def islocal(path):
1920 1921 return True