localrepo: remove a couple of local type aliases...
Augie Fackler
r29104:b207653a default
@@ -1,1977 +1,1975 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import inspect
12 12 import os
13 13 import random
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 hex,
20 20 nullid,
21 21 short,
22 22 wdirrev,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 cmdutil,
31 31 context,
32 32 dirstate,
33 33 encoding,
34 34 error,
35 35 exchange,
36 36 extensions,
37 37 filelog,
38 38 hook,
39 39 lock as lockmod,
40 40 manifest,
41 41 match as matchmod,
42 42 merge as mergemod,
43 43 namespaces,
44 44 obsolete,
45 45 pathutil,
46 46 peer,
47 47 phases,
48 48 pushkey,
49 49 repoview,
50 50 revset,
51 51 scmutil,
52 52 store,
53 53 subrepo,
54 54 tags as tagsmod,
55 55 transaction,
56 56 util,
57 57 )
58 58
59 59 release = lockmod.release
60 propertycache = util.propertycache
61 60 urlerr = util.urlerr
62 61 urlreq = util.urlreq
63 filecache = scmutil.filecache
64 62
65 class repofilecache(filecache):
63 class repofilecache(scmutil.filecache):
66 64 """All filecache usage on repo is done for logic that should be unfiltered
67 65 """
68 66
69 67 def __get__(self, repo, type=None):
70 68 return super(repofilecache, self).__get__(repo.unfiltered(), type)
71 69 def __set__(self, repo, value):
72 70 return super(repofilecache, self).__set__(repo.unfiltered(), value)
73 71 def __delete__(self, repo):
74 72 return super(repofilecache, self).__delete__(repo.unfiltered())
75 73
76 74 class storecache(repofilecache):
77 75 """filecache for files in the store"""
78 76 def join(self, obj, fname):
79 77 return obj.sjoin(fname)
80 78
81 class unfilteredpropertycache(propertycache):
79 class unfilteredpropertycache(util.propertycache):
82 80 """propertycache that applies to the unfiltered repo only"""
83 81
84 82 def __get__(self, repo, type=None):
85 83 unfi = repo.unfiltered()
86 84 if unfi is repo:
87 85 return super(unfilteredpropertycache, self).__get__(unfi)
88 86 return getattr(unfi, self.name)
89 87
90 class filteredpropertycache(propertycache):
88 class filteredpropertycache(util.propertycache):
91 89 """propertycache that must take filtering in account"""
92 90
93 91 def cachevalue(self, obj, value):
94 92 object.__setattr__(obj, self.name, value)
95 93
96 94
97 95 def hasunfilteredcache(repo, name):
98 96 """check if a repo has an unfilteredpropertycache value for <name>"""
99 97 return name in vars(repo.unfiltered())
100 98
101 99 def unfilteredmethod(orig):
102 100 """decorate a method that always needs to be run on the unfiltered version"""
103 101 def wrapper(repo, *args, **kwargs):
104 102 return orig(repo.unfiltered(), *args, **kwargs)
105 103 return wrapper
106 104
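# --- Illustrative sketch, not part of this changeset ---
# The decorators above split along two axes: where the backing file lives
# (repofilecache vs. storecache) and whether the cached value may observe
# repoview filtering (filteredpropertycache) or must not
# (unfilteredpropertycache). A minimal stdlib-only analogue of the
# underlying util.propertycache idea (all names here are invented):

class propertycachesketch(object):
    """Compute an attribute once per instance, then cache it."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        if obj is None:
            return self
        value = self.func(obj)           # computed on first access only
        obj.__dict__[self.name] = value  # instance dict now shadows us
        return value

class cachedemo(object):
    @propertycachesketch
    def expensive(self):
        print('computed')
        return 42

d = cachedemo()
d.expensive  # prints 'computed' once
d.expensive  # served from the instance dict
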
107 105 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
108 106 'unbundle'))
109 107 legacycaps = moderncaps.union(set(['changegroupsubset']))
110 108
111 109 class localpeer(peer.peerrepository):
112 110 '''peer for a local repo; reflects only the most recent API'''
113 111
114 112 def __init__(self, repo, caps=moderncaps):
115 113 peer.peerrepository.__init__(self)
116 114 self._repo = repo.filtered('served')
117 115 self.ui = repo.ui
118 116 self._caps = repo._restrictcapabilities(caps)
119 117 self.requirements = repo.requirements
120 118 self.supportedformats = repo.supportedformats
121 119
122 120 def close(self):
123 121 self._repo.close()
124 122
125 123 def _capabilities(self):
126 124 return self._caps
127 125
128 126 def local(self):
129 127 return self._repo
130 128
131 129 def canpush(self):
132 130 return True
133 131
134 132 def url(self):
135 133 return self._repo.url()
136 134
137 135 def lookup(self, key):
138 136 return self._repo.lookup(key)
139 137
140 138 def branchmap(self):
141 139 return self._repo.branchmap()
142 140
143 141 def heads(self):
144 142 return self._repo.heads()
145 143
146 144 def known(self, nodes):
147 145 return self._repo.known(nodes)
148 146
149 147 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
150 148 **kwargs):
151 149 cg = exchange.getbundle(self._repo, source, heads=heads,
152 150 common=common, bundlecaps=bundlecaps, **kwargs)
153 151 if bundlecaps is not None and 'HG20' in bundlecaps:
154 152 # When requesting a bundle2, getbundle returns a stream to make the
155 153 # wire level function happier. We need to build a proper object
156 154 # from it in local peer.
157 155 cg = bundle2.getunbundler(self.ui, cg)
158 156 return cg
159 157
160 158 # TODO We might want to move the next two calls into legacypeer and add
161 159 # unbundle instead.
162 160
163 161 def unbundle(self, cg, heads, url):
164 162 """apply a bundle on a repo
165 163
166 164 This function handles the repo locking itself."""
167 165 try:
168 166 try:
169 167 cg = exchange.readbundle(self.ui, cg, None)
170 168 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
171 169 if util.safehasattr(ret, 'getchunks'):
172 170 # This is a bundle20 object, turn it into an unbundler.
173 171 # This little dance should be dropped eventually when the
174 172 # API is finally improved.
175 173 stream = util.chunkbuffer(ret.getchunks())
176 174 ret = bundle2.getunbundler(self.ui, stream)
177 175 return ret
178 176 except Exception as exc:
179 177 # If the exception contains output salvaged from a bundle2
180 178 # reply, we need to make sure it is printed before continuing
181 179 # to fail. So we build a bundle2 with such output and consume
182 180 # it directly.
183 181 #
184 182 # This is not very elegant but allows a "simple" solution for
185 183 # issue4594
186 184 output = getattr(exc, '_bundle2salvagedoutput', ())
187 185 if output:
188 186 bundler = bundle2.bundle20(self._repo.ui)
189 187 for out in output:
190 188 bundler.addpart(out)
191 189 stream = util.chunkbuffer(bundler.getchunks())
192 190 b = bundle2.getunbundler(self.ui, stream)
193 191 bundle2.processbundle(self._repo, b)
194 192 raise
195 193 except error.PushRaced as exc:
196 194 raise error.ResponseError(_('push failed:'), str(exc))
197 195
198 196 def lock(self):
199 197 return self._repo.lock()
200 198
201 199 def addchangegroup(self, cg, source, url):
202 200 return cg.apply(self._repo, source, url)
203 201
204 202 def pushkey(self, namespace, key, old, new):
205 203 return self._repo.pushkey(namespace, key, old, new)
206 204
207 205 def listkeys(self, namespace):
208 206 return self._repo.listkeys(namespace)
209 207
210 208 def debugwireargs(self, one, two, three=None, four=None, five=None):
211 209 '''used to test argument passing over the wire'''
212 210 return "%s %s %s %s %s" % (one, two, three, four, five)
213 211
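# --- Illustrative sketch, not part of this changeset ---
# Obtaining and querying a localpeer. hg.repository() as an entry point is
# an assumption about the surrounding API of this era, not something this
# file defines; lookup()/known() are the peer methods above.

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '.')  # assumes cwd is a repository
peer = repo.peer()                     # fresh localpeer, never cached
tip = peer.lookup('tip')
print(peer.known([tip]))               # -> [True]
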
214 212 class locallegacypeer(localpeer):
215 213 '''peer extension which implements legacy methods too; used for tests with
216 214 restricted capabilities'''
217 215
218 216 def __init__(self, repo):
219 217 localpeer.__init__(self, repo, caps=legacycaps)
220 218
221 219 def branches(self, nodes):
222 220 return self._repo.branches(nodes)
223 221
224 222 def between(self, pairs):
225 223 return self._repo.between(pairs)
226 224
227 225 def changegroup(self, basenodes, source):
228 226 return changegroup.changegroup(self._repo, basenodes, source)
229 227
230 228 def changegroupsubset(self, bases, heads, source):
231 229 return changegroup.changegroupsubset(self._repo, bases, heads, source)
232 230
233 231 class localrepository(object):
234 232
235 233 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
236 234 'manifestv2'))
237 235 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
238 236 'dotencode'))
239 237 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
240 238 filtername = None
241 239
242 240 # a list of (ui, featureset) functions.
243 241 # only functions defined in module of enabled extensions are invoked
244 242 featuresetupfuncs = set()
245 243
246 244 def __init__(self, baseui, path=None, create=False):
247 245 self.requirements = set()
248 246 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
249 247 self.wopener = self.wvfs
250 248 self.root = self.wvfs.base
251 249 self.path = self.wvfs.join(".hg")
252 250 self.origroot = path
253 251 self.auditor = pathutil.pathauditor(self.root, self._checknested)
254 252 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
255 253 realfs=False)
256 254 self.vfs = scmutil.vfs(self.path)
257 255 self.opener = self.vfs
258 256 self.baseui = baseui
259 257 self.ui = baseui.copy()
260 258 self.ui.copy = baseui.copy # prevent copying repo configuration
261 259 # A list of callbacks to shape the phase if no data were found.
262 260 # Callbacks are in the form: func(repo, roots) --> processed root.
263 261 # This list is to be filled by extensions during repo setup.
264 262 self._phasedefaults = []
265 263 try:
266 264 self.ui.readconfig(self.join("hgrc"), self.root)
267 265 extensions.loadall(self.ui)
268 266 except IOError:
269 267 pass
270 268
271 269 if self.featuresetupfuncs:
272 270 self.supported = set(self._basesupported) # use private copy
273 271 extmods = set(m.__name__ for n, m
274 272 in extensions.extensions(self.ui))
275 273 for setupfunc in self.featuresetupfuncs:
276 274 if setupfunc.__module__ in extmods:
277 275 setupfunc(self.ui, self.supported)
278 276 else:
279 277 self.supported = self._basesupported
280 278
281 279 if not self.vfs.isdir():
282 280 if create:
283 281 self.requirements = newreporequirements(self)
284 282
285 283 if not self.wvfs.exists():
286 284 self.wvfs.makedirs()
287 285 self.vfs.makedir(notindexed=True)
288 286
289 287 if 'store' in self.requirements:
290 288 self.vfs.mkdir("store")
291 289
292 290 # create an invalid changelog
293 291 self.vfs.append(
294 292 "00changelog.i",
295 293 '\0\0\0\2' # represents revlogv2
296 294 ' dummy changelog to prevent using the old repo layout'
297 295 )
298 296 else:
299 297 raise error.RepoError(_("repository %s not found") % path)
300 298 elif create:
301 299 raise error.RepoError(_("repository %s already exists") % path)
302 300 else:
303 301 try:
304 302 self.requirements = scmutil.readrequires(
305 303 self.vfs, self.supported)
306 304 except IOError as inst:
307 305 if inst.errno != errno.ENOENT:
308 306 raise
309 307
310 308 self.sharedpath = self.path
311 309 try:
312 310 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
313 311 realpath=True)
314 312 s = vfs.base
315 313 if not vfs.exists():
316 314 raise error.RepoError(
317 315 _('.hg/sharedpath points to nonexistent directory %s') % s)
318 316 self.sharedpath = s
319 317 except IOError as inst:
320 318 if inst.errno != errno.ENOENT:
321 319 raise
322 320
323 321 self.store = store.store(
324 322 self.requirements, self.sharedpath, scmutil.vfs)
325 323 self.spath = self.store.path
326 324 self.svfs = self.store.vfs
327 325 self.sjoin = self.store.join
328 326 self.vfs.createmode = self.store.createmode
329 327 self._applyopenerreqs()
330 328 if create:
331 329 self._writerequirements()
332 330
333 331 self._dirstatevalidatewarned = False
334 332
335 333 self._branchcaches = {}
336 334 self._revbranchcache = None
337 335 self.filterpats = {}
338 336 self._datafilters = {}
339 337 self._transref = self._lockref = self._wlockref = None
340 338
341 339 # A cache for various files under .hg/ that tracks file changes,
342 340 # (used by the filecache decorator)
343 341 #
344 342 # Maps a property name to its util.filecacheentry
345 343 self._filecache = {}
346 344
347 345 # hold sets of revision to be filtered
348 346 # should be cleared when something might have changed the filter value:
349 347 # - new changesets,
350 348 # - phase change,
351 349 # - new obsolescence marker,
352 350 # - working directory parent change,
353 351 # - bookmark changes
354 352 self.filteredrevcache = {}
355 353
356 354 # generic mapping between names and nodes
357 355 self.names = namespaces.namespaces()
358 356
359 357 def close(self):
360 358 self._writecaches()
361 359
362 360 def _writecaches(self):
363 361 if self._revbranchcache:
364 362 self._revbranchcache.write()
365 363
366 364 def _restrictcapabilities(self, caps):
367 365 if self.ui.configbool('experimental', 'bundle2-advertise', True):
368 366 caps = set(caps)
369 367 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
370 368 caps.add('bundle2=' + urlreq.quote(capsblob))
371 369 return caps
372 370
373 371 def _applyopenerreqs(self):
374 372 self.svfs.options = dict((r, 1) for r in self.requirements
375 373 if r in self.openerreqs)
376 374 # experimental config: format.chunkcachesize
377 375 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
378 376 if chunkcachesize is not None:
379 377 self.svfs.options['chunkcachesize'] = chunkcachesize
380 378 # experimental config: format.maxchainlen
381 379 maxchainlen = self.ui.configint('format', 'maxchainlen')
382 380 if maxchainlen is not None:
383 381 self.svfs.options['maxchainlen'] = maxchainlen
384 382 # experimental config: format.manifestcachesize
385 383 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
386 384 if manifestcachesize is not None:
387 385 self.svfs.options['manifestcachesize'] = manifestcachesize
388 386 # experimental config: format.aggressivemergedeltas
389 387 aggressivemergedeltas = self.ui.configbool('format',
390 388 'aggressivemergedeltas', False)
391 389 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
392 390 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
393 391
394 392 def _writerequirements(self):
395 393 scmutil.writerequires(self.vfs, self.requirements)
396 394
397 395 def _checknested(self, path):
398 396 """Determine if path is a legal nested repository."""
399 397 if not path.startswith(self.root):
400 398 return False
401 399 subpath = path[len(self.root) + 1:]
402 400 normsubpath = util.pconvert(subpath)
403 401
404 402 # XXX: Checking against the current working copy is wrong in
405 403 # the sense that it can reject things like
406 404 #
407 405 # $ hg cat -r 10 sub/x.txt
408 406 #
409 407 # if sub/ is no longer a subrepository in the working copy
410 408 # parent revision.
411 409 #
412 410 # However, it can of course also allow things that would have
413 411 # been rejected before, such as the above cat command if sub/
414 412 # is a subrepository now, but was a normal directory before.
415 413 # The old path auditor would have rejected by mistake since it
416 414 # panics when it sees sub/.hg/.
417 415 #
418 416 # All in all, checking against the working copy seems sensible
419 417 # since we want to prevent access to nested repositories on
420 418 # the filesystem *now*.
421 419 ctx = self[None]
422 420 parts = util.splitpath(subpath)
423 421 while parts:
424 422 prefix = '/'.join(parts)
425 423 if prefix in ctx.substate:
426 424 if prefix == normsubpath:
427 425 return True
428 426 else:
429 427 sub = ctx.sub(prefix)
430 428 return sub.checknested(subpath[len(prefix) + 1:])
431 429 else:
432 430 parts.pop()
433 431 return False
434 432
435 433 def peer(self):
436 434 return localpeer(self) # not cached to avoid reference cycle
437 435
438 436 def unfiltered(self):
439 437 """Return unfiltered version of the repository
440 438
441 439 Intended to be overwritten by filtered repo."""
442 440 return self
443 441
444 442 def filtered(self, name):
445 443 """Return a filtered version of a repository"""
446 444 # build a new class with the mixin and the current class
447 445 # (possibly subclass of the repo)
448 446 class proxycls(repoview.repoview, self.unfiltered().__class__):
449 447 pass
450 448 return proxycls(self, name)
451 449
452 450 @repofilecache('bookmarks', 'bookmarks.current')
453 451 def _bookmarks(self):
454 452 return bookmarks.bmstore(self)
455 453
456 454 @property
457 455 def _activebookmark(self):
458 456 return self._bookmarks.active
459 457
460 458 def bookmarkheads(self, bookmark):
461 459 name = bookmark.split('@', 1)[0]
462 460 heads = []
463 461 for mark, n in self._bookmarks.iteritems():
464 462 if mark.split('@', 1)[0] == name:
465 463 heads.append(n)
466 464 return heads
467 465
468 466 # _phaserevs and _phasesets depend on changelog. what we need is to
469 467 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
470 468 # can't be easily expressed in filecache mechanism.
471 469 @storecache('phaseroots', '00changelog.i')
472 470 def _phasecache(self):
473 471 return phases.phasecache(self, self._phasedefaults)
474 472
475 473 @storecache('obsstore')
476 474 def obsstore(self):
477 475 # read default format for new obsstore.
478 476 # developer config: format.obsstore-version
479 477 defaultformat = self.ui.configint('format', 'obsstore-version', None)
480 478 # rely on obsstore class default when possible.
481 479 kwargs = {}
482 480 if defaultformat is not None:
483 481 kwargs['defaultformat'] = defaultformat
484 482 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
485 483 store = obsolete.obsstore(self.svfs, readonly=readonly,
486 484 **kwargs)
487 485 if store and readonly:
488 486 self.ui.warn(
489 487 _('obsolete feature not enabled but %i markers found!\n')
490 488 % len(list(store)))
491 489 return store
492 490
493 491 @storecache('00changelog.i')
494 492 def changelog(self):
495 493 c = changelog.changelog(self.svfs)
496 494 if 'HG_PENDING' in os.environ:
497 495 p = os.environ['HG_PENDING']
498 496 if p.startswith(self.root):
499 497 c.readpending('00changelog.i.a')
500 498 return c
501 499
502 500 @storecache('00manifest.i')
503 501 def manifest(self):
504 502 return manifest.manifest(self.svfs)
505 503
506 504 def dirlog(self, dir):
507 505 return self.manifest.dirlog(dir)
508 506
509 507 @repofilecache('dirstate')
510 508 def dirstate(self):
511 509 return dirstate.dirstate(self.vfs, self.ui, self.root,
512 510 self._dirstatevalidate)
513 511
514 512 def _dirstatevalidate(self, node):
515 513 try:
516 514 self.changelog.rev(node)
517 515 return node
518 516 except error.LookupError:
519 517 if not self._dirstatevalidatewarned:
520 518 self._dirstatevalidatewarned = True
521 519 self.ui.warn(_("warning: ignoring unknown"
522 520 " working parent %s!\n") % short(node))
523 521 return nullid
524 522
525 523 def __getitem__(self, changeid):
526 524 if changeid is None or changeid == wdirrev:
527 525 return context.workingctx(self)
528 526 if isinstance(changeid, slice):
529 527 return [context.changectx(self, i)
530 528 for i in xrange(*changeid.indices(len(self)))
531 529 if i not in self.changelog.filteredrevs]
532 530 return context.changectx(self, changeid)
533 531
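# --- Illustrative sketch, not part of this changeset ---
# The indexing protocol implemented above, assuming 'repo' is an open,
# non-empty localrepository:

wctx = repo[None]    # workingctx for the working directory
ctx = repo['tip']    # changectx via a symbolic name
first = repo[0]      # changectx via a revision number
window = repo[0:3]   # slicing yields changectxs, skipping filtered revs
print(len(repo))     # changeset count, via __len__
print(0 in repo)     # __contains__ swallows RepoLookupError
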
534 532 def __contains__(self, changeid):
535 533 try:
536 534 self[changeid]
537 535 return True
538 536 except error.RepoLookupError:
539 537 return False
540 538
541 539 def __nonzero__(self):
542 540 return True
543 541
544 542 def __len__(self):
545 543 return len(self.changelog)
546 544
547 545 def __iter__(self):
548 546 return iter(self.changelog)
549 547
550 548 def revs(self, expr, *args):
551 549 '''Find revisions matching a revset.
552 550
553 551 The revset is specified as a string ``expr`` that may contain
554 552 %-formatting to escape certain types. See ``revset.formatspec``.
555 553
556 554 Return a revset.abstractsmartset, which is a list-like interface
557 555 that contains integer revisions.
558 556 '''
559 557 expr = revset.formatspec(expr, *args)
560 558 m = revset.match(None, expr)
561 559 return m(self)
562 560
563 561 def set(self, expr, *args):
564 562 '''Find revisions matching a revset and emit changectx instances.
565 563
566 564 This is a convenience wrapper around ``revs()`` that iterates the
567 565 result and is a generator of changectx instances.
568 566 '''
569 567 for r in self.revs(expr, *args):
570 568 yield self[r]
571 569
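# --- Illustrative sketch, not part of this changeset ---
# revs() yields integer revisions, set() yields changectx instances; the
# %-escapes are expanded by revset.formatspec as the docstrings say.
# Assumes 'repo' is an open, non-empty localrepository.

for rev in repo.revs('%d::%d', 0, 2):  # integers from the smartset
    print(rev)
for ctx in repo.set('heads(all())'):   # changectx objects
    print(ctx.hex())
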
572 570 def url(self):
573 571 return 'file:' + self.root
574 572
575 573 def hook(self, name, throw=False, **args):
576 574 """Call a hook, passing this repo instance.
577 575
578 576 This is a convenience method to aid invoking hooks. Extensions likely
579 577 won't call this unless they have registered a custom hook or are
580 578 replacing code that is expected to call a hook.
581 579 """
582 580 return hook.hook(self.ui, self, name, throw, **args)
583 581
584 582 @unfilteredmethod
585 583 def _tag(self, names, node, message, local, user, date, extra=None,
586 584 editor=False):
587 585 if isinstance(names, str):
588 586 names = (names,)
589 587
590 588 branches = self.branchmap()
591 589 for name in names:
592 590 self.hook('pretag', throw=True, node=hex(node), tag=name,
593 591 local=local)
594 592 if name in branches:
595 593 self.ui.warn(_("warning: tag %s conflicts with existing"
596 594 " branch name\n") % name)
597 595
598 596 def writetags(fp, names, munge, prevtags):
599 597 fp.seek(0, 2)
600 598 if prevtags and prevtags[-1] != '\n':
601 599 fp.write('\n')
602 600 for name in names:
603 601 if munge:
604 602 m = munge(name)
605 603 else:
606 604 m = name
607 605
608 606 if (self._tagscache.tagtypes and
609 607 name in self._tagscache.tagtypes):
610 608 old = self.tags().get(name, nullid)
611 609 fp.write('%s %s\n' % (hex(old), m))
612 610 fp.write('%s %s\n' % (hex(node), m))
613 611 fp.close()
614 612
615 613 prevtags = ''
616 614 if local:
617 615 try:
618 616 fp = self.vfs('localtags', 'r+')
619 617 except IOError:
620 618 fp = self.vfs('localtags', 'a')
621 619 else:
622 620 prevtags = fp.read()
623 621
624 622 # local tags are stored in the current charset
625 623 writetags(fp, names, None, prevtags)
626 624 for name in names:
627 625 self.hook('tag', node=hex(node), tag=name, local=local)
628 626 return
629 627
630 628 try:
631 629 fp = self.wfile('.hgtags', 'rb+')
632 630 except IOError as e:
633 631 if e.errno != errno.ENOENT:
634 632 raise
635 633 fp = self.wfile('.hgtags', 'ab')
636 634 else:
637 635 prevtags = fp.read()
638 636
639 637 # committed tags are stored in UTF-8
640 638 writetags(fp, names, encoding.fromlocal, prevtags)
641 639
642 640 fp.close()
643 641
644 642 self.invalidatecaches()
645 643
646 644 if '.hgtags' not in self.dirstate:
647 645 self[None].add(['.hgtags'])
648 646
649 647 m = matchmod.exact(self.root, '', ['.hgtags'])
650 648 tagnode = self.commit(message, user, date, extra=extra, match=m,
651 649 editor=editor)
652 650
653 651 for name in names:
654 652 self.hook('tag', node=hex(node), tag=name, local=local)
655 653
656 654 return tagnode
657 655
658 656 def tag(self, names, node, message, local, user, date, editor=False):
659 657 '''tag a revision with one or more symbolic names.
660 658
661 659 names is a list of strings or, when adding a single tag, names may be a
662 660 string.
663 661
664 662 if local is True, the tags are stored in a per-repository file.
665 663 otherwise, they are stored in the .hgtags file, and a new
666 664 changeset is committed with the change.
667 665
668 666 keyword arguments:
669 667
670 668 local: whether to store tags in non-version-controlled file
671 669 (default False)
672 670
673 671 message: commit message to use if committing
674 672
675 673 user: name of user to use if committing
676 674
677 675 date: date tuple to use if committing'''
678 676
679 677 if not local:
680 678 m = matchmod.exact(self.root, '', ['.hgtags'])
681 679 if any(self.status(match=m, unknown=True, ignored=True)):
682 680 raise error.Abort(_('working copy of .hgtags is changed'),
683 681 hint=_('please commit .hgtags manually'))
684 682
685 683 self.tags() # instantiate the cache
686 684 self._tag(names, node, message, local, user, date, editor=editor)
687 685
688 686 @filteredpropertycache
689 687 def _tagscache(self):
690 688 '''Returns a tagscache object that contains various tag-related
691 689 caches.'''
692 690
693 691 # This simplifies its cache management by having one decorated
694 692 # function (this one) and the rest simply fetch things from it.
695 693 class tagscache(object):
696 694 def __init__(self):
697 695 # These two define the set of tags for this repository. tags
698 696 # maps tag name to node; tagtypes maps tag name to 'global' or
699 697 # 'local'. (Global tags are defined by .hgtags across all
700 698 # heads, and local tags are defined in .hg/localtags.)
701 699 # They constitute the in-memory cache of tags.
702 700 self.tags = self.tagtypes = None
703 701
704 702 self.nodetagscache = self.tagslist = None
705 703
706 704 cache = tagscache()
707 705 cache.tags, cache.tagtypes = self._findtags()
708 706
709 707 return cache
710 708
711 709 def tags(self):
712 710 '''return a mapping of tag to node'''
713 711 t = {}
714 712 if self.changelog.filteredrevs:
715 713 tags, tt = self._findtags()
716 714 else:
717 715 tags = self._tagscache.tags
718 716 for k, v in tags.iteritems():
719 717 try:
720 718 # ignore tags to unknown nodes
721 719 self.changelog.rev(v)
722 720 t[k] = v
723 721 except (error.LookupError, ValueError):
724 722 pass
725 723 return t
726 724
727 725 def _findtags(self):
728 726 '''Do the hard work of finding tags. Return a pair of dicts
729 727 (tags, tagtypes) where tags maps tag name to node, and tagtypes
730 728 maps tag name to a string like \'global\' or \'local\'.
731 729 Subclasses or extensions are free to add their own tags, but
732 730 should be aware that the returned dicts will be retained for the
733 731 duration of the localrepo object.'''
734 732
735 733 # XXX what tagtype should subclasses/extensions use? Currently
736 734 # mq and bookmarks add tags, but do not set the tagtype at all.
737 735 # Should each extension invent its own tag type? Should there
738 736 # be one tagtype for all such "virtual" tags? Or is the status
739 737 # quo fine?
740 738
741 739 alltags = {} # map tag name to (node, hist)
742 740 tagtypes = {}
743 741
744 742 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
745 743 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
746 744
747 745 # Build the return dicts. Have to re-encode tag names because
748 746 # the tags module always uses UTF-8 (in order not to lose info
749 747 # writing to the cache), but the rest of Mercurial wants them in
750 748 # local encoding.
751 749 tags = {}
752 750 for (name, (node, hist)) in alltags.iteritems():
753 751 if node != nullid:
754 752 tags[encoding.tolocal(name)] = node
755 753 tags['tip'] = self.changelog.tip()
756 754 tagtypes = dict([(encoding.tolocal(name), value)
757 755 for (name, value) in tagtypes.iteritems()])
758 756 return (tags, tagtypes)
759 757
760 758 def tagtype(self, tagname):
761 759 '''
762 760 return the type of the given tag. result can be:
763 761
764 762 'local' : a local tag
765 763 'global' : a global tag
766 764 None : tag does not exist
767 765 '''
768 766
769 767 return self._tagscache.tagtypes.get(tagname)
770 768
771 769 def tagslist(self):
772 770 '''return a list of tags ordered by revision'''
773 771 if not self._tagscache.tagslist:
774 772 l = []
775 773 for t, n in self.tags().iteritems():
776 774 l.append((self.changelog.rev(n), t, n))
777 775 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
778 776
779 777 return self._tagscache.tagslist
780 778
781 779 def nodetags(self, node):
782 780 '''return the tags associated with a node'''
783 781 if not self._tagscache.nodetagscache:
784 782 nodetagscache = {}
785 783 for t, n in self._tagscache.tags.iteritems():
786 784 nodetagscache.setdefault(n, []).append(t)
787 785 for tags in nodetagscache.itervalues():
788 786 tags.sort()
789 787 self._tagscache.nodetagscache = nodetagscache
790 788 return self._tagscache.nodetagscache.get(node, [])
791 789
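# --- Illustrative sketch, not part of this changeset ---
# The tag accessors above, assuming 'repo' is an open localrepository;
# the tag name '1.0' is hypothetical, for illustration only.

alltags = repo.tags()        # {tagname: node}, always includes 'tip'
print(repo.tagtype('1.0'))   # 'global', 'local', or None if unknown
for name, node in repo.tagslist():      # ordered by revision
    print(name, repo.nodetags(node))    # all tags pointing at that node
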
792 790 def nodebookmarks(self, node):
793 791 """return the list of bookmarks pointing to the specified node"""
794 792 marks = []
795 793 for bookmark, n in self._bookmarks.iteritems():
796 794 if n == node:
797 795 marks.append(bookmark)
798 796 return sorted(marks)
799 797
800 798 def branchmap(self):
801 799 '''returns a dictionary {branch: [branchheads]} with branchheads
802 800 ordered by increasing revision number'''
803 801 branchmap.updatecache(self)
804 802 return self._branchcaches[self.filtername]
805 803
806 804 @unfilteredmethod
807 805 def revbranchcache(self):
808 806 if not self._revbranchcache:
809 807 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
810 808 return self._revbranchcache
811 809
812 810 def branchtip(self, branch, ignoremissing=False):
813 811 '''return the tip node for a given branch
814 812
815 813 If ignoremissing is True, then this method will not raise an error.
816 814 This is helpful for callers that only expect None for a missing branch
817 815 (e.g. namespace).
818 816
819 817 '''
820 818 try:
821 819 return self.branchmap().branchtip(branch)
822 820 except KeyError:
823 821 if not ignoremissing:
824 822 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
825 823 else:
826 824 pass
827 825
828 826 def lookup(self, key):
829 827 return self[key].node()
830 828
831 829 def lookupbranch(self, key, remote=None):
832 830 repo = remote or self
833 831 if key in repo.branchmap():
834 832 return key
835 833
836 834 repo = (remote and remote.local()) and remote or self
837 835 return repo[key].branch()
838 836
839 837 def known(self, nodes):
840 838 cl = self.changelog
841 839 nm = cl.nodemap
842 840 filtered = cl.filteredrevs
843 841 result = []
844 842 for n in nodes:
845 843 r = nm.get(n)
846 844 resp = not (r is None or r in filtered)
847 845 result.append(resp)
848 846 return result
849 847
850 848 def local(self):
851 849 return self
852 850
853 851 def publishing(self):
854 852 # it's safe (and desirable) to trust the publish flag unconditionally
855 853 # so that we don't finalize changes shared between users via ssh or nfs
856 854 return self.ui.configbool('phases', 'publish', True, untrusted=True)
857 855
858 856 def cancopy(self):
859 857 # so statichttprepo's override of local() works
860 858 if not self.local():
861 859 return False
862 860 if not self.publishing():
863 861 return True
864 862 # if publishing we can't copy if there is filtered content
865 863 return not self.filtered('visible').changelog.filteredrevs
866 864
867 865 def shared(self):
868 866 '''the type of shared repository (None if not shared)'''
869 867 if self.sharedpath != self.path:
870 868 return 'store'
871 869 return None
872 870
873 871 def join(self, f, *insidef):
874 872 return self.vfs.join(os.path.join(f, *insidef))
875 873
876 874 def wjoin(self, f, *insidef):
877 875 return self.vfs.reljoin(self.root, f, *insidef)
878 876
879 877 def file(self, f):
880 878 if f[0] == '/':
881 879 f = f[1:]
882 880 return filelog.filelog(self.svfs, f)
883 881
884 882 def changectx(self, changeid):
885 883 return self[changeid]
886 884
887 885 def setparents(self, p1, p2=nullid):
888 886 self.dirstate.beginparentchange()
889 887 copies = self.dirstate.setparents(p1, p2)
890 888 pctx = self[p1]
891 889 if copies:
892 890 # Adjust copy records, the dirstate cannot do it, it
893 891 # requires access to parents manifests. Preserve them
894 892 # only for entries added to first parent.
895 893 for f in copies:
896 894 if f not in pctx and copies[f] in pctx:
897 895 self.dirstate.copy(copies[f], f)
898 896 if p2 == nullid:
899 897 for f, s in sorted(self.dirstate.copies().items()):
900 898 if f not in pctx and s not in pctx:
901 899 self.dirstate.copy(None, f)
902 900 self.dirstate.endparentchange()
903 901
904 902 def filectx(self, path, changeid=None, fileid=None):
905 903 """changeid can be a changeset revision, node, or tag.
906 904 fileid can be a file revision or node."""
907 905 return context.filectx(self, path, changeid, fileid)
908 906
909 907 def getcwd(self):
910 908 return self.dirstate.getcwd()
911 909
912 910 def pathto(self, f, cwd=None):
913 911 return self.dirstate.pathto(f, cwd)
914 912
915 913 def wfile(self, f, mode='r'):
916 914 return self.wvfs(f, mode)
917 915
918 916 def _link(self, f):
919 917 return self.wvfs.islink(f)
920 918
921 919 def _loadfilter(self, filter):
922 920 if filter not in self.filterpats:
923 921 l = []
924 922 for pat, cmd in self.ui.configitems(filter):
925 923 if cmd == '!':
926 924 continue
927 925 mf = matchmod.match(self.root, '', [pat])
928 926 fn = None
929 927 params = cmd
930 928 for name, filterfn in self._datafilters.iteritems():
931 929 if cmd.startswith(name):
932 930 fn = filterfn
933 931 params = cmd[len(name):].lstrip()
934 932 break
935 933 if not fn:
936 934 fn = lambda s, c, **kwargs: util.filter(s, c)
937 935 # Wrap old filters not supporting keyword arguments
938 936 if not inspect.getargspec(fn)[2]:
939 937 oldfn = fn
940 938 fn = lambda s, c, **kwargs: oldfn(s, c)
941 939 l.append((mf, fn, params))
942 940 self.filterpats[filter] = l
943 941 return self.filterpats[filter]
944 942
945 943 def _filter(self, filterpats, filename, data):
946 944 for mf, fn, cmd in filterpats:
947 945 if mf(filename):
948 946 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
949 947 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
950 948 break
951 949
952 950 return data
953 951
954 952 @unfilteredpropertycache
955 953 def _encodefilterpats(self):
956 954 return self._loadfilter('encode')
957 955
958 956 @unfilteredpropertycache
959 957 def _decodefilterpats(self):
960 958 return self._loadfilter('decode')
961 959
962 960 def adddatafilter(self, name, filter):
963 961 self._datafilters[name] = filter
964 962
965 963 def wread(self, filename):
966 964 if self._link(filename):
967 965 data = self.wvfs.readlink(filename)
968 966 else:
969 967 data = self.wvfs.read(filename)
970 968 return self._filter(self._encodefilterpats, filename, data)
971 969
972 970 def wwrite(self, filename, data, flags, backgroundclose=False):
973 971 """write ``data`` into ``filename`` in the working directory
974 972
975 973 This returns the length of the written (possibly decoded) data.
976 974 """
977 975 data = self._filter(self._decodefilterpats, filename, data)
978 976 if 'l' in flags:
979 977 self.wvfs.symlink(data, filename)
980 978 else:
981 979 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
982 980 if 'x' in flags:
983 981 self.wvfs.setflags(filename, False, True)
984 982 return len(data)
985 983
986 984 def wwritedata(self, filename, data):
987 985 return self._filter(self._decodefilterpats, filename, data)
988 986
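# --- Illustrative sketch, not part of this changeset ---
# The filter pipeline above is driven by [encode]/[decode] hgrc sections:
# wread() runs encode filters, wwrite()/wwritedata() run decode filters.
# The classic hgrc example (assumed from the hgrc documentation, not
# defined here):
#
#     [encode]
#     # uncompress gzip files on checkin to improve delta compression
#     *.gz = pipe: gunzip
#
#     [decode]
#     # recompress gzip files when writing them to the working directory
#     *.gz = gzip
#
# Extensions can register Python-level filters via adddatafilter(name, fn);
# _filter() then calls fn(data, cmd, ui=..., repo=..., filename=...).
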
989 987 def currenttransaction(self):
990 988 """return the current transaction or None if none exists"""
991 989 if self._transref:
992 990 tr = self._transref()
993 991 else:
994 992 tr = None
995 993
996 994 if tr and tr.running():
997 995 return tr
998 996 return None
999 997
1000 998 def transaction(self, desc, report=None):
1001 999 if (self.ui.configbool('devel', 'all-warnings')
1002 1000 or self.ui.configbool('devel', 'check-locks')):
1003 1001 l = self._lockref and self._lockref()
1004 1002 if l is None or not l.held:
1005 1003 self.ui.develwarn('transaction with no lock')
1006 1004 tr = self.currenttransaction()
1007 1005 if tr is not None:
1008 1006 return tr.nest()
1009 1007
1010 1008 # abort here if the journal already exists
1011 1009 if self.svfs.exists("journal"):
1012 1010 raise error.RepoError(
1013 1011 _("abandoned transaction found"),
1014 1012 hint=_("run 'hg recover' to clean up transaction"))
1015 1013
1016 1014 # make journal.dirstate contain in-memory changes at this point
1017 1015 self.dirstate.write(None)
1018 1016
1019 1017 idbase = "%.40f#%f" % (random.random(), time.time())
1020 1018 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1021 1019 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1022 1020
1023 1021 self._writejournal(desc)
1024 1022 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1025 1023 if report:
1026 1024 rp = report
1027 1025 else:
1028 1026 rp = self.ui.warn
1029 1027 vfsmap = {'plain': self.vfs} # root of .hg/
1030 1028 # we must avoid cyclic reference between repo and transaction.
1031 1029 reporef = weakref.ref(self)
1032 1030 def validate(tr):
1033 1031 """will run pre-closing hooks"""
1034 1032 reporef().hook('pretxnclose', throw=True,
1035 1033 txnname=desc, **tr.hookargs)
1036 1034 def releasefn(tr, success):
1037 1035 repo = reporef()
1038 1036 if success:
1039 1037 # this should be explicitly invoked here, because
1040 1038 # in-memory changes aren't written out when the
1041 1039 # transaction closes, if tr.addfilegenerator (via
1042 1040 # dirstate.write or so) wasn't invoked while the
1043 1041 # transaction was running
1044 1042 repo.dirstate.write(None)
1045 1043 else:
1046 1044 # prevent in-memory changes from being written out at
1047 1045 # the end of outer wlock scope or so
1048 1046 repo.dirstate.invalidate()
1049 1047
1050 1048 # discard all changes (including ones already written
1051 1049 # out) in this transaction
1052 1050 repo.vfs.rename('journal.dirstate', 'dirstate')
1053 1051
1054 1052 repo.invalidate(clearfilecache=True)
1055 1053
1056 1054 tr = transaction.transaction(rp, self.svfs, vfsmap,
1057 1055 "journal",
1058 1056 "undo",
1059 1057 aftertrans(renames),
1060 1058 self.store.createmode,
1061 1059 validator=validate,
1062 1060 releasefn=releasefn)
1063 1061
1064 1062 tr.hookargs['txnid'] = txnid
1065 1063 # note: writing the fncache only during finalize means that the file is
1066 1064 # outdated when running hooks. As fncache is used for streaming clone,
1067 1065 # this is not expected to break anything that happens during the hooks.
1068 1066 tr.addfinalize('flush-fncache', self.store.write)
1069 1067 def txnclosehook(tr2):
1070 1068 """To be run if transaction is successful, will schedule a hook run
1071 1069 """
1072 1070 # Don't reference tr2 in hook() so we don't hold a reference.
1073 1071 # This reduces memory consumption when there are multiple
1074 1072 # transactions per lock. This can likely go away if issue5045
1075 1073 # fixes the function accumulation.
1076 1074 hookargs = tr2.hookargs
1077 1075
1078 1076 def hook():
1079 1077 reporef().hook('txnclose', throw=False, txnname=desc,
1080 1078 **hookargs)
1081 1079 reporef()._afterlock(hook)
1082 1080 tr.addfinalize('txnclose-hook', txnclosehook)
1083 1081 def txnaborthook(tr2):
1084 1082 """To be run if transaction is aborted
1085 1083 """
1086 1084 reporef().hook('txnabort', throw=False, txnname=desc,
1087 1085 **tr2.hookargs)
1088 1086 tr.addabort('txnabort-hook', txnaborthook)
1089 1087 # avoid eager cache invalidation. in-memory data should be identical
1090 1088 # to stored data if transaction has no error.
1091 1089 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1092 1090 self._transref = weakref.ref(tr)
1093 1091 return tr
1094 1092
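# --- Illustrative sketch, not part of this changeset ---
# Canonical use of the transaction machinery above, assuming 'repo' is an
# open localrepository: take wlock, then lock, then open a transaction;
# close() on success, release() always. transaction() hands back a nested
# handle when one is already running (see currenttransaction()).

with repo.wlock():
    with repo.lock():
        tr = repo.transaction('sketch')
        try:
            # ... write to the store here ...
            tr.close()      # commit the transaction
        finally:
            tr.release()    # rolls back if close() was never reached
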
1095 1093 def _journalfiles(self):
1096 1094 return ((self.svfs, 'journal'),
1097 1095 (self.vfs, 'journal.dirstate'),
1098 1096 (self.vfs, 'journal.branch'),
1099 1097 (self.vfs, 'journal.desc'),
1100 1098 (self.vfs, 'journal.bookmarks'),
1101 1099 (self.svfs, 'journal.phaseroots'))
1102 1100
1103 1101 def undofiles(self):
1104 1102 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1105 1103
1106 1104 def _writejournal(self, desc):
1107 1105 self.vfs.write("journal.dirstate",
1108 1106 self.vfs.tryread("dirstate"))
1109 1107 self.vfs.write("journal.branch",
1110 1108 encoding.fromlocal(self.dirstate.branch()))
1111 1109 self.vfs.write("journal.desc",
1112 1110 "%d\n%s\n" % (len(self), desc))
1113 1111 self.vfs.write("journal.bookmarks",
1114 1112 self.vfs.tryread("bookmarks"))
1115 1113 self.svfs.write("journal.phaseroots",
1116 1114 self.svfs.tryread("phaseroots"))
1117 1115
1118 1116 def recover(self):
1119 1117 with self.lock():
1120 1118 if self.svfs.exists("journal"):
1121 1119 self.ui.status(_("rolling back interrupted transaction\n"))
1122 1120 vfsmap = {'': self.svfs,
1123 1121 'plain': self.vfs,}
1124 1122 transaction.rollback(self.svfs, vfsmap, "journal",
1125 1123 self.ui.warn)
1126 1124 self.invalidate()
1127 1125 return True
1128 1126 else:
1129 1127 self.ui.warn(_("no interrupted transaction available\n"))
1130 1128 return False
1131 1129
1132 1130 def rollback(self, dryrun=False, force=False):
1133 1131 wlock = lock = dsguard = None
1134 1132 try:
1135 1133 wlock = self.wlock()
1136 1134 lock = self.lock()
1137 1135 if self.svfs.exists("undo"):
1138 1136 dsguard = cmdutil.dirstateguard(self, 'rollback')
1139 1137
1140 1138 return self._rollback(dryrun, force, dsguard)
1141 1139 else:
1142 1140 self.ui.warn(_("no rollback information available\n"))
1143 1141 return 1
1144 1142 finally:
1145 1143 release(dsguard, lock, wlock)
1146 1144
1147 1145 @unfilteredmethod # Until we get smarter cache management
1148 1146 def _rollback(self, dryrun, force, dsguard):
1149 1147 ui = self.ui
1150 1148 try:
1151 1149 args = self.vfs.read('undo.desc').splitlines()
1152 1150 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1153 1151 if len(args) >= 3:
1154 1152 detail = args[2]
1155 1153 oldtip = oldlen - 1
1156 1154
1157 1155 if detail and ui.verbose:
1158 1156 msg = (_('repository tip rolled back to revision %s'
1159 1157 ' (undo %s: %s)\n')
1160 1158 % (oldtip, desc, detail))
1161 1159 else:
1162 1160 msg = (_('repository tip rolled back to revision %s'
1163 1161 ' (undo %s)\n')
1164 1162 % (oldtip, desc))
1165 1163 except IOError:
1166 1164 msg = _('rolling back unknown transaction\n')
1167 1165 desc = None
1168 1166
1169 1167 if not force and self['.'] != self['tip'] and desc == 'commit':
1170 1168 raise error.Abort(
1171 1169 _('rollback of last commit while not checked out '
1172 1170 'may lose data'), hint=_('use -f to force'))
1173 1171
1174 1172 ui.status(msg)
1175 1173 if dryrun:
1176 1174 return 0
1177 1175
1178 1176 parents = self.dirstate.parents()
1179 1177 self.destroying()
1180 1178 vfsmap = {'plain': self.vfs, '': self.svfs}
1181 1179 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1182 1180 if self.vfs.exists('undo.bookmarks'):
1183 1181 self.vfs.rename('undo.bookmarks', 'bookmarks')
1184 1182 if self.svfs.exists('undo.phaseroots'):
1185 1183 self.svfs.rename('undo.phaseroots', 'phaseroots')
1186 1184 self.invalidate()
1187 1185
1188 1186 parentgone = (parents[0] not in self.changelog.nodemap or
1189 1187 parents[1] not in self.changelog.nodemap)
1190 1188 if parentgone:
1191 1189 # prevent dirstateguard from overwriting already restored one
1192 1190 dsguard.close()
1193 1191
1194 1192 self.vfs.rename('undo.dirstate', 'dirstate')
1195 1193 try:
1196 1194 branch = self.vfs.read('undo.branch')
1197 1195 self.dirstate.setbranch(encoding.tolocal(branch))
1198 1196 except IOError:
1199 1197 ui.warn(_('named branch could not be reset: '
1200 1198 'current branch is still \'%s\'\n')
1201 1199 % self.dirstate.branch())
1202 1200
1203 1201 self.dirstate.invalidate()
1204 1202 parents = tuple([p.rev() for p in self[None].parents()])
1205 1203 if len(parents) > 1:
1206 1204 ui.status(_('working directory now based on '
1207 1205 'revisions %d and %d\n') % parents)
1208 1206 else:
1209 1207 ui.status(_('working directory now based on '
1210 1208 'revision %d\n') % parents)
1211 1209 mergemod.mergestate.clean(self, self['.'].node())
1212 1210
1213 1211 # TODO: if we know which new heads may result from this rollback, pass
1214 1212 # them to destroy(), which will prevent the branchhead cache from being
1215 1213 # invalidated.
1216 1214 self.destroyed()
1217 1215 return 0
1218 1216
1219 1217 def invalidatecaches(self):
1220 1218
1221 1219 if '_tagscache' in vars(self):
1222 1220 # can't use delattr on proxy
1223 1221 del self.__dict__['_tagscache']
1224 1222
1225 1223 self.unfiltered()._branchcaches.clear()
1226 1224 self.invalidatevolatilesets()
1227 1225
1228 1226 def invalidatevolatilesets(self):
1229 1227 self.filteredrevcache.clear()
1230 1228 obsolete.clearobscaches(self)
1231 1229
1232 1230 def invalidatedirstate(self):
1233 1231 '''Invalidates the dirstate, causing the next call to dirstate
1234 1232 to check if it was modified since the last time it was read,
1235 1233 rereading it if it has.
1236 1234
1237 1235 This is different from dirstate.invalidate() in that it doesn't
1238 1236 always reread the dirstate. Use dirstate.invalidate() if you want to
1239 1237 explicitly read the dirstate again (i.e. restoring it to a previous
1240 1238 known good state).'''
1241 1239 if hasunfilteredcache(self, 'dirstate'):
1242 1240 for k in self.dirstate._filecache:
1243 1241 try:
1244 1242 delattr(self.dirstate, k)
1245 1243 except AttributeError:
1246 1244 pass
1247 1245 delattr(self.unfiltered(), 'dirstate')
1248 1246
1249 1247 def invalidate(self, clearfilecache=False):
1250 1248 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1251 1249 for k in self._filecache.keys():
1252 1250 # dirstate is invalidated separately in invalidatedirstate()
1253 1251 if k == 'dirstate':
1254 1252 continue
1255 1253
1256 1254 if clearfilecache:
1257 1255 del self._filecache[k]
1258 1256 try:
1259 1257 delattr(unfiltered, k)
1260 1258 except AttributeError:
1261 1259 pass
1262 1260 self.invalidatecaches()
1263 1261 self.store.invalidatecaches()
1264 1262
1265 1263 def invalidateall(self):
1266 1264 '''Fully invalidates both store and non-store parts, causing the
1267 1265 subsequent operation to reread any outside changes.'''
1268 1266 # extension should hook this to invalidate its caches
1269 1267 self.invalidate()
1270 1268 self.invalidatedirstate()
1271 1269
1272 1270 def _refreshfilecachestats(self, tr):
1273 1271 """Reload stats of cached files so that they are flagged as valid"""
1274 1272 for k, ce in self._filecache.items():
1275 1273 if k == 'dirstate' or k not in self.__dict__:
1276 1274 continue
1277 1275 ce.refresh()
1278 1276
1279 1277 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1280 1278 inheritchecker=None, parentenvvar=None):
1281 1279 parentlock = None
1282 1280 # the contents of parentenvvar are used by the underlying lock to
1283 1281 # determine whether it can be inherited
1284 1282 if parentenvvar is not None:
1285 1283 parentlock = os.environ.get(parentenvvar)
1286 1284 try:
1287 1285 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1288 1286 acquirefn=acquirefn, desc=desc,
1289 1287 inheritchecker=inheritchecker,
1290 1288 parentlock=parentlock)
1291 1289 except error.LockHeld as inst:
1292 1290 if not wait:
1293 1291 raise
1294 1292 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1295 1293 (desc, inst.locker))
1296 1294 # default to 600 seconds timeout
1297 1295 l = lockmod.lock(vfs, lockname,
1298 1296 int(self.ui.config("ui", "timeout", "600")),
1299 1297 releasefn=releasefn, acquirefn=acquirefn,
1300 1298 desc=desc)
1301 1299 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1302 1300 return l
1303 1301
1304 1302 def _afterlock(self, callback):
1305 1303 """add a callback to be run when the repository is fully unlocked
1306 1304
1307 1305 The callback will be executed when the outermost lock is released
1308 1306 (with wlock being higher level than 'lock')."""
1309 1307 for ref in (self._wlockref, self._lockref):
1310 1308 l = ref and ref()
1311 1309 if l and l.held:
1312 1310 l.postrelease.append(callback)
1313 1311 break
1314 1312 else: # no lock has been found.
1315 1313 callback()
1316 1314
1317 1315 def lock(self, wait=True):
1318 1316 '''Lock the repository store (.hg/store) and return a weak reference
1319 1317 to the lock. Use this before modifying the store (e.g. committing or
1320 1318 stripping). If you are opening a transaction, get a lock as well.
1321 1319
1322 1320 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1323 1321 'wlock' first to avoid a deadlock hazard.'''
1324 1322 l = self._lockref and self._lockref()
1325 1323 if l is not None and l.held:
1326 1324 l.lock()
1327 1325 return l
1328 1326
1329 1327 l = self._lock(self.svfs, "lock", wait, None,
1330 1328 self.invalidate, _('repository %s') % self.origroot)
1331 1329 self._lockref = weakref.ref(l)
1332 1330 return l
1333 1331
1334 1332 def _wlockchecktransaction(self):
1335 1333 if self.currenttransaction() is not None:
1336 1334 raise error.LockInheritanceContractViolation(
1337 1335 'wlock cannot be inherited in the middle of a transaction')
1338 1336
1339 1337 def wlock(self, wait=True):
1340 1338 '''Lock the non-store parts of the repository (everything under
1341 1339 .hg except .hg/store) and return a weak reference to the lock.
1342 1340
1343 1341 Use this before modifying files in .hg.
1344 1342
1345 1343 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1346 1344 'wlock' first to avoid a deadlock hazard.'''
1347 1345 l = self._wlockref and self._wlockref()
1348 1346 if l is not None and l.held:
1349 1347 l.lock()
1350 1348 return l
1351 1349
1352 1350 # We do not need to check for non-waiting lock acquisition. Such
1353 1351 # acquisition would not cause a deadlock, as it would just fail.
1354 1352 if wait and (self.ui.configbool('devel', 'all-warnings')
1355 1353 or self.ui.configbool('devel', 'check-locks')):
1356 1354 l = self._lockref and self._lockref()
1357 1355 if l is not None and l.held:
1358 1356 self.ui.develwarn('"wlock" acquired after "lock"')
1359 1357
1360 1358 def unlock():
1361 1359 if self.dirstate.pendingparentchange():
1362 1360 self.dirstate.invalidate()
1363 1361 else:
1364 1362 self.dirstate.write(None)
1365 1363
1366 1364 self._filecache['dirstate'].refresh()
1367 1365
1368 1366 l = self._lock(self.vfs, "wlock", wait, unlock,
1369 1367 self.invalidatedirstate, _('working directory of %s') %
1370 1368 self.origroot,
1371 1369 inheritchecker=self._wlockchecktransaction,
1372 1370 parentenvvar='HG_WLOCK_LOCKER')
1373 1371 self._wlockref = weakref.ref(l)
1374 1372 return l
1375 1373
1376 1374 def _currentlock(self, lockref):
1377 1375 """Returns the lock if it's held, or None if it's not."""
1378 1376 if lockref is None:
1379 1377 return None
1380 1378 l = lockref()
1381 1379 if l is None or not l.held:
1382 1380 return None
1383 1381 return l
1384 1382
1385 1383 def currentwlock(self):
1386 1384 """Returns the wlock if it's held, or None if it's not."""
1387 1385 return self._currentlock(self._wlockref)
1388 1386
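# --- Illustrative sketch, not part of this changeset ---
# Lock ordering as encoded above, assuming 'repo' is an open
# localrepository: wlock (working copy) before lock (store), never the
# reverse; with devel.check-locks enabled the wrong order trips the
# develwarn in wlock().

with repo.wlock():                           # correct order: wlock first
    assert repo.currentwlock() is not None   # held, so not None
    with repo.lock():
        pass
assert repo.currentwlock() is None           # released again
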
1389 1387 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1390 1388 """
1391 1389 commit an individual file as part of a larger transaction
1392 1390 """
1393 1391
1394 1392 fname = fctx.path()
1395 1393 fparent1 = manifest1.get(fname, nullid)
1396 1394 fparent2 = manifest2.get(fname, nullid)
1397 1395 if isinstance(fctx, context.filectx):
1398 1396 node = fctx.filenode()
1399 1397 if node in [fparent1, fparent2]:
1400 1398 self.ui.debug('reusing %s filelog entry\n' % fname)
1401 1399 return node
1402 1400
1403 1401 flog = self.file(fname)
1404 1402 meta = {}
1405 1403 copy = fctx.renamed()
1406 1404 if copy and copy[0] != fname:
1407 1405 # Mark the new revision of this file as a copy of another
1408 1406 # file. This copy data will effectively act as a parent
1409 1407 # of this new revision. If this is a merge, the first
1410 1408 # parent will be the nullid (meaning "look up the copy data")
1411 1409 # and the second one will be the other parent. For example:
1412 1410 #
1413 1411 # 0 --- 1 --- 3 rev1 changes file foo
1414 1412 # \ / rev2 renames foo to bar and changes it
1415 1413 # \- 2 -/ rev3 should have bar with all changes and
1416 1414 # should record that bar descends from
1417 1415 # bar in rev2 and foo in rev1
1418 1416 #
1419 1417 # this allows this merge to succeed:
1420 1418 #
1421 1419 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1422 1420 # \ / merging rev3 and rev4 should use bar@rev2
1423 1421 # \- 2 --- 4 as the merge base
1424 1422 #
1425 1423
1426 1424 cfname = copy[0]
1427 1425 crev = manifest1.get(cfname)
1428 1426 newfparent = fparent2
1429 1427
1430 1428 if manifest2: # branch merge
1431 1429 if fparent2 == nullid or crev is None: # copied on remote side
1432 1430 if cfname in manifest2:
1433 1431 crev = manifest2[cfname]
1434 1432 newfparent = fparent1
1435 1433
1436 1434 # Here, we used to search backwards through history to try to find
1437 1435 # where the file copy came from if the source of a copy was not in
1438 1436 # the parent directory. However, this doesn't actually make sense to
1439 1437 # do (what does a copy from something not in your working copy even
1440 1438 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1441 1439 # the user that copy information was dropped, so if they didn't
1442 1440 # expect this outcome it can be fixed, but this is the correct
1443 1441 # behavior in this circumstance.
1444 1442
1445 1443 if crev:
1446 1444 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1447 1445 meta["copy"] = cfname
1448 1446 meta["copyrev"] = hex(crev)
1449 1447 fparent1, fparent2 = nullid, newfparent
1450 1448 else:
1451 1449 self.ui.warn(_("warning: can't find ancestor for '%s' "
1452 1450 "copied from '%s'!\n") % (fname, cfname))
1453 1451
1454 1452 elif fparent1 == nullid:
1455 1453 fparent1, fparent2 = fparent2, nullid
1456 1454 elif fparent2 != nullid:
1457 1455 # is one parent an ancestor of the other?
1458 1456 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1459 1457 if fparent1 in fparentancestors:
1460 1458 fparent1, fparent2 = fparent2, nullid
1461 1459 elif fparent2 in fparentancestors:
1462 1460 fparent2 = nullid
1463 1461
1464 1462 # is the file changed?
1465 1463 text = fctx.data()
1466 1464 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1467 1465 changelist.append(fname)
1468 1466 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1469 1467 # are just the flags changed during merge?
1470 1468 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1471 1469 changelist.append(fname)
1472 1470
1473 1471 return fparent1
1474 1472
1475 1473 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1476 1474 """check for commit arguments that aren't committable"""
1477 1475 if match.isexact() or match.prefix():
1478 1476 matched = set(status.modified + status.added + status.removed)
1479 1477
1480 1478 for f in match.files():
1481 1479 f = self.dirstate.normalize(f)
1482 1480 if f == '.' or f in matched or f in wctx.substate:
1483 1481 continue
1484 1482 if f in status.deleted:
1485 1483 fail(f, _('file not found!'))
1486 1484 if f in vdirs: # visited directory
1487 1485 d = f + '/'
1488 1486 for mf in matched:
1489 1487 if mf.startswith(d):
1490 1488 break
1491 1489 else:
1492 1490 fail(f, _("no match under directory!"))
1493 1491 elif f not in self.dirstate:
1494 1492 fail(f, _("file not tracked!"))
1495 1493
1496 1494 @unfilteredmethod
1497 1495 def commit(self, text="", user=None, date=None, match=None, force=False,
1498 1496 editor=False, extra=None):
1499 1497 """Add a new revision to current repository.
1500 1498
1501 1499 Revision information is gathered from the working directory,
1502 1500 match can be used to filter the committed files. If editor is
1503 1501 supplied, it is called to get a commit message.
1504 1502 """
1505 1503 if extra is None:
1506 1504 extra = {}
1507 1505
1508 1506 def fail(f, msg):
1509 1507 raise error.Abort('%s: %s' % (f, msg))
1510 1508
1511 1509 if not match:
1512 1510 match = matchmod.always(self.root, '')
1513 1511
1514 1512 if not force:
1515 1513 vdirs = []
1516 1514 match.explicitdir = vdirs.append
1517 1515 match.bad = fail
1518 1516
1519 1517 wlock = lock = tr = None
1520 1518 try:
1521 1519 wlock = self.wlock()
1522 1520 lock = self.lock() # for recent changelog (see issue4368)
1523 1521
1524 1522 wctx = self[None]
1525 1523 merge = len(wctx.parents()) > 1
1526 1524
1527 1525 if not force and merge and match.ispartial():
1528 1526 raise error.Abort(_('cannot partially commit a merge '
1529 1527 '(do not specify files or patterns)'))
1530 1528
1531 1529 status = self.status(match=match, clean=force)
1532 1530 if force:
1533 1531 status.modified.extend(status.clean) # mq may commit clean files
1534 1532
1535 1533 # check subrepos
1536 1534 subs = []
1537 1535 commitsubs = set()
1538 1536 newstate = wctx.substate.copy()
1539 1537 # only manage subrepos and .hgsubstate if .hgsub is present
1540 1538 if '.hgsub' in wctx:
1541 1539 # we'll decide whether to track this ourselves, thanks
1542 1540 for c in status.modified, status.added, status.removed:
1543 1541 if '.hgsubstate' in c:
1544 1542 c.remove('.hgsubstate')
1545 1543
1546 1544 # compare current state to last committed state
1547 1545 # build new substate based on last committed state
1548 1546 oldstate = wctx.p1().substate
1549 1547 for s in sorted(newstate.keys()):
1550 1548 if not match(s):
1551 1549 # ignore working copy, use old state if present
1552 1550 if s in oldstate:
1553 1551 newstate[s] = oldstate[s]
1554 1552 continue
1555 1553 if not force:
1556 1554 raise error.Abort(
1557 1555 _("commit with new subrepo %s excluded") % s)
1558 1556 dirtyreason = wctx.sub(s).dirtyreason(True)
1559 1557 if dirtyreason:
1560 1558 if not self.ui.configbool('ui', 'commitsubrepos'):
1561 1559 raise error.Abort(dirtyreason,
1562 1560 hint=_("use --subrepos for recursive commit"))
1563 1561 subs.append(s)
1564 1562 commitsubs.add(s)
1565 1563 else:
1566 1564 bs = wctx.sub(s).basestate()
1567 1565 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1568 1566 if oldstate.get(s, (None, None, None))[1] != bs:
1569 1567 subs.append(s)
1570 1568
1571 1569 # check for removed subrepos
1572 1570 for p in wctx.parents():
1573 1571 r = [s for s in p.substate if s not in newstate]
1574 1572 subs += [s for s in r if match(s)]
1575 1573 if subs:
1576 1574 if (not match('.hgsub') and
1577 1575 '.hgsub' in (wctx.modified() + wctx.added())):
1578 1576 raise error.Abort(
1579 1577 _("can't commit subrepos without .hgsub"))
1580 1578 status.modified.insert(0, '.hgsubstate')
1581 1579
1582 1580 elif '.hgsub' in status.removed:
1583 1581 # clean up .hgsubstate when .hgsub is removed
1584 1582 if ('.hgsubstate' in wctx and
1585 1583 '.hgsubstate' not in (status.modified + status.added +
1586 1584 status.removed)):
1587 1585 status.removed.insert(0, '.hgsubstate')
1588 1586
1589 1587 # make sure all explicit patterns are matched
1590 1588 if not force:
1591 1589 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1592 1590
1593 1591 cctx = context.workingcommitctx(self, status,
1594 1592 text, user, date, extra)
1595 1593
1596 1594 # internal config: ui.allowemptycommit
1597 1595 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1598 1596 or extra.get('close') or merge or cctx.files()
1599 1597 or self.ui.configbool('ui', 'allowemptycommit'))
1600 1598 if not allowemptycommit:
1601 1599 return None
1602 1600
1603 1601 if merge and cctx.deleted():
1604 1602 raise error.Abort(_("cannot commit merge with missing files"))
1605 1603
1606 1604 ms = mergemod.mergestate.read(self)
1607 1605
1608 1606 if list(ms.unresolved()):
1609 1607 raise error.Abort(_('unresolved merge conflicts '
1610 1608 '(see "hg help resolve")'))
1611 1609 if ms.mdstate() != 's' or list(ms.driverresolved()):
1612 1610 raise error.Abort(_('driver-resolved merge conflicts'),
1613 1611 hint=_('run "hg resolve --all" to resolve'))
1614 1612
1615 1613 if editor:
1616 1614 cctx._text = editor(self, cctx, subs)
1617 1615 edited = (text != cctx._text)
1618 1616
1619 1617 # Save commit message in case this transaction gets rolled back
1620 1618 # (e.g. by a pretxncommit hook). Leave the content alone on
1621 1619 # the assumption that the user will use the same editor again.
1622 1620 msgfn = self.savecommitmessage(cctx._text)
1623 1621
1624 1622 # commit subs and write new state
1625 1623 if subs:
1626 1624 for s in sorted(commitsubs):
1627 1625 sub = wctx.sub(s)
1628 1626 self.ui.status(_('committing subrepository %s\n') %
1629 1627 subrepo.subrelpath(sub))
1630 1628 sr = sub.commit(cctx._text, user, date)
1631 1629 newstate[s] = (newstate[s][0], sr)
1632 1630 subrepo.writestate(self, newstate)
1633 1631
1634 1632 p1, p2 = self.dirstate.parents()
1635 1633 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1636 1634 try:
1637 1635 self.hook("precommit", throw=True, parent1=hookp1,
1638 1636 parent2=hookp2)
1639 1637 tr = self.transaction('commit')
1640 1638 ret = self.commitctx(cctx, True)
1641 1639 except: # re-raises
1642 1640 if edited:
1643 1641 self.ui.write(
1644 1642 _('note: commit message saved in %s\n') % msgfn)
1645 1643 raise
1646 1644 # update bookmarks, dirstate and mergestate
1647 1645 bookmarks.update(self, [p1, p2], ret)
1648 1646 cctx.markcommitted(ret)
1649 1647 ms.reset()
1650 1648 tr.close()
1651 1649
1652 1650 finally:
1653 1651 lockmod.release(tr, lock, wlock)
1654 1652
1655 1653 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1656 1654 # hack for commands that use a temporary commit (e.g. histedit):
1657 1655 # the temporary commit may have been stripped before the hook runs
1658 1656 if self.changelog.hasnode(ret):
1659 1657 self.hook("commit", node=node, parent1=parent1,
1660 1658 parent2=parent2)
1661 1659 self._afterlock(commithook)
1662 1660 return ret
1663 1661
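# Illustrative usage (a sketch, not part of the original module): committing
# the working directory programmatically, assuming 'repo' is an existing
# localrepository instance:
#
#   match = matchmod.always(repo.root, '')
#   node = repo.commit(text='fix bug', user='Alice <alice@example.com>',
#                      match=match)
#   # node is None when there was nothing to commit
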
1664 1662 @unfilteredmethod
1665 1663 def commitctx(self, ctx, error=False):
1666 1664 """Add a new revision to current repository.
1667 1665 Revision information is passed via the context argument.
1668 1666 """
1669 1667
1670 1668 tr = None
1671 1669 p1, p2 = ctx.p1(), ctx.p2()
1672 1670 user = ctx.user()
1673 1671
1674 1672 lock = self.lock()
1675 1673 try:
1676 1674 tr = self.transaction("commit")
1677 1675 trp = weakref.proxy(tr)
1678 1676
1679 1677 if ctx.files():
1680 1678 m1 = p1.manifest()
1681 1679 m2 = p2.manifest()
1682 1680 m = m1.copy()
1683 1681
1684 1682 # check in files
1685 1683 added = []
1686 1684 changed = []
1687 1685 removed = list(ctx.removed())
1688 1686 linkrev = len(self)
1689 1687 self.ui.note(_("committing files:\n"))
1690 1688 for f in sorted(ctx.modified() + ctx.added()):
1691 1689 self.ui.note(f + "\n")
1692 1690 try:
1693 1691 fctx = ctx[f]
1694 1692 if fctx is None:
1695 1693 removed.append(f)
1696 1694 else:
1697 1695 added.append(f)
1698 1696 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1699 1697 trp, changed)
1700 1698 m.setflag(f, fctx.flags())
1701 1699 except OSError as inst:
1702 1700 self.ui.warn(_("trouble committing %s!\n") % f)
1703 1701 raise
1704 1702 except IOError as inst:
1705 1703 errcode = getattr(inst, 'errno', errno.ENOENT)
1706 1704 if error or errcode and errcode != errno.ENOENT:
1707 1705 self.ui.warn(_("trouble committing %s!\n") % f)
1708 1706 raise
1709 1707
1710 1708 # update manifest
1711 1709 self.ui.note(_("committing manifest\n"))
1712 1710 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1713 1711 drop = [f for f in removed if f in m]
1714 1712 for f in drop:
1715 1713 del m[f]
1716 1714 mn = self.manifest.add(m, trp, linkrev,
1717 1715 p1.manifestnode(), p2.manifestnode(),
1718 1716 added, drop)
1719 1717 files = changed + removed
1720 1718 else:
1721 1719 mn = p1.manifestnode()
1722 1720 files = []
1723 1721
1724 1722 # update changelog
1725 1723 self.ui.note(_("committing changelog\n"))
1726 1724 self.changelog.delayupdate(tr)
1727 1725 n = self.changelog.add(mn, files, ctx.description(),
1728 1726 trp, p1.node(), p2.node(),
1729 1727 user, ctx.date(), ctx.extra().copy())
1730 1728 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1731 1729 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1732 1730 parent2=xp2)
1733 1731 # set the new commit in its proper phase
1734 1732 targetphase = subrepo.newcommitphase(self.ui, ctx)
1735 1733 if targetphase:
1736 1734 # retracting the boundary does not alter parent changesets.
1737 1735 # if a parent has a higher phase, the resulting phase will
1738 1736 # be compliant anyway
1739 1737 #
1740 1738 # if the minimal phase was 0 we don't need to retract anything
1741 1739 phases.retractboundary(self, tr, targetphase, [n])
1742 1740 tr.close()
1743 1741 branchmap.updatecache(self.filtered('served'))
1744 1742 return n
1745 1743 finally:
1746 1744 if tr:
1747 1745 tr.release()
1748 1746 lock.release()
1749 1747
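# Illustrative usage (a sketch with hypothetical file names): commitctx is
# the natural entry point for in-memory commits built with context.memctx,
# assuming 'repo' is an existing localrepository instance:
#
#   def getfile(repo, memctx, path):
#       return context.memfilectx(repo, path, 'new contents\n')
#   mctx = context.memctx(repo, (repo['.'].node(), nullid),
#                         'commit message', ['a.txt'], getfile,
#                         user='Alice <alice@example.com>')
#   node = repo.commitctx(mctx)
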
1750 1748 @unfilteredmethod
1751 1749 def destroying(self):
1752 1750 '''Inform the repository that nodes are about to be destroyed.
1753 1751 Intended for use by strip and rollback, so there's a common
1754 1752 place for anything that has to be done before destroying history.
1755 1753
1756 1754 This is mostly useful for saving state that is in memory and waiting
1757 1755 to be flushed when the current lock is released. Because a call to
1758 1756 destroyed is imminent, the repo will be invalidated, causing those
1759 1757 changes either to stay in memory (waiting for the next unlock) or to
1760 1758 vanish completely.
1761 1759 '''
1762 1760 # When using the same lock to commit and strip, the phasecache is left
1763 1761 # dirty after committing. Then when we strip, the repo is invalidated,
1764 1762 # causing those changes to disappear.
1765 1763 if '_phasecache' in vars(self):
1766 1764 self._phasecache.write()
1767 1765
1768 1766 @unfilteredmethod
1769 1767 def destroyed(self):
1770 1768 '''Inform the repository that nodes have been destroyed.
1771 1769 Intended for use by strip and rollback, so there's a common
1772 1770 place for anything that has to be done after destroying history.
1773 1771 '''
1774 1772 # When one tries to:
1775 1773 # 1) destroy nodes thus calling this method (e.g. strip)
1776 1774 # 2) use phasecache somewhere (e.g. commit)
1777 1775 #
1778 1776 # then 2) will fail because the phasecache contains nodes that were
1779 1777 # removed. We can either remove phasecache from the filecache,
1780 1778 # causing it to reload next time it is accessed, or simply filter
1781 1779 # the removed nodes now and write the updated cache.
1782 1780 self._phasecache.filterunknown(self)
1783 1781 self._phasecache.write()
1784 1782
1785 1783 # update the 'served' branch cache to help read-only server processes
1786 1784 # Thanks to branchcache collaboration, this is done from the nearest
1787 1785 # filtered subset and is expected to be fast.
1788 1786 branchmap.updatecache(self.filtered('served'))
1789 1787
1790 1788 # Ensure the persistent tag cache is updated. Doing it now
1791 1789 # means that the tag cache only has to worry about destroyed
1792 1790 # heads immediately after a strip/rollback. That in turn
1793 1791 # guarantees that "cachetip == currenttip" (comparing both rev
1794 1792 # and node) always means no nodes have been added or destroyed.
1795 1793
1796 1794 # XXX this is suboptimal when qrefresh'ing: we strip the current
1797 1795 # head, refresh the tag cache, then immediately add a new head.
1798 1796 # But I think doing it this way is necessary for the "instant
1799 1797 # tag cache retrieval" case to work.
1800 1798 self.invalidate()
1801 1799
1802 1800 def walk(self, match, node=None):
1803 1801 '''
1804 1802 walk recursively through the directory tree or a given
1805 1803 changeset, finding all files matched by the match
1806 1804 function
1807 1805 '''
1808 1806 return self[node].walk(match)
1809 1807
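# Illustrative usage (a sketch): walking all Python files in the working
# directory, assuming 'repo' is an existing localrepository instance:
#
#   m = matchmod.match(repo.root, '', ['glob:**.py'])
#   for f in repo.walk(m):
#       repo.ui.write(f + '\n')
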
1810 1808 def status(self, node1='.', node2=None, match=None,
1811 1809 ignored=False, clean=False, unknown=False,
1812 1810 listsubrepos=False):
1813 1811 '''a convenience method that calls node1.status(node2)'''
1814 1812 return self[node1].status(node2, match, ignored, clean, unknown,
1815 1813 listsubrepos)
1816 1814
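# Illustrative usage (a sketch): the returned status object carries lists
# such as modified, added, removed, deleted, unknown, ignored and clean:
#
#   st = repo.status(ignored=True, clean=True)
#   for f in st.modified:
#       repo.ui.write('M %s\n' % f)
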
1817 1815 def heads(self, start=None):
1818 1816 heads = self.changelog.heads(start)
1819 1817 # sort the output in rev descending order
1820 1818 return sorted(heads, key=self.changelog.rev, reverse=True)
1821 1819
1822 1820 def branchheads(self, branch=None, start=None, closed=False):
1823 1821 '''return a (possibly filtered) list of heads for the given branch
1824 1822
1825 1823 Heads are returned in topological order, from newest to oldest.
1826 1824 If branch is None, use the dirstate branch.
1827 1825 If start is not None, return only heads reachable from start.
1828 1826 If closed is True, return heads that are marked as closed as well.
1829 1827 '''
1830 1828 if branch is None:
1831 1829 branch = self[None].branch()
1832 1830 branches = self.branchmap()
1833 1831 if branch not in branches:
1834 1832 return []
1835 1833 # the cache returns heads ordered lowest to highest
1836 1834 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1837 1835 if start is not None:
1838 1836 # filter out the heads that cannot be reached from startrev
1839 1837 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1840 1838 bheads = [h for h in bheads if h in fbheads]
1841 1839 return bheads
1842 1840
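# Illustrative usage (a sketch): topological heads of a named branch,
# including closed ones, assuming 'repo' is a localrepository instance:
#
#   for h in repo.branchheads('default', closed=True):
#       repo.ui.write('%s\n' % hex(h))
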
1843 1841 def branches(self, nodes):
1844 1842 if not nodes:
1845 1843 nodes = [self.changelog.tip()]
1846 1844 b = []
1847 1845 for n in nodes:
1848 1846 t = n
1849 1847 while True:
1850 1848 p = self.changelog.parents(n)
1851 1849 if p[1] != nullid or p[0] == nullid:
1852 1850 b.append((t, n, p[0], p[1]))
1853 1851 break
1854 1852 n = p[0]
1855 1853 return b
1856 1854
1857 1855 def between(self, pairs):
1858 1856 r = []
1859 1857
1860 1858 for top, bottom in pairs:
1861 1859 n, l, i = top, [], 0
1862 1860 f = 1
1863 1861
1864 1862 while n != bottom and n != nullid:
1865 1863 p = self.changelog.parents(n)[0]
1866 1864 if i == f:
1867 1865 l.append(n)
1868 1866 f = f * 2
1869 1867 n = p
1870 1868 i += 1
1871 1869
1872 1870 r.append(l)
1873 1871
1874 1872 return r
1875 1873
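# Illustrative trace (a sketch): between() samples first-parent ancestors at
# exponentially growing distances, which the legacy wire protocol uses to
# bisect for common ancestors. For a linear history n0 <- n1 <- ... <- n10
# (n10 newest), between([(n10, n0)]) yields the nodes 1, 2, 4 and 8 steps
# below the top:
#
#   repo.between([(n10, n0)])  ->  [[n9, n8, n6, n2]]
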
1876 1874 def checkpush(self, pushop):
1877 1875 """Extensions can override this function if additional checks have
1878 1876 to be performed before pushing, or call it if they override the push
1879 1877 command.
1880 1878 """
1881 1879 pass
1882 1880
1883 1881 @unfilteredpropertycache
1884 1882 def prepushoutgoinghooks(self):
1885 1883 """Return util.hooks consists of a pushop with repo, remote, outgoing
1886 1884 methods, which are called before pushing changesets.
1887 1885 """
1888 1886 return util.hooks()
1889 1887
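# Illustrative usage (a sketch with a hypothetical extension name): hooks
# registered here receive the pushop before changesets are pushed:
#
#   def checkoutgoing(pushop):
#       # pushop.repo, pushop.remote and pushop.outgoing are available
#       pass
#   repo.prepushoutgoinghooks.add('myextension', checkoutgoing)
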
1890 1888 def pushkey(self, namespace, key, old, new):
1891 1889 try:
1892 1890 tr = self.currenttransaction()
1893 1891 hookargs = {}
1894 1892 if tr is not None:
1895 1893 hookargs.update(tr.hookargs)
1896 1894 hookargs['namespace'] = namespace
1897 1895 hookargs['key'] = key
1898 1896 hookargs['old'] = old
1899 1897 hookargs['new'] = new
1900 1898 self.hook('prepushkey', throw=True, **hookargs)
1901 1899 except error.HookAbort as exc:
1902 1900 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1903 1901 if exc.hint:
1904 1902 self.ui.write_err(_("(%s)\n") % exc.hint)
1905 1903 return False
1906 1904 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1907 1905 ret = pushkey.push(self, namespace, key, old, new)
1908 1906 def runhook():
1909 1907 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1910 1908 ret=ret)
1911 1909 self._afterlock(runhook)
1912 1910 return ret
1913 1911
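# Illustrative usage (a sketch with a hypothetical bookmark name): creating
# a bookmark through the pushkey protocol; values are hex nodes and the old
# value is empty when the key does not exist yet:
#
#   ok = repo.pushkey('bookmarks', 'mybook', '', repo['tip'].hex())
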
1914 1912 def listkeys(self, namespace):
1915 1913 self.hook('prelistkeys', throw=True, namespace=namespace)
1916 1914 self.ui.debug('listing keys for "%s"\n' % namespace)
1917 1915 values = pushkey.list(self, namespace)
1918 1916 self.hook('listkeys', namespace=namespace, values=values)
1919 1917 return values
1920 1918
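# Illustrative usage (a sketch): listing all bookmarks over the same
# pushkey mechanism:
#
#   marks = repo.listkeys('bookmarks')  # {bookmark name: hex node}
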
1921 1919 def debugwireargs(self, one, two, three=None, four=None, five=None):
1922 1920 '''used to test argument passing over the wire'''
1923 1921 return "%s %s %s %s %s" % (one, two, three, four, five)
1924 1922
1925 1923 def savecommitmessage(self, text):
1926 1924 fp = self.vfs('last-message.txt', 'wb')
1927 1925 try:
1928 1926 fp.write(text)
1929 1927 finally:
1930 1928 fp.close()
1931 1929 return self.pathto(fp.name[len(self.root) + 1:])
1932 1930
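# Illustrative usage (a sketch): the saved message lands in the repository's
# .hg directory and the returned path is relative to the current directory:
#
#   msgpath = repo.savecommitmessage('WIP: draft message\n')
#   # typically '.hg/last-message.txt'
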
1933 1931 # used to avoid circular references so destructors work
1934 1932 def aftertrans(files):
1935 1933 renamefiles = [tuple(t) for t in files]
1936 1934 def a():
1937 1935 for vfs, src, dest in renamefiles:
1938 1936 try:
1939 1937 vfs.rename(src, dest)
1940 1938 except OSError: # journal file does not yet exist
1941 1939 pass
1942 1940 return a
1943 1941
1944 1942 def undoname(fn):
1945 1943 base, name = os.path.split(fn)
1946 1944 assert name.startswith('journal')
1947 1945 return os.path.join(base, name.replace('journal', 'undo', 1))
1948 1946
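# Illustrative behaviour (a sketch): undoname maps a journal file to its
# undo counterpart:
#
#   undoname('/repo/.hg/store/journal')    ->  '/repo/.hg/store/undo'
#   undoname('.hg/journal.dirstate')       ->  '.hg/undo.dirstate'
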
1949 1947 def instance(ui, path, create):
1950 1948 return localrepository(ui, util.urllocalpath(path), create)
1951 1949
1952 1950 def islocal(path):
1953 1951 return True
1954 1952
1955 1953 def newreporequirements(repo):
1956 1954 """Determine the set of requirements for a new local repository.
1957 1955
1958 1956 Extensions can wrap this function to specify custom requirements for
1959 1957 new repositories.
1960 1958 """
1961 1959 ui = repo.ui
1962 1960 requirements = set(['revlogv1'])
1963 1961 if ui.configbool('format', 'usestore', True):
1964 1962 requirements.add('store')
1965 1963 if ui.configbool('format', 'usefncache', True):
1966 1964 requirements.add('fncache')
1967 1965 if ui.configbool('format', 'dotencode', True):
1968 1966 requirements.add('dotencode')
1969 1967
1970 1968 if scmutil.gdinitconfig(ui):
1971 1969 requirements.add('generaldelta')
1972 1970 if ui.configbool('experimental', 'treemanifest', False):
1973 1971 requirements.add('treemanifest')
1974 1972 if ui.configbool('experimental', 'manifestv2', False):
1975 1973 requirements.add('manifestv2')
1976 1974
1977 1975 return requirements
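# Illustrative usage (a sketch with a hypothetical requirement name): an
# extension can wrap newreporequirements, as the docstring above suggests,
# to add its own requirement ('localrepo' being this module as imported by
# the extension):
#
#   def wrapreqs(orig, repo):
#       reqs = orig(repo)
#       reqs.add('myext-feature')
#       return reqs
#   extensions.wrapfunction(localrepo, 'newreporequirements', wrapreqs)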