##// END OF EJS Templates
localrepo: jettison now-unused dirlog() method from localrepo
Augie Fackler -
r29709:b9ee2a1c default
parent child Browse files
Show More
@@ -1,1976 +1,1973 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
import errno
import functools
import hashlib
import inspect
import os
import random
import time
import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 urlerr = util.urlerr
62 62 urlreq = util.urlreq
63 63
class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        # Always operate on the unfiltered repo so that every filtered
        # view shares a single cache entry.
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
76 76
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # Tracked files live under .hg/store, not directly under .hg/.
        return obj.sjoin(fname)
81 81
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # Accessed on the unfiltered repo itself: compute/cache here.
            return super(unfilteredpropertycache, self).__get__(unfi)
        # Accessed through a filtered view: delegate to the unfiltered repo.
        return getattr(unfi, self.name)
90 90
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Store directly on the instance (bypassing descriptor __set__),
        # so each filtered view keeps its own cached value.
        object.__setattr__(obj, self.name, value)
96 96
97 97
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
101 101
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version

    Fix: wrap with functools.wraps so the decorated method keeps the
    original's __name__/__doc__ for introspection and error messages.
    """
    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        # Redirect the call to the unfiltered view of the repo.
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
107 107
# Capabilities advertised by a modern local peer.
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
# Legacy peers additionally understand changegroupsubset.
legacycaps = moderncaps.union(set(['changegroupsubset']))
111 111
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # Expose only the 'served' repoview of the repository to peers.
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    # Most methods below simply delegate to the underlying repository.

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
214 214
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # Advertise the legacy capability set ('changegroupsubset' included).
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
233 233
class localrepository(object):

    # On-disk store format features this class knows how to write.
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # All requirements supported for reading (format plus layout features).
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # Requirements that translate into store vfs opener options.
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # Name of the repoview filter applied to this instance; None = unfiltered.
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
246 246
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at *path*."""
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # Load the per-repo config before loading extensions so that
            # extensions enabled in .hg/hgrc are honored.
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            # only run setup functions contributed by enabled extensions
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                # a missing requires file means an old/empty repo; anything
                # else is a real error
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            # honor .hg/sharedpath when this repo shares its store
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
359 359
360 360 def close(self):
361 361 self._writecaches()
362 362
363 363 def _writecaches(self):
364 364 if self._revbranchcache:
365 365 self._revbranchcache.write()
366 366
367 367 def _restrictcapabilities(self, caps):
368 368 if self.ui.configbool('experimental', 'bundle2-advertise', True):
369 369 caps = set(caps)
370 370 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
371 371 caps.add('bundle2=' + urlreq.quote(capsblob))
372 372 return caps
373 373
    def _applyopenerreqs(self):
        """Translate requirements and config into store vfs options."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
394 394
    def _writerequirements(self):
        # Serialize self.requirements into the .hg/requires file.
        scmutil.writerequires(self.vfs, self.requirements)
397 397
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is exactly a subrepo root: legal
                    return True
                else:
                    # path is inside a subrepo: let the subrepo decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
435 435
    def peer(self):
        # Return a fresh local peer wrapper for this repository.
        return localpeer(self) # not cached to avoid reference cycle
438 438
439 439 def unfiltered(self):
440 440 """Return unfiltered version of the repository
441 441
442 442 Intended to be overwritten by filtered repo."""
443 443 return self
444 444
    def filtered(self, name):
        """Return a filtered version of a repository

        The returned object is a repoview proxy built over this repo's
        concrete class.
        """
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
452 452
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # Bookmark store; the cache is invalidated when either tracked
        # file changes.
        return bookmarks.bmstore(self)
456 456
457 457 @property
458 458 def _activebookmark(self):
459 459 return self._bookmarks.active
460 460
461 461 def bookmarkheads(self, bookmark):
462 462 name = bookmark.split('@', 1)[0]
463 463 heads = []
464 464 for mark, n in self._bookmarks.iteritems():
465 465 if mark.split('@', 1)[0] == name:
466 466 heads.append(n)
467 467 return heads
468 468
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # Cached phase data; see the invalidation caveat above.
        return phases.phasecache(self, self._phasedefaults)
475 475
    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        # the store is read-only unless marker creation is enabled
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
493 493
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # A transaction is pending for this repo in the environment:
            # also read the pending data from 00changelog.i.a.
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
502 502
    @storecache('00manifest.i')
    def manifest(self):
        # The manifest object, invalidated when 00manifest.i changes.
        return manifest.manifest(self.svfs)
506 506
    def dirlog(self, dir):
        # NOTE: this thin wrapper is removed by this commit; callers use
        # self.manifest.dirlog(dir) directly.
        return self.manifest.dirlog(dir)
509
    @repofilecache('dirstate')
    def dirstate(self):
        # _dirstatevalidate maps unknown recorded parents to nullid.
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)
514 511
515 512 def _dirstatevalidate(self, node):
516 513 try:
517 514 self.changelog.rev(node)
518 515 return node
519 516 except error.LookupError:
520 517 if not self._dirstatevalidatewarned:
521 518 self._dirstatevalidatewarned = True
522 519 self.ui.warn(_("warning: ignoring unknown"
523 520 " working parent %s!\n") % short(node))
524 521 return nullid
525 522
    def __getitem__(self, changeid):
        """Resolve *changeid* to a context object.

        None/wdirrev yield the working context; a slice yields a list of
        changectx objects (skipping filtered revisions); anything else
        is resolved to a single changectx.
        """
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
534 531
535 532 def __contains__(self, changeid):
536 533 try:
537 534 self[changeid]
538 535 return True
539 536 except error.RepoLookupError:
540 537 return False
541 538
542 539 def __nonzero__(self):
543 540 return True
544 541
545 542 def __len__(self):
546 543 return len(self.changelog)
547 544
548 545 def __iter__(self):
549 546 return iter(self.changelog)
550 547
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        # interpolate args, parse, then evaluate against this repo
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
566 563
567 564 def set(self, expr, *args):
568 565 '''Find revisions matching a revset and emit changectx instances.
569 566
570 567 This is a convenience wrapper around ``revs()`` that iterates the
571 568 result and is a generator of changectx instances.
572 569
573 570 Revset aliases from the configuration are not expanded. To expand
574 571 user aliases, consider calling ``scmutil.revrange()``.
575 572 '''
576 573 for r in self.revs(expr, *args):
577 574 yield self[r]
578 575
579 576 def url(self):
580 577 return 'file:' + self.root
581 578
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
590 587
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        """Low-level tagging: record *names* for *node*.

        Local tags go to .hg/localtags; global tags are written to
        .hgtags and committed.  Returns the tagging changeset's node for
        global tags, None for local ones.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines, ensuring the previous content ends with
            # a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # re-record the old node so the history of the tag
                    # is preserved in the file
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
664 661
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to clobber uncommitted changes to .hgtags
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
694 691
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived caches, filled lazily by tagslist()/nodetags().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
717 714
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # some revisions are filtered in this view: recompute rather
            # than trusting the (unfiltered) cache
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
733 730
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' always maps to the changelog tip
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
766 763
767 764 def tagtype(self, tagname):
768 765 '''
769 766 return the type of the given tag. result can be:
770 767
771 768 'local' : a local tag
772 769 'global' : a global tag
773 770 None : tag does not exist
774 771 '''
775 772
776 773 return self._tagscache.tagtypes.get(tagname)
777 774
778 775 def tagslist(self):
779 776 '''return a list of tags ordered by revision'''
780 777 if not self._tagscache.tagslist:
781 778 l = []
782 779 for t, n in self.tags().iteritems():
783 780 l.append((self.changelog.rev(n), t, n))
784 781 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
785 782
786 783 return self._tagscache.tagslist
787 784
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # build the reverse node -> sorted tag names mapping once
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
798 795
799 796 def nodebookmarks(self, node):
800 797 """return the list of bookmarks pointing to the specified node"""
801 798 marks = []
802 799 for bookmark, n in self._bookmarks.iteritems():
803 800 if n == node:
804 801 marks.append(bookmark)
805 802 return sorted(marks)
806 803
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the per-filter cache, then return this view's entry
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
812 809
    @unfilteredmethod
    def revbranchcache(self):
        # lazily create a single rev->branch cache on the unfiltered repo
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
818 815
819 816 def branchtip(self, branch, ignoremissing=False):
820 817 '''return the tip node for a given branch
821 818
822 819 If ignoremissing is True, then this method will not raise an error.
823 820 This is helpful for callers that only expect None for a missing branch
824 821 (e.g. namespace).
825 822
826 823 '''
827 824 try:
828 825 return self.branchmap().branchtip(branch)
829 826 except KeyError:
830 827 if not ignoremissing:
831 828 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
832 829 else:
833 830 pass
834 831
835 832 def lookup(self, key):
836 833 return self[key].node()
837 834
838 835 def lookupbranch(self, key, remote=None):
839 836 repo = remote or self
840 837 if key in repo.branchmap():
841 838 return key
842 839
843 840 repo = (remote and remote.local()) and remote or self
844 841 return repo[key].branch()
845 842
846 843 def known(self, nodes):
847 844 cl = self.changelog
848 845 nm = cl.nodemap
849 846 filtered = cl.filteredrevs
850 847 result = []
851 848 for n in nodes:
852 849 r = nm.get(n)
853 850 resp = not (r is None or r in filtered)
854 851 result.append(resp)
855 852 return result
856 853
857 854 def local(self):
858 855 return self
859 856
    def publishing(self):
        """Report whether this repo is a publishing repository."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)
864 861
865 862 def cancopy(self):
866 863 # so statichttprepo's override of local() works
867 864 if not self.local():
868 865 return False
869 866 if not self.publishing():
870 867 return True
871 868 # if publishing we can't copy if there is filtered content
872 869 return not self.filtered('visible').changelog.filteredrevs
873 870
874 871 def shared(self):
875 872 '''the type of shared repository (None if not shared)'''
876 873 if self.sharedpath != self.path:
877 874 return 'store'
878 875 return None
879 876
880 877 def join(self, f, *insidef):
881 878 return self.vfs.join(os.path.join(f, *insidef))
882 879
883 880 def wjoin(self, f, *insidef):
884 881 return self.vfs.reljoin(self.root, f, *insidef)
885 882
886 883 def file(self, f):
887 884 if f[0] == '/':
888 885 f = f[1:]
889 886 return filelog.filelog(self.svfs, f)
890 887
891 888 def changectx(self, changeid):
892 889 return self[changeid]
893 890
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents, fixing up copy records."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # drop copy records whose source/dest are not in the new parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
910 907
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
915 912
    def getcwd(self):
        """Return the current working directory (delegates to the dirstate)."""
        return self.dirstate.getcwd()
918 915
    def pathto(self, f, cwd=None):
        """Return ``f`` rendered relative to ``cwd`` (delegates to the dirstate)."""
        return self.dirstate.pathto(f, cwd)
921 918
    def wfile(self, f, mode='r'):
        """Open working-directory file ``f`` with ``mode`` via the working vfs."""
        return self.wvfs(f, mode)
924 921
    def _link(self, f):
        """True if working-directory file ``f`` is a symbolic link."""
        return self.wvfs.islink(f)
927 924
928 925 def _loadfilter(self, filter):
929 926 if filter not in self.filterpats:
930 927 l = []
931 928 for pat, cmd in self.ui.configitems(filter):
932 929 if cmd == '!':
933 930 continue
934 931 mf = matchmod.match(self.root, '', [pat])
935 932 fn = None
936 933 params = cmd
937 934 for name, filterfn in self._datafilters.iteritems():
938 935 if cmd.startswith(name):
939 936 fn = filterfn
940 937 params = cmd[len(name):].lstrip()
941 938 break
942 939 if not fn:
943 940 fn = lambda s, c, **kwargs: util.filter(s, c)
944 941 # Wrap old filters not supporting keyword arguments
945 942 if not inspect.getargspec(fn)[2]:
946 943 oldfn = fn
947 944 fn = lambda s, c, **kwargs: oldfn(s, c)
948 945 l.append((mf, fn, params))
949 946 self.filterpats[filter] = l
950 947 return self.filterpats[filter]
951 948
952 949 def _filter(self, filterpats, filename, data):
953 950 for mf, fn, cmd in filterpats:
954 951 if mf(filename):
955 952 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
956 953 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
957 954 break
958 955
959 956 return data
960 957
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached (matcher, filterfn, params) triples from the [encode] section
        return self._loadfilter('encode')
964 961
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached (matcher, filterfn, params) triples from the [decode] section
        return self._loadfilter('decode')
968 965
    def adddatafilter(self, name, filter):
        # register a named data filter; _loadfilter dispatches to it when a
        # configured [encode]/[decode] command starts with ``name``
        self._datafilters[name] = filter
971 968
972 969 def wread(self, filename):
973 970 if self._link(filename):
974 971 data = self.wvfs.readlink(filename)
975 972 else:
976 973 data = self.wvfs.read(filename)
977 974 return self._filter(self._encodefilterpats, filename, data)
978 975
979 976 def wwrite(self, filename, data, flags, backgroundclose=False):
980 977 """write ``data`` into ``filename`` in the working directory
981 978
982 979 This returns length of written (maybe decoded) data.
983 980 """
984 981 data = self._filter(self._decodefilterpats, filename, data)
985 982 if 'l' in flags:
986 983 self.wvfs.symlink(data, filename)
987 984 else:
988 985 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
989 986 if 'x' in flags:
990 987 self.wvfs.setflags(filename, False, True)
991 988 return len(data)
992 989
    def wwritedata(self, filename, data):
        """Run ``data`` through the 'decode' filters for ``filename``."""
        return self._filter(self._decodefilterpats, filename, data)
995 992
996 993 def currenttransaction(self):
997 994 """return the current transaction or None if non exists"""
998 995 if self._transref:
999 996 tr = self._transref()
1000 997 else:
1001 998 tr = None
1002 999
1003 1000 if tr and tr.running():
1004 1001 return tr
1005 1002 return None
1006 1003
    def transaction(self, desc, report=None):
        """Open a new transaction, or nest into the one already running.

        ``desc`` names the transaction for hooks and error messages;
        ``report`` optionally overrides the default warning writer used
        for rollback output.  The devel check below enforces that the
        store lock is already held.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            # a transaction is already running; callers get a nested scope
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # unique-ish transaction id, passed to hooks via ``txnid``
        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            # called when the transaction lock is released
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1094 1091
1095 1092 def _journalfiles(self):
1096 1093 return ((self.svfs, 'journal'),
1097 1094 (self.vfs, 'journal.dirstate'),
1098 1095 (self.vfs, 'journal.branch'),
1099 1096 (self.vfs, 'journal.desc'),
1100 1097 (self.vfs, 'journal.bookmarks'),
1101 1098 (self.svfs, 'journal.phaseroots'))
1102 1099
    def undofiles(self):
        """Return (vfs, path) pairs for the undo files paired with the journal."""
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1105 1102
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, description, bookmarks and phase
        roots into journal.* files so an interrupted transaction can be
        rolled back later."""
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        # first line: changelog length before the transaction; second: desc
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1116 1113
    def recover(self):
        """Roll back an interrupted transaction, if one is found.

        Returns True when a journal existed and was rolled back, False
        otherwise.  Takes the store lock for the duration.
        """
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
1130 1127
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction using the 'undo' files.

        Returns the result of _rollback() (0 on success) or 1 when no
        rollback information is available.
        """
        wlock = lock = dsguard = None
        try:
            # acquire wlock before lock, per the documented lock ordering
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1145 1142
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Implementation of rollback(); wlock and lock must be held.

        ``dsguard`` protects the dirstate while the undo files are
        replayed; it is closed when the dirstate backup is restored
        directly here.  Returns 0 (including for dry runs).
        """
        ui = self.ui
        try:
            # undo.desc format: old changelog length, description, and an
            # optional detail line
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        # refuse to roll back a commit from a different checkout unless forced
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # did the rollback remove the working directory's parents?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1216 1213
1217 1214 def invalidatecaches(self):
1218 1215
1219 1216 if '_tagscache' in vars(self):
1220 1217 # can't use delattr on proxy
1221 1218 del self.__dict__['_tagscache']
1222 1219
1223 1220 self.unfiltered()._branchcaches.clear()
1224 1221 self.invalidatevolatilesets()
1225 1222
    def invalidatevolatilesets(self):
        """Drop caches of filtered revision sets and obsolescence data."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1229 1226
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop each cached property of the dirstate first; properties
            # that were never computed simply raise AttributeError
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            # then drop the dirstate itself; filecaches live on the
            # unfiltered repo (see repofilecache)
            delattr(self.unfiltered(), 'dirstate')
1246 1243
    def invalidate(self, clearfilecache=False):
        """Drop cached store data so it is reread on next access.

        When ``clearfilecache`` is true the filecache entries themselves
        are removed as well, not just the cached attribute values.
        """
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                # cached value was never materialized; nothing to drop
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()
1262 1259
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1269 1266
1270 1267 def _refreshfilecachestats(self, tr):
1271 1268 """Reload stats of cached files so that they are flagged as valid"""
1272 1269 for k, ce in self._filecache.items():
1273 1270 if k == 'dirstate' or k not in self.__dict__:
1274 1271 continue
1275 1272 ce.refresh()
1276 1273
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire ``lockname`` on ``vfs`` and return the lock object.

        ``releasefn``/``acquirefn``/``inheritchecker`` are forwarded to
        lockmod.lock.  When ``parentenvvar`` is set, its environment
        value lets the lock be inherited from a parent process.  With
        ``wait`` false a busy lock raises error.LockHeld; otherwise we
        retry with a (configurable) 600 second timeout.
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            # first attempt: non-blocking (timeout 0)
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1301 1298
1302 1299 def _afterlock(self, callback):
1303 1300 """add a callback to be run when the repository is fully unlocked
1304 1301
1305 1302 The callback will be executed when the outermost lock is released
1306 1303 (with wlock being higher level than 'lock')."""
1307 1304 for ref in (self._wlockref, self._lockref):
1308 1305 l = ref and ref()
1309 1306 if l and l.held:
1310 1307 l.postrelease.append(callback)
1311 1308 break
1312 1309 else: # no lock have been found.
1313 1310 callback()
1314 1311
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            # a lock is already held: bump its depth (re-entrant) and reuse it
            l.lock()
            return l

        # on acquisition, invalidate() drops stale cached store data
        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1331 1328
    def _wlockchecktransaction(self):
        # inheritchecker for wlock: inheriting it while a transaction is
        # running is forbidden
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')
1336 1333
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # wlock already held: bump its depth (re-entrant) and reuse it
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # on release: discard a half-finished parent change, otherwise
            # persist pending dirstate writes, then mark the cached
            # dirstate stat as valid again
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1372 1369
1373 1370 def _currentlock(self, lockref):
1374 1371 """Returns the lock if it's held, or None if it's not."""
1375 1372 if lockref is None:
1376 1373 return None
1377 1374 l = lockref()
1378 1375 if l is None or not l.held:
1379 1376 return None
1380 1377 return l
1381 1378
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)
1385 1382
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node for the committed revision (a parent
        node is reused when the content is unchanged).  Files whose
        content or flags changed are appended to ``changelist``.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the file already has a filelog entry; reuse it when it matches
            # one of the parents (flag-only changes still go in changelist)
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1473 1470
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't commitable

        ``fail`` is called with (filename, message) for every explicitly
        named file or visited directory (``vdirs``) that cannot be
        committed.  Only exact/prefix matchers are checked; patterns are
        allowed to match nothing.
        """
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                # '.', already-matched files and subrepos are fine
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    # a named directory must contain at least one match
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
1494 1491
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there was
        nothing to commit (and empty commits are not allowed).
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record explicitly-visited directories so they can be
            # validated in checkcommitpatterns() below
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1662 1659
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the new changeset.  When ``error`` is true,
        IOErrors while committing individual files are reported even for
        missing files (ENOENT).
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                # only report files removed that actually existed in a parent
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1748 1745
1749 1746 @unfilteredmethod
1750 1747 def destroying(self):
1751 1748 '''Inform the repository that nodes are about to be destroyed.
1752 1749 Intended for use by strip and rollback, so there's a common
1753 1750 place for anything that has to be done before destroying history.
1754 1751
1755 1752 This is mostly useful for saving state that is in memory and waiting
1756 1753 to be flushed when the current lock is released. Because a call to
1757 1754 destroyed is imminent, the repo will be invalidated causing those
1758 1755 changes to stay in memory (waiting for the next unlock), or vanish
1759 1756 completely.
1760 1757 '''
1761 1758 # When using the same lock to commit and strip, the phasecache is left
1762 1759 # dirty after committing. Then when we strip, the repo is invalidated,
1763 1760 # causing those changes to disappear.
1764 1761 if '_phasecache' in vars(self):
1765 1762 self._phasecache.write()
1766 1763
1767 1764 @unfilteredmethod
1768 1765 def destroyed(self):
1769 1766 '''Inform the repository that nodes have been destroyed.
1770 1767 Intended for use by strip and rollback, so there's a common
1771 1768 place for anything that has to be done after destroying history.
1772 1769 '''
1773 1770 # When one tries to:
1774 1771 # 1) destroy nodes thus calling this method (e.g. strip)
1775 1772 # 2) use phasecache somewhere (e.g. commit)
1776 1773 #
1777 1774 # then 2) will fail because the phasecache contains nodes that were
1778 1775 # removed. We can either remove phasecache from the filecache,
1779 1776 # causing it to reload next time it is accessed, or simply filter
1780 1777 # the removed nodes now and write the updated cache.
1781 1778 self._phasecache.filterunknown(self)
1782 1779 self._phasecache.write()
1783 1780
1784 1781 # update the 'served' branch cache to help read only server process
1785 1782 # Thanks to branchcache collaboration this is done from the nearest
1786 1783 # filtered subset and it is expected to be fast.
1787 1784 branchmap.updatecache(self.filtered('served'))
1788 1785
1789 1786 # Ensure the persistent tag cache is updated. Doing it now
1790 1787 # means that the tag cache only has to worry about destroyed
1791 1788 # heads immediately after a strip/rollback. That in turn
1792 1789 # guarantees that "cachetip == currenttip" (comparing both rev
1793 1790 # and node) always means no nodes have been added or destroyed.
1794 1791
1795 1792 # XXX this is suboptimal when qrefresh'ing: we strip the current
1796 1793 # head, refresh the tag cache, then immediately add a new head.
1797 1794 # But I think doing it this way is necessary for the "instant
1798 1795 # tag cache retrieval" case to work.
1799 1796 self.invalidate()
1800 1797
1801 1798 def walk(self, match, node=None):
1802 1799 '''
1803 1800 walk recursively through the directory tree or a given
1804 1801 changeset, finding all files matched by the match
1805 1802 function
1806 1803 '''
1807 1804 return self[node].walk(match)
1808 1805
1809 1806 def status(self, node1='.', node2=None, match=None,
1810 1807 ignored=False, clean=False, unknown=False,
1811 1808 listsubrepos=False):
1812 1809 '''a convenience method that calls node1.status(node2)'''
1813 1810 return self[node1].status(node2, match, ignored, clean, unknown,
1814 1811 listsubrepos)
1815 1812
1816 1813 def heads(self, start=None):
1817 1814 heads = self.changelog.heads(start)
1818 1815 # sort the output in rev descending order
1819 1816 return sorted(heads, key=self.changelog.rev, reverse=True)
1820 1817
1821 1818 def branchheads(self, branch=None, start=None, closed=False):
1822 1819 '''return a (possibly filtered) list of heads for the given branch
1823 1820
1824 1821 Heads are returned in topological order, from newest to oldest.
1825 1822 If branch is None, use the dirstate branch.
1826 1823 If start is not None, return only heads reachable from start.
1827 1824 If closed is True, return heads that are marked as closed as well.
1828 1825 '''
1829 1826 if branch is None:
1830 1827 branch = self[None].branch()
1831 1828 branches = self.branchmap()
1832 1829 if branch not in branches:
1833 1830 return []
1834 1831 # the cache returns heads ordered lowest to highest
1835 1832 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1836 1833 if start is not None:
1837 1834 # filter out the heads that cannot be reached from startrev
1838 1835 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1839 1836 bheads = [h for h in bheads if h in fbheads]
1840 1837 return bheads
1841 1838
1842 1839 def branches(self, nodes):
1843 1840 if not nodes:
1844 1841 nodes = [self.changelog.tip()]
1845 1842 b = []
1846 1843 for n in nodes:
1847 1844 t = n
1848 1845 while True:
1849 1846 p = self.changelog.parents(n)
1850 1847 if p[1] != nullid or p[0] == nullid:
1851 1848 b.append((t, n, p[0], p[1]))
1852 1849 break
1853 1850 n = p[0]
1854 1851 return b
1855 1852
1856 1853 def between(self, pairs):
1857 1854 r = []
1858 1855
1859 1856 for top, bottom in pairs:
1860 1857 n, l, i = top, [], 0
1861 1858 f = 1
1862 1859
1863 1860 while n != bottom and n != nullid:
1864 1861 p = self.changelog.parents(n)[0]
1865 1862 if i == f:
1866 1863 l.append(n)
1867 1864 f = f * 2
1868 1865 n = p
1869 1866 i += 1
1870 1867
1871 1868 r.append(l)
1872 1869
1873 1870 return r
1874 1871
1875 1872 def checkpush(self, pushop):
1876 1873 """Extensions can override this function if additional checks have
1877 1874 to be performed before pushing, or call it if they override push
1878 1875 command.
1879 1876 """
1880 1877 pass
1881 1878
1882 1879 @unfilteredpropertycache
1883 1880 def prepushoutgoinghooks(self):
1884 1881 """Return util.hooks consists of a pushop with repo, remote, outgoing
1885 1882 methods, which are called before pushing changesets.
1886 1883 """
1887 1884 return util.hooks()
1888 1885
1889 1886 def pushkey(self, namespace, key, old, new):
1890 1887 try:
1891 1888 tr = self.currenttransaction()
1892 1889 hookargs = {}
1893 1890 if tr is not None:
1894 1891 hookargs.update(tr.hookargs)
1895 1892 hookargs['namespace'] = namespace
1896 1893 hookargs['key'] = key
1897 1894 hookargs['old'] = old
1898 1895 hookargs['new'] = new
1899 1896 self.hook('prepushkey', throw=True, **hookargs)
1900 1897 except error.HookAbort as exc:
1901 1898 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1902 1899 if exc.hint:
1903 1900 self.ui.write_err(_("(%s)\n") % exc.hint)
1904 1901 return False
1905 1902 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1906 1903 ret = pushkey.push(self, namespace, key, old, new)
1907 1904 def runhook():
1908 1905 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1909 1906 ret=ret)
1910 1907 self._afterlock(runhook)
1911 1908 return ret
1912 1909
1913 1910 def listkeys(self, namespace):
1914 1911 self.hook('prelistkeys', throw=True, namespace=namespace)
1915 1912 self.ui.debug('listing keys for "%s"\n' % namespace)
1916 1913 values = pushkey.list(self, namespace)
1917 1914 self.hook('listkeys', namespace=namespace, values=values)
1918 1915 return values
1919 1916
1920 1917 def debugwireargs(self, one, two, three=None, four=None, five=None):
1921 1918 '''used to test argument passing over the wire'''
1922 1919 return "%s %s %s %s %s" % (one, two, three, four, five)
1923 1920
1924 1921 def savecommitmessage(self, text):
1925 1922 fp = self.vfs('last-message.txt', 'wb')
1926 1923 try:
1927 1924 fp.write(text)
1928 1925 finally:
1929 1926 fp.close()
1930 1927 return self.pathto(fp.name[len(self.root) + 1:])
1931 1928
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable renaming every (vfs, src, dest) entry of *files*.

    The entries are copied into plain tuples up front so the returned
    closure holds no reference back to the transaction.
    """
    renames = [tuple(t) for t in files]
    def renameall():
        for vfs, src, dest in renames:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
1942 1939
def undoname(fn):
    """Map a journal file path onto the corresponding undo file path."""
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    return os.path.join(directory, basename.replace('journal', 'undo', 1))
1947 1944
def instance(ui, path, create):
    """Instantiate a localrepository for *path* (converted to a local path)."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
1950 1947
def islocal(path):
    """A local repository is always local, regardless of *path*."""
    return True
1953 1950
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])

    # format options that default to on
    for setting, requirement in [('usestore', 'store'),
                                 ('usefncache', 'fncache'),
                                 ('dotencode', 'dotencode')]:
        if ui.configbool('format', setting, True):
            requirements.add(requirement)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')

    # experimental features that default to off
    for feature in ('treemanifest', 'manifestv2'):
        if ui.configbool('experimental', feature, False):
            requirements.add(feature)

    return requirements
General Comments 0
You need to be logged in to leave comments. Login now