tags: deprecated 'repo.tag'...
Pierre-Yves David
r31672:e6fd7930 default
@@ -1,1983 +1,1984 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repoview,
54 54 revset,
55 55 revsetlang,
56 56 scmutil,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
66 66 release = lockmod.release
67 67 urlerr = util.urlerr
68 68 urlreq = util.urlreq
69 69
70 70 class repofilecache(scmutil.filecache):
71 71 """All filecache usage on repo is done for logic that should be unfiltered
72 72 """
73 73
74 74 def join(self, obj, fname):
75 75 return obj.vfs.join(fname)
76 76 def __get__(self, repo, type=None):
77 77 if repo is None:
78 78 return self
79 79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
80 80 def __set__(self, repo, value):
81 81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
82 82 def __delete__(self, repo):
83 83 return super(repofilecache, self).__delete__(repo.unfiltered())
84 84
85 85 class storecache(repofilecache):
86 86 """filecache for files in the store"""
87 87 def join(self, obj, fname):
88 88 return obj.sjoin(fname)
89 89
90 90 class unfilteredpropertycache(util.propertycache):
91 91 """propertycache that applies to the unfiltered repo only"""
92 92
93 93 def __get__(self, repo, type=None):
94 94 unfi = repo.unfiltered()
95 95 if unfi is repo:
96 96 return super(unfilteredpropertycache, self).__get__(unfi)
97 97 return getattr(unfi, self.name)
98 98
99 99 class filteredpropertycache(util.propertycache):
100 100 """propertycache that must take filtering into account"""
101 101
102 102 def cachevalue(self, obj, value):
103 103 object.__setattr__(obj, self.name, value)
104 104
105 105
106 106 def hasunfilteredcache(repo, name):
107 107 """check if a repo has an unfilteredpropertycache value for <name>"""
108 108 return name in vars(repo.unfiltered())
109 109
110 110 def unfilteredmethod(orig):
111 111 """decorate a method that always needs to run on the unfiltered version"""
112 112 def wrapper(repo, *args, **kwargs):
113 113 return orig(repo.unfiltered(), *args, **kwargs)
114 114 return wrapper
115 115
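# Editor's sketch (hypothetical, not part of this changeset): the caching
# helpers above all route attribute access through repo.unfiltered() so
# that every filtered view shares one cache. The same pattern in isolation:
class _sharedcacheprop(object):
    """descriptor caching its value on the unfiltered object only"""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        if obj is None:
            return self
        unfi = obj.unfiltered()           # always use the base object
        if self.name not in vars(unfi):
            vars(unfi)[self.name] = self.func(unfi)
        return vars(unfi)[self.name]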
116 116 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 117 'unbundle'))
118 118 legacycaps = moderncaps.union(set(['changegroupsubset']))
119 119
120 120 class localpeer(peer.peerrepository):
121 121 '''peer for a local repo; reflects only the most recent API'''
122 122
123 123 def __init__(self, repo, caps=None):
124 124 if caps is None:
125 125 caps = moderncaps.copy()
126 126 peer.peerrepository.__init__(self)
127 127 self._repo = repo.filtered('served')
128 128 self.ui = repo.ui
129 129 self._caps = repo._restrictcapabilities(caps)
130 130 self.requirements = repo.requirements
131 131 self.supportedformats = repo.supportedformats
132 132
133 133 def close(self):
134 134 self._repo.close()
135 135
136 136 def _capabilities(self):
137 137 return self._caps
138 138
139 139 def local(self):
140 140 return self._repo
141 141
142 142 def canpush(self):
143 143 return True
144 144
145 145 def url(self):
146 146 return self._repo.url()
147 147
148 148 def lookup(self, key):
149 149 return self._repo.lookup(key)
150 150
151 151 def branchmap(self):
152 152 return self._repo.branchmap()
153 153
154 154 def heads(self):
155 155 return self._repo.heads()
156 156
157 157 def known(self, nodes):
158 158 return self._repo.known(nodes)
159 159
160 160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 161 **kwargs):
162 162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 163 common=common, bundlecaps=bundlecaps,
164 164 **kwargs)
165 165 cb = util.chunkbuffer(chunks)
166 166
167 167 if bundlecaps is not None and 'HG20' in bundlecaps:
168 168 # When requesting a bundle2, getbundle returns a stream to make the
169 169 # wire-level function happier. We need to build a proper object
170 170 # from it in the local peer.
171 171 return bundle2.getunbundler(self.ui, cb)
172 172 else:
173 173 return changegroup.getunbundler('01', cb, None)
174 174
175 175 # TODO We might want to move the next two calls into legacypeer and add
176 176 # unbundle instead.
177 177
178 178 def unbundle(self, cg, heads, url):
179 179 """apply a bundle on a repo
180 180
181 181 This function handles the repo locking itself."""
182 182 try:
183 183 try:
184 184 cg = exchange.readbundle(self.ui, cg, None)
185 185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 186 if util.safehasattr(ret, 'getchunks'):
187 187 # This is a bundle20 object, turn it into an unbundler.
188 188 # This little dance should be dropped eventually when the
189 189 # API is finally improved.
190 190 stream = util.chunkbuffer(ret.getchunks())
191 191 ret = bundle2.getunbundler(self.ui, stream)
192 192 return ret
193 193 except Exception as exc:
194 194 # If the exception contains output salvaged from a bundle2
195 195 # reply, we need to make sure it is printed before continuing
196 196 # to fail. So we build a bundle2 with such output and consume
197 197 # it directly.
198 198 #
199 199 # This is not very elegant but allows a "simple" solution for
200 200 # issue4594
201 201 output = getattr(exc, '_bundle2salvagedoutput', ())
202 202 if output:
203 203 bundler = bundle2.bundle20(self._repo.ui)
204 204 for out in output:
205 205 bundler.addpart(out)
206 206 stream = util.chunkbuffer(bundler.getchunks())
207 207 b = bundle2.getunbundler(self.ui, stream)
208 208 bundle2.processbundle(self._repo, b)
209 209 raise
210 210 except error.PushRaced as exc:
211 211 raise error.ResponseError(_('push failed:'), str(exc))
212 212
213 213 def lock(self):
214 214 return self._repo.lock()
215 215
216 216 def addchangegroup(self, cg, source, url):
217 217 return cg.apply(self._repo, source, url)
218 218
219 219 def pushkey(self, namespace, key, old, new):
220 220 return self._repo.pushkey(namespace, key, old, new)
221 221
222 222 def listkeys(self, namespace):
223 223 return self._repo.listkeys(namespace)
224 224
225 225 def debugwireargs(self, one, two, three=None, four=None, five=None):
226 226 '''used to test argument passing over the wire'''
227 227 return "%s %s %s %s %s" % (one, two, three, four, five)
228 228
229 229 class locallegacypeer(localpeer):
230 230 '''peer extension which implements legacy methods too; used for tests with
231 231 restricted capabilities'''
232 232
233 233 def __init__(self, repo):
234 234 localpeer.__init__(self, repo, caps=legacycaps)
235 235
236 236 def branches(self, nodes):
237 237 return self._repo.branches(nodes)
238 238
239 239 def between(self, pairs):
240 240 return self._repo.between(pairs)
241 241
242 242 def changegroup(self, basenodes, source):
243 243 return changegroup.changegroup(self._repo, basenodes, source)
244 244
245 245 def changegroupsubset(self, bases, heads, source):
246 246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
247 247
248 248 class localrepository(object):
249 249
250 250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
251 251 'manifestv2'))
252 252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
253 253 'relshared', 'dotencode'))
254 254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
255 255 filtername = None
256 256
257 257 # a list of (ui, featureset) functions.
258 258 # only functions defined in module of enabled extensions are invoked
259 259 featuresetupfuncs = set()
260 260
261 261 def __init__(self, baseui, path, create=False):
262 262 self.requirements = set()
263 263 # wvfs: rooted at the repository root, used to access the working copy
264 264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 266 self.vfs = None
267 267 # svfs: usually rooted at .hg/store, used to access repository history
268 268 # If this is a shared repository, this vfs may point to another
269 269 # repository's .hg/store directory.
270 270 self.svfs = None
271 271 self.root = self.wvfs.base
272 272 self.path = self.wvfs.join(".hg")
273 273 self.origroot = path
274 274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
275 275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
276 276 realfs=False)
277 277 self.vfs = vfsmod.vfs(self.path)
278 278 self.baseui = baseui
279 279 self.ui = baseui.copy()
280 280 self.ui.copy = baseui.copy # prevent copying repo configuration
281 281 # A list of callbacks to shape the phases if no data were found.
282 282 # Callbacks are in the form: func(repo, roots) --> processed root.
283 283 # This list is to be filled by extensions during repo setup.
284 284 self._phasedefaults = []
285 285 try:
286 286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
287 287 self._loadextensions()
288 288 except IOError:
289 289 pass
290 290
291 291 if self.featuresetupfuncs:
292 292 self.supported = set(self._basesupported) # use private copy
293 293 extmods = set(m.__name__ for n, m
294 294 in extensions.extensions(self.ui))
295 295 for setupfunc in self.featuresetupfuncs:
296 296 if setupfunc.__module__ in extmods:
297 297 setupfunc(self.ui, self.supported)
298 298 else:
299 299 self.supported = self._basesupported
300 300 color.setup(self.ui)
301 301
302 302 # Add compression engines.
303 303 for name in util.compengines:
304 304 engine = util.compengines[name]
305 305 if engine.revlogheader():
306 306 self.supported.add('exp-compression-%s' % name)
307 307
308 308 if not self.vfs.isdir():
309 309 if create:
310 310 self.requirements = newreporequirements(self)
311 311
312 312 if not self.wvfs.exists():
313 313 self.wvfs.makedirs()
314 314 self.vfs.makedir(notindexed=True)
315 315
316 316 if 'store' in self.requirements:
317 317 self.vfs.mkdir("store")
318 318
319 319 # create an invalid changelog
320 320 self.vfs.append(
321 321 "00changelog.i",
322 322 '\0\0\0\2' # represents revlogv2
323 323 ' dummy changelog to prevent using the old repo layout'
324 324 )
325 325 else:
326 326 raise error.RepoError(_("repository %s not found") % path)
327 327 elif create:
328 328 raise error.RepoError(_("repository %s already exists") % path)
329 329 else:
330 330 try:
331 331 self.requirements = scmutil.readrequires(
332 332 self.vfs, self.supported)
333 333 except IOError as inst:
334 334 if inst.errno != errno.ENOENT:
335 335 raise
336 336
337 337 self.sharedpath = self.path
338 338 try:
339 339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
340 340 if 'relshared' in self.requirements:
341 341 sharedpath = self.vfs.join(sharedpath)
342 342 vfs = vfsmod.vfs(sharedpath, realpath=True)
343 343 s = vfs.base
344 344 if not vfs.exists():
345 345 raise error.RepoError(
346 346 _('.hg/sharedpath points to nonexistent directory %s') % s)
347 347 self.sharedpath = s
348 348 except IOError as inst:
349 349 if inst.errno != errno.ENOENT:
350 350 raise
351 351
352 352 self.store = store.store(
353 353 self.requirements, self.sharedpath, vfsmod.vfs)
354 354 self.spath = self.store.path
355 355 self.svfs = self.store.vfs
356 356 self.sjoin = self.store.join
357 357 self.vfs.createmode = self.store.createmode
358 358 self._applyopenerreqs()
359 359 if create:
360 360 self._writerequirements()
361 361
362 362 self._dirstatevalidatewarned = False
363 363
364 364 self._branchcaches = {}
365 365 self._revbranchcache = None
366 366 self.filterpats = {}
367 367 self._datafilters = {}
368 368 self._transref = self._lockref = self._wlockref = None
369 369
370 370 # A cache for various files under .hg/ that tracks file changes,
371 371 # (used by the filecache decorator)
372 372 #
373 373 # Maps a property name to its util.filecacheentry
374 374 self._filecache = {}
375 375
376 376 # holds sets of revisions to be filtered
377 377 # should be cleared when something might have changed the filter value:
378 378 # - new changesets,
379 379 # - phase change,
380 380 # - new obsolescence marker,
381 381 # - working directory parent change,
382 382 # - bookmark changes
383 383 self.filteredrevcache = {}
384 384
385 385 # generic mapping between names and nodes
386 386 self.names = namespaces.namespaces()
387 387
388 388 @property
389 389 def wopener(self):
390 390 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
391 391 return self.wvfs
392 392
393 393 @property
394 394 def opener(self):
395 395 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
396 396 return self.vfs
397 397
398 398 def close(self):
399 399 self._writecaches()
400 400
401 401 def _loadextensions(self):
402 402 extensions.loadall(self.ui)
403 403
404 404 def _writecaches(self):
405 405 if self._revbranchcache:
406 406 self._revbranchcache.write()
407 407
408 408 def _restrictcapabilities(self, caps):
409 409 if self.ui.configbool('experimental', 'bundle2-advertise', True):
410 410 caps = set(caps)
411 411 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
412 412 caps.add('bundle2=' + urlreq.quote(capsblob))
413 413 return caps
414 414
415 415 def _applyopenerreqs(self):
416 416 self.svfs.options = dict((r, 1) for r in self.requirements
417 417 if r in self.openerreqs)
418 418 # experimental config: format.chunkcachesize
419 419 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
420 420 if chunkcachesize is not None:
421 421 self.svfs.options['chunkcachesize'] = chunkcachesize
422 422 # experimental config: format.maxchainlen
423 423 maxchainlen = self.ui.configint('format', 'maxchainlen')
424 424 if maxchainlen is not None:
425 425 self.svfs.options['maxchainlen'] = maxchainlen
426 426 # experimental config: format.manifestcachesize
427 427 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
428 428 if manifestcachesize is not None:
429 429 self.svfs.options['manifestcachesize'] = manifestcachesize
430 430 # experimental config: format.aggressivemergedeltas
431 431 aggressivemergedeltas = self.ui.configbool('format',
432 432 'aggressivemergedeltas', False)
433 433 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
434 434 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
435 435
436 436 for r in self.requirements:
437 437 if r.startswith('exp-compression-'):
438 438 self.svfs.options['compengine'] = r[len('exp-compression-'):]
439 439
440 440 def _writerequirements(self):
441 441 scmutil.writerequires(self.vfs, self.requirements)
442 442
443 443 def _checknested(self, path):
444 444 """Determine if path is a legal nested repository."""
445 445 if not path.startswith(self.root):
446 446 return False
447 447 subpath = path[len(self.root) + 1:]
448 448 normsubpath = util.pconvert(subpath)
449 449
450 450 # XXX: Checking against the current working copy is wrong in
451 451 # the sense that it can reject things like
452 452 #
453 453 # $ hg cat -r 10 sub/x.txt
454 454 #
455 455 # if sub/ is no longer a subrepository in the working copy
456 456 # parent revision.
457 457 #
458 458 # However, it can of course also allow things that would have
459 459 # been rejected before, such as the above cat command if sub/
460 460 # is a subrepository now, but was a normal directory before.
461 461 # The old path auditor would have rejected by mistake since it
462 462 # panics when it sees sub/.hg/.
463 463 #
464 464 # All in all, checking against the working copy seems sensible
465 465 # since we want to prevent access to nested repositories on
466 466 # the filesystem *now*.
467 467 ctx = self[None]
468 468 parts = util.splitpath(subpath)
469 469 while parts:
470 470 prefix = '/'.join(parts)
471 471 if prefix in ctx.substate:
472 472 if prefix == normsubpath:
473 473 return True
474 474 else:
475 475 sub = ctx.sub(prefix)
476 476 return sub.checknested(subpath[len(prefix) + 1:])
477 477 else:
478 478 parts.pop()
479 479 return False
480 480
481 481 def peer(self):
482 482 return localpeer(self) # not cached to avoid reference cycle
483 483
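# Editor's sketch (assumes a repository exists at 'path'; not an upstream
# test): obtaining and querying the uncached local peer returned above.
def _demopeer(ui, path):
    from mercurial import hg
    repo = hg.repository(ui, path)
    peer = repo.peer()                 # localpeer over the 'served' view
    return peer.heads()                # head nodes, as seen over the wire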
484 484 def unfiltered(self):
485 485 """Return unfiltered version of the repository
486 486
487 487 Intended to be overwritten by filtered repo."""
488 488 return self
489 489
490 490 def filtered(self, name):
491 491 """Return a filtered version of a repository"""
492 492 # build a new class with the mixin and the current class
493 493 # (possibly subclass of the repo)
494 494 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
495 495 pass
496 496 return filteredrepo(self, name)
497 497
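# Editor's sketch (hypothetical names): the dynamic-mixin trick used by
# filtered() above, shown on plain classes. A new class combining the
# view mixin with the concrete class is built per call.
def _demofiltered():
    class base(object):
        def who(self):
            return 'base'
    class viewmixin(object):
        def who(self):
            return 'view of ' + super(viewmixin, self).who()
    class viewed(viewmixin, base):     # same shape as filteredrepo
        pass
    return viewed().who()              # -> 'view of base'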
498 498 @repofilecache('bookmarks', 'bookmarks.current')
499 499 def _bookmarks(self):
500 500 return bookmarks.bmstore(self)
501 501
502 502 @property
503 503 def _activebookmark(self):
504 504 return self._bookmarks.active
505 505
506 506 def bookmarkheads(self, bookmark):
507 507 name = bookmark.split('@', 1)[0]
508 508 heads = []
509 509 for mark, n in self._bookmarks.iteritems():
510 510 if mark.split('@', 1)[0] == name:
511 511 heads.append(n)
512 512 return heads
513 513
514 514 # _phaserevs and _phasesets depend on changelog. what we need is to
515 515 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
516 516 # can't be easily expressed in filecache mechanism.
517 517 @storecache('phaseroots', '00changelog.i')
518 518 def _phasecache(self):
519 519 return phases.phasecache(self, self._phasedefaults)
520 520
521 521 @storecache('obsstore')
522 522 def obsstore(self):
523 523 # read default format for new obsstore.
524 524 # developer config: format.obsstore-version
525 525 defaultformat = self.ui.configint('format', 'obsstore-version', None)
526 526 # rely on obsstore class default when possible.
527 527 kwargs = {}
528 528 if defaultformat is not None:
529 529 kwargs['defaultformat'] = defaultformat
530 530 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
531 531 store = obsolete.obsstore(self.svfs, readonly=readonly,
532 532 **kwargs)
533 533 if store and readonly:
534 534 self.ui.warn(
535 535 _('obsolete feature not enabled but %i markers found!\n')
536 536 % len(list(store)))
537 537 return store
538 538
539 539 @storecache('00changelog.i')
540 540 def changelog(self):
541 541 c = changelog.changelog(self.svfs)
542 542 if txnutil.mayhavepending(self.root):
543 543 c.readpending('00changelog.i.a')
544 544 return c
545 545
546 546 def _constructmanifest(self):
547 547 # This is a temporary function while we migrate from manifest to
548 548 # manifestlog. It allows bundlerepo and unionrepo to intercept the
549 549 # manifest creation.
550 550 return manifest.manifestrevlog(self.svfs)
551 551
552 552 @storecache('00manifest.i')
553 553 def manifestlog(self):
554 554 return manifest.manifestlog(self.svfs, self)
555 555
556 556 @repofilecache('dirstate')
557 557 def dirstate(self):
558 558 return dirstate.dirstate(self.vfs, self.ui, self.root,
559 559 self._dirstatevalidate)
560 560
561 561 def _dirstatevalidate(self, node):
562 562 try:
563 563 self.changelog.rev(node)
564 564 return node
565 565 except error.LookupError:
566 566 if not self._dirstatevalidatewarned:
567 567 self._dirstatevalidatewarned = True
568 568 self.ui.warn(_("warning: ignoring unknown"
569 569 " working parent %s!\n") % short(node))
570 570 return nullid
571 571
572 572 def __getitem__(self, changeid):
573 573 if changeid is None or changeid == wdirrev:
574 574 return context.workingctx(self)
575 575 if isinstance(changeid, slice):
576 576 return [context.changectx(self, i)
577 577 for i in xrange(*changeid.indices(len(self)))
578 578 if i not in self.changelog.filteredrevs]
579 579 return context.changectx(self, changeid)
580 580
581 581 def __contains__(self, changeid):
582 582 try:
583 583 self[changeid]
584 584 return True
585 585 except error.RepoLookupError:
586 586 return False
587 587
588 588 def __nonzero__(self):
589 589 return True
590 590
591 591 __bool__ = __nonzero__
592 592
593 593 def __len__(self):
594 594 return len(self.changelog)
595 595
596 596 def __iter__(self):
597 597 return iter(self.changelog)
598 598
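# Editor's sketch (assumes an open repo): what the container protocol
# above enables for callers.
def _democontainer(repo):
    n = len(repo)                      # number of changesets
    first = repo[0] if n else None     # changectx by revision number
    wctx = repo[None]                  # workingctx for the working dir
    known = 'tip' in repo              # __contains__ via lookup
    revs = list(iter(repo))            # changelog revision numbers
    return n, first, wctx, known, revs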
599 599 def revs(self, expr, *args):
600 600 '''Find revisions matching a revset.
601 601
602 602 The revset is specified as a string ``expr`` that may contain
603 603 %-formatting to escape certain types. See ``revsetlang.formatspec``.
604 604
605 605 Revset aliases from the configuration are not expanded. To expand
606 606 user aliases, consider calling ``scmutil.revrange()`` or
607 607 ``repo.anyrevs([expr], user=True)``.
608 608
609 609 Returns a revset.abstractsmartset, which is a list-like interface
610 610 that contains integer revisions.
611 611 '''
612 612 expr = revsetlang.formatspec(expr, *args)
613 613 m = revset.match(None, expr)
614 614 return m(self)
615 615
616 616 def set(self, expr, *args):
617 617 '''Find revisions matching a revset and emit changectx instances.
618 618
619 619 This is a convenience wrapper around ``revs()`` that iterates the
620 620 result and is a generator of changectx instances.
621 621
622 622 Revset aliases from the configuration are not expanded. To expand
623 623 user aliases, consider calling ``scmutil.revrange()``.
624 624 '''
625 625 for r in self.revs(expr, *args):
626 626 yield self[r]
627 627
628 628 def anyrevs(self, specs, user=False):
629 629 '''Find revisions matching one of the given revsets.
630 630
631 631 Revset aliases from the configuration are not expanded by default. To
632 632 expand user aliases, specify ``user=True``.
633 633 '''
634 634 if user:
635 635 m = revset.matchany(self.ui, specs, repo=self)
636 636 else:
637 637 m = revset.matchany(None, specs)
638 638 return m(self)
639 639
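# Editor's sketch (assumes an open repo): typical use of the three revset
# helpers above; the %-escapes are documented in revsetlang.formatspec.
def _demorevsets(repo):
    revs = repo.revs('ancestors(%d)', 0)          # integer revisions
    ctxs = [ctx for ctx in repo.set('branch(%s)', 'default')]
    both = repo.anyrevs(['.', 'tip'], user=True)  # expands user aliases
    return revs, ctxs, both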
640 640 def url(self):
641 641 return 'file:' + self.root
642 642
643 643 def hook(self, name, throw=False, **args):
644 644 """Call a hook, passing this repo instance.
645 645
646 646 This a convenience method to aid invoking hooks. Extensions likely
647 647 won't call this unless they have registered a custom hook or are
648 648 replacing code that is expected to call a hook.
649 649 """
650 650 return hook.hook(self.ui, self, name, throw, **args)
651 651
652 652 def tag(self, names, node, message, local, user, date, editor=False):
653 self.ui.deprecwarn("use 'tagsmod.tag' instead of 'repo.tag'", '4.2')
653 654 tagsmod.tag(self, names, node, message, local, user, date,
654 655 editor=editor)
655 656
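# Editor's sketch: the migration this changeset's deprecation warning
# points callers to -- call the tags module directly instead of the
# repo.tag() wrapper (same arguments, with the repo passed explicitly).
def _demotag(repo, node, user, date):
    from mercurial import tags as tagsmod
    # old (now warns): repo.tag(['v1.0'], node, 'Added tag v1.0',
    #                           False, user, date)
    tagsmod.tag(repo, ['v1.0'], node, 'Added tag v1.0', False, user, date)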
656 657 @filteredpropertycache
657 658 def _tagscache(self):
658 659 '''Returns a tagscache object that contains various tags related
659 660 caches.'''
660 661
661 662 # This simplifies its cache management by having one decorated
662 663 # function (this one) and the rest simply fetch things from it.
663 664 class tagscache(object):
664 665 def __init__(self):
665 666 # These two define the set of tags for this repository. tags
666 667 # maps tag name to node; tagtypes maps tag name to 'global' or
667 668 # 'local'. (Global tags are defined by .hgtags across all
668 669 # heads, and local tags are defined in .hg/localtags.)
669 670 # They constitute the in-memory cache of tags.
670 671 self.tags = self.tagtypes = None
671 672
672 673 self.nodetagscache = self.tagslist = None
673 674
674 675 cache = tagscache()
675 676 cache.tags, cache.tagtypes = self._findtags()
676 677
677 678 return cache
678 679
679 680 def tags(self):
680 681 '''return a mapping of tag to node'''
681 682 t = {}
682 683 if self.changelog.filteredrevs:
683 684 tags, tt = self._findtags()
684 685 else:
685 686 tags = self._tagscache.tags
686 687 for k, v in tags.iteritems():
687 688 try:
688 689 # ignore tags to unknown nodes
689 690 self.changelog.rev(v)
690 691 t[k] = v
691 692 except (error.LookupError, ValueError):
692 693 pass
693 694 return t
694 695
695 696 def _findtags(self):
696 697 '''Do the hard work of finding tags. Return a pair of dicts
697 698 (tags, tagtypes) where tags maps tag name to node, and tagtypes
698 699 maps tag name to a string like \'global\' or \'local\'.
699 700 Subclasses or extensions are free to add their own tags, but
700 701 should be aware that the returned dicts will be retained for the
701 702 duration of the localrepo object.'''
702 703
703 704 # XXX what tagtype should subclasses/extensions use? Currently
704 705 # mq and bookmarks add tags, but do not set the tagtype at all.
705 706 # Should each extension invent its own tag type? Should there
706 707 # be one tagtype for all such "virtual" tags? Or is the status
707 708 # quo fine?
708 709
709 710 alltags = {} # map tag name to (node, hist)
710 711 tagtypes = {}
711 712
712 713 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
713 714 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
714 715
715 716 # Build the return dicts. Have to re-encode tag names because
716 717 # the tags module always uses UTF-8 (in order not to lose info
717 718 # writing to the cache), but the rest of Mercurial wants them in
718 719 # local encoding.
719 720 tags = {}
720 721 for (name, (node, hist)) in alltags.iteritems():
721 722 if node != nullid:
722 723 tags[encoding.tolocal(name)] = node
723 724 tags['tip'] = self.changelog.tip()
724 725 tagtypes = dict([(encoding.tolocal(name), value)
725 726 for (name, value) in tagtypes.iteritems()])
726 727 return (tags, tagtypes)
727 728
728 729 def tagtype(self, tagname):
729 730 '''
730 731 return the type of the given tag. result can be:
731 732
732 733 'local' : a local tag
733 734 'global' : a global tag
734 735 None : tag does not exist
735 736 '''
736 737
737 738 return self._tagscache.tagtypes.get(tagname)
738 739
739 740 def tagslist(self):
740 741 '''return a list of tags ordered by revision'''
741 742 if not self._tagscache.tagslist:
742 743 l = []
743 744 for t, n in self.tags().iteritems():
744 745 l.append((self.changelog.rev(n), t, n))
745 746 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
746 747
747 748 return self._tagscache.tagslist
748 749
749 750 def nodetags(self, node):
750 751 '''return the tags associated with a node'''
751 752 if not self._tagscache.nodetagscache:
752 753 nodetagscache = {}
753 754 for t, n in self._tagscache.tags.iteritems():
754 755 nodetagscache.setdefault(n, []).append(t)
755 756 for tags in nodetagscache.itervalues():
756 757 tags.sort()
757 758 self._tagscache.nodetagscache = nodetagscache
758 759 return self._tagscache.nodetagscache.get(node, [])
759 760
760 761 def nodebookmarks(self, node):
761 762 """return the list of bookmarks pointing to the specified node"""
762 763 marks = []
763 764 for bookmark, n in self._bookmarks.iteritems():
764 765 if n == node:
765 766 marks.append(bookmark)
766 767 return sorted(marks)
767 768
768 769 def branchmap(self):
769 770 '''returns a dictionary {branch: [branchheads]} with branchheads
770 771 ordered by increasing revision number'''
771 772 branchmap.updatecache(self)
772 773 return self._branchcaches[self.filtername]
773 774
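# Editor's sketch (assumes an open repo): reading the branch map built
# above; each value lists the branch heads by increasing revision.
def _demobranchmap(repo):
    bm = repo.branchmap()
    heads = dict((branch, [hex(n) for n in nodes])
                 for branch, nodes in bm.iteritems())
    tip = bm.branchtip('default')      # KeyError if no such branch
    return heads, tip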
774 775 @unfilteredmethod
775 776 def revbranchcache(self):
776 777 if not self._revbranchcache:
777 778 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
778 779 return self._revbranchcache
779 780
780 781 def branchtip(self, branch, ignoremissing=False):
781 782 '''return the tip node for a given branch
782 783
783 784 If ignoremissing is True, then this method will not raise an error.
784 785 This is helpful for callers that only expect None for a missing branch
785 786 (e.g. namespace).
786 787
787 788 '''
788 789 try:
789 790 return self.branchmap().branchtip(branch)
790 791 except KeyError:
791 792 if not ignoremissing:
792 793 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
793 794 else:
794 795 pass
795 796
796 797 def lookup(self, key):
797 798 return self[key].node()
798 799
799 800 def lookupbranch(self, key, remote=None):
800 801 repo = remote or self
801 802 if key in repo.branchmap():
802 803 return key
803 804
804 805 repo = (remote and remote.local()) and remote or self
805 806 return repo[key].branch()
806 807
807 808 def known(self, nodes):
808 809 cl = self.changelog
809 810 nm = cl.nodemap
810 811 filtered = cl.filteredrevs
811 812 result = []
812 813 for n in nodes:
813 814 r = nm.get(n)
814 815 resp = not (r is None or r in filtered)
815 816 result.append(resp)
816 817 return result
817 818
818 819 def local(self):
819 820 return self
820 821
821 822 def publishing(self):
822 823 # it's safe (and desirable) to trust the publish flag unconditionally
823 824 # so that we don't finalize changes shared between users via ssh or nfs
824 825 return self.ui.configbool('phases', 'publish', True, untrusted=True)
825 826
826 827 def cancopy(self):
827 828 # so statichttprepo's override of local() works
828 829 if not self.local():
829 830 return False
830 831 if not self.publishing():
831 832 return True
832 833 # if publishing we can't copy if there is filtered content
833 834 return not self.filtered('visible').changelog.filteredrevs
834 835
835 836 def shared(self):
836 837 '''the type of shared repository (None if not shared)'''
837 838 if self.sharedpath != self.path:
838 839 return 'store'
839 840 return None
840 841
841 842 def join(self, f, *insidef):
842 843 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
843 844 return self.vfs.join(os.path.join(f, *insidef))
844 845
845 846 def wjoin(self, f, *insidef):
846 847 return self.vfs.reljoin(self.root, f, *insidef)
847 848
848 849 def file(self, f):
849 850 if f[0] == '/':
850 851 f = f[1:]
851 852 return filelog.filelog(self.svfs, f)
852 853
853 854 def changectx(self, changeid):
854 855 return self[changeid]
855 856
856 857 def setparents(self, p1, p2=nullid):
857 858 self.dirstate.beginparentchange()
858 859 copies = self.dirstate.setparents(p1, p2)
859 860 pctx = self[p1]
860 861 if copies:
861 862 # Adjust copy records, the dirstate cannot do it, it
862 863 # requires access to parents manifests. Preserve them
863 864 # only for entries added to first parent.
864 865 for f in copies:
865 866 if f not in pctx and copies[f] in pctx:
866 867 self.dirstate.copy(copies[f], f)
867 868 if p2 == nullid:
868 869 for f, s in sorted(self.dirstate.copies().items()):
869 870 if f not in pctx and s not in pctx:
870 871 self.dirstate.copy(None, f)
871 872 self.dirstate.endparentchange()
872 873
873 874 def filectx(self, path, changeid=None, fileid=None):
874 875 """changeid can be a changeset revision, node, or tag.
875 876 fileid can be a file revision or node."""
876 877 return context.filectx(self, path, changeid, fileid)
877 878
878 879 def getcwd(self):
879 880 return self.dirstate.getcwd()
880 881
881 882 def pathto(self, f, cwd=None):
882 883 return self.dirstate.pathto(f, cwd)
883 884
884 885 def wfile(self, f, mode='r'):
885 886 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
886 887 return self.wvfs(f, mode)
887 888
888 889 def _link(self, f):
889 890 self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
890 891 '4.0')
891 892 return self.wvfs.islink(f)
892 893
893 894 def _loadfilter(self, filter):
894 895 if filter not in self.filterpats:
895 896 l = []
896 897 for pat, cmd in self.ui.configitems(filter):
897 898 if cmd == '!':
898 899 continue
899 900 mf = matchmod.match(self.root, '', [pat])
900 901 fn = None
901 902 params = cmd
902 903 for name, filterfn in self._datafilters.iteritems():
903 904 if cmd.startswith(name):
904 905 fn = filterfn
905 906 params = cmd[len(name):].lstrip()
906 907 break
907 908 if not fn:
908 909 fn = lambda s, c, **kwargs: util.filter(s, c)
909 910 # Wrap old filters not supporting keyword arguments
910 911 if not inspect.getargspec(fn)[2]:
911 912 oldfn = fn
912 913 fn = lambda s, c, **kwargs: oldfn(s, c)
913 914 l.append((mf, fn, params))
914 915 self.filterpats[filter] = l
915 916 return self.filterpats[filter]
916 917
917 918 def _filter(self, filterpats, filename, data):
918 919 for mf, fn, cmd in filterpats:
919 920 if mf(filename):
920 921 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
921 922 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
922 923 break
923 924
924 925 return data
925 926
926 927 @unfilteredpropertycache
927 928 def _encodefilterpats(self):
928 929 return self._loadfilter('encode')
929 930
930 931 @unfilteredpropertycache
931 932 def _decodefilterpats(self):
932 933 return self._loadfilter('decode')
933 934
934 935 def adddatafilter(self, name, filter):
935 936 self._datafilters[name] = filter
936 937
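# Editor's sketch: the filter tables above are fed from hgrc sections of
# the same name. A hypothetical configuration:
#
#     [encode]
#     *.txt = tempfile: unix2dos INFILE OUTFILE
#     [decode]
#     *.txt = tempfile: dos2unix INFILE OUTFILE
#
# Commands starting with a name registered through adddatafilter() use
# that driver; anything else falls back to util.filter, which understands
# plain shell pipes plus the 'pipe:' and 'tempfile:' forms.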
937 938 def wread(self, filename):
938 939 if self.wvfs.islink(filename):
939 940 data = self.wvfs.readlink(filename)
940 941 else:
941 942 data = self.wvfs.read(filename)
942 943 return self._filter(self._encodefilterpats, filename, data)
943 944
944 945 def wwrite(self, filename, data, flags, backgroundclose=False):
945 946 """write ``data`` into ``filename`` in the working directory
946 947
947 948 This returns length of written (maybe decoded) data.
948 949 """
949 950 data = self._filter(self._decodefilterpats, filename, data)
950 951 if 'l' in flags:
951 952 self.wvfs.symlink(data, filename)
952 953 else:
953 954 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
954 955 if 'x' in flags:
955 956 self.wvfs.setflags(filename, False, True)
956 957 return len(data)
957 958
958 959 def wwritedata(self, filename, data):
959 960 return self._filter(self._decodefilterpats, filename, data)
960 961
961 962 def currenttransaction(self):
962 963 """return the current transaction or None if non exists"""
963 964 if self._transref:
964 965 tr = self._transref()
965 966 else:
966 967 tr = None
967 968
968 969 if tr and tr.running():
969 970 return tr
970 971 return None
971 972
972 973 def transaction(self, desc, report=None):
973 974 if (self.ui.configbool('devel', 'all-warnings')
974 975 or self.ui.configbool('devel', 'check-locks')):
975 976 if self._currentlock(self._lockref) is None:
976 977 raise error.ProgrammingError('transaction requires locking')
977 978 tr = self.currenttransaction()
978 979 if tr is not None:
979 980 return tr.nest()
980 981
981 982 # abort here if the journal already exists
982 983 if self.svfs.exists("journal"):
983 984 raise error.RepoError(
984 985 _("abandoned transaction found"),
985 986 hint=_("run 'hg recover' to clean up transaction"))
986 987
987 988 idbase = "%.40f#%f" % (random.random(), time.time())
988 989 ha = hex(hashlib.sha1(idbase).digest())
989 990 txnid = 'TXN:' + ha
990 991 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
991 992
992 993 self._writejournal(desc)
993 994 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
994 995 if report:
995 996 rp = report
996 997 else:
997 998 rp = self.ui.warn
998 999 vfsmap = {'plain': self.vfs} # root of .hg/
999 1000 # we must avoid cyclic reference between repo and transaction.
1000 1001 reporef = weakref.ref(self)
1001 1002 def validate(tr):
1002 1003 """will run pre-closing hooks"""
1003 1004 reporef().hook('pretxnclose', throw=True,
1004 1005 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1005 1006 def releasefn(tr, success):
1006 1007 repo = reporef()
1007 1008 if success:
1008 1009 # this should be explicitly invoked here, because
1009 1010 # in-memory changes aren't written out when closing the
1010 1011 # transaction, if tr.addfilegenerator (via
1011 1012 # dirstate.write or so) wasn't invoked while the
1012 1013 # transaction was running
1013 1014 repo.dirstate.write(None)
1014 1015 else:
1015 1016 # discard all changes (including ones already written
1016 1017 # out) in this transaction
1017 1018 repo.dirstate.restorebackup(None, prefix='journal.')
1018 1019
1019 1020 repo.invalidate(clearfilecache=True)
1020 1021
1021 1022 tr = transaction.transaction(rp, self.svfs, vfsmap,
1022 1023 "journal",
1023 1024 "undo",
1024 1025 aftertrans(renames),
1025 1026 self.store.createmode,
1026 1027 validator=validate,
1027 1028 releasefn=releasefn)
1028 1029
1029 1030 tr.hookargs['txnid'] = txnid
1030 1031 # note: writing the fncache only during finalize means that the file is
1031 1032 # outdated when running hooks. As fncache is used for streaming clone,
1032 1033 # this is not expected to break anything that happens during the hooks.
1033 1034 tr.addfinalize('flush-fncache', self.store.write)
1034 1035 def txnclosehook(tr2):
1035 1036 """To be run if transaction is successful, will schedule a hook run
1036 1037 """
1037 1038 # Don't reference tr2 in hook() so we don't hold a reference.
1038 1039 # This reduces memory consumption when there are multiple
1039 1040 # transactions per lock. This can likely go away if issue5045
1040 1041 # fixes the function accumulation.
1041 1042 hookargs = tr2.hookargs
1042 1043
1043 1044 def hook():
1044 1045 reporef().hook('txnclose', throw=False, txnname=desc,
1045 1046 **pycompat.strkwargs(hookargs))
1046 1047 reporef()._afterlock(hook)
1047 1048 tr.addfinalize('txnclose-hook', txnclosehook)
1048 1049 def txnaborthook(tr2):
1049 1050 """To be run if transaction is aborted
1050 1051 """
1051 1052 reporef().hook('txnabort', throw=False, txnname=desc,
1052 1053 **tr2.hookargs)
1053 1054 tr.addabort('txnabort-hook', txnaborthook)
1054 1055 # avoid eager cache invalidation. in-memory data should be identical
1055 1056 # to stored data if transaction has no error.
1056 1057 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1057 1058 self._transref = weakref.ref(tr)
1058 1059 return tr
1059 1060
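# Editor's sketch (assumes an open repo): the expected calling pattern for
# transaction() above -- hold the store lock first, close on success, and
# always release so an unclosed transaction runs its abort hooks.
def _demotransaction(repo):
    with repo.lock():
        tr = repo.transaction('demo')
        try:
            pass                       # write store data through tr here
            tr.close()                 # triggers pretxnclose/txnclose
        finally:
            tr.release()               # aborts if close() was not reached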
1060 1061 def _journalfiles(self):
1061 1062 return ((self.svfs, 'journal'),
1062 1063 (self.vfs, 'journal.dirstate'),
1063 1064 (self.vfs, 'journal.branch'),
1064 1065 (self.vfs, 'journal.desc'),
1065 1066 (self.vfs, 'journal.bookmarks'),
1066 1067 (self.svfs, 'journal.phaseroots'))
1067 1068
1068 1069 def undofiles(self):
1069 1070 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1070 1071
1071 1072 def _writejournal(self, desc):
1072 1073 self.dirstate.savebackup(None, prefix='journal.')
1073 1074 self.vfs.write("journal.branch",
1074 1075 encoding.fromlocal(self.dirstate.branch()))
1075 1076 self.vfs.write("journal.desc",
1076 1077 "%d\n%s\n" % (len(self), desc))
1077 1078 self.vfs.write("journal.bookmarks",
1078 1079 self.vfs.tryread("bookmarks"))
1079 1080 self.svfs.write("journal.phaseroots",
1080 1081 self.svfs.tryread("phaseroots"))
1081 1082
1082 1083 def recover(self):
1083 1084 with self.lock():
1084 1085 if self.svfs.exists("journal"):
1085 1086 self.ui.status(_("rolling back interrupted transaction\n"))
1086 1087 vfsmap = {'': self.svfs,
1087 1088 'plain': self.vfs,}
1088 1089 transaction.rollback(self.svfs, vfsmap, "journal",
1089 1090 self.ui.warn)
1090 1091 self.invalidate()
1091 1092 return True
1092 1093 else:
1093 1094 self.ui.warn(_("no interrupted transaction available\n"))
1094 1095 return False
1095 1096
1096 1097 def rollback(self, dryrun=False, force=False):
1097 1098 wlock = lock = dsguard = None
1098 1099 try:
1099 1100 wlock = self.wlock()
1100 1101 lock = self.lock()
1101 1102 if self.svfs.exists("undo"):
1102 1103 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1103 1104
1104 1105 return self._rollback(dryrun, force, dsguard)
1105 1106 else:
1106 1107 self.ui.warn(_("no rollback information available\n"))
1107 1108 return 1
1108 1109 finally:
1109 1110 release(dsguard, lock, wlock)
1110 1111
1111 1112 @unfilteredmethod # Until we get smarter cache management
1112 1113 def _rollback(self, dryrun, force, dsguard):
1113 1114 ui = self.ui
1114 1115 try:
1115 1116 args = self.vfs.read('undo.desc').splitlines()
1116 1117 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1117 1118 if len(args) >= 3:
1118 1119 detail = args[2]
1119 1120 oldtip = oldlen - 1
1120 1121
1121 1122 if detail and ui.verbose:
1122 1123 msg = (_('repository tip rolled back to revision %s'
1123 1124 ' (undo %s: %s)\n')
1124 1125 % (oldtip, desc, detail))
1125 1126 else:
1126 1127 msg = (_('repository tip rolled back to revision %s'
1127 1128 ' (undo %s)\n')
1128 1129 % (oldtip, desc))
1129 1130 except IOError:
1130 1131 msg = _('rolling back unknown transaction\n')
1131 1132 desc = None
1132 1133
1133 1134 if not force and self['.'] != self['tip'] and desc == 'commit':
1134 1135 raise error.Abort(
1135 1136 _('rollback of last commit while not checked out '
1136 1137 'may lose data'), hint=_('use -f to force'))
1137 1138
1138 1139 ui.status(msg)
1139 1140 if dryrun:
1140 1141 return 0
1141 1142
1142 1143 parents = self.dirstate.parents()
1143 1144 self.destroying()
1144 1145 vfsmap = {'plain': self.vfs, '': self.svfs}
1145 1146 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1146 1147 if self.vfs.exists('undo.bookmarks'):
1147 1148 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1148 1149 if self.svfs.exists('undo.phaseroots'):
1149 1150 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1150 1151 self.invalidate()
1151 1152
1152 1153 parentgone = (parents[0] not in self.changelog.nodemap or
1153 1154 parents[1] not in self.changelog.nodemap)
1154 1155 if parentgone:
1155 1156 # prevent dirstateguard from overwriting already restored one
1156 1157 dsguard.close()
1157 1158
1158 1159 self.dirstate.restorebackup(None, prefix='undo.')
1159 1160 try:
1160 1161 branch = self.vfs.read('undo.branch')
1161 1162 self.dirstate.setbranch(encoding.tolocal(branch))
1162 1163 except IOError:
1163 1164 ui.warn(_('named branch could not be reset: '
1164 1165 'current branch is still \'%s\'\n')
1165 1166 % self.dirstate.branch())
1166 1167
1167 1168 parents = tuple([p.rev() for p in self[None].parents()])
1168 1169 if len(parents) > 1:
1169 1170 ui.status(_('working directory now based on '
1170 1171 'revisions %d and %d\n') % parents)
1171 1172 else:
1172 1173 ui.status(_('working directory now based on '
1173 1174 'revision %d\n') % parents)
1174 1175 mergemod.mergestate.clean(self, self['.'].node())
1175 1176
1176 1177 # TODO: if we know which new heads may result from this rollback, pass
1177 1178 # them to destroy(), which will prevent the branchhead cache from being
1178 1179 # invalidated.
1179 1180 self.destroyed()
1180 1181 return 0
1181 1182
1182 1183 def invalidatecaches(self):
1183 1184
1184 1185 if '_tagscache' in vars(self):
1185 1186 # can't use delattr on proxy
1186 1187 del self.__dict__['_tagscache']
1187 1188
1188 1189 self.unfiltered()._branchcaches.clear()
1189 1190 self.invalidatevolatilesets()
1190 1191
1191 1192 def invalidatevolatilesets(self):
1192 1193 self.filteredrevcache.clear()
1193 1194 obsolete.clearobscaches(self)
1194 1195
1195 1196 def invalidatedirstate(self):
1196 1197 '''Invalidates the dirstate, causing the next call to dirstate
1197 1198 to check if it was modified since the last time it was read,
1198 1199 rereading it if it has been.
1199 1200
1200 1201 This differs from dirstate.invalidate() in that it doesn't always
1201 1202 reread the dirstate. Use dirstate.invalidate() if you want to
1202 1203 explicitly read the dirstate again (i.e. restoring it to a previous
1203 1204 known good state).'''
1204 1205 if hasunfilteredcache(self, 'dirstate'):
1205 1206 for k in self.dirstate._filecache:
1206 1207 try:
1207 1208 delattr(self.dirstate, k)
1208 1209 except AttributeError:
1209 1210 pass
1210 1211 delattr(self.unfiltered(), 'dirstate')
1211 1212
1212 1213 def invalidate(self, clearfilecache=False):
1213 1214 '''Invalidates both store and non-store parts other than dirstate
1214 1215
1215 1216 If a transaction is running, invalidation of store is omitted,
1216 1217 because discarding in-memory changes might cause inconsistency
1217 1218 (e.g. incomplete fncache causes unintentional failure, but
1218 1219 redundant one doesn't).
1219 1220 '''
1220 1221 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1221 1222 for k in list(self._filecache.keys()):
1222 1223 # dirstate is invalidated separately in invalidatedirstate()
1223 1224 if k == 'dirstate':
1224 1225 continue
1225 1226
1226 1227 if clearfilecache:
1227 1228 del self._filecache[k]
1228 1229 try:
1229 1230 delattr(unfiltered, k)
1230 1231 except AttributeError:
1231 1232 pass
1232 1233 self.invalidatecaches()
1233 1234 if not self.currenttransaction():
1234 1235 # TODO: Changing contents of store outside transaction
1235 1236 # causes inconsistency. We should make in-memory store
1236 1237 # changes detectable, and abort if changed.
1237 1238 self.store.invalidatecaches()
1238 1239
1239 1240 def invalidateall(self):
1240 1241 '''Fully invalidates both store and non-store parts, causing the
1241 1242 subsequent operation to reread any outside changes.'''
1242 1243 # extension should hook this to invalidate its caches
1243 1244 self.invalidate()
1244 1245 self.invalidatedirstate()
1245 1246
1246 1247 @unfilteredmethod
1247 1248 def _refreshfilecachestats(self, tr):
1248 1249 """Reload stats of cached files so that they are flagged as valid"""
1249 1250 for k, ce in self._filecache.items():
1250 1251 if k == 'dirstate' or k not in self.__dict__:
1251 1252 continue
1252 1253 ce.refresh()
1253 1254
1254 1255 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1255 1256 inheritchecker=None, parentenvvar=None):
1256 1257 parentlock = None
1257 1258 # the contents of parentenvvar are used by the underlying lock to
1258 1259 # determine whether it can be inherited
1259 1260 if parentenvvar is not None:
1260 1261 parentlock = encoding.environ.get(parentenvvar)
1261 1262 try:
1262 1263 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1263 1264 acquirefn=acquirefn, desc=desc,
1264 1265 inheritchecker=inheritchecker,
1265 1266 parentlock=parentlock)
1266 1267 except error.LockHeld as inst:
1267 1268 if not wait:
1268 1269 raise
1269 1270 # show more details for new-style locks
1270 1271 if ':' in inst.locker:
1271 1272 host, pid = inst.locker.split(":", 1)
1272 1273 self.ui.warn(
1273 1274 _("waiting for lock on %s held by process %r "
1274 1275 "on host %r\n") % (desc, pid, host))
1275 1276 else:
1276 1277 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1277 1278 (desc, inst.locker))
1278 1279 # default to 600 seconds timeout
1279 1280 l = lockmod.lock(vfs, lockname,
1280 1281 int(self.ui.config("ui", "timeout", "600")),
1281 1282 releasefn=releasefn, acquirefn=acquirefn,
1282 1283 desc=desc)
1283 1284 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1284 1285 return l
1285 1286
1286 1287 def _afterlock(self, callback):
1287 1288 """add a callback to be run when the repository is fully unlocked
1288 1289
1289 1290 The callback will be executed when the outermost lock is released
1290 1291 (with wlock being higher level than 'lock')."""
1291 1292 for ref in (self._wlockref, self._lockref):
1292 1293 l = ref and ref()
1293 1294 if l and l.held:
1294 1295 l.postrelease.append(callback)
1295 1296 break
1296 1297 else: # no lock has been found.
1297 1298 callback()
1298 1299
1299 1300 def lock(self, wait=True):
1300 1301 '''Lock the repository store (.hg/store) and return a weak reference
1301 1302 to the lock. Use this before modifying the store (e.g. committing or
1302 1303 stripping). If you are opening a transaction, get a lock as well.
1303 1304
1304 1305 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1305 1306 'wlock' first to avoid a dead-lock hazard.'''
1306 1307 l = self._currentlock(self._lockref)
1307 1308 if l is not None:
1308 1309 l.lock()
1309 1310 return l
1310 1311
1311 1312 l = self._lock(self.svfs, "lock", wait, None,
1312 1313 self.invalidate, _('repository %s') % self.origroot)
1313 1314 self._lockref = weakref.ref(l)
1314 1315 return l
1315 1316
1316 1317 def _wlockchecktransaction(self):
1317 1318 if self.currenttransaction() is not None:
1318 1319 raise error.LockInheritanceContractViolation(
1319 1320 'wlock cannot be inherited in the middle of a transaction')
1320 1321
1321 1322 def wlock(self, wait=True):
1322 1323 '''Lock the non-store parts of the repository (everything under
1323 1324 .hg except .hg/store) and return a weak reference to the lock.
1324 1325
1325 1326 Use this before modifying files in .hg.
1326 1327
1327 1328 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1328 1329 'wlock' first to avoid a dead-lock hazard.'''
1329 1330 l = self._wlockref and self._wlockref()
1330 1331 if l is not None and l.held:
1331 1332 l.lock()
1332 1333 return l
1333 1334
1334 1335 # We do not need to check for non-waiting lock acquisition. Such an
1335 1336 # acquisition would not cause a dead-lock; it would just fail.
1336 1337 if wait and (self.ui.configbool('devel', 'all-warnings')
1337 1338 or self.ui.configbool('devel', 'check-locks')):
1338 1339 if self._currentlock(self._lockref) is not None:
1339 1340 self.ui.develwarn('"wlock" acquired after "lock"')
1340 1341
1341 1342 def unlock():
1342 1343 if self.dirstate.pendingparentchange():
1343 1344 self.dirstate.invalidate()
1344 1345 else:
1345 1346 self.dirstate.write(None)
1346 1347
1347 1348 self._filecache['dirstate'].refresh()
1348 1349
1349 1350 l = self._lock(self.vfs, "wlock", wait, unlock,
1350 1351 self.invalidatedirstate, _('working directory of %s') %
1351 1352 self.origroot,
1352 1353 inheritchecker=self._wlockchecktransaction,
1353 1354 parentenvvar='HG_WLOCK_LOCKER')
1354 1355 self._wlockref = weakref.ref(l)
1355 1356 return l
1356 1357
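# Editor's sketch: the acquisition order both lock docstrings above insist
# on -- 'wlock' strictly before 'lock'; the reverse order trips the devel
# 'check-locks' warning and risks deadlock against well-behaved writers.
def _demolocking(repo):
    with repo.wlock():                 # non-store parts of .hg
        with repo.lock():              # .hg/store
            pass                       # safe to touch dirstate and store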
1357 1358 def _currentlock(self, lockref):
1358 1359 """Returns the lock if it's held, or None if it's not."""
1359 1360 if lockref is None:
1360 1361 return None
1361 1362 l = lockref()
1362 1363 if l is None or not l.held:
1363 1364 return None
1364 1365 return l
1365 1366
1366 1367 def currentwlock(self):
1367 1368 """Returns the wlock if it's held, or None if it's not."""
1368 1369 return self._currentlock(self._wlockref)
1369 1370
1370 1371 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1371 1372 """
1372 1373 commit an individual file as part of a larger transaction
1373 1374 """
1374 1375
1375 1376 fname = fctx.path()
1376 1377 fparent1 = manifest1.get(fname, nullid)
1377 1378 fparent2 = manifest2.get(fname, nullid)
1378 1379 if isinstance(fctx, context.filectx):
1379 1380 node = fctx.filenode()
1380 1381 if node in [fparent1, fparent2]:
1381 1382 self.ui.debug('reusing %s filelog entry\n' % fname)
1382 1383 if manifest1.flags(fname) != fctx.flags():
1383 1384 changelist.append(fname)
1384 1385 return node
1385 1386
1386 1387 flog = self.file(fname)
1387 1388 meta = {}
1388 1389 copy = fctx.renamed()
1389 1390 if copy and copy[0] != fname:
1390 1391 # Mark the new revision of this file as a copy of another
1391 1392 # file. This copy data will effectively act as a parent
1392 1393 # of this new revision. If this is a merge, the first
1393 1394 # parent will be the nullid (meaning "look up the copy data")
1394 1395 # and the second one will be the other parent. For example:
1395 1396 #
1396 1397 # 0 --- 1 --- 3 rev1 changes file foo
1397 1398 # \ / rev2 renames foo to bar and changes it
1398 1399 # \- 2 -/ rev3 should have bar with all changes and
1399 1400 # should record that bar descends from
1400 1401 # bar in rev2 and foo in rev1
1401 1402 #
1402 1403 # this allows this merge to succeed:
1403 1404 #
1404 1405 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1405 1406 # \ / merging rev3 and rev4 should use bar@rev2
1406 1407 # \- 2 --- 4 as the merge base
1407 1408 #
1408 1409
1409 1410 cfname = copy[0]
1410 1411 crev = manifest1.get(cfname)
1411 1412 newfparent = fparent2
1412 1413
1413 1414 if manifest2: # branch merge
1414 1415 if fparent2 == nullid or crev is None: # copied on remote side
1415 1416 if cfname in manifest2:
1416 1417 crev = manifest2[cfname]
1417 1418 newfparent = fparent1
1418 1419
1419 1420 # Here, we used to search backwards through history to try to find
1420 1421 # where the file copy came from if the source of a copy was not in
1421 1422 # the parent directory. However, this doesn't actually make sense to
1422 1423 # do (what does a copy from something not in your working copy even
1423 1424 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1424 1425 # the user that copy information was dropped, so if they didn't
1425 1426 # expect this outcome it can be fixed, but this is the correct
1426 1427 # behavior in this circumstance.
1427 1428
1428 1429 if crev:
1429 1430 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1430 1431 meta["copy"] = cfname
1431 1432 meta["copyrev"] = hex(crev)
1432 1433 fparent1, fparent2 = nullid, newfparent
1433 1434 else:
1434 1435 self.ui.warn(_("warning: can't find ancestor for '%s' "
1435 1436 "copied from '%s'!\n") % (fname, cfname))
1436 1437
1437 1438 elif fparent1 == nullid:
1438 1439 fparent1, fparent2 = fparent2, nullid
1439 1440 elif fparent2 != nullid:
1440 1441 # is one parent an ancestor of the other?
1441 1442 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1442 1443 if fparent1 in fparentancestors:
1443 1444 fparent1, fparent2 = fparent2, nullid
1444 1445 elif fparent2 in fparentancestors:
1445 1446 fparent2 = nullid
1446 1447
1447 1448 # is the file changed?
1448 1449 text = fctx.data()
1449 1450 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1450 1451 changelist.append(fname)
1451 1452 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1452 1453 # are just the flags changed during merge?
1453 1454 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1454 1455 changelist.append(fname)
1455 1456
1456 1457 return fparent1
1457 1458
1458 1459 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1459 1460 """check for commit arguments that aren't committable"""
1460 1461 if match.isexact() or match.prefix():
1461 1462 matched = set(status.modified + status.added + status.removed)
1462 1463
1463 1464 for f in match.files():
1464 1465 f = self.dirstate.normalize(f)
1465 1466 if f == '.' or f in matched or f in wctx.substate:
1466 1467 continue
1467 1468 if f in status.deleted:
1468 1469 fail(f, _('file not found!'))
1469 1470 if f in vdirs: # visited directory
1470 1471 d = f + '/'
1471 1472 for mf in matched:
1472 1473 if mf.startswith(d):
1473 1474 break
1474 1475 else:
1475 1476 fail(f, _("no match under directory!"))
1476 1477 elif f not in self.dirstate:
1477 1478 fail(f, _("file not tracked!"))
1478 1479
1479 1480 @unfilteredmethod
1480 1481 def commit(self, text="", user=None, date=None, match=None, force=False,
1481 1482 editor=False, extra=None):
1482 1483 """Add a new revision to current repository.
1483 1484
1484 1485 Revision information is gathered from the working directory,
1485 1486 match can be used to filter the committed files. If editor is
1486 1487 supplied, it is called to get a commit message.
1487 1488 """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the time
            # the hook runs, so only fire it if the node still exists
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """
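        # ctx is typically the workingcommitctx built by commit() above;
        # other in-memory contexts (e.g. memctx) can be committed the same
        # way.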

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration, this is done from the nearest
        # filtered subset and is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
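        # A sketch (hypothetical caller): list all files tracked in the
        # working directory.
        #
        #     for f in repo.walk(matchmod.always(repo.root, '')):
        #         repo.ui.write(f + '\n')
        #
        # Passing a node walks that changeset instead of the working copy.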
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
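        # return all repository heads (changesets without children) as
        # nodes, newest first; with 'start', only heads reachable from it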
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
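        # for each node, walk first-parent history down to the nearest
        # merge or root and record (start, branchpoint, p1, p2) tuples;
        # apparently kept around for the legacy wire protocol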
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
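        # for every (top, bottom) pair, walk first-parent history from top
        # towards bottom, collecting nodes at exponentially growing
        # distances (1, 2, 4, ...); apparently kept for legacy discovery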
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose registered hooks are called
        with a pushop (carrying repo, remote and outgoing members) before
        changesets are pushed.
        """
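        # A sketch of extension usage (hypothetical names):
        #
        #     def checkfn(pushop):
        #         ...
        #     repo.prepushoutgoinghooks.add('myext', checkfn)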
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
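        # run the blocking prepushkey hook first (an abort rejects the
        # change and returns False), apply the change, then fire the
        # non-blocking pushkey hook once the current lock is released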
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
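    # 'files' is apparently a list of (vfs, src, dest) triples, e.g.
    # (repo.svfs, 'journal', 'undo'); the returned callback is meant to
    # perform the renames after the transaction finishes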
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
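    # e.g. 'journal.phaseroots' -> 'undo.phaseroots' (only the leading
    # 'journal' is replaced)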
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
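    # For instance (a sketch): with stock defaults this returns roughly
    # set(['revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta']),
    # assuming scmutil.gdinitconfig(ui) is true, as it is on modern defaults.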
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements