localrepo: fix deprecation version for 'repo.join'...
Pierre-Yves David
r31818:8fa516b2 default
@@ -1,1986 +1,1986 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repoview,
54 54 revset,
55 55 revsetlang,
56 56 scmutil,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
66 66 release = lockmod.release
67 67 urlerr = util.urlerr
68 68 urlreq = util.urlreq
69 69
70 70 class repofilecache(scmutil.filecache):
71 71 """All filecache usage on repo are done for logic that should be unfiltered
72 72 """
73 73
74 74 def join(self, obj, fname):
75 75 return obj.vfs.join(fname)
76 76 def __get__(self, repo, type=None):
77 77 if repo is None:
78 78 return self
79 79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
80 80 def __set__(self, repo, value):
81 81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
82 82 def __delete__(self, repo):
83 83 return super(repofilecache, self).__delete__(repo.unfiltered())
84 84
85 85 class storecache(repofilecache):
86 86 """filecache for files in the store"""
87 87 def join(self, obj, fname):
88 88 return obj.sjoin(fname)
89 89
90 90 class unfilteredpropertycache(util.propertycache):
91 91 """propertycache that apply to unfiltered repo only"""
92 92
93 93 def __get__(self, repo, type=None):
94 94 unfi = repo.unfiltered()
95 95 if unfi is repo:
96 96 return super(unfilteredpropertycache, self).__get__(unfi)
97 97 return getattr(unfi, self.name)
98 98
99 99 class filteredpropertycache(util.propertycache):
100 100 """propertycache that must take filtering in account"""
101 101
102 102 def cachevalue(self, obj, value):
103 103 object.__setattr__(obj, self.name, value)
104 104
105 105
106 106 def hasunfilteredcache(repo, name):
107 107 """check if a repo has an unfilteredpropertycache value for <name>"""
108 108 return name in vars(repo.unfiltered())
109 109
110 110 def unfilteredmethod(orig):
111 111 """decorate method that always need to be run on unfiltered version"""
112 112 def wrapper(repo, *args, **kwargs):
113 113 return orig(repo.unfiltered(), *args, **kwargs)
114 114 return wrapper
115 115
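# (editorial example, not part of this changeset) A minimal, self-contained
# sketch of the delegation pattern the cache classes above rely on: every
# access is redirected to one canonical object (the unfiltered repo), so
# all filtered views share a single cached value. All names here are
# illustrative, not Mercurial API.
class _delegatingcache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        if obj is None:
            return self
        target = obj.unfiltered()      # canonical owner of the cache
        if self.name not in target.__dict__:
            target.__dict__[self.name] = self.func(target)
        return target.__dict__[self.name]

class _fakerepo(object):
    def unfiltered(self):
        return self                    # stands in for repo.unfiltered()
    @_delegatingcache
    def expensive(self):
        return 42

assert _fakerepo().expensive == 42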
116 116 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 117 'unbundle'))
118 118 legacycaps = moderncaps.union(set(['changegroupsubset']))
119 119
120 120 class localpeer(peer.peerrepository):
121 121 '''peer for a local repo; reflects only the most recent API'''
122 122
123 123 def __init__(self, repo, caps=None):
124 124 if caps is None:
125 125 caps = moderncaps.copy()
126 126 peer.peerrepository.__init__(self)
127 127 self._repo = repo.filtered('served')
128 128 self.ui = repo.ui
129 129 self._caps = repo._restrictcapabilities(caps)
130 130 self.requirements = repo.requirements
131 131 self.supportedformats = repo.supportedformats
132 132
133 133 def close(self):
134 134 self._repo.close()
135 135
136 136 def _capabilities(self):
137 137 return self._caps
138 138
139 139 def local(self):
140 140 return self._repo
141 141
142 142 def canpush(self):
143 143 return True
144 144
145 145 def url(self):
146 146 return self._repo.url()
147 147
148 148 def lookup(self, key):
149 149 return self._repo.lookup(key)
150 150
151 151 def branchmap(self):
152 152 return self._repo.branchmap()
153 153
154 154 def heads(self):
155 155 return self._repo.heads()
156 156
157 157 def known(self, nodes):
158 158 return self._repo.known(nodes)
159 159
160 160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 161 **kwargs):
162 162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 163 common=common, bundlecaps=bundlecaps,
164 164 **kwargs)
165 165 cb = util.chunkbuffer(chunks)
166 166
167 167 if bundlecaps is not None and 'HG20' in bundlecaps:
168 168 # When requesting a bundle2, getbundle returns a stream to make the
169 169 # wire level function happier. We need to build a proper object
170 170 # from it in local peer.
171 171 return bundle2.getunbundler(self.ui, cb)
172 172 else:
173 173 return changegroup.getunbundler('01', cb, None)
174 174
175 175 # TODO We might want to move the next two calls into legacypeer and add
176 176 # unbundle instead.
177 177
178 178 def unbundle(self, cg, heads, url):
179 179 """apply a bundle on a repo
180 180
181 181 This function handles the repo locking itself."""
182 182 try:
183 183 try:
184 184 cg = exchange.readbundle(self.ui, cg, None)
185 185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 186 if util.safehasattr(ret, 'getchunks'):
187 187 # This is a bundle20 object, turn it into an unbundler.
188 188 # This little dance should be dropped eventually when the
189 189 # API is finally improved.
190 190 stream = util.chunkbuffer(ret.getchunks())
191 191 ret = bundle2.getunbundler(self.ui, stream)
192 192 return ret
193 193 except Exception as exc:
194 194 # If the exception contains output salvaged from a bundle2
195 195 # reply, we need to make sure it is printed before continuing
196 196 # to fail. So we build a bundle2 with such output and consume
197 197 # it directly.
198 198 #
199 199 # This is not very elegant but allows a "simple" solution for
200 200 # issue4594
201 201 output = getattr(exc, '_bundle2salvagedoutput', ())
202 202 if output:
203 203 bundler = bundle2.bundle20(self._repo.ui)
204 204 for out in output:
205 205 bundler.addpart(out)
206 206 stream = util.chunkbuffer(bundler.getchunks())
207 207 b = bundle2.getunbundler(self.ui, stream)
208 208 bundle2.processbundle(self._repo, b)
209 209 raise
210 210 except error.PushRaced as exc:
211 211 raise error.ResponseError(_('push failed:'), str(exc))
212 212
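# (editorial example, not part of this changeset) The except branch in
# unbundle() above replays output salvaged from the exception before
# re-raising, so server-side messages survive a failed push (issue4594).
# The same pattern in a generic, self-contained form; every name below is
# illustrative:
def _produce(parts):
    out = []
    try:
        for part in parts:
            out.append(part)
            if part == 'boom':
                raise RuntimeError('failed mid-stream')
    except RuntimeError as exc:
        exc._salvagedoutput = out      # keep whatever was produced so far
        raise

def _consume(parts):
    try:
        _produce(parts)
    except RuntimeError as exc:
        for part in getattr(exc, '_salvagedoutput', ()):
            print('replaying: %s' % part)   # surface output, then fail
        raise
# _consume(['a', 'boom']) prints "replaying: a" and "replaying: boom",
# then re-raises the RuntimeError.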
213 213 def lock(self):
214 214 return self._repo.lock()
215 215
216 216 def addchangegroup(self, cg, source, url):
217 217 return cg.apply(self._repo, source, url)
218 218
219 219 def pushkey(self, namespace, key, old, new):
220 220 return self._repo.pushkey(namespace, key, old, new)
221 221
222 222 def listkeys(self, namespace):
223 223 return self._repo.listkeys(namespace)
224 224
225 225 def debugwireargs(self, one, two, three=None, four=None, five=None):
226 226 '''used to test argument passing over the wire'''
227 227 return "%s %s %s %s %s" % (one, two, three, four, five)
228 228
229 229 class locallegacypeer(localpeer):
230 230 '''peer extension which implements legacy methods too; used for tests with
231 231 restricted capabilities'''
232 232
233 233 def __init__(self, repo):
234 234 localpeer.__init__(self, repo, caps=legacycaps)
235 235
236 236 def branches(self, nodes):
237 237 return self._repo.branches(nodes)
238 238
239 239 def between(self, pairs):
240 240 return self._repo.between(pairs)
241 241
242 242 def changegroup(self, basenodes, source):
243 243 return changegroup.changegroup(self._repo, basenodes, source)
244 244
245 245 def changegroupsubset(self, bases, heads, source):
246 246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
247 247
248 248 class localrepository(object):
249 249
250 250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
251 251 'manifestv2'))
252 252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
253 253 'relshared', 'dotencode'))
254 254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
255 255 filtername = None
256 256
257 257 # a list of (ui, featureset) functions.
258 258 # only functions defined in module of enabled extensions are invoked
259 259 featuresetupfuncs = set()
260 260
261 261 def __init__(self, baseui, path, create=False):
262 262 self.requirements = set()
263 263 # wvfs: rooted at the repository root, used to access the working copy
264 264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 266 self.vfs = None
267 267 # svfs: usually rooted at .hg/store, used to access repository history
268 268 # If this is a shared repository, this vfs may point to another
269 269 # repository's .hg/store directory.
270 270 self.svfs = None
271 271 self.root = self.wvfs.base
272 272 self.path = self.wvfs.join(".hg")
273 273 self.origroot = path
274 274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
275 275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
276 276 realfs=False)
277 277 self.vfs = vfsmod.vfs(self.path)
278 278 self.baseui = baseui
279 279 self.ui = baseui.copy()
280 280 self.ui.copy = baseui.copy # prevent copying repo configuration
281 281 # A list of callbacks to shape the phase if no data were found.
282 282 # Callbacks are in the form: func(repo, roots) --> processed root.
283 283 # This list is to be filled by extensions during repo setup
284 284 self._phasedefaults = []
285 285 try:
286 286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
287 287 self._loadextensions()
288 288 except IOError:
289 289 pass
290 290
291 291 if self.featuresetupfuncs:
292 292 self.supported = set(self._basesupported) # use private copy
293 293 extmods = set(m.__name__ for n, m
294 294 in extensions.extensions(self.ui))
295 295 for setupfunc in self.featuresetupfuncs:
296 296 if setupfunc.__module__ in extmods:
297 297 setupfunc(self.ui, self.supported)
298 298 else:
299 299 self.supported = self._basesupported
300 300 color.setup(self.ui)
301 301
302 302 # Add compression engines.
303 303 for name in util.compengines:
304 304 engine = util.compengines[name]
305 305 if engine.revlogheader():
306 306 self.supported.add('exp-compression-%s' % name)
307 307
308 308 if not self.vfs.isdir():
309 309 if create:
310 310 self.requirements = newreporequirements(self)
311 311
312 312 if not self.wvfs.exists():
313 313 self.wvfs.makedirs()
314 314 self.vfs.makedir(notindexed=True)
315 315
316 316 if 'store' in self.requirements:
317 317 self.vfs.mkdir("store")
318 318
319 319 # create an invalid changelog
320 320 self.vfs.append(
321 321 "00changelog.i",
322 322 '\0\0\0\2' # represents revlogv2
323 323 ' dummy changelog to prevent using the old repo layout'
324 324 )
325 325 else:
326 326 raise error.RepoError(_("repository %s not found") % path)
327 327 elif create:
328 328 raise error.RepoError(_("repository %s already exists") % path)
329 329 else:
330 330 try:
331 331 self.requirements = scmutil.readrequires(
332 332 self.vfs, self.supported)
333 333 except IOError as inst:
334 334 if inst.errno != errno.ENOENT:
335 335 raise
336 336
337 337 self.sharedpath = self.path
338 338 try:
339 339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
340 340 if 'relshared' in self.requirements:
341 341 sharedpath = self.vfs.join(sharedpath)
342 342 vfs = vfsmod.vfs(sharedpath, realpath=True)
343 343 s = vfs.base
344 344 if not vfs.exists():
345 345 raise error.RepoError(
346 346 _('.hg/sharedpath points to nonexistent directory %s') % s)
347 347 self.sharedpath = s
348 348 except IOError as inst:
349 349 if inst.errno != errno.ENOENT:
350 350 raise
351 351
352 352 self.store = store.store(
353 353 self.requirements, self.sharedpath, vfsmod.vfs)
354 354 self.spath = self.store.path
355 355 self.svfs = self.store.vfs
356 356 self.sjoin = self.store.join
357 357 self.vfs.createmode = self.store.createmode
358 358 self._applyopenerreqs()
359 359 if create:
360 360 self._writerequirements()
361 361
362 362 self._dirstatevalidatewarned = False
363 363
364 364 self._branchcaches = {}
365 365 self._revbranchcache = None
366 366 self.filterpats = {}
367 367 self._datafilters = {}
368 368 self._transref = self._lockref = self._wlockref = None
369 369
370 370 # A cache for various files under .hg/ that tracks file changes,
371 371 # (used by the filecache decorator)
372 372 #
373 373 # Maps a property name to its util.filecacheentry
374 374 self._filecache = {}
375 375
376 376 # hold sets of revisions to be filtered
377 377 # should be cleared when something might have changed the filter value:
378 378 # - new changesets,
379 379 # - phase change,
380 380 # - new obsolescence marker,
381 381 # - working directory parent change,
382 382 # - bookmark changes
383 383 self.filteredrevcache = {}
384 384
385 385 # generic mapping between names and nodes
386 386 self.names = namespaces.namespaces()
387 387
388 388 @property
389 389 def wopener(self):
390 390 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
391 391 return self.wvfs
392 392
393 393 @property
394 394 def opener(self):
395 395 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
396 396 return self.vfs
397 397
398 398 def close(self):
399 399 self._writecaches()
400 400
401 401 def _loadextensions(self):
402 402 extensions.loadall(self.ui)
403 403
404 404 def _writecaches(self):
405 405 if self._revbranchcache:
406 406 self._revbranchcache.write()
407 407
408 408 def _restrictcapabilities(self, caps):
409 409 if self.ui.configbool('experimental', 'bundle2-advertise', True):
410 410 caps = set(caps)
411 411 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
412 412 caps.add('bundle2=' + urlreq.quote(capsblob))
413 413 return caps
414 414
415 415 def _applyopenerreqs(self):
416 416 self.svfs.options = dict((r, 1) for r in self.requirements
417 417 if r in self.openerreqs)
418 418 # experimental config: format.chunkcachesize
419 419 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
420 420 if chunkcachesize is not None:
421 421 self.svfs.options['chunkcachesize'] = chunkcachesize
422 422 # experimental config: format.maxchainlen
423 423 maxchainlen = self.ui.configint('format', 'maxchainlen')
424 424 if maxchainlen is not None:
425 425 self.svfs.options['maxchainlen'] = maxchainlen
426 426 # experimental config: format.manifestcachesize
427 427 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
428 428 if manifestcachesize is not None:
429 429 self.svfs.options['manifestcachesize'] = manifestcachesize
430 430 # experimental config: format.aggressivemergedeltas
431 431 aggressivemergedeltas = self.ui.configbool('format',
432 432 'aggressivemergedeltas', False)
433 433 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
434 434 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
435 435
436 436 for r in self.requirements:
437 437 if r.startswith('exp-compression-'):
438 438 self.svfs.options['compengine'] = r[len('exp-compression-'):]
439 439
440 440 def _writerequirements(self):
441 441 scmutil.writerequires(self.vfs, self.requirements)
442 442
443 443 def _checknested(self, path):
444 444 """Determine if path is a legal nested repository."""
445 445 if not path.startswith(self.root):
446 446 return False
447 447 subpath = path[len(self.root) + 1:]
448 448 normsubpath = util.pconvert(subpath)
449 449
450 450 # XXX: Checking against the current working copy is wrong in
451 451 # the sense that it can reject things like
452 452 #
453 453 # $ hg cat -r 10 sub/x.txt
454 454 #
455 455 # if sub/ is no longer a subrepository in the working copy
456 456 # parent revision.
457 457 #
458 458 # However, it can of course also allow things that would have
459 459 # been rejected before, such as the above cat command if sub/
460 460 # is a subrepository now, but was a normal directory before.
461 461 # The old path auditor would have rejected by mistake since it
462 462 # panics when it sees sub/.hg/.
463 463 #
464 464 # All in all, checking against the working copy seems sensible
465 465 # since we want to prevent access to nested repositories on
466 466 # the filesystem *now*.
467 467 ctx = self[None]
468 468 parts = util.splitpath(subpath)
469 469 while parts:
470 470 prefix = '/'.join(parts)
471 471 if prefix in ctx.substate:
472 472 if prefix == normsubpath:
473 473 return True
474 474 else:
475 475 sub = ctx.sub(prefix)
476 476 return sub.checknested(subpath[len(prefix) + 1:])
477 477 else:
478 478 parts.pop()
479 479 return False
480 480
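# (editorial example, not part of this changeset) _checknested() above
# walks path prefixes from longest to shortest until one matches a
# registered subrepository. The walk in isolation, with 'substate' as a
# plain dict standing in for ctx.substate:
def _findsubrepoprefix(subpath, substate):
    parts = subpath.split('/')
    while parts:
        prefix = '/'.join(parts)
        if prefix in substate:
            return prefix              # longest matching subrepo prefix
        parts.pop()
    return None

assert _findsubrepoprefix('sub/dir/x.txt', {'sub': 'state'}) == 'sub'
assert _findsubrepoprefix('other/x.txt', {'sub': 'state'}) is None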
481 481 def peer(self):
482 482 return localpeer(self) # not cached to avoid reference cycle
483 483
484 484 def unfiltered(self):
485 485 """Return unfiltered version of the repository
486 486
487 487 Intended to be overwritten by filtered repo."""
488 488 return self
489 489
490 490 def filtered(self, name):
491 491 """Return a filtered version of a repository"""
492 492 # build a new class with the mixin and the current class
493 493 # (possibly subclass of the repo)
494 494 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
495 495 pass
496 496 return filteredrepo(self, name)
497 497
498 498 @repofilecache('bookmarks', 'bookmarks.current')
499 499 def _bookmarks(self):
500 500 return bookmarks.bmstore(self)
501 501
502 502 @property
503 503 def _activebookmark(self):
504 504 return self._bookmarks.active
505 505
506 506 def bookmarkheads(self, bookmark):
507 507 name = bookmark.split('@', 1)[0]
508 508 heads = []
509 509 for mark, n in self._bookmarks.iteritems():
510 510 if mark.split('@', 1)[0] == name:
511 511 heads.append(n)
512 512 return heads
513 513
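# (editorial note, not part of this changeset) Bookmark names may carry an
# '@suffix' marking a divergent variant (e.g. 'foo@remote');
# bookmarkheads() above groups such variants under their base name:
#
#     assert 'foo@remote'.split('@', 1)[0] == 'foo'
#     assert 'foo'.split('@', 1)[0] == 'foo'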
514 514 # _phaserevs and _phasesets depend on changelog. what we need is to
515 515 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
516 516 # can't be easily expressed in filecache mechanism.
517 517 @storecache('phaseroots', '00changelog.i')
518 518 def _phasecache(self):
519 519 return phases.phasecache(self, self._phasedefaults)
520 520
521 521 @storecache('obsstore')
522 522 def obsstore(self):
523 523 # read default format for new obsstore.
524 524 # developer config: format.obsstore-version
525 525 defaultformat = self.ui.configint('format', 'obsstore-version', None)
526 526 # rely on obsstore class default when possible.
527 527 kwargs = {}
528 528 if defaultformat is not None:
529 529 kwargs['defaultformat'] = defaultformat
530 530 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
531 531 store = obsolete.obsstore(self.svfs, readonly=readonly,
532 532 **kwargs)
533 533 if store and readonly:
534 534 self.ui.warn(
535 535 _('obsolete feature not enabled but %i markers found!\n')
536 536 % len(list(store)))
537 537 return store
538 538
539 539 @storecache('00changelog.i')
540 540 def changelog(self):
541 541 c = changelog.changelog(self.svfs)
542 542 if txnutil.mayhavepending(self.root):
543 543 c.readpending('00changelog.i.a')
544 544 return c
545 545
546 546 def _constructmanifest(self):
547 547 # This is a temporary function while we migrate from manifest to
548 548 # manifestlog. It allows bundlerepo and unionrepo to intercept the
549 549 # manifest creation.
550 550 return manifest.manifestrevlog(self.svfs)
551 551
552 552 @storecache('00manifest.i')
553 553 def manifestlog(self):
554 554 return manifest.manifestlog(self.svfs, self)
555 555
556 556 @repofilecache('dirstate')
557 557 def dirstate(self):
558 558 return dirstate.dirstate(self.vfs, self.ui, self.root,
559 559 self._dirstatevalidate)
560 560
561 561 def _dirstatevalidate(self, node):
562 562 try:
563 563 self.changelog.rev(node)
564 564 return node
565 565 except error.LookupError:
566 566 if not self._dirstatevalidatewarned:
567 567 self._dirstatevalidatewarned = True
568 568 self.ui.warn(_("warning: ignoring unknown"
569 569 " working parent %s!\n") % short(node))
570 570 return nullid
571 571
572 572 def __getitem__(self, changeid):
573 573 if changeid is None or changeid == wdirrev:
574 574 return context.workingctx(self)
575 575 if isinstance(changeid, slice):
576 576 return [context.changectx(self, i)
577 577 for i in xrange(*changeid.indices(len(self)))
578 578 if i not in self.changelog.filteredrevs]
579 579 return context.changectx(self, changeid)
580 580
581 581 def __contains__(self, changeid):
582 582 try:
583 583 self[changeid]
584 584 return True
585 585 except error.RepoLookupError:
586 586 return False
587 587
588 588 def __nonzero__(self):
589 589 return True
590 590
591 591 __bool__ = __nonzero__
592 592
593 593 def __len__(self):
594 594 return len(self.changelog)
595 595
596 596 def __iter__(self):
597 597 return iter(self.changelog)
598 598
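# (editorial usage note, not part of this changeset) The special methods
# above give localrepository a container interface: repo[rev-or-node]
# returns a changectx, repo[None] the working-directory context, len(repo)
# the number of changesets, and iteration yields revision numbers. A
# hypothetical session:
#
#     ctx = repo['tip']
#     if somenode in repo:
#         ctx = repo[somenode]
#     ctxs = [repo[r] for r in repo]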
599 599 def revs(self, expr, *args):
600 600 '''Find revisions matching a revset.
601 601
602 602 The revset is specified as a string ``expr`` that may contain
603 603 %-formatting to escape certain types. See ``revsetlang.formatspec``.
604 604
605 605 Revset aliases from the configuration are not expanded. To expand
606 606 user aliases, consider calling ``scmutil.revrange()`` or
607 607 ``repo.anyrevs([expr], user=True)``.
608 608
609 609 Returns a revset.abstractsmartset, which is a list-like interface
610 610 that contains integer revisions.
611 611 '''
612 612 expr = revsetlang.formatspec(expr, *args)
613 613 m = revset.match(None, expr)
614 614 return m(self)
615 615
616 616 def set(self, expr, *args):
617 617 '''Find revisions matching a revset and emit changectx instances.
618 618
619 619 This is a convenience wrapper around ``revs()`` that iterates the
620 620 result and is a generator of changectx instances.
621 621
622 622 Revset aliases from the configuration are not expanded. To expand
623 623 user aliases, consider calling ``scmutil.revrange()``.
624 624 '''
625 625 for r in self.revs(expr, *args):
626 626 yield self[r]
627 627
628 628 def anyrevs(self, specs, user=False):
629 629 '''Find revisions matching one of the given revsets.
630 630
631 631 Revset aliases from the configuration are not expanded by default. To
632 632 expand user aliases, specify ``user=True``.
633 633 '''
634 634 if user:
635 635 m = revset.matchany(self.ui, specs, repo=self)
636 636 else:
637 637 m = revset.matchany(None, specs)
638 638 return m(self)
639 639
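# (editorial usage note, not part of this changeset) The %-escapes
# accepted by revs()/set()/anyrevs() are expanded by
# revsetlang.formatspec() before parsing; a few common ones (list
# abridged, see revsetlang for the authoritative set):
#
#     repo.revs('%d::', 42)                 # %d: an integer revision
#     repo.revs('keyword(%s)', 'fix')       # %s: a quoted string
#     repo.revs('%ln and head()', nodes)    # %ln: a list of binary nodes
#
# Config-defined revset aliases are deliberately not expanded here; route
# user-supplied input through scmutil.revrange() or
# repo.anyrevs([...], user=True) instead.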
640 640 def url(self):
641 641 return 'file:' + self.root
642 642
643 643 def hook(self, name, throw=False, **args):
644 644 """Call a hook, passing this repo instance.
645 645
646 646 This a convenience method to aid invoking hooks. Extensions likely
647 647 won't call this unless they have registered a custom hook or are
648 648 replacing code that is expected to call a hook.
649 649 """
650 650 return hook.hook(self.ui, self, name, throw, **args)
651 651
652 652 def tag(self, names, node, message, local, user, date, editor=False):
653 653 self.ui.deprecwarn("use 'tagsmod.tag' instead of 'repo.tag'", '4.2')
654 654 tagsmod.tag(self, names, node, message, local, user, date,
655 655 editor=editor)
656 656
657 657 @filteredpropertycache
658 658 def _tagscache(self):
659 659 '''Returns a tagscache object that contains various tags related
660 660 caches.'''
661 661
662 662 # This simplifies its cache management by having one decorated
663 663 # function (this one) and the rest simply fetch things from it.
664 664 class tagscache(object):
665 665 def __init__(self):
666 666 # These two define the set of tags for this repository. tags
667 667 # maps tag name to node; tagtypes maps tag name to 'global' or
668 668 # 'local'. (Global tags are defined by .hgtags across all
669 669 # heads, and local tags are defined in .hg/localtags.)
670 670 # They constitute the in-memory cache of tags.
671 671 self.tags = self.tagtypes = None
672 672
673 673 self.nodetagscache = self.tagslist = None
674 674
675 675 cache = tagscache()
676 676 cache.tags, cache.tagtypes = self._findtags()
677 677
678 678 return cache
679 679
680 680 def tags(self):
681 681 '''return a mapping of tag to node'''
682 682 t = {}
683 683 if self.changelog.filteredrevs:
684 684 tags, tt = self._findtags()
685 685 else:
686 686 tags = self._tagscache.tags
687 687 for k, v in tags.iteritems():
688 688 try:
689 689 # ignore tags to unknown nodes
690 690 self.changelog.rev(v)
691 691 t[k] = v
692 692 except (error.LookupError, ValueError):
693 693 pass
694 694 return t
695 695
696 696 def _findtags(self):
697 697 '''Do the hard work of finding tags. Return a pair of dicts
698 698 (tags, tagtypes) where tags maps tag name to node, and tagtypes
699 699 maps tag name to a string like \'global\' or \'local\'.
700 700 Subclasses or extensions are free to add their own tags, but
701 701 should be aware that the returned dicts will be retained for the
702 702 duration of the localrepo object.'''
703 703
704 704 # XXX what tagtype should subclasses/extensions use? Currently
705 705 # mq and bookmarks add tags, but do not set the tagtype at all.
706 706 # Should each extension invent its own tag type? Should there
707 707 # be one tagtype for all such "virtual" tags? Or is the status
708 708 # quo fine?
709 709
710 710
711 711 # map tag name to (node, hist)
712 712 alltags = tagsmod.findglobaltags(self.ui, self)
713 713 # map tag name to tag type
714 714 tagtypes = dict((tag, 'global') for tag in alltags)
715 715
716 716 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
717 717
718 718 # Build the return dicts. Have to re-encode tag names because
719 719 # the tags module always uses UTF-8 (in order not to lose info
720 720 # writing to the cache), but the rest of Mercurial wants them in
721 721 # local encoding.
722 722 tags = {}
723 723 for (name, (node, hist)) in alltags.iteritems():
724 724 if node != nullid:
725 725 tags[encoding.tolocal(name)] = node
726 726 tags['tip'] = self.changelog.tip()
727 727 tagtypes = dict([(encoding.tolocal(name), value)
728 728 for (name, value) in tagtypes.iteritems()])
729 729 return (tags, tagtypes)
730 730
731 731 def tagtype(self, tagname):
732 732 '''
733 733 return the type of the given tag. result can be:
734 734
735 735 'local' : a local tag
736 736 'global' : a global tag
737 737 None : tag does not exist
738 738 '''
739 739
740 740 return self._tagscache.tagtypes.get(tagname)
741 741
742 742 def tagslist(self):
743 743 '''return a list of tags ordered by revision'''
744 744 if not self._tagscache.tagslist:
745 745 l = []
746 746 for t, n in self.tags().iteritems():
747 747 l.append((self.changelog.rev(n), t, n))
748 748 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
749 749
750 750 return self._tagscache.tagslist
751 751
752 752 def nodetags(self, node):
753 753 '''return the tags associated with a node'''
754 754 if not self._tagscache.nodetagscache:
755 755 nodetagscache = {}
756 756 for t, n in self._tagscache.tags.iteritems():
757 757 nodetagscache.setdefault(n, []).append(t)
758 758 for tags in nodetagscache.itervalues():
759 759 tags.sort()
760 760 self._tagscache.nodetagscache = nodetagscache
761 761 return self._tagscache.nodetagscache.get(node, [])
762 762
763 763 def nodebookmarks(self, node):
764 764 """return the list of bookmarks pointing to the specified node"""
765 765 marks = []
766 766 for bookmark, n in self._bookmarks.iteritems():
767 767 if n == node:
768 768 marks.append(bookmark)
769 769 return sorted(marks)
770 770
771 771 def branchmap(self):
772 772 '''returns a dictionary {branch: [branchheads]} with branchheads
773 773 ordered by increasing revision number'''
774 774 branchmap.updatecache(self)
775 775 return self._branchcaches[self.filtername]
776 776
777 777 @unfilteredmethod
778 778 def revbranchcache(self):
779 779 if not self._revbranchcache:
780 780 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
781 781 return self._revbranchcache
782 782
783 783 def branchtip(self, branch, ignoremissing=False):
784 784 '''return the tip node for a given branch
785 785
786 786 If ignoremissing is True, then this method will not raise an error.
787 787 This is helpful for callers that only expect None for a missing branch
788 788 (e.g. namespace).
789 789
790 790 '''
791 791 try:
792 792 return self.branchmap().branchtip(branch)
793 793 except KeyError:
794 794 if not ignoremissing:
795 795 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
796 796 else:
797 797 pass
798 798
799 799 def lookup(self, key):
800 800 return self[key].node()
801 801
802 802 def lookupbranch(self, key, remote=None):
803 803 repo = remote or self
804 804 if key in repo.branchmap():
805 805 return key
806 806
807 807 repo = (remote and remote.local()) and remote or self
808 808 return repo[key].branch()
809 809
810 810 def known(self, nodes):
811 811 cl = self.changelog
812 812 nm = cl.nodemap
813 813 filtered = cl.filteredrevs
814 814 result = []
815 815 for n in nodes:
816 816 r = nm.get(n)
817 817 resp = not (r is None or r in filtered)
818 818 result.append(resp)
819 819 return result
820 820
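# (editorial note, not part of this changeset) known() above is a
# vectorized membership test against the *visible* repository: a node is
# known only if it is in the nodemap and its revision is not filtered.
# Roughly equivalent to:
#
#     [n in nm and nm[n] not in filtered for n in nodes]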
821 821 def local(self):
822 822 return self
823 823
824 824 def publishing(self):
825 825 # it's safe (and desirable) to trust the publish flag unconditionally
826 826 # so that we don't finalize changes shared between users via ssh or nfs
827 827 return self.ui.configbool('phases', 'publish', True, untrusted=True)
828 828
829 829 def cancopy(self):
830 830 # so statichttprepo's override of local() works
831 831 if not self.local():
832 832 return False
833 833 if not self.publishing():
834 834 return True
835 835 # if publishing we can't copy if there is filtered content
836 836 return not self.filtered('visible').changelog.filteredrevs
837 837
838 838 def shared(self):
839 839 '''the type of shared repository (None if not shared)'''
840 840 if self.sharedpath != self.path:
841 841 return 'store'
842 842 return None
843 843
844 844 def join(self, f, *insidef):
845 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
845 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.2')
846 846 return self.vfs.join(os.path.join(f, *insidef))
847 847
848 848 def wjoin(self, f, *insidef):
849 849 return self.vfs.reljoin(self.root, f, *insidef)
850 850
851 851 def file(self, f):
852 852 if f[0] == '/':
853 853 f = f[1:]
854 854 return filelog.filelog(self.svfs, f)
855 855
856 856 def changectx(self, changeid):
857 857 return self[changeid]
858 858
859 859 def setparents(self, p1, p2=nullid):
860 860 self.dirstate.beginparentchange()
861 861 copies = self.dirstate.setparents(p1, p2)
862 862 pctx = self[p1]
863 863 if copies:
864 864 # Adjust copy records, the dirstate cannot do it, it
865 865 # requires access to parents manifests. Preserve them
866 866 # only for entries added to first parent.
867 867 for f in copies:
868 868 if f not in pctx and copies[f] in pctx:
869 869 self.dirstate.copy(copies[f], f)
870 870 if p2 == nullid:
871 871 for f, s in sorted(self.dirstate.copies().items()):
872 872 if f not in pctx and s not in pctx:
873 873 self.dirstate.copy(None, f)
874 874 self.dirstate.endparentchange()
875 875
876 876 def filectx(self, path, changeid=None, fileid=None):
877 877 """changeid can be a changeset revision, node, or tag.
878 878 fileid can be a file revision or node."""
879 879 return context.filectx(self, path, changeid, fileid)
880 880
881 881 def getcwd(self):
882 882 return self.dirstate.getcwd()
883 883
884 884 def pathto(self, f, cwd=None):
885 885 return self.dirstate.pathto(f, cwd)
886 886
887 887 def wfile(self, f, mode='r'):
888 888 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
889 889 return self.wvfs(f, mode)
890 890
891 891 def _link(self, f):
892 892 self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
893 893 '4.0')
894 894 return self.wvfs.islink(f)
895 895
896 896 def _loadfilter(self, filter):
897 897 if filter not in self.filterpats:
898 898 l = []
899 899 for pat, cmd in self.ui.configitems(filter):
900 900 if cmd == '!':
901 901 continue
902 902 mf = matchmod.match(self.root, '', [pat])
903 903 fn = None
904 904 params = cmd
905 905 for name, filterfn in self._datafilters.iteritems():
906 906 if cmd.startswith(name):
907 907 fn = filterfn
908 908 params = cmd[len(name):].lstrip()
909 909 break
910 910 if not fn:
911 911 fn = lambda s, c, **kwargs: util.filter(s, c)
912 912 # Wrap old filters not supporting keyword arguments
913 913 if not inspect.getargspec(fn)[2]:
914 914 oldfn = fn
915 915 fn = lambda s, c, **kwargs: oldfn(s, c)
916 916 l.append((mf, fn, params))
917 917 self.filterpats[filter] = l
918 918 return self.filterpats[filter]
919 919
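# (editorial example, not part of this changeset) The wrapping step in
# _loadfilter() above keeps old two-argument filters working after keyword
# arguments were added to the filter protocol. The shim in isolation
# (Python 2 era, matching the inspect.getargspec() call above; names are
# illustrative):
import inspect

def _acceptkwargs(fn):
    if not inspect.getargspec(fn)[2]:  # index 2: the **kwargs name, if any
        oldfn = fn
        fn = lambda s, c, **kwargs: oldfn(s, c)
    return fn

def _legacyupper(data, cmd):           # a filter predating **kwargs
    return data.upper()

f = _acceptkwargs(_legacyupper)
assert f('abc', 'cmd', ui=None, repo=None, filename='f') == 'ABC'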
920 920 def _filter(self, filterpats, filename, data):
921 921 for mf, fn, cmd in filterpats:
922 922 if mf(filename):
923 923 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
924 924 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
925 925 break
926 926
927 927 return data
928 928
929 929 @unfilteredpropertycache
930 930 def _encodefilterpats(self):
931 931 return self._loadfilter('encode')
932 932
933 933 @unfilteredpropertycache
934 934 def _decodefilterpats(self):
935 935 return self._loadfilter('decode')
936 936
937 937 def adddatafilter(self, name, filter):
938 938 self._datafilters[name] = filter
939 939
940 940 def wread(self, filename):
941 941 if self.wvfs.islink(filename):
942 942 data = self.wvfs.readlink(filename)
943 943 else:
944 944 data = self.wvfs.read(filename)
945 945 return self._filter(self._encodefilterpats, filename, data)
946 946
947 947 def wwrite(self, filename, data, flags, backgroundclose=False):
948 948 """write ``data`` into ``filename`` in the working directory
949 949
950 950 This returns length of written (maybe decoded) data.
951 951 """
952 952 data = self._filter(self._decodefilterpats, filename, data)
953 953 if 'l' in flags:
954 954 self.wvfs.symlink(data, filename)
955 955 else:
956 956 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
957 957 if 'x' in flags:
958 958 self.wvfs.setflags(filename, False, True)
959 959 return len(data)
960 960
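# (editorial note, not part of this changeset) 'flags' in wwrite() above
# is the manifest flag string: 'l' means the data is a symlink target,
# 'x' means the file should be executable, '' a plain regular file.
# Hypothetical calls:
#
#     repo.wwrite('bin/run', scriptdata, 'x')   # executable file
#     repo.wwrite('link', 'target/path', 'l')   # symlink to target/path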
961 961 def wwritedata(self, filename, data):
962 962 return self._filter(self._decodefilterpats, filename, data)
963 963
964 964 def currenttransaction(self):
965 965 """return the current transaction or None if non exists"""
966 966 if self._transref:
967 967 tr = self._transref()
968 968 else:
969 969 tr = None
970 970
971 971 if tr and tr.running():
972 972 return tr
973 973 return None
974 974
975 975 def transaction(self, desc, report=None):
976 976 if (self.ui.configbool('devel', 'all-warnings')
977 977 or self.ui.configbool('devel', 'check-locks')):
978 978 if self._currentlock(self._lockref) is None:
979 979 raise error.ProgrammingError('transaction requires locking')
980 980 tr = self.currenttransaction()
981 981 if tr is not None:
982 982 return tr.nest()
983 983
984 984 # abort here if the journal already exists
985 985 if self.svfs.exists("journal"):
986 986 raise error.RepoError(
987 987 _("abandoned transaction found"),
988 988 hint=_("run 'hg recover' to clean up transaction"))
989 989
990 990 idbase = "%.40f#%f" % (random.random(), time.time())
991 991 ha = hex(hashlib.sha1(idbase).digest())
992 992 txnid = 'TXN:' + ha
993 993 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
994 994
995 995 self._writejournal(desc)
996 996 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
997 997 if report:
998 998 rp = report
999 999 else:
1000 1000 rp = self.ui.warn
1001 1001 vfsmap = {'plain': self.vfs} # root of .hg/
1002 1002 # we must avoid cyclic reference between repo and transaction.
1003 1003 reporef = weakref.ref(self)
1004 1004 def validate(tr):
1005 1005 """will run pre-closing hooks"""
1006 1006 reporef().hook('pretxnclose', throw=True,
1007 1007 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1008 1008 def releasefn(tr, success):
1009 1009 repo = reporef()
1010 1010 if success:
1011 1011 # this should be explicitly invoked here, because
1012 1012 # in-memory changes aren't written out when closing the
1013 1013 # transaction, if tr.addfilegenerator (via
1014 1014 # dirstate.write or so) isn't invoked while the
1015 1015 # transaction is running
1016 1016 repo.dirstate.write(None)
1017 1017 else:
1018 1018 # discard all changes (including ones already written
1019 1019 # out) in this transaction
1020 1020 repo.dirstate.restorebackup(None, prefix='journal.')
1021 1021
1022 1022 repo.invalidate(clearfilecache=True)
1023 1023
1024 1024 tr = transaction.transaction(rp, self.svfs, vfsmap,
1025 1025 "journal",
1026 1026 "undo",
1027 1027 aftertrans(renames),
1028 1028 self.store.createmode,
1029 1029 validator=validate,
1030 1030 releasefn=releasefn)
1031 1031
1032 1032 tr.hookargs['txnid'] = txnid
1033 1033 # note: writing the fncache only during finalize means that the file is
1034 1034 # outdated when running hooks. As fncache is used for streaming clones,
1035 1035 # this is not expected to break anything that happens during the hooks.
1036 1036 tr.addfinalize('flush-fncache', self.store.write)
1037 1037 def txnclosehook(tr2):
1038 1038 """To be run if transaction is successful, will schedule a hook run
1039 1039 """
1040 1040 # Don't reference tr2 in hook() so we don't hold a reference.
1041 1041 # This reduces memory consumption when there are multiple
1042 1042 # transactions per lock. This can likely go away if issue5045
1043 1043 # fixes the function accumulation.
1044 1044 hookargs = tr2.hookargs
1045 1045
1046 1046 def hook():
1047 1047 reporef().hook('txnclose', throw=False, txnname=desc,
1048 1048 **pycompat.strkwargs(hookargs))
1049 1049 reporef()._afterlock(hook)
1050 1050 tr.addfinalize('txnclose-hook', txnclosehook)
1051 1051 def txnaborthook(tr2):
1052 1052 """To be run if transaction is aborted
1053 1053 """
1054 1054 reporef().hook('txnabort', throw=False, txnname=desc,
1055 1055 **tr2.hookargs)
1056 1056 tr.addabort('txnabort-hook', txnaborthook)
1057 1057 # avoid eager cache invalidation. in-memory data should be identical
1058 1058 # to stored data if transaction has no error.
1059 1059 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1060 1060 self._transref = weakref.ref(tr)
1061 1061 return tr
1062 1062
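# (editorial example, not part of this changeset) transaction() above
# stores weakref.ref(self) and lets every hook closure dereference it,
# breaking the repo <-> transaction reference cycle. The pattern in
# isolation; all names are illustrative:
import weakref

class _repo(object):
    def hook(self, name):
        print('running hook: %s' % name)

r = _repo()
reporef = weakref.ref(r)

def _txnclosehook():
    repo = reporef()               # None once the repo has been collected
    if repo is not None:
        repo.hook('txnclose')

_txnclosehook()                    # prints: running hook: txnclose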
1063 1063 def _journalfiles(self):
1064 1064 return ((self.svfs, 'journal'),
1065 1065 (self.vfs, 'journal.dirstate'),
1066 1066 (self.vfs, 'journal.branch'),
1067 1067 (self.vfs, 'journal.desc'),
1068 1068 (self.vfs, 'journal.bookmarks'),
1069 1069 (self.svfs, 'journal.phaseroots'))
1070 1070
1071 1071 def undofiles(self):
1072 1072 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1073 1073
1074 1074 def _writejournal(self, desc):
1075 1075 self.dirstate.savebackup(None, prefix='journal.')
1076 1076 self.vfs.write("journal.branch",
1077 1077 encoding.fromlocal(self.dirstate.branch()))
1078 1078 self.vfs.write("journal.desc",
1079 1079 "%d\n%s\n" % (len(self), desc))
1080 1080 self.vfs.write("journal.bookmarks",
1081 1081 self.vfs.tryread("bookmarks"))
1082 1082 self.svfs.write("journal.phaseroots",
1083 1083 self.svfs.tryread("phaseroots"))
1084 1084
1085 1085 def recover(self):
1086 1086 with self.lock():
1087 1087 if self.svfs.exists("journal"):
1088 1088 self.ui.status(_("rolling back interrupted transaction\n"))
1089 1089 vfsmap = {'': self.svfs,
1090 1090 'plain': self.vfs,}
1091 1091 transaction.rollback(self.svfs, vfsmap, "journal",
1092 1092 self.ui.warn)
1093 1093 self.invalidate()
1094 1094 return True
1095 1095 else:
1096 1096 self.ui.warn(_("no interrupted transaction available\n"))
1097 1097 return False
1098 1098
1099 1099 def rollback(self, dryrun=False, force=False):
1100 1100 wlock = lock = dsguard = None
1101 1101 try:
1102 1102 wlock = self.wlock()
1103 1103 lock = self.lock()
1104 1104 if self.svfs.exists("undo"):
1105 1105 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1106 1106
1107 1107 return self._rollback(dryrun, force, dsguard)
1108 1108 else:
1109 1109 self.ui.warn(_("no rollback information available\n"))
1110 1110 return 1
1111 1111 finally:
1112 1112 release(dsguard, lock, wlock)
1113 1113
1114 1114 @unfilteredmethod # Until we get smarter cache management
1115 1115 def _rollback(self, dryrun, force, dsguard):
1116 1116 ui = self.ui
1117 1117 try:
1118 1118 args = self.vfs.read('undo.desc').splitlines()
1119 1119 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1120 1120 if len(args) >= 3:
1121 1121 detail = args[2]
1122 1122 oldtip = oldlen - 1
1123 1123
1124 1124 if detail and ui.verbose:
1125 1125 msg = (_('repository tip rolled back to revision %s'
1126 1126 ' (undo %s: %s)\n')
1127 1127 % (oldtip, desc, detail))
1128 1128 else:
1129 1129 msg = (_('repository tip rolled back to revision %s'
1130 1130 ' (undo %s)\n')
1131 1131 % (oldtip, desc))
1132 1132 except IOError:
1133 1133 msg = _('rolling back unknown transaction\n')
1134 1134 desc = None
1135 1135
1136 1136 if not force and self['.'] != self['tip'] and desc == 'commit':
1137 1137 raise error.Abort(
1138 1138 _('rollback of last commit while not checked out '
1139 1139 'may lose data'), hint=_('use -f to force'))
1140 1140
1141 1141 ui.status(msg)
1142 1142 if dryrun:
1143 1143 return 0
1144 1144
1145 1145 parents = self.dirstate.parents()
1146 1146 self.destroying()
1147 1147 vfsmap = {'plain': self.vfs, '': self.svfs}
1148 1148 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1149 1149 if self.vfs.exists('undo.bookmarks'):
1150 1150 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1151 1151 if self.svfs.exists('undo.phaseroots'):
1152 1152 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1153 1153 self.invalidate()
1154 1154
1155 1155 parentgone = (parents[0] not in self.changelog.nodemap or
1156 1156 parents[1] not in self.changelog.nodemap)
1157 1157 if parentgone:
1158 1158 # prevent dirstateguard from overwriting already restored one
1159 1159 dsguard.close()
1160 1160
1161 1161 self.dirstate.restorebackup(None, prefix='undo.')
1162 1162 try:
1163 1163 branch = self.vfs.read('undo.branch')
1164 1164 self.dirstate.setbranch(encoding.tolocal(branch))
1165 1165 except IOError:
1166 1166 ui.warn(_('named branch could not be reset: '
1167 1167 'current branch is still \'%s\'\n')
1168 1168 % self.dirstate.branch())
1169 1169
1170 1170 parents = tuple([p.rev() for p in self[None].parents()])
1171 1171 if len(parents) > 1:
1172 1172 ui.status(_('working directory now based on '
1173 1173 'revisions %d and %d\n') % parents)
1174 1174 else:
1175 1175 ui.status(_('working directory now based on '
1176 1176 'revision %d\n') % parents)
1177 1177 mergemod.mergestate.clean(self, self['.'].node())
1178 1178
1179 1179 # TODO: if we know which new heads may result from this rollback, pass
1180 1180 # them to destroy(), which will prevent the branchhead cache from being
1181 1181 # invalidated.
1182 1182 self.destroyed()
1183 1183 return 0
1184 1184
1185 1185 def invalidatecaches(self):
1186 1186
1187 1187 if '_tagscache' in vars(self):
1188 1188 # can't use delattr on proxy
1189 1189 del self.__dict__['_tagscache']
1190 1190
1191 1191 self.unfiltered()._branchcaches.clear()
1192 1192 self.invalidatevolatilesets()
1193 1193
1194 1194 def invalidatevolatilesets(self):
1195 1195 self.filteredrevcache.clear()
1196 1196 obsolete.clearobscaches(self)
1197 1197
1198 1198 def invalidatedirstate(self):
1199 1199 '''Invalidates the dirstate, causing the next call to dirstate
1200 1200 to check if it was modified since the last time it was read,
1201 1201 rereading it if it has.
1202 1202
1203 1203 This is different from dirstate.invalidate() in that it doesn't always
1204 1204 reread the dirstate. Use dirstate.invalidate() if you want to
1205 1205 explicitly read the dirstate again (i.e. restoring it to a previous
1206 1206 known good state).'''
1207 1207 if hasunfilteredcache(self, 'dirstate'):
1208 1208 for k in self.dirstate._filecache:
1209 1209 try:
1210 1210 delattr(self.dirstate, k)
1211 1211 except AttributeError:
1212 1212 pass
1213 1213 delattr(self.unfiltered(), 'dirstate')
1214 1214
1215 1215 def invalidate(self, clearfilecache=False):
1216 1216 '''Invalidates both store and non-store parts other than dirstate
1217 1217
1218 1218 If a transaction is running, invalidation of store is omitted,
1219 1219 because discarding in-memory changes might cause inconsistency
1220 1220 (e.g. incomplete fncache causes unintentional failure, but
1221 1221 redundant one doesn't).
1222 1222 '''
1223 1223 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1224 1224 for k in list(self._filecache.keys()):
1225 1225 # dirstate is invalidated separately in invalidatedirstate()
1226 1226 if k == 'dirstate':
1227 1227 continue
1228 1228
1229 1229 if clearfilecache:
1230 1230 del self._filecache[k]
1231 1231 try:
1232 1232 delattr(unfiltered, k)
1233 1233 except AttributeError:
1234 1234 pass
1235 1235 self.invalidatecaches()
1236 1236 if not self.currenttransaction():
1237 1237 # TODO: Changing contents of store outside transaction
1238 1238 # causes inconsistency. We should make in-memory store
1239 1239 # changes detectable, and abort if changed.
1240 1240 self.store.invalidatecaches()
1241 1241
1242 1242 def invalidateall(self):
1243 1243 '''Fully invalidates both store and non-store parts, causing the
1244 1244 subsequent operation to reread any outside changes.'''
1245 1245 # extension should hook this to invalidate its caches
1246 1246 self.invalidate()
1247 1247 self.invalidatedirstate()
1248 1248
1249 1249 @unfilteredmethod
1250 1250 def _refreshfilecachestats(self, tr):
1251 1251 """Reload stats of cached files so that they are flagged as valid"""
1252 1252 for k, ce in self._filecache.items():
1253 1253 if k == 'dirstate' or k not in self.__dict__:
1254 1254 continue
1255 1255 ce.refresh()
1256 1256
1257 1257 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1258 1258 inheritchecker=None, parentenvvar=None):
1259 1259 parentlock = None
1260 1260 # the contents of parentenvvar are used by the underlying lock to
1261 1261 # determine whether it can be inherited
1262 1262 if parentenvvar is not None:
1263 1263 parentlock = encoding.environ.get(parentenvvar)
1264 1264 try:
1265 1265 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1266 1266 acquirefn=acquirefn, desc=desc,
1267 1267 inheritchecker=inheritchecker,
1268 1268 parentlock=parentlock)
1269 1269 except error.LockHeld as inst:
1270 1270 if not wait:
1271 1271 raise
1272 1272 # show more details for new-style locks
1273 1273 if ':' in inst.locker:
1274 1274 host, pid = inst.locker.split(":", 1)
1275 1275 self.ui.warn(
1276 1276 _("waiting for lock on %s held by process %r "
1277 1277 "on host %r\n") % (desc, pid, host))
1278 1278 else:
1279 1279 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1280 1280 (desc, inst.locker))
1281 1281 # default to 600 seconds timeout
1282 1282 l = lockmod.lock(vfs, lockname,
1283 1283 int(self.ui.config("ui", "timeout", "600")),
1284 1284 releasefn=releasefn, acquirefn=acquirefn,
1285 1285 desc=desc)
1286 1286 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1287 1287 return l
1288 1288
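# (editorial example, not part of this changeset) _lock() above first
# attempts a non-blocking acquire, warns about the current holder, then
# retries with a timeout (the ui.timeout config, default 600 seconds).
# The same shape, self-contained, using a plain threading.Lock; names are
# illustrative:
import threading
import time

def _acquirewithwait(lk, desc, timeout=600, poll=0.1):
    if lk.acquire(False):              # like the initial wait=False try
        return lk
    print('waiting for lock on %s' % desc)
    deadline = time.time() + timeout
    while time.time() < deadline:
        if lk.acquire(False):
            print('got lock on %s' % desc)
            return lk
        time.sleep(poll)
    raise RuntimeError('timed out waiting for lock on %s' % desc)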
1289 1289 def _afterlock(self, callback):
1290 1290 """add a callback to be run when the repository is fully unlocked
1291 1291
1292 1292 The callback will be executed when the outermost lock is released
1293 1293 (with wlock being higher level than 'lock')."""
1294 1294 for ref in (self._wlockref, self._lockref):
1295 1295 l = ref and ref()
1296 1296 if l and l.held:
1297 1297 l.postrelease.append(callback)
1298 1298 break
1299 1299 else: # no lock has been found.
1300 1300 callback()
1301 1301
1302 1302 def lock(self, wait=True):
1303 1303 '''Lock the repository store (.hg/store) and return a weak reference
1304 1304 to the lock. Use this before modifying the store (e.g. committing or
1305 1305 stripping). If you are opening a transaction, get a lock as well.
1306 1306
1307 1307 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1308 1308 'wlock' first to avoid a dead-lock hazard.'''
1309 1309 l = self._currentlock(self._lockref)
1310 1310 if l is not None:
1311 1311 l.lock()
1312 1312 return l
1313 1313
1314 1314 l = self._lock(self.svfs, "lock", wait, None,
1315 1315 self.invalidate, _('repository %s') % self.origroot)
1316 1316 self._lockref = weakref.ref(l)
1317 1317 return l
1318 1318
1319 1319 def _wlockchecktransaction(self):
1320 1320 if self.currenttransaction() is not None:
1321 1321 raise error.LockInheritanceContractViolation(
1322 1322 'wlock cannot be inherited in the middle of a transaction')
1323 1323
1324 1324 def wlock(self, wait=True):
1325 1325 '''Lock the non-store parts of the repository (everything under
1326 1326 .hg except .hg/store) and return a weak reference to the lock.
1327 1327
1328 1328 Use this before modifying files in .hg.
1329 1329
1330 1330 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1331 1331 'wlock' first to avoid a dead-lock hazard.'''
1332 1332 l = self._wlockref and self._wlockref()
1333 1333 if l is not None and l.held:
1334 1334 l.lock()
1335 1335 return l
1336 1336
1337 1337 # We do not need to check for non-waiting lock acquisition. Such
1338 1338 # acquisition would not cause a dead-lock as it would just fail.
1339 1339 if wait and (self.ui.configbool('devel', 'all-warnings')
1340 1340 or self.ui.configbool('devel', 'check-locks')):
1341 1341 if self._currentlock(self._lockref) is not None:
1342 1342 self.ui.develwarn('"wlock" acquired after "lock"')
1343 1343
1344 1344 def unlock():
1345 1345 if self.dirstate.pendingparentchange():
1346 1346 self.dirstate.invalidate()
1347 1347 else:
1348 1348 self.dirstate.write(None)
1349 1349
1350 1350 self._filecache['dirstate'].refresh()
1351 1351
1352 1352 l = self._lock(self.vfs, "wlock", wait, unlock,
1353 1353 self.invalidatedirstate, _('working directory of %s') %
1354 1354 self.origroot,
1355 1355 inheritchecker=self._wlockchecktransaction,
1356 1356 parentenvvar='HG_WLOCK_LOCKER')
1357 1357 self._wlockref = weakref.ref(l)
1358 1358 return l
1359 1359
1360 1360 def _currentlock(self, lockref):
1361 1361 """Returns the lock if it's held, or None if it's not."""
1362 1362 if lockref is None:
1363 1363 return None
1364 1364 l = lockref()
1365 1365 if l is None or not l.held:
1366 1366 return None
1367 1367 return l
1368 1368
1369 1369 def currentwlock(self):
1370 1370 """Returns the wlock if it's held, or None if it's not."""
1371 1371 return self._currentlock(self._wlockref)
1372 1372
1373 1373 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1374 1374 """
1375 1375 commit an individual file as part of a larger transaction
1376 1376 """
1377 1377
1378 1378 fname = fctx.path()
1379 1379 fparent1 = manifest1.get(fname, nullid)
1380 1380 fparent2 = manifest2.get(fname, nullid)
1381 1381 if isinstance(fctx, context.filectx):
1382 1382 node = fctx.filenode()
1383 1383 if node in [fparent1, fparent2]:
1384 1384 self.ui.debug('reusing %s filelog entry\n' % fname)
1385 1385 if manifest1.flags(fname) != fctx.flags():
1386 1386 changelist.append(fname)
1387 1387 return node
1388 1388
1389 1389 flog = self.file(fname)
1390 1390 meta = {}
1391 1391 copy = fctx.renamed()
1392 1392 if copy and copy[0] != fname:
1393 1393 # Mark the new revision of this file as a copy of another
1394 1394 # file. This copy data will effectively act as a parent
1395 1395 # of this new revision. If this is a merge, the first
1396 1396 # parent will be the nullid (meaning "look up the copy data")
1397 1397 # and the second one will be the other parent. For example:
1398 1398 #
1399 1399 # 0 --- 1 --- 3 rev1 changes file foo
1400 1400 # \ / rev2 renames foo to bar and changes it
1401 1401 # \- 2 -/ rev3 should have bar with all changes and
1402 1402 # should record that bar descends from
1403 1403 # bar in rev2 and foo in rev1
1404 1404 #
1405 1405 # this allows this merge to succeed:
1406 1406 #
1407 1407 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1408 1408 # \ / merging rev3 and rev4 should use bar@rev2
1409 1409 # \- 2 --- 4 as the merge base
1410 1410 #
1411 1411
1412 1412 cfname = copy[0]
1413 1413 crev = manifest1.get(cfname)
1414 1414 newfparent = fparent2
1415 1415
1416 1416 if manifest2: # branch merge
1417 1417 if fparent2 == nullid or crev is None: # copied on remote side
1418 1418 if cfname in manifest2:
1419 1419 crev = manifest2[cfname]
1420 1420 newfparent = fparent1
1421 1421
1422 1422 # Here, we used to search backwards through history to try to find
1423 1423 # where the file copy came from if the source of a copy was not in
1424 1424 # the parent directory. However, this doesn't actually make sense to
1425 1425 # do (what does a copy from something not in your working copy even
1426 1426 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1427 1427 # the user that copy information was dropped, so if they didn't
1428 1428 # expect this outcome it can be fixed, but this is the correct
1429 1429 # behavior in this circumstance.
1430 1430
1431 1431 if crev:
1432 1432 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1433 1433 meta["copy"] = cfname
1434 1434 meta["copyrev"] = hex(crev)
1435 1435 fparent1, fparent2 = nullid, newfparent
1436 1436 else:
1437 1437 self.ui.warn(_("warning: can't find ancestor for '%s' "
1438 1438 "copied from '%s'!\n") % (fname, cfname))
1439 1439
1440 1440 elif fparent1 == nullid:
1441 1441 fparent1, fparent2 = fparent2, nullid
1442 1442 elif fparent2 != nullid:
1443 1443 # is one parent an ancestor of the other?
1444 1444 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1445 1445 if fparent1 in fparentancestors:
1446 1446 fparent1, fparent2 = fparent2, nullid
1447 1447 elif fparent2 in fparentancestors:
1448 1448 fparent2 = nullid
1449 1449
1450 1450 # is the file changed?
1451 1451 text = fctx.data()
1452 1452 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1453 1453 changelist.append(fname)
1454 1454 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1455 1455 # are just the flags changed during merge?
1456 1456 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1457 1457 changelist.append(fname)
1458 1458
1459 1459 return fparent1
1460 1460
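# (editorial example, not part of this changeset) The ancestry check near
# the end of _filecommit() above collapses a would-be file merge when one
# filelog parent already descends from the other. The reduction rule in
# isolation ('null' stands in for nullid; 'commonheads' for the result of
# flog.commonancestorsheads(p1, p2)):
def _reducefileparents(p1, p2, commonheads, null=None):
    if p1 in commonheads:          # p1 is an ancestor of p2: keep p2 only
        return p2, null
    if p2 in commonheads:          # p2 is an ancestor of p1: keep p1 only
        return p1, null
    return p1, p2                  # independent lines: record a real merge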
1461 1461 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1462 1462 """check for commit arguments that aren't committable"""
1463 1463 if match.isexact() or match.prefix():
1464 1464 matched = set(status.modified + status.added + status.removed)
1465 1465
1466 1466 for f in match.files():
1467 1467 f = self.dirstate.normalize(f)
1468 1468 if f == '.' or f in matched or f in wctx.substate:
1469 1469 continue
1470 1470 if f in status.deleted:
1471 1471 fail(f, _('file not found!'))
1472 1472 if f in vdirs: # visited directory
1473 1473 d = f + '/'
1474 1474 for mf in matched:
1475 1475 if mf.startswith(d):
1476 1476 break
1477 1477 else:
1478 1478 fail(f, _("no match under directory!"))
1479 1479 elif f not in self.dirstate:
1480 1480 fail(f, _("file not tracked!"))
1481 1481
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

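    # A minimal usage sketch (the message and user below are hypothetical,
    # not part of this module): commit() takes its own locks and runs the
    # hooks, so a caller only needs something like
    #
    #   node = repo.commit(text='fix parser crash',
    #                      user='alice <alice@example.com>')
    #   if node is None:
    #       pass  # nothing changed, no commit was created
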
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent already has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

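    # A sketch of driving commitctx() directly through an in-memory context,
    # as some extensions do; the filenames and message are hypothetical and
    # commit() above remains the usual entry point:
    #
    #   def filectxfn(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #   mctx = context.memctx(repo, (p1node, p2node), 'import data',
    #                         ['data.txt'], filectxfn, user='alice')
    #   node = repo.commitctx(mctx)
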
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

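    # For example (the branch name is hypothetical), newest head first:
    #
    #   heads = repo.branchheads('default', closed=True)
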
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

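    # A reading of the loop above: for each (top, bottom) pair, between()
    # walks first parents from top towards bottom and keeps the nodes at
    # exponentially growing distances 1, 2, 4, 8, ... from top -- a sparse
    # sample used by the legacy discovery protocol, e.g.
    #
    #   samples = repo.between([(tipnode, rootnode)])[0]
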
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called before pushing
        changesets; each hook receives a pushop exposing repo, remote and
        outgoing.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

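    # Hypothetical example: bookmarks form one pushkey namespace, so moving
    # a bookmark through this interface looks roughly like
    #
    #   repo.pushkey('bookmarks', 'feature-x', hex(oldnode), hex(newnode))
    #
    # which fires the prepushkey/pushkey hooks handled above.
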
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

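    # The message is written to .hg/last-message.txt and the return value is
    # that path relative to the current directory, e.g. (message text
    # hypothetical):
    #
    #   msgfn = repo.savecommitmessage('WIP: rework parser\n')
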
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename can't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

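# A sketch of the expected wiring (assuming transaction.transaction's
# 'after' callback parameter): the returned callable renames journal files
# to undo files once the transaction closes cleanly, roughly
#
#   tr = transaction.transaction(report, vfs, vfsmap, "journal", "undo",
#                                aftertrans(renamefiles))
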
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

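# Derived from the code above: undoname('/repo/.hg/store/journal') returns
# '/repo/.hg/store/undo' (the path itself is a hypothetical example).
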
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
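
# A hypothetical extension sketch showing the wrapping mentioned in the
# docstring above (the requirement name is invented):
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       reqs.add('exp-myfeature')
#       return reqs
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)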