localrepo: deprecated 'repo.wopener' (API)...
Pierre-Yves David
r31145:11a97785 default
@@ -1,2057 +1,2061 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 )
63 63
64 64 release = lockmod.release
65 65 urlerr = util.urlerr
66 66 urlreq = util.urlreq
67 67
68 68 class repofilecache(scmutil.filecache):
69 69 """All filecache usage on a repo is done for logic that should be unfiltered
70 70 """
71 71
72 72 def __get__(self, repo, type=None):
73 73 if repo is None:
74 74 return self
75 75 return super(repofilecache, self).__get__(repo.unfiltered(), type)
76 76 def __set__(self, repo, value):
77 77 return super(repofilecache, self).__set__(repo.unfiltered(), value)
78 78 def __delete__(self, repo):
79 79 return super(repofilecache, self).__delete__(repo.unfiltered())
80 80
81 81 class storecache(repofilecache):
82 82 """filecache for files in the store"""
83 83 def join(self, obj, fname):
84 84 return obj.sjoin(fname)
85 85
86 86 class unfilteredpropertycache(util.propertycache):
87 87 """propertycache that applies to the unfiltered repo only"""
88 88
89 89 def __get__(self, repo, type=None):
90 90 unfi = repo.unfiltered()
91 91 if unfi is repo:
92 92 return super(unfilteredpropertycache, self).__get__(unfi)
93 93 return getattr(unfi, self.name)
94 94
95 95 class filteredpropertycache(util.propertycache):
96 96 """propertycache that must take filtering into account"""
97 97
98 98 def cachevalue(self, obj, value):
99 99 object.__setattr__(obj, self.name, value)
100 100
101 101
102 102 def hasunfilteredcache(repo, name):
103 103 """check if a repo has an unfilteredpropertycache value for <name>"""
104 104 return name in vars(repo.unfiltered())
105 105
106 106 def unfilteredmethod(orig):
107 107 """decorate a method that always needs to be run on the unfiltered version"""
108 108 def wrapper(repo, *args, **kwargs):
109 109 return orig(repo.unfiltered(), *args, **kwargs)
110 110 return wrapper
111 111
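# NOTE (editorial, not part of the original file): a short sketch of how the
# caching decorators above are consumed further down in this module -- e.g. a
# property cached per store file and refreshed when that file changes
# (this usage appears verbatim later in the file):
#
#     @storecache('phaseroots', '00changelog.i')
#     def _phasecache(self):
#         return phases.phasecache(self, self._phasedefaults)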
112 112 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
113 113 'unbundle'))
114 114 legacycaps = moderncaps.union(set(['changegroupsubset']))
115 115
116 116 class localpeer(peer.peerrepository):
117 117 '''peer for a local repo; reflects only the most recent API'''
118 118
119 119 def __init__(self, repo, caps=moderncaps):
120 120 peer.peerrepository.__init__(self)
121 121 self._repo = repo.filtered('served')
122 122 self.ui = repo.ui
123 123 self._caps = repo._restrictcapabilities(caps)
124 124 self.requirements = repo.requirements
125 125 self.supportedformats = repo.supportedformats
126 126
127 127 def close(self):
128 128 self._repo.close()
129 129
130 130 def _capabilities(self):
131 131 return self._caps
132 132
133 133 def local(self):
134 134 return self._repo
135 135
136 136 def canpush(self):
137 137 return True
138 138
139 139 def url(self):
140 140 return self._repo.url()
141 141
142 142 def lookup(self, key):
143 143 return self._repo.lookup(key)
144 144
145 145 def branchmap(self):
146 146 return self._repo.branchmap()
147 147
148 148 def heads(self):
149 149 return self._repo.heads()
150 150
151 151 def known(self, nodes):
152 152 return self._repo.known(nodes)
153 153
154 154 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
155 155 **kwargs):
156 156 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
157 157 common=common, bundlecaps=bundlecaps,
158 158 **kwargs)
159 159 cb = util.chunkbuffer(chunks)
160 160
161 161 if bundlecaps is not None and 'HG20' in bundlecaps:
162 162 # When requesting a bundle2, getbundle returns a stream to make the
163 163 # wire level function happier. We need to build a proper object
164 164 # from it in local peer.
165 165 return bundle2.getunbundler(self.ui, cb)
166 166 else:
167 167 return changegroup.getunbundler('01', cb, None)
168 168
169 169 # TODO We might want to move the next two calls into legacypeer and add
170 170 # unbundle instead.
171 171
172 172 def unbundle(self, cg, heads, url):
173 173 """apply a bundle to a repo
174 174
175 175 This function handles the repo locking itself."""
176 176 try:
177 177 try:
178 178 cg = exchange.readbundle(self.ui, cg, None)
179 179 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
180 180 if util.safehasattr(ret, 'getchunks'):
181 181 # This is a bundle20 object, turn it into an unbundler.
182 182 # This little dance should be dropped eventually when the
183 183 # API is finally improved.
184 184 stream = util.chunkbuffer(ret.getchunks())
185 185 ret = bundle2.getunbundler(self.ui, stream)
186 186 return ret
187 187 except Exception as exc:
188 188 # If the exception contains output salvaged from a bundle2
189 189 # reply, we need to make sure it is printed before continuing
190 190 # to fail. So we build a bundle2 with such output and consume
191 191 # it directly.
192 192 #
193 193 # This is not very elegant but allows a "simple" solution for
194 194 # issue4594
195 195 output = getattr(exc, '_bundle2salvagedoutput', ())
196 196 if output:
197 197 bundler = bundle2.bundle20(self._repo.ui)
198 198 for out in output:
199 199 bundler.addpart(out)
200 200 stream = util.chunkbuffer(bundler.getchunks())
201 201 b = bundle2.getunbundler(self.ui, stream)
202 202 bundle2.processbundle(self._repo, b)
203 203 raise
204 204 except error.PushRaced as exc:
205 205 raise error.ResponseError(_('push failed:'), str(exc))
206 206
207 207 def lock(self):
208 208 return self._repo.lock()
209 209
210 210 def addchangegroup(self, cg, source, url):
211 211 return cg.apply(self._repo, source, url)
212 212
213 213 def pushkey(self, namespace, key, old, new):
214 214 return self._repo.pushkey(namespace, key, old, new)
215 215
216 216 def listkeys(self, namespace):
217 217 return self._repo.listkeys(namespace)
218 218
219 219 def debugwireargs(self, one, two, three=None, four=None, five=None):
220 220 '''used to test argument passing over the wire'''
221 221 return "%s %s %s %s %s" % (one, two, three, four, five)
222 222
223 223 class locallegacypeer(localpeer):
224 224 '''peer extension which implements legacy methods too; used for tests with
225 225 restricted capabilities'''
226 226
227 227 def __init__(self, repo):
228 228 localpeer.__init__(self, repo, caps=legacycaps)
229 229
230 230 def branches(self, nodes):
231 231 return self._repo.branches(nodes)
232 232
233 233 def between(self, pairs):
234 234 return self._repo.between(pairs)
235 235
236 236 def changegroup(self, basenodes, source):
237 237 return changegroup.changegroup(self._repo, basenodes, source)
238 238
239 239 def changegroupsubset(self, bases, heads, source):
240 240 return changegroup.changegroupsubset(self._repo, bases, heads, source)
241 241
242 242 class localrepository(object):
243 243
244 244 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
245 245 'manifestv2'))
246 246 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
247 247 'relshared', 'dotencode'))
248 248 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
249 249 filtername = None
250 250
251 251 # a list of (ui, featureset) functions.
252 252 # only functions defined in module of enabled extensions are invoked
253 253 featuresetupfuncs = set()
254 254
255 255 def __init__(self, baseui, path, create=False):
256 256 self.requirements = set()
257 257 # vfs to access the working copy
258 258 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
259 259 # vfs to access the content of the repository
260 260 self.vfs = None
261 261 # vfs to access the store part of the repository
262 262 self.svfs = None
263 self.wopener = self.wvfs
264 263 self.root = self.wvfs.base
265 264 self.path = self.wvfs.join(".hg")
266 265 self.origroot = path
267 266 self.auditor = pathutil.pathauditor(self.root, self._checknested)
268 267 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
269 268 realfs=False)
270 269 self.vfs = scmutil.vfs(self.path)
271 270 self.opener = self.vfs
272 271 self.baseui = baseui
273 272 self.ui = baseui.copy()
274 273 self.ui.copy = baseui.copy # prevent copying repo configuration
275 274 # A list of callbacks to shape the phase if no data were found.
276 275 # Callbacks are in the form: func(repo, roots) --> processed root.
277 276 # This list is to be filled by extensions during repo setup
278 277 self._phasedefaults = []
279 278 try:
280 279 self.ui.readconfig(self.join("hgrc"), self.root)
281 280 self._loadextensions()
282 281 except IOError:
283 282 pass
284 283
285 284 if self.featuresetupfuncs:
286 285 self.supported = set(self._basesupported) # use private copy
287 286 extmods = set(m.__name__ for n, m
288 287 in extensions.extensions(self.ui))
289 288 for setupfunc in self.featuresetupfuncs:
290 289 if setupfunc.__module__ in extmods:
291 290 setupfunc(self.ui, self.supported)
292 291 else:
293 292 self.supported = self._basesupported
294 293 color.setup(self.ui)
295 294
296 295 # Add compression engines.
297 296 for name in util.compengines:
298 297 engine = util.compengines[name]
299 298 if engine.revlogheader():
300 299 self.supported.add('exp-compression-%s' % name)
301 300
302 301 if not self.vfs.isdir():
303 302 if create:
304 303 self.requirements = newreporequirements(self)
305 304
306 305 if not self.wvfs.exists():
307 306 self.wvfs.makedirs()
308 307 self.vfs.makedir(notindexed=True)
309 308
310 309 if 'store' in self.requirements:
311 310 self.vfs.mkdir("store")
312 311
313 312 # create an invalid changelog
314 313 self.vfs.append(
315 314 "00changelog.i",
316 315 '\0\0\0\2' # represents revlogv2
317 316 ' dummy changelog to prevent using the old repo layout'
318 317 )
319 318 else:
320 319 raise error.RepoError(_("repository %s not found") % path)
321 320 elif create:
322 321 raise error.RepoError(_("repository %s already exists") % path)
323 322 else:
324 323 try:
325 324 self.requirements = scmutil.readrequires(
326 325 self.vfs, self.supported)
327 326 except IOError as inst:
328 327 if inst.errno != errno.ENOENT:
329 328 raise
330 329
331 330 self.sharedpath = self.path
332 331 try:
333 332 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
334 333 if 'relshared' in self.requirements:
335 334 sharedpath = self.vfs.join(sharedpath)
336 335 vfs = scmutil.vfs(sharedpath, realpath=True)
337 336
338 337 s = vfs.base
339 338 if not vfs.exists():
340 339 raise error.RepoError(
341 340 _('.hg/sharedpath points to nonexistent directory %s') % s)
342 341 self.sharedpath = s
343 342 except IOError as inst:
344 343 if inst.errno != errno.ENOENT:
345 344 raise
346 345
347 346 self.store = store.store(
348 347 self.requirements, self.sharedpath, scmutil.vfs)
349 348 self.spath = self.store.path
350 349 self.svfs = self.store.vfs
351 350 self.sjoin = self.store.join
352 351 self.vfs.createmode = self.store.createmode
353 352 self._applyopenerreqs()
354 353 if create:
355 354 self._writerequirements()
356 355
357 356 self._dirstatevalidatewarned = False
358 357
359 358 self._branchcaches = {}
360 359 self._revbranchcache = None
361 360 self.filterpats = {}
362 361 self._datafilters = {}
363 362 self._transref = self._lockref = self._wlockref = None
364 363
365 364 # A cache for various files under .hg/ that tracks file changes,
366 365 # (used by the filecache decorator)
367 366 #
368 367 # Maps a property name to its util.filecacheentry
369 368 self._filecache = {}
370 369
371 370 # holds sets of revisions to be filtered
372 371 # should be cleared when something might have changed the filter value:
373 372 # - new changesets,
374 373 # - phase change,
375 374 # - new obsolescence marker,
376 375 # - working directory parent change,
377 376 # - bookmark changes
378 377 self.filteredrevcache = {}
379 378
380 379 # generic mapping between names and nodes
381 380 self.names = namespaces.namespaces()
382 381
382 @property
383 def wopener(self):
384 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
385 return self.wvfs
386
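# NOTE (editorial, not part of this changeset): a minimal sketch of what the
# deprecation shim above means for callers -- the old attribute keeps
# working, but every access now warns and points at the replacement:
#
#     repo.wopener.read('foo')   # works, but warns: use 'repo.wvfs' (4.2)
#     repo.wvfs.read('foo')      # preferred spelling; the same vfs object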
383 387 def close(self):
384 388 self._writecaches()
385 389
386 390 def _loadextensions(self):
387 391 extensions.loadall(self.ui)
388 392
389 393 def _writecaches(self):
390 394 if self._revbranchcache:
391 395 self._revbranchcache.write()
392 396
393 397 def _restrictcapabilities(self, caps):
394 398 if self.ui.configbool('experimental', 'bundle2-advertise', True):
395 399 caps = set(caps)
396 400 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
397 401 caps.add('bundle2=' + urlreq.quote(capsblob))
398 402 return caps
399 403
400 404 def _applyopenerreqs(self):
401 405 self.svfs.options = dict((r, 1) for r in self.requirements
402 406 if r in self.openerreqs)
403 407 # experimental config: format.chunkcachesize
404 408 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
405 409 if chunkcachesize is not None:
406 410 self.svfs.options['chunkcachesize'] = chunkcachesize
407 411 # experimental config: format.maxchainlen
408 412 maxchainlen = self.ui.configint('format', 'maxchainlen')
409 413 if maxchainlen is not None:
410 414 self.svfs.options['maxchainlen'] = maxchainlen
411 415 # experimental config: format.manifestcachesize
412 416 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
413 417 if manifestcachesize is not None:
414 418 self.svfs.options['manifestcachesize'] = manifestcachesize
415 419 # experimental config: format.aggressivemergedeltas
416 420 aggressivemergedeltas = self.ui.configbool('format',
417 421 'aggressivemergedeltas', False)
418 422 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
419 423 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
420 424
421 425 for r in self.requirements:
422 426 if r.startswith('exp-compression-'):
423 427 self.svfs.options['compengine'] = r[len('exp-compression-'):]
424 428
425 429 def _writerequirements(self):
426 430 scmutil.writerequires(self.vfs, self.requirements)
427 431
428 432 def _checknested(self, path):
429 433 """Determine if path is a legal nested repository."""
430 434 if not path.startswith(self.root):
431 435 return False
432 436 subpath = path[len(self.root) + 1:]
433 437 normsubpath = util.pconvert(subpath)
434 438
435 439 # XXX: Checking against the current working copy is wrong in
436 440 # the sense that it can reject things like
437 441 #
438 442 # $ hg cat -r 10 sub/x.txt
439 443 #
440 444 # if sub/ is no longer a subrepository in the working copy
441 445 # parent revision.
442 446 #
443 447 # However, it can of course also allow things that would have
444 448 # been rejected before, such as the above cat command if sub/
445 449 # is a subrepository now, but was a normal directory before.
446 450 # The old path auditor would have rejected by mistake since it
447 451 # panics when it sees sub/.hg/.
448 452 #
449 453 # All in all, checking against the working copy seems sensible
450 454 # since we want to prevent access to nested repositories on
451 455 # the filesystem *now*.
452 456 ctx = self[None]
453 457 parts = util.splitpath(subpath)
454 458 while parts:
455 459 prefix = '/'.join(parts)
456 460 if prefix in ctx.substate:
457 461 if prefix == normsubpath:
458 462 return True
459 463 else:
460 464 sub = ctx.sub(prefix)
461 465 return sub.checknested(subpath[len(prefix) + 1:])
462 466 else:
463 467 parts.pop()
464 468 return False
465 469
466 470 def peer(self):
467 471 return localpeer(self) # not cached to avoid reference cycle
468 472
469 473 def unfiltered(self):
470 474 """Return unfiltered version of the repository
471 475
472 476 Intended to be overwritten by filtered repo."""
473 477 return self
474 478
475 479 def filtered(self, name):
476 480 """Return a filtered version of a repository"""
477 481 # build a new class with the mixin and the current class
478 482 # (possibly subclass of the repo)
479 483 class proxycls(repoview.repoview, self.unfiltered().__class__):
480 484 pass
481 485 return proxycls(self, name)
482 486
483 487 @repofilecache('bookmarks', 'bookmarks.current')
484 488 def _bookmarks(self):
485 489 return bookmarks.bmstore(self)
486 490
487 491 @property
488 492 def _activebookmark(self):
489 493 return self._bookmarks.active
490 494
491 495 def bookmarkheads(self, bookmark):
492 496 name = bookmark.split('@', 1)[0]
493 497 heads = []
494 498 for mark, n in self._bookmarks.iteritems():
495 499 if mark.split('@', 1)[0] == name:
496 500 heads.append(n)
497 501 return heads
498 502
499 503 # _phaserevs and _phasesets depend on changelog. what we need is to
500 504 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
501 505 # can't be easily expressed in filecache mechanism.
502 506 @storecache('phaseroots', '00changelog.i')
503 507 def _phasecache(self):
504 508 return phases.phasecache(self, self._phasedefaults)
505 509
506 510 @storecache('obsstore')
507 511 def obsstore(self):
508 512 # read default format for new obsstore.
509 513 # developer config: format.obsstore-version
510 514 defaultformat = self.ui.configint('format', 'obsstore-version', None)
511 515 # rely on obsstore class default when possible.
512 516 kwargs = {}
513 517 if defaultformat is not None:
514 518 kwargs['defaultformat'] = defaultformat
515 519 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
516 520 store = obsolete.obsstore(self.svfs, readonly=readonly,
517 521 **kwargs)
518 522 if store and readonly:
519 523 self.ui.warn(
520 524 _('obsolete feature not enabled but %i markers found!\n')
521 525 % len(list(store)))
522 526 return store
523 527
524 528 @storecache('00changelog.i')
525 529 def changelog(self):
526 530 c = changelog.changelog(self.svfs)
527 531 if txnutil.mayhavepending(self.root):
528 532 c.readpending('00changelog.i.a')
529 533 return c
530 534
531 535 def _constructmanifest(self):
532 536 # This is a temporary function while we migrate from manifest to
533 537 # manifestlog. It allows bundlerepo and unionrepo to intercept the
534 538 # manifest creation.
535 539 return manifest.manifestrevlog(self.svfs)
536 540
537 541 @storecache('00manifest.i')
538 542 def manifestlog(self):
539 543 return manifest.manifestlog(self.svfs, self)
540 544
541 545 @repofilecache('dirstate')
542 546 def dirstate(self):
543 547 return dirstate.dirstate(self.vfs, self.ui, self.root,
544 548 self._dirstatevalidate)
545 549
546 550 def _dirstatevalidate(self, node):
547 551 try:
548 552 self.changelog.rev(node)
549 553 return node
550 554 except error.LookupError:
551 555 if not self._dirstatevalidatewarned:
552 556 self._dirstatevalidatewarned = True
553 557 self.ui.warn(_("warning: ignoring unknown"
554 558 " working parent %s!\n") % short(node))
555 559 return nullid
556 560
557 561 def __getitem__(self, changeid):
558 562 if changeid is None or changeid == wdirrev:
559 563 return context.workingctx(self)
560 564 if isinstance(changeid, slice):
561 565 return [context.changectx(self, i)
562 566 for i in xrange(*changeid.indices(len(self)))
563 567 if i not in self.changelog.filteredrevs]
564 568 return context.changectx(self, changeid)
565 569
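# NOTE (editorial sketch, assuming 'repo' is a localrepository): the usual
# lookup spellings this method supports:
#
#     repo[None]    # workingctx for the working directory
#     repo['tip']   # changectx from a tag/branch/node/rev identifier
#     repo[0:5]     # list of changectx, skipping filtered revisions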
566 570 def __contains__(self, changeid):
567 571 try:
568 572 self[changeid]
569 573 return True
570 574 except error.RepoLookupError:
571 575 return False
572 576
573 577 def __nonzero__(self):
574 578 return True
575 579
576 580 def __len__(self):
577 581 return len(self.changelog)
578 582
579 583 def __iter__(self):
580 584 return iter(self.changelog)
581 585
582 586 def revs(self, expr, *args):
583 587 '''Find revisions matching a revset.
584 588
585 589 The revset is specified as a string ``expr`` that may contain
586 590 %-formatting to escape certain types. See ``revsetlang.formatspec``.
587 591
588 592 Revset aliases from the configuration are not expanded. To expand
589 593 user aliases, consider calling ``scmutil.revrange()`` or
590 594 ``repo.anyrevs([expr], user=True)``.
591 595
592 596 Returns a revset.abstractsmartset, which is a list-like interface
593 597 that contains integer revisions.
594 598 '''
595 599 expr = revsetlang.formatspec(expr, *args)
596 600 m = revset.match(None, expr)
597 601 return m(self)
598 602
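# NOTE (editorial sketch): %-formatting keeps revset arguments properly
# escaped, per the docstring above, e.g.:
#
#     repo.revs('ancestors(%d)', 42)        # %d: a revision number
#     repo.revs('branch(%s)', 'default')    # %s: an escaped, quoted string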
599 603 def set(self, expr, *args):
600 604 '''Find revisions matching a revset and emit changectx instances.
601 605
602 606 This is a convenience wrapper around ``revs()`` that iterates the
603 607 result and is a generator of changectx instances.
604 608
605 609 Revset aliases from the configuration are not expanded. To expand
606 610 user aliases, consider calling ``scmutil.revrange()``.
607 611 '''
608 612 for r in self.revs(expr, *args):
609 613 yield self[r]
610 614
611 615 def anyrevs(self, specs, user=False):
612 616 '''Find revisions matching one of the given revsets.
613 617
614 618 Revset aliases from the configuration are not expanded by default. To
615 619 expand user aliases, specify ``user=True``.
616 620 '''
617 621 if user:
618 622 m = revset.matchany(self.ui, specs, repo=self)
619 623 else:
620 624 m = revset.matchany(None, specs)
621 625 return m(self)
622 626
623 627 def url(self):
624 628 return 'file:' + self.root
625 629
626 630 def hook(self, name, throw=False, **args):
627 631 """Call a hook, passing this repo instance.
628 632
629 633 This a convenience method to aid invoking hooks. Extensions likely
630 634 won't call this unless they have registered a custom hook or are
631 635 replacing code that is expected to call a hook.
632 636 """
633 637 return hook.hook(self.ui, self, name, throw, **args)
634 638
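# NOTE (editorial): this module itself goes through the helper above; for
# instance, transaction() below runs:
#
#     self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)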
635 639 @unfilteredmethod
636 640 def _tag(self, names, node, message, local, user, date, extra=None,
637 641 editor=False):
638 642 if isinstance(names, str):
639 643 names = (names,)
640 644
641 645 branches = self.branchmap()
642 646 for name in names:
643 647 self.hook('pretag', throw=True, node=hex(node), tag=name,
644 648 local=local)
645 649 if name in branches:
646 650 self.ui.warn(_("warning: tag %s conflicts with existing"
647 651 " branch name\n") % name)
648 652
649 653 def writetags(fp, names, munge, prevtags):
650 654 fp.seek(0, 2)
651 655 if prevtags and prevtags[-1] != '\n':
652 656 fp.write('\n')
653 657 for name in names:
654 658 if munge:
655 659 m = munge(name)
656 660 else:
657 661 m = name
658 662
659 663 if (self._tagscache.tagtypes and
660 664 name in self._tagscache.tagtypes):
661 665 old = self.tags().get(name, nullid)
662 666 fp.write('%s %s\n' % (hex(old), m))
663 667 fp.write('%s %s\n' % (hex(node), m))
664 668 fp.close()
665 669
666 670 prevtags = ''
667 671 if local:
668 672 try:
669 673 fp = self.vfs('localtags', 'r+')
670 674 except IOError:
671 675 fp = self.vfs('localtags', 'a')
672 676 else:
673 677 prevtags = fp.read()
674 678
675 679 # local tags are stored in the current charset
676 680 writetags(fp, names, None, prevtags)
677 681 for name in names:
678 682 self.hook('tag', node=hex(node), tag=name, local=local)
679 683 return
680 684
681 685 try:
682 686 fp = self.wfile('.hgtags', 'rb+')
683 687 except IOError as e:
684 688 if e.errno != errno.ENOENT:
685 689 raise
686 690 fp = self.wfile('.hgtags', 'ab')
687 691 else:
688 692 prevtags = fp.read()
689 693
690 694 # committed tags are stored in UTF-8
691 695 writetags(fp, names, encoding.fromlocal, prevtags)
692 696
693 697 fp.close()
694 698
695 699 self.invalidatecaches()
696 700
697 701 if '.hgtags' not in self.dirstate:
698 702 self[None].add(['.hgtags'])
699 703
700 704 m = matchmod.exact(self.root, '', ['.hgtags'])
701 705 tagnode = self.commit(message, user, date, extra=extra, match=m,
702 706 editor=editor)
703 707
704 708 for name in names:
705 709 self.hook('tag', node=hex(node), tag=name, local=local)
706 710
707 711 return tagnode
708 712
709 713 def tag(self, names, node, message, local, user, date, editor=False):
710 714 '''tag a revision with one or more symbolic names.
711 715
712 716 names is a list of strings or, when adding a single tag, names may be a
713 717 string.
714 718
715 719 if local is True, the tags are stored in a per-repository file.
716 720 otherwise, they are stored in the .hgtags file, and a new
717 721 changeset is committed with the change.
718 722
719 723 keyword arguments:
720 724
721 725 local: whether to store tags in non-version-controlled file
722 726 (default False)
723 727
724 728 message: commit message to use if committing
725 729
726 730 user: name of user to use if committing
727 731
728 732 date: date tuple to use if committing'''
729 733
730 734 if not local:
731 735 m = matchmod.exact(self.root, '', ['.hgtags'])
732 736 if any(self.status(match=m, unknown=True, ignored=True)):
733 737 raise error.Abort(_('working copy of .hgtags is changed'),
734 738 hint=_('please commit .hgtags manually'))
735 739
736 740 self.tags() # instantiate the cache
737 741 self._tag(names, node, message, local, user, date, editor=editor)
738 742
739 743 @filteredpropertycache
740 744 def _tagscache(self):
741 745 '''Returns a tagscache object that contains various tag-related
742 746 caches.'''
743 747
744 748 # This simplifies its cache management by having one decorated
745 749 # function (this one) and the rest simply fetch things from it.
746 750 class tagscache(object):
747 751 def __init__(self):
748 752 # These two define the set of tags for this repository. tags
749 753 # maps tag name to node; tagtypes maps tag name to 'global' or
750 754 # 'local'. (Global tags are defined by .hgtags across all
751 755 # heads, and local tags are defined in .hg/localtags.)
752 756 # They constitute the in-memory cache of tags.
753 757 self.tags = self.tagtypes = None
754 758
755 759 self.nodetagscache = self.tagslist = None
756 760
757 761 cache = tagscache()
758 762 cache.tags, cache.tagtypes = self._findtags()
759 763
760 764 return cache
761 765
762 766 def tags(self):
763 767 '''return a mapping of tag to node'''
764 768 t = {}
765 769 if self.changelog.filteredrevs:
766 770 tags, tt = self._findtags()
767 771 else:
768 772 tags = self._tagscache.tags
769 773 for k, v in tags.iteritems():
770 774 try:
771 775 # ignore tags to unknown nodes
772 776 self.changelog.rev(v)
773 777 t[k] = v
774 778 except (error.LookupError, ValueError):
775 779 pass
776 780 return t
777 781
778 782 def _findtags(self):
779 783 '''Do the hard work of finding tags. Return a pair of dicts
780 784 (tags, tagtypes) where tags maps tag name to node, and tagtypes
781 785 maps tag name to a string like \'global\' or \'local\'.
782 786 Subclasses or extensions are free to add their own tags, but
783 787 should be aware that the returned dicts will be retained for the
784 788 duration of the localrepo object.'''
785 789
786 790 # XXX what tagtype should subclasses/extensions use? Currently
787 791 # mq and bookmarks add tags, but do not set the tagtype at all.
788 792 # Should each extension invent its own tag type? Should there
789 793 # be one tagtype for all such "virtual" tags? Or is the status
790 794 # quo fine?
791 795
792 796 alltags = {} # map tag name to (node, hist)
793 797 tagtypes = {}
794 798
795 799 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
796 800 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
797 801
798 802 # Build the return dicts. Have to re-encode tag names because
799 803 # the tags module always uses UTF-8 (in order not to lose info
800 804 # writing to the cache), but the rest of Mercurial wants them in
801 805 # local encoding.
802 806 tags = {}
803 807 for (name, (node, hist)) in alltags.iteritems():
804 808 if node != nullid:
805 809 tags[encoding.tolocal(name)] = node
806 810 tags['tip'] = self.changelog.tip()
807 811 tagtypes = dict([(encoding.tolocal(name), value)
808 812 for (name, value) in tagtypes.iteritems()])
809 813 return (tags, tagtypes)
810 814
811 815 def tagtype(self, tagname):
812 816 '''
813 817 return the type of the given tag. result can be:
814 818
815 819 'local' : a local tag
816 820 'global' : a global tag
817 821 None : tag does not exist
818 822 '''
819 823
820 824 return self._tagscache.tagtypes.get(tagname)
821 825
822 826 def tagslist(self):
823 827 '''return a list of tags ordered by revision'''
824 828 if not self._tagscache.tagslist:
825 829 l = []
826 830 for t, n in self.tags().iteritems():
827 831 l.append((self.changelog.rev(n), t, n))
828 832 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
829 833
830 834 return self._tagscache.tagslist
831 835
832 836 def nodetags(self, node):
833 837 '''return the tags associated with a node'''
834 838 if not self._tagscache.nodetagscache:
835 839 nodetagscache = {}
836 840 for t, n in self._tagscache.tags.iteritems():
837 841 nodetagscache.setdefault(n, []).append(t)
838 842 for tags in nodetagscache.itervalues():
839 843 tags.sort()
840 844 self._tagscache.nodetagscache = nodetagscache
841 845 return self._tagscache.nodetagscache.get(node, [])
842 846
843 847 def nodebookmarks(self, node):
844 848 """return the list of bookmarks pointing to the specified node"""
845 849 marks = []
846 850 for bookmark, n in self._bookmarks.iteritems():
847 851 if n == node:
848 852 marks.append(bookmark)
849 853 return sorted(marks)
850 854
851 855 def branchmap(self):
852 856 '''returns a dictionary {branch: [branchheads]} with branchheads
853 857 ordered by increasing revision number'''
854 858 branchmap.updatecache(self)
855 859 return self._branchcaches[self.filtername]
856 860
857 861 @unfilteredmethod
858 862 def revbranchcache(self):
859 863 if not self._revbranchcache:
860 864 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
861 865 return self._revbranchcache
862 866
863 867 def branchtip(self, branch, ignoremissing=False):
864 868 '''return the tip node for a given branch
865 869
866 870 If ignoremissing is True, then this method will not raise an error.
867 871 This is helpful for callers that only expect None for a missing branch
868 872 (e.g. namespace).
869 873
870 874 '''
871 875 try:
872 876 return self.branchmap().branchtip(branch)
873 877 except KeyError:
874 878 if not ignoremissing:
875 879 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
876 880 else:
877 881 pass
878 882
879 883 def lookup(self, key):
880 884 return self[key].node()
881 885
882 886 def lookupbranch(self, key, remote=None):
883 887 repo = remote or self
884 888 if key in repo.branchmap():
885 889 return key
886 890
887 891 repo = (remote and remote.local()) and remote or self
888 892 return repo[key].branch()
889 893
890 894 def known(self, nodes):
891 895 cl = self.changelog
892 896 nm = cl.nodemap
893 897 filtered = cl.filteredrevs
894 898 result = []
895 899 for n in nodes:
896 900 r = nm.get(n)
897 901 resp = not (r is None or r in filtered)
898 902 result.append(resp)
899 903 return result
900 904
901 905 def local(self):
902 906 return self
903 907
904 908 def publishing(self):
905 909 # it's safe (and desirable) to trust the publish flag unconditionally
906 910 # so that we don't finalize changes shared between users via ssh or nfs
907 911 return self.ui.configbool('phases', 'publish', True, untrusted=True)
908 912
909 913 def cancopy(self):
910 914 # so statichttprepo's override of local() works
911 915 if not self.local():
912 916 return False
913 917 if not self.publishing():
914 918 return True
915 919 # if publishing we can't copy if there is filtered content
916 920 return not self.filtered('visible').changelog.filteredrevs
917 921
918 922 def shared(self):
919 923 '''the type of shared repository (None if not shared)'''
920 924 if self.sharedpath != self.path:
921 925 return 'store'
922 926 return None
923 927
924 928 def join(self, f, *insidef):
925 929 return self.vfs.join(os.path.join(f, *insidef))
926 930
927 931 def wjoin(self, f, *insidef):
928 932 return self.vfs.reljoin(self.root, f, *insidef)
929 933
930 934 def file(self, f):
931 935 if f[0] == '/':
932 936 f = f[1:]
933 937 return filelog.filelog(self.svfs, f)
934 938
935 939 def changectx(self, changeid):
936 940 return self[changeid]
937 941
938 942 def setparents(self, p1, p2=nullid):
939 943 self.dirstate.beginparentchange()
940 944 copies = self.dirstate.setparents(p1, p2)
941 945 pctx = self[p1]
942 946 if copies:
943 947 # Adjust copy records, the dirstate cannot do it, it
944 948 # requires access to parents manifests. Preserve them
945 949 # only for entries added to first parent.
946 950 for f in copies:
947 951 if f not in pctx and copies[f] in pctx:
948 952 self.dirstate.copy(copies[f], f)
949 953 if p2 == nullid:
950 954 for f, s in sorted(self.dirstate.copies().items()):
951 955 if f not in pctx and s not in pctx:
952 956 self.dirstate.copy(None, f)
953 957 self.dirstate.endparentchange()
954 958
955 959 def filectx(self, path, changeid=None, fileid=None):
956 960 """changeid can be a changeset revision, node, or tag.
957 961 fileid can be a file revision or node."""
958 962 return context.filectx(self, path, changeid, fileid)
959 963
960 964 def getcwd(self):
961 965 return self.dirstate.getcwd()
962 966
963 967 def pathto(self, f, cwd=None):
964 968 return self.dirstate.pathto(f, cwd)
965 969
966 970 def wfile(self, f, mode='r'):
967 971 return self.wvfs(f, mode)
968 972
969 973 def _link(self, f):
970 974 return self.wvfs.islink(f)
971 975
972 976 def _loadfilter(self, filter):
973 977 if filter not in self.filterpats:
974 978 l = []
975 979 for pat, cmd in self.ui.configitems(filter):
976 980 if cmd == '!':
977 981 continue
978 982 mf = matchmod.match(self.root, '', [pat])
979 983 fn = None
980 984 params = cmd
981 985 for name, filterfn in self._datafilters.iteritems():
982 986 if cmd.startswith(name):
983 987 fn = filterfn
984 988 params = cmd[len(name):].lstrip()
985 989 break
986 990 if not fn:
987 991 fn = lambda s, c, **kwargs: util.filter(s, c)
988 992 # Wrap old filters not supporting keyword arguments
989 993 if not inspect.getargspec(fn)[2]:
990 994 oldfn = fn
991 995 fn = lambda s, c, **kwargs: oldfn(s, c)
992 996 l.append((mf, fn, params))
993 997 self.filterpats[filter] = l
994 998 return self.filterpats[filter]
995 999
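# NOTE (editorial, assumed hgrc shape): the pattern/command pairs read above
# come from configuration sections such as [encode]/[decode]; the hgrc
# documentation gives examples along these lines:
#
#     [encode]
#     *.gz = pipe: gunzip
#
#     [decode]
#     *.gz = gzip
#
# A command of '!' disables filtering for that pattern, and a command whose
# prefix matches a name registered via adddatafilter() is dispatched to that
# Python filter instead of being run through util.filter as a shell command.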
996 1000 def _filter(self, filterpats, filename, data):
997 1001 for mf, fn, cmd in filterpats:
998 1002 if mf(filename):
999 1003 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1000 1004 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1001 1005 break
1002 1006
1003 1007 return data
1004 1008
1005 1009 @unfilteredpropertycache
1006 1010 def _encodefilterpats(self):
1007 1011 return self._loadfilter('encode')
1008 1012
1009 1013 @unfilteredpropertycache
1010 1014 def _decodefilterpats(self):
1011 1015 return self._loadfilter('decode')
1012 1016
1013 1017 def adddatafilter(self, name, filter):
1014 1018 self._datafilters[name] = filter
1015 1019
1016 1020 def wread(self, filename):
1017 1021 if self._link(filename):
1018 1022 data = self.wvfs.readlink(filename)
1019 1023 else:
1020 1024 data = self.wvfs.read(filename)
1021 1025 return self._filter(self._encodefilterpats, filename, data)
1022 1026
1023 1027 def wwrite(self, filename, data, flags, backgroundclose=False):
1024 1028 """write ``data`` into ``filename`` in the working directory
1025 1029
1026 1030 This returns the length of the written (possibly decoded) data.
1027 1031 """
1028 1032 data = self._filter(self._decodefilterpats, filename, data)
1029 1033 if 'l' in flags:
1030 1034 self.wvfs.symlink(data, filename)
1031 1035 else:
1032 1036 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1033 1037 if 'x' in flags:
1034 1038 self.wvfs.setflags(filename, False, True)
1035 1039 return len(data)
1036 1040
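# NOTE (editorial): 'flags' above is the manifest flag string -- 'l' writes
# the data as a symlink target, 'x' marks the file executable; e.g. a
# hypothetical caller:
#
#     repo.wwrite('bin/run.sh', data, 'x')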
1037 1041 def wwritedata(self, filename, data):
1038 1042 return self._filter(self._decodefilterpats, filename, data)
1039 1043
1040 1044 def currenttransaction(self):
1041 1045 """return the current transaction or None if none exists"""
1042 1046 if self._transref:
1043 1047 tr = self._transref()
1044 1048 else:
1045 1049 tr = None
1046 1050
1047 1051 if tr and tr.running():
1048 1052 return tr
1049 1053 return None
1050 1054
1051 1055 def transaction(self, desc, report=None):
1052 1056 if (self.ui.configbool('devel', 'all-warnings')
1053 1057 or self.ui.configbool('devel', 'check-locks')):
1054 1058 if self._currentlock(self._lockref) is None:
1055 1059 raise error.ProgrammingError('transaction requires locking')
1056 1060 tr = self.currenttransaction()
1057 1061 if tr is not None:
1058 1062 return tr.nest()
1059 1063
1060 1064 # abort here if the journal already exists
1061 1065 if self.svfs.exists("journal"):
1062 1066 raise error.RepoError(
1063 1067 _("abandoned transaction found"),
1064 1068 hint=_("run 'hg recover' to clean up transaction"))
1065 1069
1066 1070 idbase = "%.40f#%f" % (random.random(), time.time())
1067 1071 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1068 1072 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1069 1073
1070 1074 self._writejournal(desc)
1071 1075 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1072 1076 if report:
1073 1077 rp = report
1074 1078 else:
1075 1079 rp = self.ui.warn
1076 1080 vfsmap = {'plain': self.vfs} # root of .hg/
1077 1081 # we must avoid cyclic reference between repo and transaction.
1078 1082 reporef = weakref.ref(self)
1079 1083 def validate(tr):
1080 1084 """will run pre-closing hooks"""
1081 1085 reporef().hook('pretxnclose', throw=True,
1082 1086 txnname=desc, **tr.hookargs)
1083 1087 def releasefn(tr, success):
1084 1088 repo = reporef()
1085 1089 if success:
1086 1090 # this should be explicitly invoked here, because
1087 1091 # in-memory changes aren't written out when closing the
1088 1092 # transaction, if tr.addfilegenerator (via
1089 1093 # dirstate.write or so) wasn't invoked while the
1090 1094 # transaction was running
1091 1095 repo.dirstate.write(None)
1092 1096 else:
1093 1097 # discard all changes (including ones already written
1094 1098 # out) in this transaction
1095 1099 repo.dirstate.restorebackup(None, prefix='journal.')
1096 1100
1097 1101 repo.invalidate(clearfilecache=True)
1098 1102
1099 1103 tr = transaction.transaction(rp, self.svfs, vfsmap,
1100 1104 "journal",
1101 1105 "undo",
1102 1106 aftertrans(renames),
1103 1107 self.store.createmode,
1104 1108 validator=validate,
1105 1109 releasefn=releasefn)
1106 1110
1107 1111 tr.hookargs['txnid'] = txnid
1108 1112 # note: writing the fncache only during finalize means the file is
1109 1113 # outdated when running hooks. As fncache is used for streaming clone,
1110 1114 # this is not expected to break anything that happens during the hooks.
1111 1115 tr.addfinalize('flush-fncache', self.store.write)
1112 1116 def txnclosehook(tr2):
1113 1117 """To be run if transaction is successful, will schedule a hook run
1114 1118 """
1115 1119 # Don't reference tr2 in hook() so we don't hold a reference.
1116 1120 # This reduces memory consumption when there are multiple
1117 1121 # transactions per lock. This can likely go away if issue5045
1118 1122 # fixes the function accumulation.
1119 1123 hookargs = tr2.hookargs
1120 1124
1121 1125 def hook():
1122 1126 reporef().hook('txnclose', throw=False, txnname=desc,
1123 1127 **hookargs)
1124 1128 reporef()._afterlock(hook)
1125 1129 tr.addfinalize('txnclose-hook', txnclosehook)
1126 1130 def txnaborthook(tr2):
1127 1131 """To be run if transaction is aborted
1128 1132 """
1129 1133 reporef().hook('txnabort', throw=False, txnname=desc,
1130 1134 **tr2.hookargs)
1131 1135 tr.addabort('txnabort-hook', txnaborthook)
1132 1136 # avoid eager cache invalidation. in-memory data should be identical
1133 1137 # to stored data if transaction has no error.
1134 1138 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1135 1139 self._transref = weakref.ref(tr)
1136 1140 return tr
1137 1141
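# NOTE (editorial sketch): the expected calling pattern, mirroring this
# module's own commit()/rollback() below -- take wlock before lock, open the
# transaction under the store lock, close on success, release in reverse:
#
#     wlock = lock = tr = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()     # transaction() requires the store lock
#         tr = repo.transaction('example')
#         ...                    # mutate the store
#         tr.close()
#     finally:
#         lockmod.release(tr, lock, wlock)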
1138 1142 def _journalfiles(self):
1139 1143 return ((self.svfs, 'journal'),
1140 1144 (self.vfs, 'journal.dirstate'),
1141 1145 (self.vfs, 'journal.branch'),
1142 1146 (self.vfs, 'journal.desc'),
1143 1147 (self.vfs, 'journal.bookmarks'),
1144 1148 (self.svfs, 'journal.phaseroots'))
1145 1149
1146 1150 def undofiles(self):
1147 1151 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1148 1152
1149 1153 def _writejournal(self, desc):
1150 1154 self.dirstate.savebackup(None, prefix='journal.')
1151 1155 self.vfs.write("journal.branch",
1152 1156 encoding.fromlocal(self.dirstate.branch()))
1153 1157 self.vfs.write("journal.desc",
1154 1158 "%d\n%s\n" % (len(self), desc))
1155 1159 self.vfs.write("journal.bookmarks",
1156 1160 self.vfs.tryread("bookmarks"))
1157 1161 self.svfs.write("journal.phaseroots",
1158 1162 self.svfs.tryread("phaseroots"))
1159 1163
1160 1164 def recover(self):
1161 1165 with self.lock():
1162 1166 if self.svfs.exists("journal"):
1163 1167 self.ui.status(_("rolling back interrupted transaction\n"))
1164 1168 vfsmap = {'': self.svfs,
1165 1169 'plain': self.vfs,}
1166 1170 transaction.rollback(self.svfs, vfsmap, "journal",
1167 1171 self.ui.warn)
1168 1172 self.invalidate()
1169 1173 return True
1170 1174 else:
1171 1175 self.ui.warn(_("no interrupted transaction available\n"))
1172 1176 return False
1173 1177
1174 1178 def rollback(self, dryrun=False, force=False):
1175 1179 wlock = lock = dsguard = None
1176 1180 try:
1177 1181 wlock = self.wlock()
1178 1182 lock = self.lock()
1179 1183 if self.svfs.exists("undo"):
1180 1184 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1181 1185
1182 1186 return self._rollback(dryrun, force, dsguard)
1183 1187 else:
1184 1188 self.ui.warn(_("no rollback information available\n"))
1185 1189 return 1
1186 1190 finally:
1187 1191 release(dsguard, lock, wlock)
1188 1192
1189 1193 @unfilteredmethod # Until we get smarter cache management
1190 1194 def _rollback(self, dryrun, force, dsguard):
1191 1195 ui = self.ui
1192 1196 try:
1193 1197 args = self.vfs.read('undo.desc').splitlines()
1194 1198 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1195 1199 if len(args) >= 3:
1196 1200 detail = args[2]
1197 1201 oldtip = oldlen - 1
1198 1202
1199 1203 if detail and ui.verbose:
1200 1204 msg = (_('repository tip rolled back to revision %s'
1201 1205 ' (undo %s: %s)\n')
1202 1206 % (oldtip, desc, detail))
1203 1207 else:
1204 1208 msg = (_('repository tip rolled back to revision %s'
1205 1209 ' (undo %s)\n')
1206 1210 % (oldtip, desc))
1207 1211 except IOError:
1208 1212 msg = _('rolling back unknown transaction\n')
1209 1213 desc = None
1210 1214
1211 1215 if not force and self['.'] != self['tip'] and desc == 'commit':
1212 1216 raise error.Abort(
1213 1217 _('rollback of last commit while not checked out '
1214 1218 'may lose data'), hint=_('use -f to force'))
1215 1219
1216 1220 ui.status(msg)
1217 1221 if dryrun:
1218 1222 return 0
1219 1223
1220 1224 parents = self.dirstate.parents()
1221 1225 self.destroying()
1222 1226 vfsmap = {'plain': self.vfs, '': self.svfs}
1223 1227 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1224 1228 if self.vfs.exists('undo.bookmarks'):
1225 1229 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1226 1230 if self.svfs.exists('undo.phaseroots'):
1227 1231 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1228 1232 self.invalidate()
1229 1233
1230 1234 parentgone = (parents[0] not in self.changelog.nodemap or
1231 1235 parents[1] not in self.changelog.nodemap)
1232 1236 if parentgone:
1233 1237 # prevent dirstateguard from overwriting already restored one
1234 1238 dsguard.close()
1235 1239
1236 1240 self.dirstate.restorebackup(None, prefix='undo.')
1237 1241 try:
1238 1242 branch = self.vfs.read('undo.branch')
1239 1243 self.dirstate.setbranch(encoding.tolocal(branch))
1240 1244 except IOError:
1241 1245 ui.warn(_('named branch could not be reset: '
1242 1246 'current branch is still \'%s\'\n')
1243 1247 % self.dirstate.branch())
1244 1248
1245 1249 parents = tuple([p.rev() for p in self[None].parents()])
1246 1250 if len(parents) > 1:
1247 1251 ui.status(_('working directory now based on '
1248 1252 'revisions %d and %d\n') % parents)
1249 1253 else:
1250 1254 ui.status(_('working directory now based on '
1251 1255 'revision %d\n') % parents)
1252 1256 mergemod.mergestate.clean(self, self['.'].node())
1253 1257
1254 1258 # TODO: if we know which new heads may result from this rollback, pass
1255 1259 # them to destroy(), which will prevent the branchhead cache from being
1256 1260 # invalidated.
1257 1261 self.destroyed()
1258 1262 return 0
1259 1263
1260 1264 def invalidatecaches(self):
1261 1265
1262 1266 if '_tagscache' in vars(self):
1263 1267 # can't use delattr on proxy
1264 1268 del self.__dict__['_tagscache']
1265 1269
1266 1270 self.unfiltered()._branchcaches.clear()
1267 1271 self.invalidatevolatilesets()
1268 1272
1269 1273 def invalidatevolatilesets(self):
1270 1274 self.filteredrevcache.clear()
1271 1275 obsolete.clearobscaches(self)
1272 1276
1273 1277 def invalidatedirstate(self):
1274 1278 '''Invalidates the dirstate, causing the next call to dirstate
1275 1279 to check if it was modified since the last time it was read,
1276 1280 rereading it if it has.
1277 1281
1278 1282 This is different from dirstate.invalidate() in that it doesn't
1279 1283 always reread the dirstate. Use dirstate.invalidate() if you want
1280 1284 to explicitly read the dirstate again (i.e. restore it to a
1281 1285 previously known good state).'''
1282 1286 if hasunfilteredcache(self, 'dirstate'):
1283 1287 for k in self.dirstate._filecache:
1284 1288 try:
1285 1289 delattr(self.dirstate, k)
1286 1290 except AttributeError:
1287 1291 pass
1288 1292 delattr(self.unfiltered(), 'dirstate')
1289 1293
1290 1294 def invalidate(self, clearfilecache=False):
1291 1295 '''Invalidates both store and non-store parts other than dirstate
1292 1296
1293 1297 If a transaction is running, invalidation of store is omitted,
1294 1298 because discarding in-memory changes might cause inconsistency
1295 1299 (e.g. incomplete fncache causes unintentional failure, but
1296 1300 redundant one doesn't).
1297 1301 '''
1298 1302 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1299 1303 for k in self._filecache.keys():
1300 1304 # dirstate is invalidated separately in invalidatedirstate()
1301 1305 if k == 'dirstate':
1302 1306 continue
1303 1307
1304 1308 if clearfilecache:
1305 1309 del self._filecache[k]
1306 1310 try:
1307 1311 delattr(unfiltered, k)
1308 1312 except AttributeError:
1309 1313 pass
1310 1314 self.invalidatecaches()
1311 1315 if not self.currenttransaction():
1312 1316 # TODO: Changing contents of store outside transaction
1313 1317 # causes inconsistency. We should make in-memory store
1314 1318 # changes detectable, and abort if changed.
1315 1319 self.store.invalidatecaches()
1316 1320
1317 1321 def invalidateall(self):
1318 1322 '''Fully invalidates both store and non-store parts, causing the
1319 1323 subsequent operation to reread any outside changes.'''
1320 1324 # extension should hook this to invalidate its caches
1321 1325 self.invalidate()
1322 1326 self.invalidatedirstate()
1323 1327
1324 1328 @unfilteredmethod
1325 1329 def _refreshfilecachestats(self, tr):
1326 1330 """Reload stats of cached files so that they are flagged as valid"""
1327 1331 for k, ce in self._filecache.items():
1328 1332 if k == 'dirstate' or k not in self.__dict__:
1329 1333 continue
1330 1334 ce.refresh()
1331 1335
1332 1336 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1333 1337 inheritchecker=None, parentenvvar=None):
1334 1338 parentlock = None
1335 1339 # the contents of parentenvvar are used by the underlying lock to
1336 1340 # determine whether it can be inherited
1337 1341 if parentenvvar is not None:
1338 1342 parentlock = encoding.environ.get(parentenvvar)
1339 1343 try:
1340 1344 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1341 1345 acquirefn=acquirefn, desc=desc,
1342 1346 inheritchecker=inheritchecker,
1343 1347 parentlock=parentlock)
1344 1348 except error.LockHeld as inst:
1345 1349 if not wait:
1346 1350 raise
1347 1351 # show more details for new-style locks
1348 1352 if ':' in inst.locker:
1349 1353 host, pid = inst.locker.split(":", 1)
1350 1354 self.ui.warn(
1351 1355 _("waiting for lock on %s held by process %r "
1352 1356 "on host %r\n") % (desc, pid, host))
1353 1357 else:
1354 1358 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1355 1359 (desc, inst.locker))
1356 1360 # default to 600 seconds timeout
1357 1361 l = lockmod.lock(vfs, lockname,
1358 1362 int(self.ui.config("ui", "timeout", "600")),
1359 1363 releasefn=releasefn, acquirefn=acquirefn,
1360 1364 desc=desc)
1361 1365 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1362 1366 return l
1363 1367
1364 1368 def _afterlock(self, callback):
1365 1369 """add a callback to be run when the repository is fully unlocked
1366 1370
1367 1371 The callback will be executed when the outermost lock is released
1368 1372 (with wlock being higher level than 'lock')."""
1369 1373 for ref in (self._wlockref, self._lockref):
1370 1374 l = ref and ref()
1371 1375 if l and l.held:
1372 1376 l.postrelease.append(callback)
1373 1377 break
1374 1378 else: # no lock has been found.
1375 1379 callback()
1376 1380
1377 1381 def lock(self, wait=True):
1378 1382 '''Lock the repository store (.hg/store) and return a weak reference
1379 1383 to the lock. Use this before modifying the store (e.g. committing or
1380 1384 stripping). If you are opening a transaction, get a lock as well.
1381 1385
1382 1386 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1383 1387 'wlock' first to avoid a dead-lock hazard.'''
1384 1388 l = self._currentlock(self._lockref)
1385 1389 if l is not None:
1386 1390 l.lock()
1387 1391 return l
1388 1392
1389 1393 l = self._lock(self.svfs, "lock", wait, None,
1390 1394 self.invalidate, _('repository %s') % self.origroot)
1391 1395 self._lockref = weakref.ref(l)
1392 1396 return l
1393 1397
1394 1398 def _wlockchecktransaction(self):
1395 1399 if self.currenttransaction() is not None:
1396 1400 raise error.LockInheritanceContractViolation(
1397 1401 'wlock cannot be inherited in the middle of a transaction')
1398 1402
1399 1403 def wlock(self, wait=True):
1400 1404 '''Lock the non-store parts of the repository (everything under
1401 1405 .hg except .hg/store) and return a weak reference to the lock.
1402 1406
1403 1407 Use this before modifying files in .hg.
1404 1408
1405 1409 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1406 1410 'wlock' first to avoid a dead-lock hazard.'''
1407 1411 l = self._wlockref and self._wlockref()
1408 1412 if l is not None and l.held:
1409 1413 l.lock()
1410 1414 return l
1411 1415
1412 1416 # We do not need to check for non-waiting lock acquisition. Such an
1413 1417 # acquisition would not cause a dead-lock as it would just fail.
1414 1418 if wait and (self.ui.configbool('devel', 'all-warnings')
1415 1419 or self.ui.configbool('devel', 'check-locks')):
1416 1420 if self._currentlock(self._lockref) is not None:
1417 1421 self.ui.develwarn('"wlock" acquired after "lock"')
1418 1422
1419 1423 def unlock():
1420 1424 if self.dirstate.pendingparentchange():
1421 1425 self.dirstate.invalidate()
1422 1426 else:
1423 1427 self.dirstate.write(None)
1424 1428
1425 1429 self._filecache['dirstate'].refresh()
1426 1430
1427 1431 l = self._lock(self.vfs, "wlock", wait, unlock,
1428 1432 self.invalidatedirstate, _('working directory of %s') %
1429 1433 self.origroot,
1430 1434 inheritchecker=self._wlockchecktransaction,
1431 1435 parentenvvar='HG_WLOCK_LOCKER')
1432 1436 self._wlockref = weakref.ref(l)
1433 1437 return l
1434 1438
1435 1439 def _currentlock(self, lockref):
1436 1440 """Returns the lock if it's held, or None if it's not."""
1437 1441 if lockref is None:
1438 1442 return None
1439 1443 l = lockref()
1440 1444 if l is None or not l.held:
1441 1445 return None
1442 1446 return l
1443 1447
1444 1448 def currentwlock(self):
1445 1449 """Returns the wlock if it's held, or None if it's not."""
1446 1450 return self._currentlock(self._wlockref)
1447 1451
1448 1452 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1449 1453 """
1450 1454 commit an individual file as part of a larger transaction
1451 1455 """
1452 1456
1453 1457 fname = fctx.path()
1454 1458 fparent1 = manifest1.get(fname, nullid)
1455 1459 fparent2 = manifest2.get(fname, nullid)
1456 1460 if isinstance(fctx, context.filectx):
1457 1461 node = fctx.filenode()
1458 1462 if node in [fparent1, fparent2]:
1459 1463 self.ui.debug('reusing %s filelog entry\n' % fname)
1460 1464 if manifest1.flags(fname) != fctx.flags():
1461 1465 changelist.append(fname)
1462 1466 return node
1463 1467
1464 1468 flog = self.file(fname)
1465 1469 meta = {}
1466 1470 copy = fctx.renamed()
1467 1471 if copy and copy[0] != fname:
1468 1472 # Mark the new revision of this file as a copy of another
1469 1473 # file. This copy data will effectively act as a parent
1470 1474 # of this new revision. If this is a merge, the first
1471 1475 # parent will be the nullid (meaning "look up the copy data")
1472 1476 # and the second one will be the other parent. For example:
1473 1477 #
1474 1478 # 0 --- 1 --- 3 rev1 changes file foo
1475 1479 # \ / rev2 renames foo to bar and changes it
1476 1480 # \- 2 -/ rev3 should have bar with all changes and
1477 1481 # should record that bar descends from
1478 1482 # bar in rev2 and foo in rev1
1479 1483 #
1480 1484 # this allows this merge to succeed:
1481 1485 #
1482 1486 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1483 1487 # \ / merging rev3 and rev4 should use bar@rev2
1484 1488 # \- 2 --- 4 as the merge base
1485 1489 #
1486 1490
1487 1491 cfname = copy[0]
1488 1492 crev = manifest1.get(cfname)
1489 1493 newfparent = fparent2
1490 1494
1491 1495 if manifest2: # branch merge
1492 1496 if fparent2 == nullid or crev is None: # copied on remote side
1493 1497 if cfname in manifest2:
1494 1498 crev = manifest2[cfname]
1495 1499 newfparent = fparent1
1496 1500
1497 1501 # Here, we used to search backwards through history to try to find
1498 1502 # where the file copy came from if the source of a copy was not in
1499 1503 # the parent directory. However, this doesn't actually make sense to
1500 1504 # do (what does a copy from something not in your working copy even
1501 1505 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1502 1506 # the user that copy information was dropped, so if they didn't
1503 1507 # expect this outcome it can be fixed, but this is the correct
1504 1508 # behavior in this circumstance.
1505 1509
1506 1510 if crev:
1507 1511 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1508 1512 meta["copy"] = cfname
1509 1513 meta["copyrev"] = hex(crev)
1510 1514 fparent1, fparent2 = nullid, newfparent
1511 1515 else:
1512 1516 self.ui.warn(_("warning: can't find ancestor for '%s' "
1513 1517 "copied from '%s'!\n") % (fname, cfname))
1514 1518
1515 1519 elif fparent1 == nullid:
1516 1520 fparent1, fparent2 = fparent2, nullid
1517 1521 elif fparent2 != nullid:
1518 1522 # is one parent an ancestor of the other?
1519 1523 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1520 1524 if fparent1 in fparentancestors:
1521 1525 fparent1, fparent2 = fparent2, nullid
1522 1526 elif fparent2 in fparentancestors:
1523 1527 fparent2 = nullid
1524 1528
1525 1529 # is the file changed?
1526 1530 text = fctx.data()
1527 1531 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1528 1532 changelist.append(fname)
1529 1533 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1530 1534 # are just the flags changed during merge?
1531 1535 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1532 1536 changelist.append(fname)
1533 1537
1534 1538 return fparent1
1535 1539
1536 1540 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1537 1541 """check for commit arguments that aren't committable"""
1538 1542 if match.isexact() or match.prefix():
1539 1543 matched = set(status.modified + status.added + status.removed)
1540 1544
1541 1545 for f in match.files():
1542 1546 f = self.dirstate.normalize(f)
1543 1547 if f == '.' or f in matched or f in wctx.substate:
1544 1548 continue
1545 1549 if f in status.deleted:
1546 1550 fail(f, _('file not found!'))
1547 1551 if f in vdirs: # visited directory
1548 1552 d = f + '/'
1549 1553 for mf in matched:
1550 1554 if mf.startswith(d):
1551 1555 break
1552 1556 else:
1553 1557 fail(f, _("no match under directory!"))
1554 1558 elif f not in self.dirstate:
1555 1559 fail(f, _("file not tracked!"))
1556 1560
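A condensed, standalone mimic of these checks, with plain sets standing in for the dirstate, status and visited-directory bookkeeping (all names here are illustrative, not Mercurial API):

def check_patterns(files, matched, deleted, vdirs, tracked, fail):
    # every explicitly named path must resolve to something committable
    for f in files:
        if f in matched:
            continue
        if f in deleted:
            fail(f, 'file not found!')
        elif f in vdirs:
            # a directory passes only if something beneath it matched
            if not any(mf.startswith(f + '/') for mf in matched):
                fail(f, 'no match under directory!')
        elif f not in tracked:
            fail(f, 'file not tracked!')
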
1557 1561 @unfilteredmethod
1558 1562 def commit(self, text="", user=None, date=None, match=None, force=False,
1559 1563 editor=False, extra=None):
1560 1564 """Add a new revision to current repository.
1561 1565
1562 1566 Revision information is gathered from the working directory,
1563 1567 match can be used to filter the committed files. If editor is
1564 1568 supplied, it is called to get a commit message.
1565 1569 """
1566 1570 if extra is None:
1567 1571 extra = {}
1568 1572
1569 1573 def fail(f, msg):
1570 1574 raise error.Abort('%s: %s' % (f, msg))
1571 1575
1572 1576 if not match:
1573 1577 match = matchmod.always(self.root, '')
1574 1578
1575 1579 if not force:
1576 1580 vdirs = []
1577 1581 match.explicitdir = vdirs.append
1578 1582 match.bad = fail
1579 1583
1580 1584 wlock = lock = tr = None
1581 1585 try:
1582 1586 wlock = self.wlock()
1583 1587 lock = self.lock() # for recent changelog (see issue4368)
1584 1588
1585 1589 wctx = self[None]
1586 1590 merge = len(wctx.parents()) > 1
1587 1591
1588 1592 if not force and merge and match.ispartial():
1589 1593 raise error.Abort(_('cannot partially commit a merge '
1590 1594 '(do not specify files or patterns)'))
1591 1595
1592 1596 status = self.status(match=match, clean=force)
1593 1597 if force:
1594 1598 status.modified.extend(status.clean) # mq may commit clean files
1595 1599
1596 1600 # check subrepos
1597 1601 subs = []
1598 1602 commitsubs = set()
1599 1603 newstate = wctx.substate.copy()
1600 1604 # only manage subrepos and .hgsubstate if .hgsub is present
1601 1605 if '.hgsub' in wctx:
1602 1606 # we'll decide whether to track this ourselves, thanks
1603 1607 for c in status.modified, status.added, status.removed:
1604 1608 if '.hgsubstate' in c:
1605 1609 c.remove('.hgsubstate')
1606 1610
1607 1611 # compare current state to last committed state
1608 1612 # build new substate based on last committed state
1609 1613 oldstate = wctx.p1().substate
1610 1614 for s in sorted(newstate.keys()):
1611 1615 if not match(s):
1612 1616 # ignore working copy, use old state if present
1613 1617 if s in oldstate:
1614 1618 newstate[s] = oldstate[s]
1615 1619 continue
1616 1620 if not force:
1617 1621 raise error.Abort(
1618 1622 _("commit with new subrepo %s excluded") % s)
1619 1623 dirtyreason = wctx.sub(s).dirtyreason(True)
1620 1624 if dirtyreason:
1621 1625 if not self.ui.configbool('ui', 'commitsubrepos'):
1622 1626 raise error.Abort(dirtyreason,
1623 1627 hint=_("use --subrepos for recursive commit"))
1624 1628 subs.append(s)
1625 1629 commitsubs.add(s)
1626 1630 else:
1627 1631 bs = wctx.sub(s).basestate()
1628 1632 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1629 1633 if oldstate.get(s, (None, None, None))[1] != bs:
1630 1634 subs.append(s)
1631 1635
1632 1636 # check for removed subrepos
1633 1637 for p in wctx.parents():
1634 1638 r = [s for s in p.substate if s not in newstate]
1635 1639 subs += [s for s in r if match(s)]
1636 1640 if subs:
1637 1641 if (not match('.hgsub') and
1638 1642 '.hgsub' in (wctx.modified() + wctx.added())):
1639 1643 raise error.Abort(
1640 1644 _("can't commit subrepos without .hgsub"))
1641 1645 status.modified.insert(0, '.hgsubstate')
1642 1646
1643 1647 elif '.hgsub' in status.removed:
1644 1648 # clean up .hgsubstate when .hgsub is removed
1645 1649 if ('.hgsubstate' in wctx and
1646 1650 '.hgsubstate' not in (status.modified + status.added +
1647 1651 status.removed)):
1648 1652 status.removed.insert(0, '.hgsubstate')
1649 1653
1650 1654 # make sure all explicit patterns are matched
1651 1655 if not force:
1652 1656 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1653 1657
1654 1658 cctx = context.workingcommitctx(self, status,
1655 1659 text, user, date, extra)
1656 1660
1657 1661 # internal config: ui.allowemptycommit
1658 1662 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1659 1663 or extra.get('close') or merge or cctx.files()
1660 1664 or self.ui.configbool('ui', 'allowemptycommit'))
1661 1665 if not allowemptycommit:
1662 1666 return None
1663 1667
1664 1668 if merge and cctx.deleted():
1665 1669 raise error.Abort(_("cannot commit merge with missing files"))
1666 1670
1667 1671 ms = mergemod.mergestate.read(self)
1668 1672 mergeutil.checkunresolved(ms)
1669 1673
1670 1674 if editor:
1671 1675 cctx._text = editor(self, cctx, subs)
1672 1676 edited = (text != cctx._text)
1673 1677
1674 1678 # Save commit message in case this transaction gets rolled back
1675 1679 # (e.g. by a pretxncommit hook). Leave the content alone on
1676 1680 # the assumption that the user will use the same editor again.
1677 1681 msgfn = self.savecommitmessage(cctx._text)
1678 1682
1679 1683 # commit subs and write new state
1680 1684 if subs:
1681 1685 for s in sorted(commitsubs):
1682 1686 sub = wctx.sub(s)
1683 1687 self.ui.status(_('committing subrepository %s\n') %
1684 1688 subrepo.subrelpath(sub))
1685 1689 sr = sub.commit(cctx._text, user, date)
1686 1690 newstate[s] = (newstate[s][0], sr)
1687 1691 subrepo.writestate(self, newstate)
1688 1692
1689 1693 p1, p2 = self.dirstate.parents()
1690 1694 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1691 1695 try:
1692 1696 self.hook("precommit", throw=True, parent1=hookp1,
1693 1697 parent2=hookp2)
1694 1698 tr = self.transaction('commit')
1695 1699 ret = self.commitctx(cctx, True)
1696 1700 except: # re-raises
1697 1701 if edited:
1698 1702 self.ui.write(
1699 1703 _('note: commit message saved in %s\n') % msgfn)
1700 1704 raise
1701 1705 # update bookmarks, dirstate and mergestate
1702 1706 bookmarks.update(self, [p1, p2], ret)
1703 1707 cctx.markcommitted(ret)
1704 1708 ms.reset()
1705 1709 tr.close()
1706 1710
1707 1711 finally:
1708 1712 lockmod.release(tr, lock, wlock)
1709 1713
1710 1714 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1711 1715 # hack for commands that use a temporary commit (e.g. histedit):
1712 1716 # the temporary commit may have been stripped before the hook runs
1713 1717 if self.changelog.hasnode(ret):
1714 1718 self.hook("commit", node=node, parent1=parent1,
1715 1719 parent2=parent2)
1716 1720 self._afterlock(commithook)
1717 1721 return ret
1718 1722
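For orientation, a caller typically drives commit() as below; the path, user and message are placeholders, and the direct ui construction matches this vintage of the API (a sketch, not a prescribed interface):

from mercurial import hg, ui as uimod

myui = uimod.ui()
repo = hg.repository(myui, '.')         # assumes cwd is a working copy
node = repo.commit(text='example commit',
                   user='alice <alice@example.com>')
if node is None:
    myui.status('nothing changed\n')    # empty commits return None
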
1719 1723 @unfilteredmethod
1720 1724 def commitctx(self, ctx, error=False):
1721 1725 """Add a new revision to current repository.
1722 1726 Revision information is passed via the context argument.
1723 1727 """
1724 1728
1725 1729 tr = None
1726 1730 p1, p2 = ctx.p1(), ctx.p2()
1727 1731 user = ctx.user()
1728 1732
1729 1733 lock = self.lock()
1730 1734 try:
1731 1735 tr = self.transaction("commit")
1732 1736 trp = weakref.proxy(tr)
1733 1737
1734 1738 if ctx.manifestnode():
1735 1739 # reuse an existing manifest revision
1736 1740 mn = ctx.manifestnode()
1737 1741 files = ctx.files()
1738 1742 elif ctx.files():
1739 1743 m1ctx = p1.manifestctx()
1740 1744 m2ctx = p2.manifestctx()
1741 1745 mctx = m1ctx.copy()
1742 1746
1743 1747 m = mctx.read()
1744 1748 m1 = m1ctx.read()
1745 1749 m2 = m2ctx.read()
1746 1750
1747 1751 # check in files
1748 1752 added = []
1749 1753 changed = []
1750 1754 removed = list(ctx.removed())
1751 1755 linkrev = len(self)
1752 1756 self.ui.note(_("committing files:\n"))
1753 1757 for f in sorted(ctx.modified() + ctx.added()):
1754 1758 self.ui.note(f + "\n")
1755 1759 try:
1756 1760 fctx = ctx[f]
1757 1761 if fctx is None:
1758 1762 removed.append(f)
1759 1763 else:
1760 1764 added.append(f)
1761 1765 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1762 1766 trp, changed)
1763 1767 m.setflag(f, fctx.flags())
1764 1768 except OSError as inst:
1765 1769 self.ui.warn(_("trouble committing %s!\n") % f)
1766 1770 raise
1767 1771 except IOError as inst:
1768 1772 errcode = getattr(inst, 'errno', errno.ENOENT)
1769 1773 if error or errcode and errcode != errno.ENOENT:
1770 1774 self.ui.warn(_("trouble committing %s!\n") % f)
1771 1775 raise
1772 1776
1773 1777 # update manifest
1774 1778 self.ui.note(_("committing manifest\n"))
1775 1779 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1776 1780 drop = [f for f in removed if f in m]
1777 1781 for f in drop:
1778 1782 del m[f]
1779 1783 mn = mctx.write(trp, linkrev,
1780 1784 p1.manifestnode(), p2.manifestnode(),
1781 1785 added, drop)
1782 1786 files = changed + removed
1783 1787 else:
1784 1788 mn = p1.manifestnode()
1785 1789 files = []
1786 1790
1787 1791 # update changelog
1788 1792 self.ui.note(_("committing changelog\n"))
1789 1793 self.changelog.delayupdate(tr)
1790 1794 n = self.changelog.add(mn, files, ctx.description(),
1791 1795 trp, p1.node(), p2.node(),
1792 1796 user, ctx.date(), ctx.extra().copy())
1793 1797 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1794 1798 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1795 1799 parent2=xp2)
1796 1800 # set the new commit in its proper phase
1797 1801 targetphase = subrepo.newcommitphase(self.ui, ctx)
1798 1802 if targetphase:
1799 1803 # retracting the boundary does not alter the parent changesets:
1800 1804 # if a parent already has a higher phase, the resulting phase
1801 1805 # will be compliant anyway
1802 1806 #
1803 1807 # if the minimal phase was 0, we don't need to retract anything
1804 1808 phases.retractboundary(self, tr, targetphase, [n])
1805 1809 tr.close()
1806 1810 branchmap.updatecache(self.filtered('served'))
1807 1811 return n
1808 1812 finally:
1809 1813 if tr:
1810 1814 tr.release()
1811 1815 lock.release()
1812 1816
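commitctx is also the entry point for in-memory commits built with context.memctx (the convert extension works this way); a minimal sketch, with the file name and content as placeholders:

from mercurial import context, hg, ui as uimod

repo = hg.repository(uimod.ui(), '.')

def getfilectx(repo, memctx, path):
    # materialize each committed file on demand
    return context.memfilectx(repo, path, 'generated content\n')

p1 = repo['.']
mctx = context.memctx(repo, (p1.node(), None), 'in-memory commit',
                      ['generated.txt'], getfilectx, user='bot')
node = repo.commitctx(mctx)
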
1813 1817 @unfilteredmethod
1814 1818 def destroying(self):
1815 1819 '''Inform the repository that nodes are about to be destroyed.
1816 1820 Intended for use by strip and rollback, so there's a common
1817 1821 place for anything that has to be done before destroying history.
1818 1822
1819 1823 This is mostly useful for saving state that is in memory and waiting
1820 1824 to be flushed when the current lock is released. Because a call to
1821 1825 destroyed is imminent, the repo will be invalidated, causing such
1822 1826 in-memory changes to either linger until the next unlock or vanish
1823 1827 completely.
1824 1828 '''
1825 1829 # When using the same lock to commit and strip, the phasecache is left
1826 1830 # dirty after committing. Then when we strip, the repo is invalidated,
1827 1831 # causing those changes to disappear.
1828 1832 if '_phasecache' in vars(self):
1829 1833 self._phasecache.write()
1830 1834
1831 1835 @unfilteredmethod
1832 1836 def destroyed(self):
1833 1837 '''Inform the repository that nodes have been destroyed.
1834 1838 Intended for use by strip and rollback, so there's a common
1835 1839 place for anything that has to be done after destroying history.
1836 1840 '''
1837 1841 # When one tries to:
1838 1842 # 1) destroy nodes thus calling this method (e.g. strip)
1839 1843 # 2) use phasecache somewhere (e.g. commit)
1840 1844 #
1841 1845 # then 2) will fail because the phasecache contains nodes that were
1842 1846 # removed. We can either remove phasecache from the filecache,
1843 1847 # causing it to reload next time it is accessed, or simply filter
1844 1848 # the removed nodes now and write the updated cache.
1845 1849 self._phasecache.filterunknown(self)
1846 1850 self._phasecache.write()
1847 1851
1848 1852 # update the 'served' branch cache to help read only server process
1849 1853 # Thanks to branchcache collaboration this is done from the nearest
1850 1854 # filtered subset and it is expected to be fast.
1851 1855 branchmap.updatecache(self.filtered('served'))
1852 1856
1853 1857 # Ensure the persistent tag cache is updated. Doing it now
1854 1858 # means that the tag cache only has to worry about destroyed
1855 1859 # heads immediately after a strip/rollback. That in turn
1856 1860 # guarantees that "cachetip == currenttip" (comparing both rev
1857 1861 # and node) always means no nodes have been added or destroyed.
1858 1862
1859 1863 # XXX this is suboptimal when qrefresh'ing: we strip the current
1860 1864 # head, refresh the tag cache, then immediately add a new head.
1861 1865 # But I think doing it this way is necessary for the "instant
1862 1866 # tag cache retrieval" case to work.
1863 1867 self.invalidate()
1864 1868
1865 1869 def walk(self, match, node=None):
1866 1870 '''
1867 1871 walk recursively through the directory tree or a given
1868 1872 changeset, finding all files matched by the match
1869 1873 function
1870 1874 '''
1871 1875 return self[node].walk(match)
1872 1876
1873 1877 def status(self, node1='.', node2=None, match=None,
1874 1878 ignored=False, clean=False, unknown=False,
1875 1879 listsubrepos=False):
1876 1880 '''a convenience method that calls node1.status(node2)'''
1877 1881 return self[node1].status(node2, match, ignored, clean, unknown,
1878 1882 listsubrepos)
1879 1883
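Both conveniences defer to the relevant changectx; typical extension use looks like the following (the file name is a placeholder):

from mercurial import hg, scmutil, ui as uimod

repo = hg.repository(uimod.ui(), '.')
m = scmutil.matchfiles(repo, ['README'])     # exact matcher
for f in repo.walk(m, node='tip'):           # files matched in tip
    repo.ui.write('%s\n' % f)

st = repo.status()                           # working dir vs. parent
repo.ui.write('%d modified, %d added\n'
              % (len(st.modified), len(st.added)))
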
1880 1884 def heads(self, start=None):
1881 1885 if start is None:
1882 1886 cl = self.changelog
1883 1887 headrevs = reversed(cl.headrevs())
1884 1888 return [cl.node(rev) for rev in headrevs]
1885 1889
1886 1890 heads = self.changelog.heads(start)
1887 1891 # sort the output in rev descending order
1888 1892 return sorted(heads, key=self.changelog.rev, reverse=True)
1889 1893
1890 1894 def branchheads(self, branch=None, start=None, closed=False):
1891 1895 '''return a (possibly filtered) list of heads for the given branch
1892 1896
1893 1897 Heads are returned in topological order, from newest to oldest.
1894 1898 If branch is None, use the dirstate branch.
1895 1899 If start is not None, return only heads reachable from start.
1896 1900 If closed is True, return heads that are marked as closed as well.
1897 1901 '''
1898 1902 if branch is None:
1899 1903 branch = self[None].branch()
1900 1904 branches = self.branchmap()
1901 1905 if branch not in branches:
1902 1906 return []
1903 1907 # the cache returns heads ordered lowest to highest
1904 1908 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1905 1909 if start is not None:
1906 1910 # filter out the heads that cannot be reached from startrev
1907 1911 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1908 1912 bheads = [h for h in bheads if h in fbheads]
1909 1913 return bheads
1910 1914
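Both head queries return binary nodes ordered newest-first; a short illustration (the branch name is a placeholder):

from mercurial import hg, ui as uimod
from mercurial.node import short

repo = hg.repository(uimod.ui(), '.')
for h in repo.heads():                        # all topological heads
    repo.ui.write('%s\n' % short(h))
for h in repo.branchheads('default', closed=True):
    repo.ui.write('default head: %s\n' % short(h))
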
1911 1915 def branches(self, nodes):
1912 1916 if not nodes:
1913 1917 nodes = [self.changelog.tip()]
1914 1918 b = []
1915 1919 for n in nodes:
1916 1920 t = n
1917 1921 while True:
1918 1922 p = self.changelog.parents(n)
1919 1923 if p[1] != nullid or p[0] == nullid:
1920 1924 b.append((t, n, p[0], p[1]))
1921 1925 break
1922 1926 n = p[0]
1923 1927 return b
1924 1928
1925 1929 def between(self, pairs):
1926 1930 r = []
1927 1931
1928 1932 for top, bottom in pairs:
1929 1933 n, l, i = top, [], 0
1930 1934 f = 1
1931 1935
1932 1936 while n != bottom and n != nullid:
1933 1937 p = self.changelog.parents(n)[0]
1934 1938 if i == f:
1935 1939 l.append(n)
1936 1940 f = f * 2
1937 1941 n = p
1938 1942 i += 1
1939 1943
1940 1944 r.append(l)
1941 1945
1942 1946 return r
1943 1947
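The i == f / f = f * 2 dance samples first parents at exponentially growing distances (1, 2, 4, 8, ... steps below each top), which keeps old-style discovery traffic logarithmic in the length of history. A self-contained sketch on a linear history, with plain integers standing in for nodes:

def between_linear(top, bottom):
    # same loop as above, with "first parent of n" simulated as n - 1
    n, l, i, f = top, [], 0, 1
    while n != bottom and n > 0:
        if i == f:
            l.append(n)
            f *= 2
        n -= 1
        i += 1
    return l

# nodes 1, 2, 4, 8 and 16 steps below the top get reported:
assert between_linear(100, 80) == [99, 98, 96, 92, 84]
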
1944 1948 def checkpush(self, pushop):
1945 1949 """Extensions can override this function if additional checks have
1946 1950 to be performed before pushing, or call it if they override the
1947 1951 push command.
1948 1952 """
1949 1953 pass
1950 1954
1951 1955 @unfilteredpropertycache
1952 1956 def prepushoutgoinghooks(self):
1953 1957 """Return util.hooks consists of a pushop with repo, remote, outgoing
1954 1958 methods, which are called before pushing changesets.
1955 1959 """
1956 1960 return util.hooks()
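An extension can annotate or veto a push by registering a hook here; each hook receives the pushop once the outgoing changesets are known. A sketch (the extension name and message are placeholders):

def _prepush(pushop):
    # runs before changesets leave the repo; raising aborts the push
    pushop.repo.ui.status('pushing %d changesets\n'
                          % len(pushop.outgoing.missing))

def reposetup(ui, repo):
    if repo.local():
        repo.prepushoutgoinghooks.add('myext', _prepush)
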
1957 1961
1958 1962 def pushkey(self, namespace, key, old, new):
1959 1963 try:
1960 1964 tr = self.currenttransaction()
1961 1965 hookargs = {}
1962 1966 if tr is not None:
1963 1967 hookargs.update(tr.hookargs)
1964 1968 hookargs['namespace'] = namespace
1965 1969 hookargs['key'] = key
1966 1970 hookargs['old'] = old
1967 1971 hookargs['new'] = new
1968 1972 self.hook('prepushkey', throw=True, **hookargs)
1969 1973 except error.HookAbort as exc:
1970 1974 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1971 1975 if exc.hint:
1972 1976 self.ui.write_err(_("(%s)\n") % exc.hint)
1973 1977 return False
1974 1978 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1975 1979 ret = pushkey.push(self, namespace, key, old, new)
1976 1980 def runhook():
1977 1981 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1978 1982 ret=ret)
1979 1983 self._afterlock(runhook)
1980 1984 return ret
1981 1985
1982 1986 def listkeys(self, namespace):
1983 1987 self.hook('prelistkeys', throw=True, namespace=namespace)
1984 1988 self.ui.debug('listing keys for "%s"\n' % namespace)
1985 1989 values = pushkey.list(self, namespace)
1986 1990 self.hook('listkeys', namespace=namespace, values=values)
1987 1991 return values
1988 1992
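pushkey namespaces are how bookmarks and phases travel over the wire, and the same methods work on a local repo. For example (the bookmark name is a placeholder; '' stands for "key absent"):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '.')
repo.ui.write('%r\n' % repo.listkeys('namespaces'))
marks = repo.listkeys('bookmarks')       # bookmark name -> hex node
old = marks.get('feature', '')
repo.pushkey('bookmarks', 'feature', old, repo['tip'].hex())
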
1989 1993 def debugwireargs(self, one, two, three=None, four=None, five=None):
1990 1994 '''used to test argument passing over the wire'''
1991 1995 return "%s %s %s %s %s" % (one, two, three, four, five)
1992 1996
1993 1997 def savecommitmessage(self, text):
1994 1998 fp = self.vfs('last-message.txt', 'wb')
1995 1999 try:
1996 2000 fp.write(text)
1997 2001 finally:
1998 2002 fp.close()
1999 2003 return self.pathto(fp.name[len(self.root) + 1:])
2000 2004
2001 2005 # used to avoid circular references so destructors work
2002 2006 def aftertrans(files):
2003 2007 renamefiles = [tuple(t) for t in files]
2004 2008 def a():
2005 2009 for vfs, src, dest in renamefiles:
2006 2010 try:
2007 2011 vfs.rename(src, dest)
2008 2012 except OSError: # journal file does not yet exist
2009 2013 pass
2010 2014 return a
2011 2015
2012 2016 def undoname(fn):
2013 2017 base, name = os.path.split(fn)
2014 2018 assert name.startswith('journal')
2015 2019 return os.path.join(base, name.replace('journal', 'undo', 1))
2016 2020
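undoname simply maps a transaction journal file to its rollback counterpart, replacing only the first 'journal' occurrence in the basename (POSIX-style paths assumed here):

assert undoname('.hg/store/journal') == '.hg/store/undo'
assert undoname('.hg/journal.dirstate') == '.hg/undo.dirstate'
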
2017 2021 def instance(ui, path, create):
2018 2022 return localrepository(ui, util.urllocalpath(path), create)
2019 2023
2020 2024 def islocal(path):
2021 2025 return True
2022 2026
2023 2027 def newreporequirements(repo):
2024 2028 """Determine the set of requirements for a new local repository.
2025 2029
2026 2030 Extensions can wrap this function to specify custom requirements for
2027 2031 new repositories.
2028 2032 """
2029 2033 ui = repo.ui
2030 2034 requirements = set(['revlogv1'])
2031 2035 if ui.configbool('format', 'usestore', True):
2032 2036 requirements.add('store')
2033 2037 if ui.configbool('format', 'usefncache', True):
2034 2038 requirements.add('fncache')
2035 2039 if ui.configbool('format', 'dotencode', True):
2036 2040 requirements.add('dotencode')
2037 2041
2038 2042 compengine = ui.config('experimental', 'format.compression', 'zlib')
2039 2043 if compengine not in util.compengines:
2040 2044 raise error.Abort(_('compression engine %s defined by '
2041 2045 'experimental.format.compression not available') %
2042 2046 compengine,
2043 2047 hint=_('run "hg debuginstall" to list available '
2044 2048 'compression engines'))
2045 2049
2046 2050 # zlib is the historical default and doesn't need an explicit requirement.
2047 2051 if compengine != 'zlib':
2048 2052 requirements.add('exp-compression-%s' % compengine)
2049 2053
2050 2054 if scmutil.gdinitconfig(ui):
2051 2055 requirements.add('generaldelta')
2052 2056 if ui.configbool('experimental', 'treemanifest', False):
2053 2057 requirements.add('treemanifest')
2054 2058 if ui.configbool('experimental', 'manifestv2', False):
2055 2059 requirements.add('manifestv2')
2056 2060
2057 2061 return requirements
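Since the function is meant to be wrapped, an extension would typically hook it as below; the config knob and requirement string are hypothetical:

from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    reqs = orig(repo)
    if repo.ui.configbool('myext', 'fancyformat'):  # hypothetical knob
        reqs.add('exp-myext-fancyformat')
    return reqs

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)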