repofilecache: define a 'join' method...
Pierre-Yves David
r31282:b9228a22 default
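This changeset gives repofilecache an explicit 'join' hook for resolving the paths of the files it watches: repofilecache.join delegates to the repository's join() (files under .hg/), while the storecache subclass overrides it with sjoin() (files under .hg/store/). Below is a minimal, runnable sketch of the pattern; the filecache base class here is a simplified stand-in for scmutil.filecache (the real one also records stat data so a cached value is invalidated when its backing file changes), and fakerepo is a hypothetical repository object:

    import os

    class filecache(object):
        """Simplified stand-in for scmutil.filecache (caching only)."""
        def __init__(self, *paths):
            self.paths = paths

        def join(self, obj, fname):
            # Subclasses decide how a tracked filename maps to a path.
            raise NotImplementedError

        def __call__(self, func):
            # Used as a decorator: @filecache('somefile')
            self.func = func
            self.name = func.__name__
            return self

        def __get__(self, obj, type=None):
            if obj is None:
                return self
            # Resolve every tracked file through join(); the real class
            # stats these paths to decide whether the cache is stale.
            for fname in self.paths:
                self.join(obj, fname)
            value = self.func(obj)
            obj.__dict__[self.name] = value  # cache on the instance
            return value

    class repofilecache(filecache):
        """Caches backed by files under .hg/: resolve via repo.join()."""
        def join(self, obj, fname):
            return obj.join(fname)

    class storecache(repofilecache):
        """Caches backed by files under .hg/store/: resolve via sjoin()."""
        def join(self, obj, fname):
            return obj.sjoin(fname)

    class fakerepo(object):
        """Hypothetical repo exposing the two path helpers used above."""
        def join(self, fname):
            return os.path.join('/repo/.hg', fname)

        def sjoin(self, fname):
            return os.path.join('/repo/.hg/store', fname)

        @repofilecache('bookmarks')
        def _bookmarks(self):
            return 'loaded from %s' % self.join('bookmarks')

        @storecache('00changelog.i')
        def changelog(self):
            return 'loaded from %s' % self.sjoin('00changelog.i')

Moving path resolution onto the descriptor means the cache itself knows where its files live, instead of every call site hard-coding the .hg/ versus .hg/store/ distinction.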
@@ -1,2073 +1,2075 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 vfs as vfsmod,
63 63 )
64 64
65 65 release = lockmod.release
66 66 urlerr = util.urlerr
67 67 urlreq = util.urlreq
68 68
69 69 class repofilecache(scmutil.filecache):
70 70 """All filecache usage on repo are done for logic that should be unfiltered
71 71 """
72 72
73 def join(self, obj, fname):
74 return obj.join(fname)
73 75 def __get__(self, repo, type=None):
74 76 if repo is None:
75 77 return self
76 78 return super(repofilecache, self).__get__(repo.unfiltered(), type)
77 79 def __set__(self, repo, value):
78 80 return super(repofilecache, self).__set__(repo.unfiltered(), value)
79 81 def __delete__(self, repo):
80 82 return super(repofilecache, self).__delete__(repo.unfiltered())
81 83
82 84 class storecache(repofilecache):
83 85 """filecache for files in the store"""
84 86 def join(self, obj, fname):
85 87 return obj.sjoin(fname)
86 88
87 89 class unfilteredpropertycache(util.propertycache):
88 90 """propertycache that apply to unfiltered repo only"""
89 91
90 92 def __get__(self, repo, type=None):
91 93 unfi = repo.unfiltered()
92 94 if unfi is repo:
93 95 return super(unfilteredpropertycache, self).__get__(unfi)
94 96 return getattr(unfi, self.name)
95 97
96 98 class filteredpropertycache(util.propertycache):
97 99 """propertycache that must take filtering in account"""
98 100
99 101 def cachevalue(self, obj, value):
100 102 object.__setattr__(obj, self.name, value)
101 103
102 104
103 105 def hasunfilteredcache(repo, name):
104 106 """check if a repo has an unfilteredpropertycache value for <name>"""
105 107 return name in vars(repo.unfiltered())
106 108
107 109 def unfilteredmethod(orig):
108 110 """decorate method that always need to be run on unfiltered version"""
109 111 def wrapper(repo, *args, **kwargs):
110 112 return orig(repo.unfiltered(), *args, **kwargs)
111 113 return wrapper
112 114
113 115 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
114 116 'unbundle'))
115 117 legacycaps = moderncaps.union(set(['changegroupsubset']))
116 118
117 119 class localpeer(peer.peerrepository):
118 120 '''peer for a local repo; reflects only the most recent API'''
119 121
120 122 def __init__(self, repo, caps=moderncaps):
121 123 peer.peerrepository.__init__(self)
122 124 self._repo = repo.filtered('served')
123 125 self.ui = repo.ui
124 126 self._caps = repo._restrictcapabilities(caps)
125 127 self.requirements = repo.requirements
126 128 self.supportedformats = repo.supportedformats
127 129
128 130 def close(self):
129 131 self._repo.close()
130 132
131 133 def _capabilities(self):
132 134 return self._caps
133 135
134 136 def local(self):
135 137 return self._repo
136 138
137 139 def canpush(self):
138 140 return True
139 141
140 142 def url(self):
141 143 return self._repo.url()
142 144
143 145 def lookup(self, key):
144 146 return self._repo.lookup(key)
145 147
146 148 def branchmap(self):
147 149 return self._repo.branchmap()
148 150
149 151 def heads(self):
150 152 return self._repo.heads()
151 153
152 154 def known(self, nodes):
153 155 return self._repo.known(nodes)
154 156
155 157 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
156 158 **kwargs):
157 159 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
158 160 common=common, bundlecaps=bundlecaps,
159 161 **kwargs)
160 162 cb = util.chunkbuffer(chunks)
161 163
162 164 if bundlecaps is not None and 'HG20' in bundlecaps:
163 165 # When requesting a bundle2, getbundle returns a stream to make the
164 166 # wire level function happier. We need to build a proper object
165 167 # from it in local peer.
166 168 return bundle2.getunbundler(self.ui, cb)
167 169 else:
168 170 return changegroup.getunbundler('01', cb, None)
169 171
170 172 # TODO We might want to move the next two calls into legacypeer and add
171 173 # unbundle instead.
172 174
173 175 def unbundle(self, cg, heads, url):
174 176 """apply a bundle on a repo
175 177
176 178 This function handles the repo locking itself."""
177 179 try:
178 180 try:
179 181 cg = exchange.readbundle(self.ui, cg, None)
180 182 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
181 183 if util.safehasattr(ret, 'getchunks'):
182 184 # This is a bundle20 object, turn it into an unbundler.
183 185 # This little dance should be dropped eventually when the
184 186 # API is finally improved.
185 187 stream = util.chunkbuffer(ret.getchunks())
186 188 ret = bundle2.getunbundler(self.ui, stream)
187 189 return ret
188 190 except Exception as exc:
189 191 # If the exception contains output salvaged from a bundle2
190 192 # reply, we need to make sure it is printed before continuing
191 193 # to fail. So we build a bundle2 with such output and consume
192 194 # it directly.
193 195 #
194 196 # This is not very elegant but allows a "simple" solution for
195 197 # issue4594
196 198 output = getattr(exc, '_bundle2salvagedoutput', ())
197 199 if output:
198 200 bundler = bundle2.bundle20(self._repo.ui)
199 201 for out in output:
200 202 bundler.addpart(out)
201 203 stream = util.chunkbuffer(bundler.getchunks())
202 204 b = bundle2.getunbundler(self.ui, stream)
203 205 bundle2.processbundle(self._repo, b)
204 206 raise
205 207 except error.PushRaced as exc:
206 208 raise error.ResponseError(_('push failed:'), str(exc))
207 209
208 210 def lock(self):
209 211 return self._repo.lock()
210 212
211 213 def addchangegroup(self, cg, source, url):
212 214 return cg.apply(self._repo, source, url)
213 215
214 216 def pushkey(self, namespace, key, old, new):
215 217 return self._repo.pushkey(namespace, key, old, new)
216 218
217 219 def listkeys(self, namespace):
218 220 return self._repo.listkeys(namespace)
219 221
220 222 def debugwireargs(self, one, two, three=None, four=None, five=None):
221 223 '''used to test argument passing over the wire'''
222 224 return "%s %s %s %s %s" % (one, two, three, four, five)
223 225
224 226 class locallegacypeer(localpeer):
225 227 '''peer extension which implements legacy methods too; used for tests with
226 228 restricted capabilities'''
227 229
228 230 def __init__(self, repo):
229 231 localpeer.__init__(self, repo, caps=legacycaps)
230 232
231 233 def branches(self, nodes):
232 234 return self._repo.branches(nodes)
233 235
234 236 def between(self, pairs):
235 237 return self._repo.between(pairs)
236 238
237 239 def changegroup(self, basenodes, source):
238 240 return changegroup.changegroup(self._repo, basenodes, source)
239 241
240 242 def changegroupsubset(self, bases, heads, source):
241 243 return changegroup.changegroupsubset(self._repo, bases, heads, source)
242 244
243 245 class localrepository(object):
244 246
245 247 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
246 248 'manifestv2'))
247 249 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
248 250 'relshared', 'dotencode'))
249 251 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
250 252 filtername = None
251 253
252 254 # a list of (ui, featureset) functions.
253 255 # only functions defined in module of enabled extensions are invoked
254 256 featuresetupfuncs = set()
255 257
256 258 def __init__(self, baseui, path, create=False):
257 259 self.requirements = set()
258 260 # vfs to access the working copy
259 261 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
260 262 # vfs to access the content of the repository
261 263 self.vfs = None
262 264 # vfs to access the store part of the repository
263 265 self.svfs = None
264 266 self.root = self.wvfs.base
265 267 self.path = self.wvfs.join(".hg")
266 268 self.origroot = path
267 269 self.auditor = pathutil.pathauditor(self.root, self._checknested)
268 270 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
269 271 realfs=False)
270 272 self.vfs = vfsmod.vfs(self.path)
271 273 self.baseui = baseui
272 274 self.ui = baseui.copy()
273 275 self.ui.copy = baseui.copy # prevent copying repo configuration
274 276 # A list of callbacks to shape the phase if no data were found.
275 277 # Callbacks are in the form: func(repo, roots) --> processed root.
276 278 # This list is to be filled by extensions during repo setup.
277 279 self._phasedefaults = []
278 280 try:
279 281 self.ui.readconfig(self.join("hgrc"), self.root)
280 282 self._loadextensions()
281 283 except IOError:
282 284 pass
283 285
284 286 if self.featuresetupfuncs:
285 287 self.supported = set(self._basesupported) # use private copy
286 288 extmods = set(m.__name__ for n, m
287 289 in extensions.extensions(self.ui))
288 290 for setupfunc in self.featuresetupfuncs:
289 291 if setupfunc.__module__ in extmods:
290 292 setupfunc(self.ui, self.supported)
291 293 else:
292 294 self.supported = self._basesupported
293 295 color.setup(self.ui)
294 296
295 297 # Add compression engines.
296 298 for name in util.compengines:
297 299 engine = util.compengines[name]
298 300 if engine.revlogheader():
299 301 self.supported.add('exp-compression-%s' % name)
300 302
301 303 if not self.vfs.isdir():
302 304 if create:
303 305 self.requirements = newreporequirements(self)
304 306
305 307 if not self.wvfs.exists():
306 308 self.wvfs.makedirs()
307 309 self.vfs.makedir(notindexed=True)
308 310
309 311 if 'store' in self.requirements:
310 312 self.vfs.mkdir("store")
311 313
312 314 # create an invalid changelog
313 315 self.vfs.append(
314 316 "00changelog.i",
315 317 '\0\0\0\2' # represents revlogv2
316 318 ' dummy changelog to prevent using the old repo layout'
317 319 )
318 320 else:
319 321 raise error.RepoError(_("repository %s not found") % path)
320 322 elif create:
321 323 raise error.RepoError(_("repository %s already exists") % path)
322 324 else:
323 325 try:
324 326 self.requirements = scmutil.readrequires(
325 327 self.vfs, self.supported)
326 328 except IOError as inst:
327 329 if inst.errno != errno.ENOENT:
328 330 raise
329 331
330 332 self.sharedpath = self.path
331 333 try:
332 334 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
333 335 if 'relshared' in self.requirements:
334 336 sharedpath = self.vfs.join(sharedpath)
335 337 vfs = vfsmod.vfs(sharedpath, realpath=True)
336 338 s = vfs.base
337 339 if not vfs.exists():
338 340 raise error.RepoError(
339 341 _('.hg/sharedpath points to nonexistent directory %s') % s)
340 342 self.sharedpath = s
341 343 except IOError as inst:
342 344 if inst.errno != errno.ENOENT:
343 345 raise
344 346
345 347 self.store = store.store(
346 348 self.requirements, self.sharedpath, vfsmod.vfs)
347 349 self.spath = self.store.path
348 350 self.svfs = self.store.vfs
349 351 self.sjoin = self.store.join
350 352 self.vfs.createmode = self.store.createmode
351 353 self._applyopenerreqs()
352 354 if create:
353 355 self._writerequirements()
354 356
355 357 self._dirstatevalidatewarned = False
356 358
357 359 self._branchcaches = {}
358 360 self._revbranchcache = None
359 361 self.filterpats = {}
360 362 self._datafilters = {}
361 363 self._transref = self._lockref = self._wlockref = None
362 364
363 365 # A cache for various files under .hg/ that tracks file changes,
364 366 # (used by the filecache decorator)
365 367 #
366 368 # Maps a property name to its util.filecacheentry
367 369 self._filecache = {}
368 370
369 371 # hold sets of revision to be filtered
370 372 # should be cleared when something might have changed the filter value:
371 373 # - new changesets,
372 374 # - phase change,
373 375 # - new obsolescence marker,
374 376 # - working directory parent change,
375 377 # - bookmark changes
376 378 self.filteredrevcache = {}
377 379
378 380 # generic mapping between names and nodes
379 381 self.names = namespaces.namespaces()
380 382
381 383 @property
382 384 def wopener(self):
383 385 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
384 386 return self.wvfs
385 387
386 388 @property
387 389 def opener(self):
388 390 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
389 391 return self.vfs
390 392
391 393 def close(self):
392 394 self._writecaches()
393 395
394 396 def _loadextensions(self):
395 397 extensions.loadall(self.ui)
396 398
397 399 def _writecaches(self):
398 400 if self._revbranchcache:
399 401 self._revbranchcache.write()
400 402
401 403 def _restrictcapabilities(self, caps):
402 404 if self.ui.configbool('experimental', 'bundle2-advertise', True):
403 405 caps = set(caps)
404 406 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
405 407 caps.add('bundle2=' + urlreq.quote(capsblob))
406 408 return caps
407 409
408 410 def _applyopenerreqs(self):
409 411 self.svfs.options = dict((r, 1) for r in self.requirements
410 412 if r in self.openerreqs)
411 413 # experimental config: format.chunkcachesize
412 414 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
413 415 if chunkcachesize is not None:
414 416 self.svfs.options['chunkcachesize'] = chunkcachesize
415 417 # experimental config: format.maxchainlen
416 418 maxchainlen = self.ui.configint('format', 'maxchainlen')
417 419 if maxchainlen is not None:
418 420 self.svfs.options['maxchainlen'] = maxchainlen
419 421 # experimental config: format.manifestcachesize
420 422 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
421 423 if manifestcachesize is not None:
422 424 self.svfs.options['manifestcachesize'] = manifestcachesize
423 425 # experimental config: format.aggressivemergedeltas
424 426 aggressivemergedeltas = self.ui.configbool('format',
425 427 'aggressivemergedeltas', False)
426 428 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
427 429 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
428 430
429 431 for r in self.requirements:
430 432 if r.startswith('exp-compression-'):
431 433 self.svfs.options['compengine'] = r[len('exp-compression-'):]
432 434
433 435 def _writerequirements(self):
434 436 scmutil.writerequires(self.vfs, self.requirements)
435 437
436 438 def _checknested(self, path):
437 439 """Determine if path is a legal nested repository."""
438 440 if not path.startswith(self.root):
439 441 return False
440 442 subpath = path[len(self.root) + 1:]
441 443 normsubpath = util.pconvert(subpath)
442 444
443 445 # XXX: Checking against the current working copy is wrong in
444 446 # the sense that it can reject things like
445 447 #
446 448 # $ hg cat -r 10 sub/x.txt
447 449 #
448 450 # if sub/ is no longer a subrepository in the working copy
449 451 # parent revision.
450 452 #
451 453 # However, it can of course also allow things that would have
452 454 # been rejected before, such as the above cat command if sub/
453 455 # is a subrepository now, but was a normal directory before.
454 456 # The old path auditor would have rejected by mistake since it
455 457 # panics when it sees sub/.hg/.
456 458 #
457 459 # All in all, checking against the working copy seems sensible
458 460 # since we want to prevent access to nested repositories on
459 461 # the filesystem *now*.
460 462 ctx = self[None]
461 463 parts = util.splitpath(subpath)
462 464 while parts:
463 465 prefix = '/'.join(parts)
464 466 if prefix in ctx.substate:
465 467 if prefix == normsubpath:
466 468 return True
467 469 else:
468 470 sub = ctx.sub(prefix)
469 471 return sub.checknested(subpath[len(prefix) + 1:])
470 472 else:
471 473 parts.pop()
472 474 return False
473 475
474 476 def peer(self):
475 477 return localpeer(self) # not cached to avoid reference cycle
476 478
477 479 def unfiltered(self):
478 480 """Return unfiltered version of the repository
479 481
480 482 Intended to be overridden by the filtered repo.
481 483 return self
482 484
483 485 def filtered(self, name):
484 486 """Return a filtered version of a repository"""
485 487 # build a new class with the mixin and the current class
486 488 # (possibly subclass of the repo)
487 489 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
488 490 pass
489 491 return filteredrepo(self, name)
490 492
491 493 @repofilecache('bookmarks', 'bookmarks.current')
492 494 def _bookmarks(self):
493 495 return bookmarks.bmstore(self)
494 496
495 497 @property
496 498 def _activebookmark(self):
497 499 return self._bookmarks.active
498 500
499 501 def bookmarkheads(self, bookmark):
500 502 name = bookmark.split('@', 1)[0]
501 503 heads = []
502 504 for mark, n in self._bookmarks.iteritems():
503 505 if mark.split('@', 1)[0] == name:
504 506 heads.append(n)
505 507 return heads
506 508
507 509 # _phaserevs and _phasesets depend on changelog. what we need is to
508 510 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
509 511 # can't be easily expressed in filecache mechanism.
510 512 @storecache('phaseroots', '00changelog.i')
511 513 def _phasecache(self):
512 514 return phases.phasecache(self, self._phasedefaults)
513 515
514 516 @storecache('obsstore')
515 517 def obsstore(self):
516 518 # read default format for new obsstore.
517 519 # developer config: format.obsstore-version
518 520 defaultformat = self.ui.configint('format', 'obsstore-version', None)
519 521 # rely on obsstore class default when possible.
520 522 kwargs = {}
521 523 if defaultformat is not None:
522 524 kwargs['defaultformat'] = defaultformat
523 525 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
524 526 store = obsolete.obsstore(self.svfs, readonly=readonly,
525 527 **kwargs)
526 528 if store and readonly:
527 529 self.ui.warn(
528 530 _('obsolete feature not enabled but %i markers found!\n')
529 531 % len(list(store)))
530 532 return store
531 533
532 534 @storecache('00changelog.i')
533 535 def changelog(self):
534 536 c = changelog.changelog(self.svfs)
535 537 if txnutil.mayhavepending(self.root):
536 538 c.readpending('00changelog.i.a')
537 539 return c
538 540
539 541 def _constructmanifest(self):
540 542 # This is a temporary function while we migrate from manifest to
541 543 # manifestlog. It allows bundlerepo and unionrepo to intercept the
542 544 # manifest creation.
543 545 return manifest.manifestrevlog(self.svfs)
544 546
545 547 @storecache('00manifest.i')
546 548 def manifestlog(self):
547 549 return manifest.manifestlog(self.svfs, self)
548 550
549 551 @repofilecache('dirstate')
550 552 def dirstate(self):
551 553 return dirstate.dirstate(self.vfs, self.ui, self.root,
552 554 self._dirstatevalidate)
553 555
554 556 def _dirstatevalidate(self, node):
555 557 try:
556 558 self.changelog.rev(node)
557 559 return node
558 560 except error.LookupError:
559 561 if not self._dirstatevalidatewarned:
560 562 self._dirstatevalidatewarned = True
561 563 self.ui.warn(_("warning: ignoring unknown"
562 564 " working parent %s!\n") % short(node))
563 565 return nullid
564 566
565 567 def __getitem__(self, changeid):
566 568 if changeid is None or changeid == wdirrev:
567 569 return context.workingctx(self)
568 570 if isinstance(changeid, slice):
569 571 return [context.changectx(self, i)
570 572 for i in xrange(*changeid.indices(len(self)))
571 573 if i not in self.changelog.filteredrevs]
572 574 return context.changectx(self, changeid)
573 575
574 576 def __contains__(self, changeid):
575 577 try:
576 578 self[changeid]
577 579 return True
578 580 except error.RepoLookupError:
579 581 return False
580 582
581 583 def __nonzero__(self):
582 584 return True
583 585
584 586 def __len__(self):
585 587 return len(self.changelog)
586 588
587 589 def __iter__(self):
588 590 return iter(self.changelog)
589 591
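Taken together, the methods above let a repository be used like a read-only container of changesets. A brief sketch, assuming repo is an open localrepository:

    ctx = repo['tip']      # __getitem__ accepts a revision, node, or tag
    wctx = repo[None]      # None (or wdirrev) yields the working context
    'tip' in repo          # __contains__ swallows RepoLookupError
    n = len(repo)          # number of revisions in the changelog
    for rev in repo:       # __iter__ walks the changelog's revisions
        pass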
590 592 def revs(self, expr, *args):
591 593 '''Find revisions matching a revset.
592 594
593 595 The revset is specified as a string ``expr`` that may contain
594 596 %-formatting to escape certain types. See ``revsetlang.formatspec``.
595 597
596 598 Revset aliases from the configuration are not expanded. To expand
597 599 user aliases, consider calling ``scmutil.revrange()`` or
598 600 ``repo.anyrevs([expr], user=True)``.
599 601
600 602 Returns a revset.abstractsmartset, which is a list-like interface
601 603 that contains integer revisions.
602 604 '''
603 605 expr = revsetlang.formatspec(expr, *args)
604 606 m = revset.match(None, expr)
605 607 return m(self)
606 608
607 609 def set(self, expr, *args):
608 610 '''Find revisions matching a revset and emit changectx instances.
609 611
610 612 This is a convenience wrapper around ``revs()`` that iterates the
611 613 result and is a generator of changectx instances.
612 614
613 615 Revset aliases from the configuration are not expanded. To expand
614 616 user aliases, consider calling ``scmutil.revrange()``.
615 617 '''
616 618 for r in self.revs(expr, *args):
617 619 yield self[r]
618 620
619 621 def anyrevs(self, specs, user=False):
620 622 '''Find revisions matching one of the given revsets.
621 623
622 624 Revset aliases from the configuration are not expanded by default. To
623 625 expand user aliases, specify ``user=True``.
624 626 '''
625 627 if user:
626 628 m = revset.matchany(self.ui, specs, repo=self)
627 629 else:
628 630 m = revset.matchany(None, specs)
629 631 return m(self)
630 632
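The three revset entry points above differ mainly in how aliases are handled, as their docstrings note. A short usage sketch, assuming repo is an open localrepository (the revset strings are only illustrative):

    # revs(): yields integer revisions; %-escaping per revsetlang.formatspec
    for rev in repo.revs('ancestors(%s)', 'tip'):
        print(rev)

    # set(): the same query surface, but yields changectx instances
    for ctx in repo.set('heads(all())'):
        print(ctx.hex())

    # anyrevs(): matches any of several specs; user=True expands user aliases
    for rev in repo.anyrevs(['tip', '.^'], user=True):
        print(rev)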
631 633 def url(self):
632 634 return 'file:' + self.root
633 635
634 636 def hook(self, name, throw=False, **args):
635 637 """Call a hook, passing this repo instance.
636 638
637 639 This a convenience method to aid invoking hooks. Extensions likely
638 640 won't call this unless they have registered a custom hook or are
639 641 replacing code that is expected to call a hook.
640 642 """
641 643 return hook.hook(self.ui, self, name, throw, **args)
642 644
643 645 @unfilteredmethod
644 646 def _tag(self, names, node, message, local, user, date, extra=None,
645 647 editor=False):
646 648 if isinstance(names, str):
647 649 names = (names,)
648 650
649 651 branches = self.branchmap()
650 652 for name in names:
651 653 self.hook('pretag', throw=True, node=hex(node), tag=name,
652 654 local=local)
653 655 if name in branches:
654 656 self.ui.warn(_("warning: tag %s conflicts with existing"
655 657 " branch name\n") % name)
656 658
657 659 def writetags(fp, names, munge, prevtags):
658 660 fp.seek(0, 2)
659 661 if prevtags and prevtags[-1] != '\n':
660 662 fp.write('\n')
661 663 for name in names:
662 664 if munge:
663 665 m = munge(name)
664 666 else:
665 667 m = name
666 668
667 669 if (self._tagscache.tagtypes and
668 670 name in self._tagscache.tagtypes):
669 671 old = self.tags().get(name, nullid)
670 672 fp.write('%s %s\n' % (hex(old), m))
671 673 fp.write('%s %s\n' % (hex(node), m))
672 674 fp.close()
673 675
674 676 prevtags = ''
675 677 if local:
676 678 try:
677 679 fp = self.vfs('localtags', 'r+')
678 680 except IOError:
679 681 fp = self.vfs('localtags', 'a')
680 682 else:
681 683 prevtags = fp.read()
682 684
683 685 # local tags are stored in the current charset
684 686 writetags(fp, names, None, prevtags)
685 687 for name in names:
686 688 self.hook('tag', node=hex(node), tag=name, local=local)
687 689 return
688 690
689 691 try:
690 692 fp = self.wfile('.hgtags', 'rb+')
691 693 except IOError as e:
692 694 if e.errno != errno.ENOENT:
693 695 raise
694 696 fp = self.wfile('.hgtags', 'ab')
695 697 else:
696 698 prevtags = fp.read()
697 699
698 700 # committed tags are stored in UTF-8
699 701 writetags(fp, names, encoding.fromlocal, prevtags)
700 702
701 703 fp.close()
702 704
703 705 self.invalidatecaches()
704 706
705 707 if '.hgtags' not in self.dirstate:
706 708 self[None].add(['.hgtags'])
707 709
708 710 m = matchmod.exact(self.root, '', ['.hgtags'])
709 711 tagnode = self.commit(message, user, date, extra=extra, match=m,
710 712 editor=editor)
711 713
712 714 for name in names:
713 715 self.hook('tag', node=hex(node), tag=name, local=local)
714 716
715 717 return tagnode
716 718
717 719 def tag(self, names, node, message, local, user, date, editor=False):
718 720 '''tag a revision with one or more symbolic names.
719 721
720 722 names is a list of strings or, when adding a single tag, names may be a
721 723 string.
722 724
723 725 if local is True, the tags are stored in a per-repository file.
724 726 otherwise, they are stored in the .hgtags file, and a new
725 727 changeset is committed with the change.
726 728
727 729 keyword arguments:
728 730
729 731 local: whether to store tags in non-version-controlled file
730 732 (default False)
731 733
732 734 message: commit message to use if committing
733 735
734 736 user: name of user to use if committing
735 737
736 738 date: date tuple to use if committing'''
737 739
738 740 if not local:
739 741 m = matchmod.exact(self.root, '', ['.hgtags'])
740 742 if any(self.status(match=m, unknown=True, ignored=True)):
741 743 raise error.Abort(_('working copy of .hgtags is changed'),
742 744 hint=_('please commit .hgtags manually'))
743 745
744 746 self.tags() # instantiate the cache
745 747 self._tag(names, node, message, local, user, date, editor=editor)
746 748
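A sketch of the tagging API described above, with hypothetical values (arguments follow the signature of tag()):

    node = repo['tip'].node()
    # global tag: writes .hgtags and commits a new changeset
    repo.tag('v1.0', node, 'Added tag v1.0', local=False, user=None, date=None)
    # local tag: recorded only in .hg/localtags, no commit
    repo.tag('v1.0-local', node, '', local=True, user=None, date=None)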
747 749 @filteredpropertycache
748 750 def _tagscache(self):
749 751 '''Returns a tagscache object that contains various tags related
750 752 caches.'''
751 753
752 754 # This simplifies its cache management by having one decorated
753 755 # function (this one) and the rest simply fetch things from it.
754 756 class tagscache(object):
755 757 def __init__(self):
756 758 # These two define the set of tags for this repository. tags
757 759 # maps tag name to node; tagtypes maps tag name to 'global' or
758 760 # 'local'. (Global tags are defined by .hgtags across all
759 761 # heads, and local tags are defined in .hg/localtags.)
760 762 # They constitute the in-memory cache of tags.
761 763 self.tags = self.tagtypes = None
762 764
763 765 self.nodetagscache = self.tagslist = None
764 766
765 767 cache = tagscache()
766 768 cache.tags, cache.tagtypes = self._findtags()
767 769
768 770 return cache
769 771
770 772 def tags(self):
771 773 '''return a mapping of tag to node'''
772 774 t = {}
773 775 if self.changelog.filteredrevs:
774 776 tags, tt = self._findtags()
775 777 else:
776 778 tags = self._tagscache.tags
777 779 for k, v in tags.iteritems():
778 780 try:
779 781 # ignore tags to unknown nodes
780 782 self.changelog.rev(v)
781 783 t[k] = v
782 784 except (error.LookupError, ValueError):
783 785 pass
784 786 return t
785 787
786 788 def _findtags(self):
787 789 '''Do the hard work of finding tags. Return a pair of dicts
788 790 (tags, tagtypes) where tags maps tag name to node, and tagtypes
789 791 maps tag name to a string like \'global\' or \'local\'.
790 792 Subclasses or extensions are free to add their own tags, but
791 793 should be aware that the returned dicts will be retained for the
792 794 duration of the localrepo object.'''
793 795
794 796 # XXX what tagtype should subclasses/extensions use? Currently
795 797 # mq and bookmarks add tags, but do not set the tagtype at all.
796 798 # Should each extension invent its own tag type? Should there
797 799 # be one tagtype for all such "virtual" tags? Or is the status
798 800 # quo fine?
799 801
800 802 alltags = {} # map tag name to (node, hist)
801 803 tagtypes = {}
802 804
803 805 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
804 806 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
805 807
806 808 # Build the return dicts. Have to re-encode tag names because
807 809 # the tags module always uses UTF-8 (in order not to lose info
808 810 # writing to the cache), but the rest of Mercurial wants them in
809 811 # local encoding.
810 812 tags = {}
811 813 for (name, (node, hist)) in alltags.iteritems():
812 814 if node != nullid:
813 815 tags[encoding.tolocal(name)] = node
814 816 tags['tip'] = self.changelog.tip()
815 817 tagtypes = dict([(encoding.tolocal(name), value)
816 818 for (name, value) in tagtypes.iteritems()])
817 819 return (tags, tagtypes)
818 820
819 821 def tagtype(self, tagname):
820 822 '''
821 823 return the type of the given tag. result can be:
822 824
823 825 'local' : a local tag
824 826 'global' : a global tag
825 827 None : tag does not exist
826 828 '''
827 829
828 830 return self._tagscache.tagtypes.get(tagname)
829 831
830 832 def tagslist(self):
831 833 '''return a list of tags ordered by revision'''
832 834 if not self._tagscache.tagslist:
833 835 l = []
834 836 for t, n in self.tags().iteritems():
835 837 l.append((self.changelog.rev(n), t, n))
836 838 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
837 839
838 840 return self._tagscache.tagslist
839 841
840 842 def nodetags(self, node):
841 843 '''return the tags associated with a node'''
842 844 if not self._tagscache.nodetagscache:
843 845 nodetagscache = {}
844 846 for t, n in self._tagscache.tags.iteritems():
845 847 nodetagscache.setdefault(n, []).append(t)
846 848 for tags in nodetagscache.itervalues():
847 849 tags.sort()
848 850 self._tagscache.nodetagscache = nodetagscache
849 851 return self._tagscache.nodetagscache.get(node, [])
850 852
851 853 def nodebookmarks(self, node):
852 854 """return the list of bookmarks pointing to the specified node"""
853 855 marks = []
854 856 for bookmark, n in self._bookmarks.iteritems():
855 857 if n == node:
856 858 marks.append(bookmark)
857 859 return sorted(marks)
858 860
859 861 def branchmap(self):
860 862 '''returns a dictionary {branch: [branchheads]} with branchheads
861 863 ordered by increasing revision number'''
862 864 branchmap.updatecache(self)
863 865 return self._branchcaches[self.filtername]
864 866
865 867 @unfilteredmethod
866 868 def revbranchcache(self):
867 869 if not self._revbranchcache:
868 870 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
869 871 return self._revbranchcache
870 872
871 873 def branchtip(self, branch, ignoremissing=False):
872 874 '''return the tip node for a given branch
873 875
874 876 If ignoremissing is True, then this method will not raise an error.
875 877 This is helpful for callers that only expect None for a missing branch
876 878 (e.g. namespace).
877 879
878 880 '''
879 881 try:
880 882 return self.branchmap().branchtip(branch)
881 883 except KeyError:
882 884 if not ignoremissing:
883 885 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
884 886 else:
885 887 pass
886 888
887 889 def lookup(self, key):
888 890 return self[key].node()
889 891
890 892 def lookupbranch(self, key, remote=None):
891 893 repo = remote or self
892 894 if key in repo.branchmap():
893 895 return key
894 896
895 897 repo = (remote and remote.local()) and remote or self
896 898 return repo[key].branch()
897 899
898 900 def known(self, nodes):
899 901 cl = self.changelog
900 902 nm = cl.nodemap
901 903 filtered = cl.filteredrevs
902 904 result = []
903 905 for n in nodes:
904 906 r = nm.get(n)
905 907 resp = not (r is None or r in filtered)
906 908 result.append(resp)
907 909 return result
908 910
909 911 def local(self):
910 912 return self
911 913
912 914 def publishing(self):
913 915 # it's safe (and desirable) to trust the publish flag unconditionally
914 916 # so that we don't finalize changes shared between users via ssh or nfs
915 917 return self.ui.configbool('phases', 'publish', True, untrusted=True)
916 918
917 919 def cancopy(self):
918 920 # so statichttprepo's override of local() works
919 921 if not self.local():
920 922 return False
921 923 if not self.publishing():
922 924 return True
923 925 # if publishing we can't copy if there is filtered content
924 926 return not self.filtered('visible').changelog.filteredrevs
925 927
926 928 def shared(self):
927 929 '''the type of shared repository (None if not shared)'''
928 930 if self.sharedpath != self.path:
929 931 return 'store'
930 932 return None
931 933
932 934 def join(self, f, *insidef):
933 935 return self.vfs.join(os.path.join(f, *insidef))
934 936
935 937 def wjoin(self, f, *insidef):
936 938 return self.vfs.reljoin(self.root, f, *insidef)
937 939
938 940 def file(self, f):
939 941 if f[0] == '/':
940 942 f = f[1:]
941 943 return filelog.filelog(self.svfs, f)
942 944
943 945 def changectx(self, changeid):
944 946 return self[changeid]
945 947
946 948 def setparents(self, p1, p2=nullid):
947 949 self.dirstate.beginparentchange()
948 950 copies = self.dirstate.setparents(p1, p2)
949 951 pctx = self[p1]
950 952 if copies:
951 953 # Adjust copy records, the dirstate cannot do it, it
952 954 # requires access to parents manifests. Preserve them
953 955 # only for entries added to first parent.
954 956 for f in copies:
955 957 if f not in pctx and copies[f] in pctx:
956 958 self.dirstate.copy(copies[f], f)
957 959 if p2 == nullid:
958 960 for f, s in sorted(self.dirstate.copies().items()):
959 961 if f not in pctx and s not in pctx:
960 962 self.dirstate.copy(None, f)
961 963 self.dirstate.endparentchange()
962 964
963 965 def filectx(self, path, changeid=None, fileid=None):
964 966 """changeid can be a changeset revision, node, or tag.
965 967 fileid can be a file revision or node."""
966 968 return context.filectx(self, path, changeid, fileid)
967 969
968 970 def getcwd(self):
969 971 return self.dirstate.getcwd()
970 972
971 973 def pathto(self, f, cwd=None):
972 974 return self.dirstate.pathto(f, cwd)
973 975
974 976 def wfile(self, f, mode='r'):
975 977 return self.wvfs(f, mode)
976 978
977 979 def _link(self, f):
978 980 return self.wvfs.islink(f)
979 981
980 982 def _loadfilter(self, filter):
981 983 if filter not in self.filterpats:
982 984 l = []
983 985 for pat, cmd in self.ui.configitems(filter):
984 986 if cmd == '!':
985 987 continue
986 988 mf = matchmod.match(self.root, '', [pat])
987 989 fn = None
988 990 params = cmd
989 991 for name, filterfn in self._datafilters.iteritems():
990 992 if cmd.startswith(name):
991 993 fn = filterfn
992 994 params = cmd[len(name):].lstrip()
993 995 break
994 996 if not fn:
995 997 fn = lambda s, c, **kwargs: util.filter(s, c)
996 998 # Wrap old filters not supporting keyword arguments
997 999 if not inspect.getargspec(fn)[2]:
998 1000 oldfn = fn
999 1001 fn = lambda s, c, **kwargs: oldfn(s, c)
1000 1002 l.append((mf, fn, params))
1001 1003 self.filterpats[filter] = l
1002 1004 return self.filterpats[filter]
1003 1005
1004 1006 def _filter(self, filterpats, filename, data):
1005 1007 for mf, fn, cmd in filterpats:
1006 1008 if mf(filename):
1007 1009 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1008 1010 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1009 1011 break
1010 1012
1011 1013 return data
1012 1014
1013 1015 @unfilteredpropertycache
1014 1016 def _encodefilterpats(self):
1015 1017 return self._loadfilter('encode')
1016 1018
1017 1019 @unfilteredpropertycache
1018 1020 def _decodefilterpats(self):
1019 1021 return self._loadfilter('decode')
1020 1022
1021 1023 def adddatafilter(self, name, filter):
1022 1024 self._datafilters[name] = filter
1023 1025
1024 1026 def wread(self, filename):
1025 1027 if self._link(filename):
1026 1028 data = self.wvfs.readlink(filename)
1027 1029 else:
1028 1030 data = self.wvfs.read(filename)
1029 1031 return self._filter(self._encodefilterpats, filename, data)
1030 1032
1031 1033 def wwrite(self, filename, data, flags, backgroundclose=False):
1032 1034 """write ``data`` into ``filename`` in the working directory
1033 1035
1034 1036 This returns the length of the written (possibly decoded) data.
1035 1037 """
1036 1038 data = self._filter(self._decodefilterpats, filename, data)
1037 1039 if 'l' in flags:
1038 1040 self.wvfs.symlink(data, filename)
1039 1041 else:
1040 1042 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1041 1043 if 'x' in flags:
1042 1044 self.wvfs.setflags(filename, False, True)
1043 1045 return len(data)
1044 1046
1045 1047 def wwritedata(self, filename, data):
1046 1048 return self._filter(self._decodefilterpats, filename, data)
1047 1049
1048 1050 def currenttransaction(self):
1049 1051 """return the current transaction or None if non exists"""
1050 1052 if self._transref:
1051 1053 tr = self._transref()
1052 1054 else:
1053 1055 tr = None
1054 1056
1055 1057 if tr and tr.running():
1056 1058 return tr
1057 1059 return None
1058 1060
1059 1061 def transaction(self, desc, report=None):
1060 1062 if (self.ui.configbool('devel', 'all-warnings')
1061 1063 or self.ui.configbool('devel', 'check-locks')):
1062 1064 if self._currentlock(self._lockref) is None:
1063 1065 raise error.ProgrammingError('transaction requires locking')
1064 1066 tr = self.currenttransaction()
1065 1067 if tr is not None:
1066 1068 return tr.nest()
1067 1069
1068 1070 # abort here if the journal already exists
1069 1071 if self.svfs.exists("journal"):
1070 1072 raise error.RepoError(
1071 1073 _("abandoned transaction found"),
1072 1074 hint=_("run 'hg recover' to clean up transaction"))
1073 1075
1074 1076 idbase = "%.40f#%f" % (random.random(), time.time())
1075 1077 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1076 1078 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1077 1079
1078 1080 self._writejournal(desc)
1079 1081 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1080 1082 if report:
1081 1083 rp = report
1082 1084 else:
1083 1085 rp = self.ui.warn
1084 1086 vfsmap = {'plain': self.vfs} # root of .hg/
1085 1087 # we must avoid cyclic reference between repo and transaction.
1086 1088 reporef = weakref.ref(self)
1087 1089 def validate(tr):
1088 1090 """will run pre-closing hooks"""
1089 1091 reporef().hook('pretxnclose', throw=True,
1090 1092 txnname=desc, **tr.hookargs)
1091 1093 def releasefn(tr, success):
1092 1094 repo = reporef()
1093 1095 if success:
1094 1096 # this should be explicitly invoked here, because
1095 1097 # in-memory changes aren't written out at closing
1096 1098 # transaction, if tr.addfilegenerator (via
1097 1099 # dirstate.write or so) isn't invoked while
1098 1100 # transaction running
1099 1101 repo.dirstate.write(None)
1100 1102 else:
1101 1103 # discard all changes (including ones already written
1102 1104 # out) in this transaction
1103 1105 repo.dirstate.restorebackup(None, prefix='journal.')
1104 1106
1105 1107 repo.invalidate(clearfilecache=True)
1106 1108
1107 1109 tr = transaction.transaction(rp, self.svfs, vfsmap,
1108 1110 "journal",
1109 1111 "undo",
1110 1112 aftertrans(renames),
1111 1113 self.store.createmode,
1112 1114 validator=validate,
1113 1115 releasefn=releasefn)
1114 1116
1115 1117 tr.hookargs['txnid'] = txnid
1116 1118 # note: writing the fncache only during finalize means that the file is
1117 1119 # outdated when running hooks. As fncache is used for streaming clones,
1118 1120 # this is not expected to break anything that happens during the hooks.
1119 1121 tr.addfinalize('flush-fncache', self.store.write)
1120 1122 def txnclosehook(tr2):
1121 1123 """To be run if transaction is successful, will schedule a hook run
1122 1124 """
1123 1125 # Don't reference tr2 in hook() so we don't hold a reference.
1124 1126 # This reduces memory consumption when there are multiple
1125 1127 # transactions per lock. This can likely go away if issue5045
1126 1128 # fixes the function accumulation.
1127 1129 hookargs = tr2.hookargs
1128 1130
1129 1131 def hook():
1130 1132 reporef().hook('txnclose', throw=False, txnname=desc,
1131 1133 **hookargs)
1132 1134 reporef()._afterlock(hook)
1133 1135 tr.addfinalize('txnclose-hook', txnclosehook)
1134 1136 def txnaborthook(tr2):
1135 1137 """To be run if transaction is aborted
1136 1138 """
1137 1139 reporef().hook('txnabort', throw=False, txnname=desc,
1138 1140 **tr2.hookargs)
1139 1141 tr.addabort('txnabort-hook', txnaborthook)
1140 1142 # avoid eager cache invalidation. in-memory data should be identical
1141 1143 # to stored data if transaction has no error.
1142 1144 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1143 1145 self._transref = weakref.ref(tr)
1144 1146 return tr
1145 1147
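A sketch of the locking and transaction contract documented around this code: take 'wlock' before 'lock', and hold 'lock' before opening a transaction. It assumes the lock objects support the with statement, which they do in this era of the codebase:

    with repo.wlock(), repo.lock():      # always 'wlock' first
        tr = repo.transaction('my-operation')
        try:
            # ... mutate the store; the journal makes failures recoverable ...
            tr.close()                   # triggers the pretxnclose/txnclose hooks
        finally:
            tr.release()                 # rolls back if close() never ran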
1146 1148 def _journalfiles(self):
1147 1149 return ((self.svfs, 'journal'),
1148 1150 (self.vfs, 'journal.dirstate'),
1149 1151 (self.vfs, 'journal.branch'),
1150 1152 (self.vfs, 'journal.desc'),
1151 1153 (self.vfs, 'journal.bookmarks'),
1152 1154 (self.svfs, 'journal.phaseroots'))
1153 1155
1154 1156 def undofiles(self):
1155 1157 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1156 1158
1157 1159 def _writejournal(self, desc):
1158 1160 self.dirstate.savebackup(None, prefix='journal.')
1159 1161 self.vfs.write("journal.branch",
1160 1162 encoding.fromlocal(self.dirstate.branch()))
1161 1163 self.vfs.write("journal.desc",
1162 1164 "%d\n%s\n" % (len(self), desc))
1163 1165 self.vfs.write("journal.bookmarks",
1164 1166 self.vfs.tryread("bookmarks"))
1165 1167 self.svfs.write("journal.phaseroots",
1166 1168 self.svfs.tryread("phaseroots"))
1167 1169
1168 1170 def recover(self):
1169 1171 with self.lock():
1170 1172 if self.svfs.exists("journal"):
1171 1173 self.ui.status(_("rolling back interrupted transaction\n"))
1172 1174 vfsmap = {'': self.svfs,
1173 1175 'plain': self.vfs,}
1174 1176 transaction.rollback(self.svfs, vfsmap, "journal",
1175 1177 self.ui.warn)
1176 1178 self.invalidate()
1177 1179 return True
1178 1180 else:
1179 1181 self.ui.warn(_("no interrupted transaction available\n"))
1180 1182 return False
1181 1183
1182 1184 def rollback(self, dryrun=False, force=False):
1183 1185 wlock = lock = dsguard = None
1184 1186 try:
1185 1187 wlock = self.wlock()
1186 1188 lock = self.lock()
1187 1189 if self.svfs.exists("undo"):
1188 1190 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1189 1191
1190 1192 return self._rollback(dryrun, force, dsguard)
1191 1193 else:
1192 1194 self.ui.warn(_("no rollback information available\n"))
1193 1195 return 1
1194 1196 finally:
1195 1197 release(dsguard, lock, wlock)
1196 1198
1197 1199 @unfilteredmethod # Until we get smarter cache management
1198 1200 def _rollback(self, dryrun, force, dsguard):
1199 1201 ui = self.ui
1200 1202 try:
1201 1203 args = self.vfs.read('undo.desc').splitlines()
1202 1204 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1203 1205 if len(args) >= 3:
1204 1206 detail = args[2]
1205 1207 oldtip = oldlen - 1
1206 1208
1207 1209 if detail and ui.verbose:
1208 1210 msg = (_('repository tip rolled back to revision %s'
1209 1211 ' (undo %s: %s)\n')
1210 1212 % (oldtip, desc, detail))
1211 1213 else:
1212 1214 msg = (_('repository tip rolled back to revision %s'
1213 1215 ' (undo %s)\n')
1214 1216 % (oldtip, desc))
1215 1217 except IOError:
1216 1218 msg = _('rolling back unknown transaction\n')
1217 1219 desc = None
1218 1220
1219 1221 if not force and self['.'] != self['tip'] and desc == 'commit':
1220 1222 raise error.Abort(
1221 1223 _('rollback of last commit while not checked out '
1222 1224 'may lose data'), hint=_('use -f to force'))
1223 1225
1224 1226 ui.status(msg)
1225 1227 if dryrun:
1226 1228 return 0
1227 1229
1228 1230 parents = self.dirstate.parents()
1229 1231 self.destroying()
1230 1232 vfsmap = {'plain': self.vfs, '': self.svfs}
1231 1233 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1232 1234 if self.vfs.exists('undo.bookmarks'):
1233 1235 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1234 1236 if self.svfs.exists('undo.phaseroots'):
1235 1237 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1236 1238 self.invalidate()
1237 1239
1238 1240 parentgone = (parents[0] not in self.changelog.nodemap or
1239 1241 parents[1] not in self.changelog.nodemap)
1240 1242 if parentgone:
1241 1243 # prevent dirstateguard from overwriting already restored one
1242 1244 dsguard.close()
1243 1245
1244 1246 self.dirstate.restorebackup(None, prefix='undo.')
1245 1247 try:
1246 1248 branch = self.vfs.read('undo.branch')
1247 1249 self.dirstate.setbranch(encoding.tolocal(branch))
1248 1250 except IOError:
1249 1251 ui.warn(_('named branch could not be reset: '
1250 1252 'current branch is still \'%s\'\n')
1251 1253 % self.dirstate.branch())
1252 1254
1253 1255 parents = tuple([p.rev() for p in self[None].parents()])
1254 1256 if len(parents) > 1:
1255 1257 ui.status(_('working directory now based on '
1256 1258 'revisions %d and %d\n') % parents)
1257 1259 else:
1258 1260 ui.status(_('working directory now based on '
1259 1261 'revision %d\n') % parents)
1260 1262 mergemod.mergestate.clean(self, self['.'].node())
1261 1263
1262 1264 # TODO: if we know which new heads may result from this rollback, pass
1263 1265 # them to destroy(), which will prevent the branchhead cache from being
1264 1266 # invalidated.
1265 1267 self.destroyed()
1266 1268 return 0
1267 1269
1268 1270 def invalidatecaches(self):
1269 1271
1270 1272 if '_tagscache' in vars(self):
1271 1273 # can't use delattr on proxy
1272 1274 del self.__dict__['_tagscache']
1273 1275
1274 1276 self.unfiltered()._branchcaches.clear()
1275 1277 self.invalidatevolatilesets()
1276 1278
1277 1279 def invalidatevolatilesets(self):
1278 1280 self.filteredrevcache.clear()
1279 1281 obsolete.clearobscaches(self)
1280 1282
1281 1283 def invalidatedirstate(self):
1282 1284 '''Invalidates the dirstate, causing the next call to dirstate
1283 1285 to check if it was modified since the last time it was read,
1284 1286 rereading it if it has.
1285 1287
1286 1288 This differs from dirstate.invalidate() in that it doesn't always
1287 1289 reread the dirstate. Use dirstate.invalidate() if you want to
1288 1290 explicitly read the dirstate again (i.e. restoring it to a previous
1289 1291 known good state).'''
1290 1292 if hasunfilteredcache(self, 'dirstate'):
1291 1293 for k in self.dirstate._filecache:
1292 1294 try:
1293 1295 delattr(self.dirstate, k)
1294 1296 except AttributeError:
1295 1297 pass
1296 1298 delattr(self.unfiltered(), 'dirstate')
1297 1299
1298 1300 def invalidate(self, clearfilecache=False):
1299 1301 '''Invalidates both store and non-store parts other than dirstate
1300 1302
1301 1303 If a transaction is running, invalidation of store is omitted,
1302 1304 because discarding in-memory changes might cause inconsistency
1303 1305 (e.g. incomplete fncache causes unintentional failure, but
1304 1306 redundant one doesn't).
1305 1307 '''
1306 1308 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1307 1309 for k in self._filecache.keys():
1308 1310 # dirstate is invalidated separately in invalidatedirstate()
1309 1311 if k == 'dirstate':
1310 1312 continue
1311 1313
1312 1314 if clearfilecache:
1313 1315 del self._filecache[k]
1314 1316 try:
1315 1317 delattr(unfiltered, k)
1316 1318 except AttributeError:
1317 1319 pass
1318 1320 self.invalidatecaches()
1319 1321 if not self.currenttransaction():
1320 1322 # TODO: Changing contents of store outside transaction
1321 1323 # causes inconsistency. We should make in-memory store
1322 1324 # changes detectable, and abort if changed.
1323 1325 self.store.invalidatecaches()
1324 1326
1325 1327 def invalidateall(self):
1326 1328 '''Fully invalidates both store and non-store parts, causing the
1327 1329 subsequent operation to reread any outside changes.'''
1328 1330 # extension should hook this to invalidate its caches
1329 1331 self.invalidate()
1330 1332 self.invalidatedirstate()
1331 1333
1332 1334 @unfilteredmethod
1333 1335 def _refreshfilecachestats(self, tr):
1334 1336 """Reload stats of cached files so that they are flagged as valid"""
1335 1337 for k, ce in self._filecache.items():
1336 1338 if k == 'dirstate' or k not in self.__dict__:
1337 1339 continue
1338 1340 ce.refresh()
1339 1341
1340 1342 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1341 1343 inheritchecker=None, parentenvvar=None):
1342 1344 parentlock = None
1343 1345 # the contents of parentenvvar are used by the underlying lock to
1344 1346 # determine whether it can be inherited
1345 1347 if parentenvvar is not None:
1346 1348 parentlock = encoding.environ.get(parentenvvar)
1347 1349 try:
1348 1350 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1349 1351 acquirefn=acquirefn, desc=desc,
1350 1352 inheritchecker=inheritchecker,
1351 1353 parentlock=parentlock)
1352 1354 except error.LockHeld as inst:
1353 1355 if not wait:
1354 1356 raise
1355 1357 # show more details for new-style locks
1356 1358 if ':' in inst.locker:
1357 1359 host, pid = inst.locker.split(":", 1)
1358 1360 self.ui.warn(
1359 1361 _("waiting for lock on %s held by process %r "
1360 1362 "on host %r\n") % (desc, pid, host))
1361 1363 else:
1362 1364 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1363 1365 (desc, inst.locker))
1364 1366 # default to 600 seconds timeout
1365 1367 l = lockmod.lock(vfs, lockname,
1366 1368 int(self.ui.config("ui", "timeout", "600")),
1367 1369 releasefn=releasefn, acquirefn=acquirefn,
1368 1370 desc=desc)
1369 1371 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1370 1372 return l
1371 1373
1372 1374 def _afterlock(self, callback):
1373 1375 """add a callback to be run when the repository is fully unlocked
1374 1376
1375 1377 The callback will be executed when the outermost lock is released
1376 1378 (with wlock being higher level than 'lock')."""
1377 1379 for ref in (self._wlockref, self._lockref):
1378 1380 l = ref and ref()
1379 1381 if l and l.held:
1380 1382 l.postrelease.append(callback)
1381 1383 break
1382 1384 else: # no lock has been found.
1383 1385 callback()
1384 1386
1385 1387 def lock(self, wait=True):
1386 1388 '''Lock the repository store (.hg/store) and return a weak reference
1387 1389 to the lock. Use this before modifying the store (e.g. committing or
1388 1390 stripping). If you are opening a transaction, get a lock as well.
1389 1391
1390 1392 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1391 1393 'wlock' first to avoid a deadlock hazard.'''
1392 1394 l = self._currentlock(self._lockref)
1393 1395 if l is not None:
1394 1396 l.lock()
1395 1397 return l
1396 1398
1397 1399 l = self._lock(self.svfs, "lock", wait, None,
1398 1400 self.invalidate, _('repository %s') % self.origroot)
1399 1401 self._lockref = weakref.ref(l)
1400 1402 return l
1401 1403
1402 1404 def _wlockchecktransaction(self):
1403 1405 if self.currenttransaction() is not None:
1404 1406 raise error.LockInheritanceContractViolation(
1405 1407 'wlock cannot be inherited in the middle of a transaction')
1406 1408
1407 1409 def wlock(self, wait=True):
1408 1410 '''Lock the non-store parts of the repository (everything under
1409 1411 .hg except .hg/store) and return a weak reference to the lock.
1410 1412
1411 1413 Use this before modifying files in .hg.
1412 1414
1413 1415 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1414 1416 'wlock' first to avoid a deadlock hazard.'''
1415 1417 l = self._wlockref and self._wlockref()
1416 1418 if l is not None and l.held:
1417 1419 l.lock()
1418 1420 return l
1419 1421
1420 1422 # We do not need to check for non-waiting lock acquisition. Such
1421 1423 # acquisition would not cause a deadlock, as it would just fail.
1422 1424 if wait and (self.ui.configbool('devel', 'all-warnings')
1423 1425 or self.ui.configbool('devel', 'check-locks')):
1424 1426 if self._currentlock(self._lockref) is not None:
1425 1427 self.ui.develwarn('"wlock" acquired after "lock"')
1426 1428
1427 1429 def unlock():
1428 1430 if self.dirstate.pendingparentchange():
1429 1431 self.dirstate.invalidate()
1430 1432 else:
1431 1433 self.dirstate.write(None)
1432 1434
1433 1435 self._filecache['dirstate'].refresh()
1434 1436
1435 1437 l = self._lock(self.vfs, "wlock", wait, unlock,
1436 1438 self.invalidatedirstate, _('working directory of %s') %
1437 1439 self.origroot,
1438 1440 inheritchecker=self._wlockchecktransaction,
1439 1441 parentenvvar='HG_WLOCK_LOCKER')
1440 1442 self._wlockref = weakref.ref(l)
1441 1443 return l
1442 1444
1443 1445 def _currentlock(self, lockref):
1444 1446 """Returns the lock if it's held, or None if it's not."""
1445 1447 if lockref is None:
1446 1448 return None
1447 1449 l = lockref()
1448 1450 if l is None or not l.held:
1449 1451 return None
1450 1452 return l
1451 1453
1452 1454 def currentwlock(self):
1453 1455 """Returns the wlock if it's held, or None if it's not."""
1454 1456 return self._currentlock(self._wlockref)
1455 1457
1456 1458 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1457 1459 """
1458 1460 commit an individual file as part of a larger transaction
1459 1461 """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
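        # Only exact file names and path prefixes are validated below; other
        # patterns (e.g. globs) are allowed to match nothing.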
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
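        # Rough hook sequence for a successful commit, as wired up below:
        # 'precommit' -> 'pretxncommit' (fired from commitctx) -> transaction
        # close -> 'commit' (deferred until the locks are released).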
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """
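        # Three cases below: reuse the manifest node carried by the context,
        # write a new manifest when files changed, or fall back to the first
        # parent's manifest for a changelog-only commit.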

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes.
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
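        # For each starting node, follow first parents until a merge or the
        # root is reached, reporting (start, branchpoint, p1, p2) tuples.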
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
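        # For each (top, bottom) pair, walk the first-parent chain from top
        # towards bottom and record the nodes found at exponentially growing
        # distances (1, 2, 4, 8, ...) from top, keeping the result short
        # even for long chains.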
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
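        # The 'prepushkey' hook may abort the operation; the 'pushkey' hook
        # itself only runs once the current lock is released (via
        # self._afterlock below).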
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                # if src and dest refer to the same file, vfs.rename is a
                # no-op, leaving both src and dest on disk.  delete dest to
                # make sure the rename couldn't be such a no-op.
                vfs.unlink(dest)
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
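
# For example, undoname('.hg/journal.dirstate') maps to '.hg/undo.dirstate'.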

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
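
# With out-of-the-box settings this typically yields something like
# set(['revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta']),
# assuming the default zlib compression engine and a configuration where
# generaldelta is enabled (the default in recent versions).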