localrepo: move filtername to __init__...
Gregory Szorc
r32730:b8ff7d0f default
@@ -1,2075 +1,2075 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 vfs as vfsmod,
63 63 )
64 64
65 65 release = lockmod.release
66 66 urlerr = util.urlerr
67 67 urlreq = util.urlreq
68 68
69 69 class repofilecache(scmutil.filecache):
70 70 """All filecache usage on repo is done for logic that should be unfiltered
71 71 """
72 72
73 73 def join(self, obj, fname):
74 74 return obj.vfs.join(fname)
75 75 def __get__(self, repo, type=None):
76 76 if repo is None:
77 77 return self
78 78 return super(repofilecache, self).__get__(repo.unfiltered(), type)
79 79 def __set__(self, repo, value):
80 80 return super(repofilecache, self).__set__(repo.unfiltered(), value)
81 81 def __delete__(self, repo):
82 82 return super(repofilecache, self).__delete__(repo.unfiltered())
83 83
84 84 class storecache(repofilecache):
85 85 """filecache for files in the store"""
86 86 def join(self, obj, fname):
87 87 return obj.sjoin(fname)
88 88
89 89 class unfilteredpropertycache(util.propertycache):
90 90 """propertycache that applies to the unfiltered repo only"""
91 91
92 92 def __get__(self, repo, type=None):
93 93 unfi = repo.unfiltered()
94 94 if unfi is repo:
95 95 return super(unfilteredpropertycache, self).__get__(unfi)
96 96 return getattr(unfi, self.name)
97 97
98 98 class filteredpropertycache(util.propertycache):
99 99 """propertycache that must take filtering into account"""
100 100
101 101 def cachevalue(self, obj, value):
102 102 object.__setattr__(obj, self.name, value)
103 103
104 104
105 105 def hasunfilteredcache(repo, name):
106 106 """check if a repo has an unfilteredpropertycache value for <name>"""
107 107 return name in vars(repo.unfiltered())
108 108
109 109 def unfilteredmethod(orig):
110 110 """decorate a method that always needs to run on the unfiltered version"""
111 111 def wrapper(repo, *args, **kwargs):
112 112 return orig(repo.unfiltered(), *args, **kwargs)
113 113 return wrapper
114 114
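# A usage sketch for the helpers above, as an extension might apply them
# (`myrepo`, `myscan` and `expensivecomputation` are hypothetical names):
#
#     class myrepo(localrepository):
#         @unfilteredpropertycache
#         def _myexpensivecache(self):
#             # computed once against the unfiltered repo, then reused
#             return expensivecomputation(self)
#
#         @unfilteredmethod
#         def myscan(self):
#             # `self` is guaranteed to be the unfiltered view here
#             return len(self)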
115 115 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
116 116 'unbundle'}
117 117 legacycaps = moderncaps.union({'changegroupsubset'})
118 118
119 119 class localpeer(peer.peerrepository):
120 120 '''peer for a local repo; reflects only the most recent API'''
121 121
122 122 def __init__(self, repo, caps=None):
123 123 if caps is None:
124 124 caps = moderncaps.copy()
125 125 peer.peerrepository.__init__(self)
126 126 self._repo = repo.filtered('served')
127 127 self.ui = repo.ui
128 128 self._caps = repo._restrictcapabilities(caps)
129 129 self.requirements = repo.requirements
130 130 self.supportedformats = repo.supportedformats
131 131
132 132 def close(self):
133 133 self._repo.close()
134 134
135 135 def _capabilities(self):
136 136 return self._caps
137 137
138 138 def local(self):
139 139 return self._repo
140 140
141 141 def canpush(self):
142 142 return True
143 143
144 144 def url(self):
145 145 return self._repo.url()
146 146
147 147 def lookup(self, key):
148 148 return self._repo.lookup(key)
149 149
150 150 def branchmap(self):
151 151 return self._repo.branchmap()
152 152
153 153 def heads(self):
154 154 return self._repo.heads()
155 155
156 156 def known(self, nodes):
157 157 return self._repo.known(nodes)
158 158
159 159 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
160 160 **kwargs):
161 161 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
162 162 common=common, bundlecaps=bundlecaps,
163 163 **kwargs)
164 164 cb = util.chunkbuffer(chunks)
165 165
166 166 if exchange.bundle2requested(bundlecaps):
167 167 # When requesting a bundle2, getbundle returns a stream to make the
168 168 # wire-level function happier. We need to build a proper object
169 169 # from it in the local peer.
170 170 return bundle2.getunbundler(self.ui, cb)
171 171 else:
172 172 return changegroup.getunbundler('01', cb, None)
173 173
174 174 # TODO We might want to move the next two calls into legacypeer and add
175 175 # unbundle instead.
176 176
177 177 def unbundle(self, cg, heads, url):
178 178 """apply a bundle on a repo
179 179
180 180 This function handles the repo locking itself."""
181 181 try:
182 182 try:
183 183 cg = exchange.readbundle(self.ui, cg, None)
184 184 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
185 185 if util.safehasattr(ret, 'getchunks'):
186 186 # This is a bundle20 object, turn it into an unbundler.
187 187 # This little dance should be dropped eventually when the
188 188 # API is finally improved.
189 189 stream = util.chunkbuffer(ret.getchunks())
190 190 ret = bundle2.getunbundler(self.ui, stream)
191 191 return ret
192 192 except Exception as exc:
193 193 # If the exception contains output salvaged from a bundle2
194 194 # reply, we need to make sure it is printed before continuing
195 195 # to fail. So we build a bundle2 with such output and consume
196 196 # it directly.
197 197 #
198 198 # This is not very elegant but allows a "simple" solution for
199 199 # issue4594
200 200 output = getattr(exc, '_bundle2salvagedoutput', ())
201 201 if output:
202 202 bundler = bundle2.bundle20(self._repo.ui)
203 203 for out in output:
204 204 bundler.addpart(out)
205 205 stream = util.chunkbuffer(bundler.getchunks())
206 206 b = bundle2.getunbundler(self.ui, stream)
207 207 bundle2.processbundle(self._repo, b)
208 208 raise
209 209 except error.PushRaced as exc:
210 210 raise error.ResponseError(_('push failed:'), str(exc))
211 211
212 212 def lock(self):
213 213 return self._repo.lock()
214 214
215 215 def addchangegroup(self, cg, source, url):
216 216 return cg.apply(self._repo, source, url)
217 217
218 218 def pushkey(self, namespace, key, old, new):
219 219 return self._repo.pushkey(namespace, key, old, new)
220 220
221 221 def listkeys(self, namespace):
222 222 return self._repo.listkeys(namespace)
223 223
224 224 def debugwireargs(self, one, two, three=None, four=None, five=None):
225 225 '''used to test argument passing over the wire'''
226 226 return "%s %s %s %s %s" % (one, two, three, four, five)
227 227
228 228 class locallegacypeer(localpeer):
229 229 '''peer extension which implements legacy methods too; used for tests with
230 230 restricted capabilities'''
231 231
232 232 def __init__(self, repo):
233 233 localpeer.__init__(self, repo, caps=legacycaps)
234 234
235 235 def branches(self, nodes):
236 236 return self._repo.branches(nodes)
237 237
238 238 def between(self, pairs):
239 239 return self._repo.between(pairs)
240 240
241 241 def changegroup(self, basenodes, source):
242 242 return changegroup.changegroup(self._repo, basenodes, source)
243 243
244 244 def changegroupsubset(self, bases, heads, source):
245 245 return changegroup.changegroupsubset(self._repo, bases, heads, source)
246 246
247 247 # Increment the sub-version when the revlog v2 format changes to lock out old
248 248 # clients.
249 249 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
250 250
251 251 class localrepository(object):
252 252
253 253 supportedformats = {
254 254 'revlogv1',
255 255 'generaldelta',
256 256 'treemanifest',
257 257 'manifestv2',
258 258 REVLOGV2_REQUIREMENT,
259 259 }
260 260 _basesupported = supportedformats | {
261 261 'store',
262 262 'fncache',
263 263 'shared',
264 264 'relshared',
265 265 'dotencode',
266 266 }
267 267 openerreqs = {
268 268 'revlogv1',
269 269 'generaldelta',
270 270 'treemanifest',
271 271 'manifestv2',
272 272 }
273 filtername = None
274 273
275 274 # a list of (ui, featureset) functions.
276 275 # only functions defined in modules of enabled extensions are invoked
277 276 featuresetupfuncs = set()
278 277
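# A registration sketch (`featuresetup` and 'myfeature' are hypothetical
# names chosen by an extension importing mercurial.localrepo):
#
#     def featuresetup(ui, features):
#         features.add('myfeature')
#
#     def uisetup(ui):
#         localrepo.localrepository.featuresetupfuncs.add(featuresetup)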
279 278 def __init__(self, baseui, path, create=False):
280 279 self.requirements = set()
280 self.filtername = None
281 281 # wvfs: rooted at the repository root, used to access the working copy
282 282 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
283 283 # vfs: rooted at .hg, used to access repo files outside of .hg/store
284 284 self.vfs = None
285 285 # svfs: usually rooted at .hg/store, used to access repository history
286 286 # If this is a shared repository, this vfs may point to another
287 287 # repository's .hg/store directory.
288 288 self.svfs = None
289 289 self.root = self.wvfs.base
290 290 self.path = self.wvfs.join(".hg")
291 291 self.origroot = path
292 292 self.auditor = pathutil.pathauditor(self.root, self._checknested)
293 293 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
294 294 realfs=False)
295 295 self.vfs = vfsmod.vfs(self.path)
296 296 self.baseui = baseui
297 297 self.ui = baseui.copy()
298 298 self.ui.copy = baseui.copy # prevent copying repo configuration
299 299 # A list of callbacks to shape the phase if no data were found.
300 300 # Callbacks are in the form: func(repo, roots) --> processed root.
301 301 # This list is to be filled by extensions during repo setup
302 302 self._phasedefaults = []
303 303 try:
304 304 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
305 305 self._loadextensions()
306 306 except IOError:
307 307 pass
308 308
309 309 if self.featuresetupfuncs:
310 310 self.supported = set(self._basesupported) # use private copy
311 311 extmods = set(m.__name__ for n, m
312 312 in extensions.extensions(self.ui))
313 313 for setupfunc in self.featuresetupfuncs:
314 314 if setupfunc.__module__ in extmods:
315 315 setupfunc(self.ui, self.supported)
316 316 else:
317 317 self.supported = self._basesupported
318 318 color.setup(self.ui)
319 319
320 320 # Add compression engines.
321 321 for name in util.compengines:
322 322 engine = util.compengines[name]
323 323 if engine.revlogheader():
324 324 self.supported.add('exp-compression-%s' % name)
325 325
326 326 if not self.vfs.isdir():
327 327 if create:
328 328 self.requirements = newreporequirements(self)
329 329
330 330 if not self.wvfs.exists():
331 331 self.wvfs.makedirs()
332 332 self.vfs.makedir(notindexed=True)
333 333
334 334 if 'store' in self.requirements:
335 335 self.vfs.mkdir("store")
336 336
337 337 # create an invalid changelog
338 338 self.vfs.append(
339 339 "00changelog.i",
340 340 '\0\0\0\2' # represents revlogv2
341 341 ' dummy changelog to prevent using the old repo layout'
342 342 )
343 343 else:
344 344 raise error.RepoError(_("repository %s not found") % path)
345 345 elif create:
346 346 raise error.RepoError(_("repository %s already exists") % path)
347 347 else:
348 348 try:
349 349 self.requirements = scmutil.readrequires(
350 350 self.vfs, self.supported)
351 351 except IOError as inst:
352 352 if inst.errno != errno.ENOENT:
353 353 raise
354 354
355 355 self.sharedpath = self.path
356 356 try:
357 357 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
358 358 if 'relshared' in self.requirements:
359 359 sharedpath = self.vfs.join(sharedpath)
360 360 vfs = vfsmod.vfs(sharedpath, realpath=True)
361 361 s = vfs.base
362 362 if not vfs.exists():
363 363 raise error.RepoError(
364 364 _('.hg/sharedpath points to nonexistent directory %s') % s)
365 365 self.sharedpath = s
366 366 except IOError as inst:
367 367 if inst.errno != errno.ENOENT:
368 368 raise
369 369
370 370 self.store = store.store(
371 371 self.requirements, self.sharedpath, vfsmod.vfs)
372 372 self.spath = self.store.path
373 373 self.svfs = self.store.vfs
374 374 self.sjoin = self.store.join
375 375 self.vfs.createmode = self.store.createmode
376 376 self._applyopenerreqs()
377 377 if create:
378 378 self._writerequirements()
379 379
380 380 self._dirstatevalidatewarned = False
381 381
382 382 self._branchcaches = {}
383 383 self._revbranchcache = None
384 384 self.filterpats = {}
385 385 self._datafilters = {}
386 386 self._transref = self._lockref = self._wlockref = None
387 387
388 388 # A cache for various files under .hg/ that tracks file changes,
389 389 # (used by the filecache decorator)
390 390 #
391 391 # Maps a property name to its util.filecacheentry
392 392 self._filecache = {}
393 393
394 394 # hold sets of revision to be filtered
395 395 # should be cleared when something might have changed the filter value:
396 396 # - new changesets,
397 397 # - phase change,
398 398 # - new obsolescence marker,
399 399 # - working directory parent change,
400 400 # - bookmark changes
401 401 self.filteredrevcache = {}
402 402
403 403 # generic mapping between names and nodes
404 404 self.names = namespaces.namespaces()
405 405
406 406 def close(self):
407 407 self._writecaches()
408 408
409 409 def _loadextensions(self):
410 410 extensions.loadall(self.ui)
411 411
412 412 def _writecaches(self):
413 413 if self._revbranchcache:
414 414 self._revbranchcache.write()
415 415
416 416 def _restrictcapabilities(self, caps):
417 417 if self.ui.configbool('experimental', 'bundle2-advertise', True):
418 418 caps = set(caps)
419 419 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
420 420 caps.add('bundle2=' + urlreq.quote(capsblob))
421 421 return caps
422 422
423 423 def _applyopenerreqs(self):
424 424 self.svfs.options = dict((r, 1) for r in self.requirements
425 425 if r in self.openerreqs)
426 426 # experimental config: format.chunkcachesize
427 427 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
428 428 if chunkcachesize is not None:
429 429 self.svfs.options['chunkcachesize'] = chunkcachesize
430 430 # experimental config: format.maxchainlen
431 431 maxchainlen = self.ui.configint('format', 'maxchainlen')
432 432 if maxchainlen is not None:
433 433 self.svfs.options['maxchainlen'] = maxchainlen
434 434 # experimental config: format.manifestcachesize
435 435 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
436 436 if manifestcachesize is not None:
437 437 self.svfs.options['manifestcachesize'] = manifestcachesize
438 438 # experimental config: format.aggressivemergedeltas
439 439 aggressivemergedeltas = self.ui.configbool('format',
440 440 'aggressivemergedeltas', False)
441 441 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
442 442 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
443 443
444 444 for r in self.requirements:
445 445 if r.startswith('exp-compression-'):
446 446 self.svfs.options['compengine'] = r[len('exp-compression-'):]
447 447
448 448 # TODO move "revlogv2" to openerreqs once finalized.
449 449 if REVLOGV2_REQUIREMENT in self.requirements:
450 450 self.svfs.options['revlogv2'] = True
451 451
452 452 def _writerequirements(self):
453 453 scmutil.writerequires(self.vfs, self.requirements)
454 454
455 455 def _checknested(self, path):
456 456 """Determine if path is a legal nested repository."""
457 457 if not path.startswith(self.root):
458 458 return False
459 459 subpath = path[len(self.root) + 1:]
460 460 normsubpath = util.pconvert(subpath)
461 461
462 462 # XXX: Checking against the current working copy is wrong in
463 463 # the sense that it can reject things like
464 464 #
465 465 # $ hg cat -r 10 sub/x.txt
466 466 #
467 467 # if sub/ is no longer a subrepository in the working copy
468 468 # parent revision.
469 469 #
470 470 # However, it can of course also allow things that would have
471 471 # been rejected before, such as the above cat command if sub/
472 472 # is a subrepository now, but was a normal directory before.
473 473 # The old path auditor would have rejected by mistake since it
474 474 # panics when it sees sub/.hg/.
475 475 #
476 476 # All in all, checking against the working copy seems sensible
477 477 # since we want to prevent access to nested repositories on
478 478 # the filesystem *now*.
479 479 ctx = self[None]
480 480 parts = util.splitpath(subpath)
481 481 while parts:
482 482 prefix = '/'.join(parts)
483 483 if prefix in ctx.substate:
484 484 if prefix == normsubpath:
485 485 return True
486 486 else:
487 487 sub = ctx.sub(prefix)
488 488 return sub.checknested(subpath[len(prefix) + 1:])
489 489 else:
490 490 parts.pop()
491 491 return False
492 492
493 493 def peer(self):
494 494 return localpeer(self) # not cached to avoid reference cycle
495 495
496 496 def unfiltered(self):
497 497 """Return the unfiltered version of the repository
498 498
499 499 Intended to be overridden by filtered repos."""
500 500 return self
501 501
502 502 def filtered(self, name):
503 503 """Return a filtered version of a repository"""
504 504 # build a new class with the mixin and the current class
505 505 # (possibly subclass of the repo)
506 506 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
507 507 pass
508 508 return filteredrepo(self, name)
509 509
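# A usage sketch: filter names come from mercurial.repoview, e.g.
# 'visible' (hides hidden changesets) and 'served' (also hides secret
# ones); `repo` is assumed to be a localrepository instance.
#
#     served = repo.filtered('served')
#     assert served.unfiltered() is repo.unfiltered()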
510 510 @repofilecache('bookmarks', 'bookmarks.current')
511 511 def _bookmarks(self):
512 512 return bookmarks.bmstore(self)
513 513
514 514 @property
515 515 def _activebookmark(self):
516 516 return self._bookmarks.active
517 517
518 518 # _phaserevs and _phasesets depend on changelog. what we need is to
519 519 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
520 520 # can't be easily expressed in the filecache mechanism.
521 521 @storecache('phaseroots', '00changelog.i')
522 522 def _phasecache(self):
523 523 return phases.phasecache(self, self._phasedefaults)
524 524
525 525 @storecache('obsstore')
526 526 def obsstore(self):
527 527 return obsolete.makestore(self.ui, self)
528 528
529 529 @storecache('00changelog.i')
530 530 def changelog(self):
531 531 return changelog.changelog(self.svfs,
532 532 trypending=txnutil.mayhavepending(self.root))
533 533
534 534 def _constructmanifest(self):
535 535 # This is a temporary function while we migrate from manifest to
536 536 # manifestlog. It allows bundlerepo and unionrepo to intercept the
537 537 # manifest creation.
538 538 return manifest.manifestrevlog(self.svfs)
539 539
540 540 @storecache('00manifest.i')
541 541 def manifestlog(self):
542 542 return manifest.manifestlog(self.svfs, self)
543 543
544 544 @repofilecache('dirstate')
545 545 def dirstate(self):
546 546 return dirstate.dirstate(self.vfs, self.ui, self.root,
547 547 self._dirstatevalidate)
548 548
549 549 def _dirstatevalidate(self, node):
550 550 try:
551 551 self.changelog.rev(node)
552 552 return node
553 553 except error.LookupError:
554 554 if not self._dirstatevalidatewarned:
555 555 self._dirstatevalidatewarned = True
556 556 self.ui.warn(_("warning: ignoring unknown"
557 557 " working parent %s!\n") % short(node))
558 558 return nullid
559 559
560 560 def __getitem__(self, changeid):
561 561 if changeid is None:
562 562 return context.workingctx(self)
563 563 if isinstance(changeid, slice):
564 564 # wdirrev isn't contiguous so the slice shouldn't include it
565 565 return [context.changectx(self, i)
566 566 for i in xrange(*changeid.indices(len(self)))
567 567 if i not in self.changelog.filteredrevs]
568 568 try:
569 569 return context.changectx(self, changeid)
570 570 except error.WdirUnsupported:
571 571 return context.workingctx(self)
572 572
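# A lookup sketch for the protocol above (assumes `repo` exists):
#
#     repo[None]    # workingctx for the working directory
#     repo['tip']   # changectx from a symbolic or hex identifier
#     repo[0]       # changectx from a revision number
#     repo[0:2]     # list of changectx, skipping filtered revisions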
573 573 def __contains__(self, changeid):
574 574 """True if the given changeid exists
575 575
576 576 error.LookupError is raised if an ambiguous node is specified.
577 577 """
578 578 try:
579 579 self[changeid]
580 580 return True
581 581 except error.RepoLookupError:
582 582 return False
583 583
584 584 def __nonzero__(self):
585 585 return True
586 586
587 587 __bool__ = __nonzero__
588 588
589 589 def __len__(self):
590 590 return len(self.changelog)
591 591
592 592 def __iter__(self):
593 593 return iter(self.changelog)
594 594
595 595 def revs(self, expr, *args):
596 596 '''Find revisions matching a revset.
597 597
598 598 The revset is specified as a string ``expr`` that may contain
599 599 %-formatting to escape certain types. See ``revsetlang.formatspec``.
600 600
601 601 Revset aliases from the configuration are not expanded. To expand
602 602 user aliases, consider calling ``scmutil.revrange()`` or
603 603 ``repo.anyrevs([expr], user=True)``.
604 604
605 605 Returns a revset.abstractsmartset, which is a list-like interface
606 606 that contains integer revisions.
607 607 '''
608 608 expr = revsetlang.formatspec(expr, *args)
609 609 m = revset.match(None, expr)
610 610 return m(self)
611 611
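# A sketch of the %-formatting accepted here (see revsetlang.formatspec
# for the full list of specifiers; assumes `repo` exists):
#
#     repo.revs('heads(%d::)', 0)           # %d: revision number
#     repo.revs('%ld and merge()', [1, 2])  # %ld: list of revisions
#     repo.revs('branch(%s)', 'default')    # %s: quoted string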
612 612 def set(self, expr, *args):
613 613 '''Find revisions matching a revset and emit changectx instances.
614 614
615 615 This is a convenience wrapper around ``revs()`` that iterates the
616 616 result and is a generator of changectx instances.
617 617
618 618 Revset aliases from the configuration are not expanded. To expand
619 619 user aliases, consider calling ``scmutil.revrange()``.
620 620 '''
621 621 for r in self.revs(expr, *args):
622 622 yield self[r]
623 623
624 624 def anyrevs(self, specs, user=False):
625 625 '''Find revisions matching one of the given revsets.
626 626
627 627 Revset aliases from the configuration are not expanded by default. To
628 628 expand user aliases, specify ``user=True``.
629 629 '''
630 630 if user:
631 631 m = revset.matchany(self.ui, specs, repo=self)
632 632 else:
633 633 m = revset.matchany(None, specs)
634 634 return m(self)
635 635
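# A sketch contrasting the three revset helpers (assumes `repo`):
#
#     revs = repo.revs('draft()')              # smartset of integers
#     ctxs = list(repo.set('draft()'))         # changectx instances
#     both = repo.anyrevs(['tip', '.'], user=True)  # expands user aliases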
636 636 def url(self):
637 637 return 'file:' + self.root
638 638
639 639 def hook(self, name, throw=False, **args):
640 640 """Call a hook, passing this repo instance.
641 641
642 642 This is a convenience method to aid invoking hooks. Extensions likely
643 643 won't call this unless they have registered a custom hook or are
644 644 replacing code that is expected to call a hook.
645 645 """
646 646 return hook.hook(self.ui, self, name, throw, **args)
647 647
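# An invocation sketch ('myext-updated' and `node` are hypothetical);
# keyword arguments surface as HG_* environment variables for external
# hooks:
#
#     repo.hook('myext-updated', throw=False, node=hex(node))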
648 648 @filteredpropertycache
649 649 def _tagscache(self):
650 650 '''Returns a tagscache object that contains various tags related
651 651 caches.'''
652 652
653 653 # This simplifies its cache management by having one decorated
654 654 # function (this one) and the rest simply fetch things from it.
655 655 class tagscache(object):
656 656 def __init__(self):
657 657 # These two define the set of tags for this repository. tags
658 658 # maps tag name to node; tagtypes maps tag name to 'global' or
659 659 # 'local'. (Global tags are defined by .hgtags across all
660 660 # heads, and local tags are defined in .hg/localtags.)
661 661 # They constitute the in-memory cache of tags.
662 662 self.tags = self.tagtypes = None
663 663
664 664 self.nodetagscache = self.tagslist = None
665 665
666 666 cache = tagscache()
667 667 cache.tags, cache.tagtypes = self._findtags()
668 668
669 669 return cache
670 670
671 671 def tags(self):
672 672 '''return a mapping of tag to node'''
673 673 t = {}
674 674 if self.changelog.filteredrevs:
675 675 tags, tt = self._findtags()
676 676 else:
677 677 tags = self._tagscache.tags
678 678 for k, v in tags.iteritems():
679 679 try:
680 680 # ignore tags to unknown nodes
681 681 self.changelog.rev(v)
682 682 t[k] = v
683 683 except (error.LookupError, ValueError):
684 684 pass
685 685 return t
686 686
687 687 def _findtags(self):
688 688 '''Do the hard work of finding tags. Return a pair of dicts
689 689 (tags, tagtypes) where tags maps tag name to node, and tagtypes
690 690 maps tag name to a string like \'global\' or \'local\'.
691 691 Subclasses or extensions are free to add their own tags, but
692 692 should be aware that the returned dicts will be retained for the
693 693 duration of the localrepo object.'''
694 694
695 695 # XXX what tagtype should subclasses/extensions use? Currently
696 696 # mq and bookmarks add tags, but do not set the tagtype at all.
697 697 # Should each extension invent its own tag type? Should there
698 698 # be one tagtype for all such "virtual" tags? Or is the status
699 699 # quo fine?
700 700
701 701
702 702 # map tag name to (node, hist)
703 703 alltags = tagsmod.findglobaltags(self.ui, self)
704 704 # map tag name to tag type
705 705 tagtypes = dict((tag, 'global') for tag in alltags)
706 706
707 707 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
708 708
709 709 # Build the return dicts. Have to re-encode tag names because
710 710 # the tags module always uses UTF-8 (in order not to lose info
711 711 # writing to the cache), but the rest of Mercurial wants them in
712 712 # local encoding.
713 713 tags = {}
714 714 for (name, (node, hist)) in alltags.iteritems():
715 715 if node != nullid:
716 716 tags[encoding.tolocal(name)] = node
717 717 tags['tip'] = self.changelog.tip()
718 718 tagtypes = dict([(encoding.tolocal(name), value)
719 719 for (name, value) in tagtypes.iteritems()])
720 720 return (tags, tagtypes)
721 721
722 722 def tagtype(self, tagname):
723 723 '''
724 724 return the type of the given tag. result can be:
725 725
726 726 'local' : a local tag
727 727 'global' : a global tag
728 728 None : tag does not exist
729 729 '''
730 730
731 731 return self._tagscache.tagtypes.get(tagname)
732 732
733 733 def tagslist(self):
734 734 '''return a list of tags ordered by revision'''
735 735 if not self._tagscache.tagslist:
736 736 l = []
737 737 for t, n in self.tags().iteritems():
738 738 l.append((self.changelog.rev(n), t, n))
739 739 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
740 740
741 741 return self._tagscache.tagslist
742 742
743 743 def nodetags(self, node):
744 744 '''return the tags associated with a node'''
745 745 if not self._tagscache.nodetagscache:
746 746 nodetagscache = {}
747 747 for t, n in self._tagscache.tags.iteritems():
748 748 nodetagscache.setdefault(n, []).append(t)
749 749 for tags in nodetagscache.itervalues():
750 750 tags.sort()
751 751 self._tagscache.nodetagscache = nodetagscache
752 752 return self._tagscache.nodetagscache.get(node, [])
753 753
754 754 def nodebookmarks(self, node):
755 755 """return the list of bookmarks pointing to the specified node"""
756 756 marks = []
757 757 for bookmark, n in self._bookmarks.iteritems():
758 758 if n == node:
759 759 marks.append(bookmark)
760 760 return sorted(marks)
761 761
762 762 def branchmap(self):
763 763 '''returns a dictionary {branch: [branchheads]} with branchheads
764 764 ordered by increasing revision number'''
765 765 branchmap.updatecache(self)
766 766 return self._branchcaches[self.filtername]
767 767
768 768 @unfilteredmethod
769 769 def revbranchcache(self):
770 770 if not self._revbranchcache:
771 771 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
772 772 return self._revbranchcache
773 773
774 774 def branchtip(self, branch, ignoremissing=False):
775 775 '''return the tip node for a given branch
776 776
777 777 If ignoremissing is True, then this method will not raise an error.
778 778 This is helpful for callers that only expect None for a missing branch
779 779 (e.g. namespace).
780 780
781 781 '''
782 782 try:
783 783 return self.branchmap().branchtip(branch)
784 784 except KeyError:
785 785 if not ignoremissing:
786 786 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
787 787 else:
788 788 pass
789 789
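# A usage sketch for the branch APIs above (assumes `repo`):
#
#     heads = repo.branchmap()['default']      # list of head nodes
#     tip = repo.branchtip('default', ignoremissing=True)  # node or None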
790 790 def lookup(self, key):
791 791 return self[key].node()
792 792
793 793 def lookupbranch(self, key, remote=None):
794 794 repo = remote or self
795 795 if key in repo.branchmap():
796 796 return key
797 797
798 798 repo = (remote and remote.local()) and remote or self
799 799 return repo[key].branch()
800 800
801 801 def known(self, nodes):
802 802 cl = self.changelog
803 803 nm = cl.nodemap
804 804 filtered = cl.filteredrevs
805 805 result = []
806 806 for n in nodes:
807 807 r = nm.get(n)
808 808 resp = not (r is None or r in filtered)
809 809 result.append(resp)
810 810 return result
811 811
812 812 def local(self):
813 813 return self
814 814
815 815 def publishing(self):
816 816 # it's safe (and desirable) to trust the publish flag unconditionally
817 817 # so that we don't finalize changes shared between users via ssh or nfs
818 818 return self.ui.configbool('phases', 'publish', True, untrusted=True)
819 819
820 820 def cancopy(self):
821 821 # so statichttprepo's override of local() works
822 822 if not self.local():
823 823 return False
824 824 if not self.publishing():
825 825 return True
826 826 # if publishing we can't copy if there is filtered content
827 827 return not self.filtered('visible').changelog.filteredrevs
828 828
829 829 def shared(self):
830 830 '''the type of shared repository (None if not shared)'''
831 831 if self.sharedpath != self.path:
832 832 return 'store'
833 833 return None
834 834
835 835 def wjoin(self, f, *insidef):
836 836 return self.vfs.reljoin(self.root, f, *insidef)
837 837
838 838 def file(self, f):
839 839 if f[0] == '/':
840 840 f = f[1:]
841 841 return filelog.filelog(self.svfs, f)
842 842
843 843 def changectx(self, changeid):
844 844 return self[changeid]
845 845
846 846 def setparents(self, p1, p2=nullid):
847 847 with self.dirstate.parentchange():
848 848 copies = self.dirstate.setparents(p1, p2)
849 849 pctx = self[p1]
850 850 if copies:
851 851 # Adjust copy records; the dirstate cannot do it, as it
852 852 # requires access to the parent manifests. Preserve them
853 853 # only for entries added to the first parent.
854 854 for f in copies:
855 855 if f not in pctx and copies[f] in pctx:
856 856 self.dirstate.copy(copies[f], f)
857 857 if p2 == nullid:
858 858 for f, s in sorted(self.dirstate.copies().items()):
859 859 if f not in pctx and s not in pctx:
860 860 self.dirstate.copy(None, f)
861 861
862 862 def filectx(self, path, changeid=None, fileid=None):
863 863 """changeid can be a changeset revision, node, or tag.
864 864 fileid can be a file revision or node."""
865 865 return context.filectx(self, path, changeid, fileid)
866 866
867 867 def getcwd(self):
868 868 return self.dirstate.getcwd()
869 869
870 870 def pathto(self, f, cwd=None):
871 871 return self.dirstate.pathto(f, cwd)
872 872
873 873 def _loadfilter(self, filter):
874 874 if filter not in self.filterpats:
875 875 l = []
876 876 for pat, cmd in self.ui.configitems(filter):
877 877 if cmd == '!':
878 878 continue
879 879 mf = matchmod.match(self.root, '', [pat])
880 880 fn = None
881 881 params = cmd
882 882 for name, filterfn in self._datafilters.iteritems():
883 883 if cmd.startswith(name):
884 884 fn = filterfn
885 885 params = cmd[len(name):].lstrip()
886 886 break
887 887 if not fn:
888 888 fn = lambda s, c, **kwargs: util.filter(s, c)
889 889 # Wrap old filters not supporting keyword arguments
890 890 if not inspect.getargspec(fn)[2]:
891 891 oldfn = fn
892 892 fn = lambda s, c, **kwargs: oldfn(s, c)
893 893 l.append((mf, fn, params))
894 894 self.filterpats[filter] = l
895 895 return self.filterpats[filter]
896 896
897 897 def _filter(self, filterpats, filename, data):
898 898 for mf, fn, cmd in filterpats:
899 899 if mf(filename):
900 900 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
901 901 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
902 902 break
903 903
904 904 return data
905 905
906 906 @unfilteredpropertycache
907 907 def _encodefilterpats(self):
908 908 return self._loadfilter('encode')
909 909
910 910 @unfilteredpropertycache
911 911 def _decodefilterpats(self):
912 912 return self._loadfilter('decode')
913 913
914 914 def adddatafilter(self, name, filter):
915 915 self._datafilters[name] = filter
916 916
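# A registration sketch ('upper:' and `upper` are hypothetical). The
# filter is then referenced from [encode]/[decode] configuration,
# e.g. "**.txt = upper:":
#
#     def upper(s, cmd, **kwargs):
#         return s.upper()
#
#     repo.adddatafilter('upper:', upper)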
917 917 def wread(self, filename):
918 918 if self.wvfs.islink(filename):
919 919 data = self.wvfs.readlink(filename)
920 920 else:
921 921 data = self.wvfs.read(filename)
922 922 return self._filter(self._encodefilterpats, filename, data)
923 923
924 924 def wwrite(self, filename, data, flags, backgroundclose=False):
925 925 """write ``data`` into ``filename`` in the working directory
926 926
927 927 This returns the length of the written (maybe decoded) data.
928 928 """
929 929 data = self._filter(self._decodefilterpats, filename, data)
930 930 if 'l' in flags:
931 931 self.wvfs.symlink(data, filename)
932 932 else:
933 933 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
934 934 if 'x' in flags:
935 935 self.wvfs.setflags(filename, False, True)
936 936 return len(data)
937 937
938 938 def wwritedata(self, filename, data):
939 939 return self._filter(self._decodefilterpats, filename, data)
940 940
941 941 def currenttransaction(self):
942 942 """return the current transaction or None if none exists"""
943 943 if self._transref:
944 944 tr = self._transref()
945 945 else:
946 946 tr = None
947 947
948 948 if tr and tr.running():
949 949 return tr
950 950 return None
951 951
952 952 def transaction(self, desc, report=None):
953 953 if (self.ui.configbool('devel', 'all-warnings')
954 954 or self.ui.configbool('devel', 'check-locks')):
955 955 if self._currentlock(self._lockref) is None:
956 956 raise error.ProgrammingError('transaction requires locking')
957 957 tr = self.currenttransaction()
958 958 if tr is not None:
959 959 return tr.nest()
960 960
961 961 # abort here if the journal already exists
962 962 if self.svfs.exists("journal"):
963 963 raise error.RepoError(
964 964 _("abandoned transaction found"),
965 965 hint=_("run 'hg recover' to clean up transaction"))
966 966
967 967 idbase = "%.40f#%f" % (random.random(), time.time())
968 968 ha = hex(hashlib.sha1(idbase).digest())
969 969 txnid = 'TXN:' + ha
970 970 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
971 971
972 972 self._writejournal(desc)
973 973 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
974 974 if report:
975 975 rp = report
976 976 else:
977 977 rp = self.ui.warn
978 978 vfsmap = {'plain': self.vfs} # root of .hg/
979 979 # we must avoid cyclic reference between repo and transaction.
980 980 reporef = weakref.ref(self)
981 981 # Code to track tag movement
982 982 #
983 983 # Since tags are all handled as file content, it is actually quite hard
984 984 # to track their movement from a code perspective. So we fall back to
985 985 # tracking at the repository level. One could envision tracking changes
986 986 # to the '.hgtags' file through changegroup application, but that fails
987 987 # to cope with cases where a transaction exposes new heads without a
988 988 # changegroup being involved (e.g. phase movement).
989 989 #
990 990 # For now, we gate the feature behind a flag since it likely comes
991 991 # with performance impacts. The current code runs more often than needed
992 992 # and does not use caches as much as it could. The current focus is on
993 993 # the behavior of the feature, so we disable it by default. The flag
994 994 # will be removed when we are happy with the performance impact.
995 995 #
996 996 # Once this feature is no longer experimental move the following
997 997 # documentation to the appropriate help section:
998 998 #
999 999 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1000 1000 # tags (new or changed or deleted tags). In addition the details of
1001 1001 # these changes are made available in a file at:
1002 1002 # ``REPOROOT/.hg/changes/tags.changes``.
1003 1003 # Make sure you check for HG_TAG_MOVED before reading that file as it
1004 1004 # might exist from a previous transaction even if no tags were touched
1005 1005 # in this one. Changes are recorded in a line-based format::
1006 1006 #
1007 1007 # <action> <hex-node> <tag-name>\n
1008 1008 #
1009 1009 # Actions are defined as follows:
1010 1010 # "-R": tag is removed,
1011 1011 # "+A": tag is added,
1012 1012 # "-M": tag is moved (old value),
1013 1013 # "+M": tag is moved (new value),
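# For example, a txnclose hook could parse that file like so
# (illustrative only, given the experimental status noted above):
#
#     for line in repo.vfs('changes/tags.changes'):
#         action, hexnode, tagname = line.rstrip('\n').split(' ', 2)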
1014 1014 tracktags = lambda x: None
1015 1015 # experimental config: experimental.hook-track-tags
1016 1016 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1017 1017 False)
1018 1018 if desc != 'strip' and shouldtracktags:
1019 1019 oldheads = self.changelog.headrevs()
1020 1020 def tracktags(tr2):
1021 1021 repo = reporef()
1022 1022 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1023 1023 newheads = repo.changelog.headrevs()
1024 1024 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1025 1025 # note: we compare lists here;
1026 1026 # as we do it only once, building a set would not be cheaper
1027 1027 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1028 1028 if changes:
1029 1029 tr2.hookargs['tag_moved'] = '1'
1030 1030 with repo.vfs('changes/tags.changes', 'w',
1031 1031 atomictemp=True) as changesfile:
1032 1032 # note: we do not register the file with the transaction
1033 1033 # because we need it to still exist when the transaction
1034 1034 # is closed (for txnclose hooks)
1035 1035 tagsmod.writediff(changesfile, changes)
1036 1036 def validate(tr2):
1037 1037 """will run pre-closing hooks"""
1038 1038 # XXX the transaction API is a bit lacking here so we take a hacky
1039 1039 # path for now
1040 1040 #
1041 1041 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1042 1042 # dict is copied before these run. In addition we need the data
1043 1043 # available to in-memory hooks too.
1044 1044 #
1045 1045 # Moreover, we also need to make sure this runs before txnclose
1046 1046 # hooks and there is no "pending" mechanism that would execute
1047 1047 # logic only if hooks are about to run.
1048 1048 #
1049 1049 # Fixing this limitation of the transaction is also needed to track
1050 1050 # other families of changes (bookmarks, phases, obsolescence).
1051 1051 #
1052 1052 # This will have to be fixed before we remove the experimental
1053 1053 # gating.
1054 1054 tracktags(tr2)
1055 1055 reporef().hook('pretxnclose', throw=True,
1056 1056 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1057 1057 def releasefn(tr, success):
1058 1058 repo = reporef()
1059 1059 if success:
1060 1060 # this should be explicitly invoked here, because
1061 1061 # in-memory changes aren't written out at closing
1062 1062 # transaction, if tr.addfilegenerator (via
1063 1063 # dirstate.write or so) isn't invoked while
1064 1064 # transaction running
1065 1065 repo.dirstate.write(None)
1066 1066 else:
1067 1067 # discard all changes (including ones already written
1068 1068 # out) in this transaction
1069 1069 repo.dirstate.restorebackup(None, prefix='journal.')
1070 1070
1071 1071 repo.invalidate(clearfilecache=True)
1072 1072
1073 1073 tr = transaction.transaction(rp, self.svfs, vfsmap,
1074 1074 "journal",
1075 1075 "undo",
1076 1076 aftertrans(renames),
1077 1077 self.store.createmode,
1078 1078 validator=validate,
1079 1079 releasefn=releasefn)
1080 1080 tr.changes['revs'] = set()
1081 1081
1082 1082 tr.hookargs['txnid'] = txnid
1083 1083 # note: writing the fncache only during finalize means the file is
1084 1084 # outdated when running hooks. As the fncache is used for streaming
1085 1085 # clones, this is not expected to break anything happening during hooks.
1086 1086 tr.addfinalize('flush-fncache', self.store.write)
1087 1087 def txnclosehook(tr2):
1088 1088 """To be run if transaction is successful, will schedule a hook run
1089 1089 """
1090 1090 # Don't reference tr2 in hook() so we don't hold a reference.
1091 1091 # This reduces memory consumption when there are multiple
1092 1092 # transactions per lock. This can likely go away if issue5045
1093 1093 # fixes the function accumulation.
1094 1094 hookargs = tr2.hookargs
1095 1095
1096 1096 def hook():
1097 1097 reporef().hook('txnclose', throw=False, txnname=desc,
1098 1098 **pycompat.strkwargs(hookargs))
1099 1099 reporef()._afterlock(hook)
1100 1100 tr.addfinalize('txnclose-hook', txnclosehook)
1101 1101 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1102 1102 def txnaborthook(tr2):
1103 1103 """To be run if transaction is aborted
1104 1104 """
1105 1105 reporef().hook('txnabort', throw=False, txnname=desc,
1106 1106 **tr2.hookargs)
1107 1107 tr.addabort('txnabort-hook', txnaborthook)
1108 1108 # avoid eager cache invalidation. in-memory data should be identical
1109 1109 # to stored data if transaction has no error.
1110 1110 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1111 1111 self._transref = weakref.ref(tr)
1112 1112 return tr
1113 1113
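# A call-pattern sketch for the method above ('my-operation' is a
# hypothetical description string):
#
#     with repo.lock():
#         tr = repo.transaction('my-operation')
#         try:
#             ...            # mutate the store, registering files in tr
#             tr.close()     # commit the transaction
#         finally:
#             tr.release()   # abort if close() was never reached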
1114 1114 def _journalfiles(self):
1115 1115 return ((self.svfs, 'journal'),
1116 1116 (self.vfs, 'journal.dirstate'),
1117 1117 (self.vfs, 'journal.branch'),
1118 1118 (self.vfs, 'journal.desc'),
1119 1119 (self.vfs, 'journal.bookmarks'),
1120 1120 (self.svfs, 'journal.phaseroots'))
1121 1121
1122 1122 def undofiles(self):
1123 1123 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1124 1124
1125 1125 @unfilteredmethod
1126 1126 def _writejournal(self, desc):
1127 1127 self.dirstate.savebackup(None, prefix='journal.')
1128 1128 self.vfs.write("journal.branch",
1129 1129 encoding.fromlocal(self.dirstate.branch()))
1130 1130 self.vfs.write("journal.desc",
1131 1131 "%d\n%s\n" % (len(self), desc))
1132 1132 self.vfs.write("journal.bookmarks",
1133 1133 self.vfs.tryread("bookmarks"))
1134 1134 self.svfs.write("journal.phaseroots",
1135 1135 self.svfs.tryread("phaseroots"))
1136 1136
1137 1137 def recover(self):
1138 1138 with self.lock():
1139 1139 if self.svfs.exists("journal"):
1140 1140 self.ui.status(_("rolling back interrupted transaction\n"))
1141 1141 vfsmap = {'': self.svfs,
1142 1142 'plain': self.vfs,}
1143 1143 transaction.rollback(self.svfs, vfsmap, "journal",
1144 1144 self.ui.warn)
1145 1145 self.invalidate()
1146 1146 return True
1147 1147 else:
1148 1148 self.ui.warn(_("no interrupted transaction available\n"))
1149 1149 return False
1150 1150
1151 1151 def rollback(self, dryrun=False, force=False):
1152 1152 wlock = lock = dsguard = None
1153 1153 try:
1154 1154 wlock = self.wlock()
1155 1155 lock = self.lock()
1156 1156 if self.svfs.exists("undo"):
1157 1157 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1158 1158
1159 1159 return self._rollback(dryrun, force, dsguard)
1160 1160 else:
1161 1161 self.ui.warn(_("no rollback information available\n"))
1162 1162 return 1
1163 1163 finally:
1164 1164 release(dsguard, lock, wlock)
1165 1165
1166 1166 @unfilteredmethod # Until we get smarter cache management
1167 1167 def _rollback(self, dryrun, force, dsguard):
1168 1168 ui = self.ui
1169 1169 try:
1170 1170 args = self.vfs.read('undo.desc').splitlines()
1171 1171 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1172 1172 if len(args) >= 3:
1173 1173 detail = args[2]
1174 1174 oldtip = oldlen - 1
1175 1175
1176 1176 if detail and ui.verbose:
1177 1177 msg = (_('repository tip rolled back to revision %s'
1178 1178 ' (undo %s: %s)\n')
1179 1179 % (oldtip, desc, detail))
1180 1180 else:
1181 1181 msg = (_('repository tip rolled back to revision %s'
1182 1182 ' (undo %s)\n')
1183 1183 % (oldtip, desc))
1184 1184 except IOError:
1185 1185 msg = _('rolling back unknown transaction\n')
1186 1186 desc = None
1187 1187
1188 1188 if not force and self['.'] != self['tip'] and desc == 'commit':
1189 1189 raise error.Abort(
1190 1190 _('rollback of last commit while not checked out '
1191 1191 'may lose data'), hint=_('use -f to force'))
1192 1192
1193 1193 ui.status(msg)
1194 1194 if dryrun:
1195 1195 return 0
1196 1196
1197 1197 parents = self.dirstate.parents()
1198 1198 self.destroying()
1199 1199 vfsmap = {'plain': self.vfs, '': self.svfs}
1200 1200 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1201 1201 if self.vfs.exists('undo.bookmarks'):
1202 1202 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1203 1203 if self.svfs.exists('undo.phaseroots'):
1204 1204 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1205 1205 self.invalidate()
1206 1206
1207 1207 parentgone = (parents[0] not in self.changelog.nodemap or
1208 1208 parents[1] not in self.changelog.nodemap)
1209 1209 if parentgone:
1210 1210 # prevent dirstateguard from overwriting already restored one
1211 1211 dsguard.close()
1212 1212
1213 1213 self.dirstate.restorebackup(None, prefix='undo.')
1214 1214 try:
1215 1215 branch = self.vfs.read('undo.branch')
1216 1216 self.dirstate.setbranch(encoding.tolocal(branch))
1217 1217 except IOError:
1218 1218 ui.warn(_('named branch could not be reset: '
1219 1219 'current branch is still \'%s\'\n')
1220 1220 % self.dirstate.branch())
1221 1221
1222 1222 parents = tuple([p.rev() for p in self[None].parents()])
1223 1223 if len(parents) > 1:
1224 1224 ui.status(_('working directory now based on '
1225 1225 'revisions %d and %d\n') % parents)
1226 1226 else:
1227 1227 ui.status(_('working directory now based on '
1228 1228 'revision %d\n') % parents)
1229 1229 mergemod.mergestate.clean(self, self['.'].node())
1230 1230
1231 1231 # TODO: if we know which new heads may result from this rollback, pass
1232 1232 # them to destroy(), which will prevent the branchhead cache from being
1233 1233 # invalidated.
1234 1234 self.destroyed()
1235 1235 return 0
1236 1236
1237 1237 def _buildcacheupdater(self, newtransaction):
1238 1238 """called during a transaction to build the callback updating caches
1239 1239
1240 1240 Lives on the repository to help extensions that might want to augment
1241 1241 this logic. For this purpose, the created transaction is passed to the
1242 1242 method.
1243 1243 """
1244 1244 # we must avoid cyclic reference between repo and transaction.
1245 1245 reporef = weakref.ref(self)
1246 1246 def updater(tr):
1247 1247 repo = reporef()
1248 1248 repo.updatecaches(tr)
1249 1249 return updater
1250 1250
1251 1251 @unfilteredmethod
1252 1252 def updatecaches(self, tr=None):
1253 1253 """warm appropriate caches
1254 1254
1255 1255 If this function is called after a transaction close, the transaction
1256 1256 will be available in the 'tr' argument. This can be used to selectively
1257 1257 update caches relevant to the changes in that transaction.
1258 1258 """
1259 1259 if tr is not None and tr.hookargs.get('source') == 'strip':
1260 1260 # During strip, many caches are invalid but
1261 1261 # a later call to `destroyed` will refresh them.
1262 1262 return
1263 1263
1264 1264 if tr is None or tr.changes['revs']:
1265 1265 # updating the unfiltered branchmap should refresh all the others,
1266 1266 self.ui.debug('updating the branch cache\n')
1267 1267 branchmap.updatecache(self.filtered('served'))
1268 1268
1269 1269 def invalidatecaches(self):
1270 1270
1271 1271 if '_tagscache' in vars(self):
1272 1272 # can't use delattr on proxy
1273 1273 del self.__dict__['_tagscache']
1274 1274
1275 1275 self.unfiltered()._branchcaches.clear()
1276 1276 self.invalidatevolatilesets()
1277 1277
1278 1278 def invalidatevolatilesets(self):
1279 1279 self.filteredrevcache.clear()
1280 1280 obsolete.clearobscaches(self)
1281 1281
1282 1282 def invalidatedirstate(self):
1283 1283 '''Invalidates the dirstate, causing the next call to dirstate
1284 1284 to check if it was modified since the last time it was read,
1285 1285 rereading it if it has been modified.
1286 1286
1287 1287 This is different from dirstate.invalidate() in that it doesn't always
1288 1288 reread the dirstate. Use dirstate.invalidate() if you want to
1289 1289 explicitly read the dirstate again (i.e. restoring it to a previous
1290 1290 known good state).'''
1291 1291 if hasunfilteredcache(self, 'dirstate'):
1292 1292 for k in self.dirstate._filecache:
1293 1293 try:
1294 1294 delattr(self.dirstate, k)
1295 1295 except AttributeError:
1296 1296 pass
1297 1297 delattr(self.unfiltered(), 'dirstate')
1298 1298
1299 1299 def invalidate(self, clearfilecache=False):
1300 1300 '''Invalidates both store and non-store parts other than dirstate
1301 1301
1302 1302 If a transaction is running, invalidation of store is omitted,
1303 1303 because discarding in-memory changes might cause inconsistency
1304 1304 (e.g. incomplete fncache causes unintentional failure, but
1305 1305 redundant one doesn't).
1306 1306 '''
1307 1307 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1308 1308 for k in list(self._filecache.keys()):
1309 1309 # dirstate is invalidated separately in invalidatedirstate()
1310 1310 if k == 'dirstate':
1311 1311 continue
1312 1312
1313 1313 if clearfilecache:
1314 1314 del self._filecache[k]
1315 1315 try:
1316 1316 delattr(unfiltered, k)
1317 1317 except AttributeError:
1318 1318 pass
1319 1319 self.invalidatecaches()
1320 1320 if not self.currenttransaction():
1321 1321 # TODO: Changing contents of store outside transaction
1322 1322 # causes inconsistency. We should make in-memory store
1323 1323 # changes detectable, and abort if changed.
1324 1324 self.store.invalidatecaches()
1325 1325
1326 1326 def invalidateall(self):
1327 1327 '''Fully invalidates both store and non-store parts, causing the
1328 1328 subsequent operation to reread any outside changes.'''
1329 1329 # extension should hook this to invalidate its caches
1330 1330 self.invalidate()
1331 1331 self.invalidatedirstate()
1332 1332
1333 1333 @unfilteredmethod
1334 1334 def _refreshfilecachestats(self, tr):
1335 1335 """Reload stats of cached files so that they are flagged as valid"""
1336 1336 for k, ce in self._filecache.items():
1337 1337 if k == 'dirstate' or k not in self.__dict__:
1338 1338 continue
1339 1339 ce.refresh()
1340 1340
1341 1341 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1342 1342 inheritchecker=None, parentenvvar=None):
1343 1343 parentlock = None
1344 1344 # the contents of parentenvvar are used by the underlying lock to
1345 1345 # determine whether it can be inherited
1346 1346 if parentenvvar is not None:
1347 1347 parentlock = encoding.environ.get(parentenvvar)
1348 1348 try:
1349 1349 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1350 1350 acquirefn=acquirefn, desc=desc,
1351 1351 inheritchecker=inheritchecker,
1352 1352 parentlock=parentlock)
1353 1353 except error.LockHeld as inst:
1354 1354 if not wait:
1355 1355 raise
1356 1356 # show more details for new-style locks
1357 1357 if ':' in inst.locker:
1358 1358 host, pid = inst.locker.split(":", 1)
1359 1359 self.ui.warn(
1360 1360 _("waiting for lock on %s held by process %r "
1361 1361 "on host %r\n") % (desc, pid, host))
1362 1362 else:
1363 1363 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1364 1364 (desc, inst.locker))
1365 1365 # default to 600 seconds timeout
1366 1366 l = lockmod.lock(vfs, lockname,
1367 1367 int(self.ui.config("ui", "timeout", "600")),
1368 1368 releasefn=releasefn, acquirefn=acquirefn,
1369 1369 desc=desc)
1370 1370 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1371 1371 return l
1372 1372
1373 1373 def _afterlock(self, callback):
1374 1374 """add a callback to be run when the repository is fully unlocked
1375 1375
1376 1376 The callback will be executed when the outermost lock is released
1377 1377 (with wlock being higher level than 'lock')."""
1378 1378 for ref in (self._wlockref, self._lockref):
1379 1379 l = ref and ref()
1380 1380 if l and l.held:
1381 1381 l.postrelease.append(callback)
1382 1382 break
1383 1383 else: # no lock has been found.
1384 1384 callback()
1385 1385
1386 1386 def lock(self, wait=True):
1387 1387 '''Lock the repository store (.hg/store) and return a weak reference
1388 1388 to the lock. Use this before modifying the store (e.g. committing or
1389 1389 stripping). If you are opening a transaction, get a lock as well.
1390 1390
1391 1391 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1392 1392 'wlock' first to avoid a deadlock hazard.'''
1393 1393 l = self._currentlock(self._lockref)
1394 1394 if l is not None:
1395 1395 l.lock()
1396 1396 return l
1397 1397
1398 1398 l = self._lock(self.svfs, "lock", wait, None,
1399 1399 self.invalidate, _('repository %s') % self.origroot)
1400 1400 self._lockref = weakref.ref(l)
1401 1401 return l
1402 1402
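# An ordering sketch per the docstring above: acquire 'wlock' before
# 'lock' and release in reverse, mirroring the pattern in rollback():
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         ...  # modify the working copy and the store
#     finally:
#         release(lock, wlock)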
1403 1403 def _wlockchecktransaction(self):
1404 1404 if self.currenttransaction() is not None:
1405 1405 raise error.LockInheritanceContractViolation(
1406 1406 'wlock cannot be inherited in the middle of a transaction')
1407 1407
1408 1408 def wlock(self, wait=True):
1409 1409 '''Lock the non-store parts of the repository (everything under
1410 1410 .hg except .hg/store) and return a weak reference to the lock.
1411 1411
1412 1412 Use this before modifying files in .hg.
1413 1413
1414 1414 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1415 1415 'wlock' first to avoid a deadlock hazard.'''
1416 1416 l = self._wlockref and self._wlockref()
1417 1417 if l is not None and l.held:
1418 1418 l.lock()
1419 1419 return l
1420 1420
1421 1421 # We do not need to check for non-waiting lock acquisition. Such
1422 1422 # acquisitions would not cause a deadlock, as they would just fail.
1423 1423 if wait and (self.ui.configbool('devel', 'all-warnings')
1424 1424 or self.ui.configbool('devel', 'check-locks')):
1425 1425 if self._currentlock(self._lockref) is not None:
1426 1426 self.ui.develwarn('"wlock" acquired after "lock"')
1427 1427
1428 1428 def unlock():
1429 1429 if self.dirstate.pendingparentchange():
1430 1430 self.dirstate.invalidate()
1431 1431 else:
1432 1432 self.dirstate.write(None)
1433 1433
1434 1434 self._filecache['dirstate'].refresh()
1435 1435
1436 1436 l = self._lock(self.vfs, "wlock", wait, unlock,
1437 1437 self.invalidatedirstate, _('working directory of %s') %
1438 1438 self.origroot,
1439 1439 inheritchecker=self._wlockchecktransaction,
1440 1440 parentenvvar='HG_WLOCK_LOCKER')
1441 1441 self._wlockref = weakref.ref(l)
1442 1442 return l
1443 1443
1444 1444 def _currentlock(self, lockref):
1445 1445 """Returns the lock if it's held, or None if it's not."""
1446 1446 if lockref is None:
1447 1447 return None
1448 1448 l = lockref()
1449 1449 if l is None or not l.held:
1450 1450 return None
1451 1451 return l
1452 1452
1453 1453 def currentwlock(self):
1454 1454 """Returns the wlock if it's held, or None if it's not."""
1455 1455 return self._currentlock(self._wlockref)
1456 1456
1457 1457 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1458 1458 """
1459 1459 commit an individual file as part of a larger transaction
1460 1460 """
1461 1461
1462 1462 fname = fctx.path()
1463 1463 fparent1 = manifest1.get(fname, nullid)
1464 1464 fparent2 = manifest2.get(fname, nullid)
1465 1465 if isinstance(fctx, context.filectx):
1466 1466 node = fctx.filenode()
1467 1467 if node in [fparent1, fparent2]:
1468 1468 self.ui.debug('reusing %s filelog entry\n' % fname)
1469 1469 if manifest1.flags(fname) != fctx.flags():
1470 1470 changelist.append(fname)
1471 1471 return node
1472 1472
1473 1473 flog = self.file(fname)
1474 1474 meta = {}
1475 1475 copy = fctx.renamed()
1476 1476 if copy and copy[0] != fname:
1477 1477 # Mark the new revision of this file as a copy of another
1478 1478 # file. This copy data will effectively act as a parent
1479 1479 # of this new revision. If this is a merge, the first
1480 1480 # parent will be the nullid (meaning "look up the copy data")
1481 1481 # and the second one will be the other parent. For example:
1482 1482 #
1483 1483 # 0 --- 1 --- 3 rev1 changes file foo
1484 1484 # \ / rev2 renames foo to bar and changes it
1485 1485 # \- 2 -/ rev3 should have bar with all changes and
1486 1486 # should record that bar descends from
1487 1487 # bar in rev2 and foo in rev1
1488 1488 #
1489 1489 # this allows this merge to succeed:
1490 1490 #
1491 1491 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1492 1492 # \ / merging rev3 and rev4 should use bar@rev2
1493 1493 # \- 2 --- 4 as the merge base
1494 1494 #
1495 1495
1496 1496 cfname = copy[0]
1497 1497 crev = manifest1.get(cfname)
1498 1498 newfparent = fparent2
1499 1499
1500 1500 if manifest2: # branch merge
1501 1501 if fparent2 == nullid or crev is None: # copied on remote side
1502 1502 if cfname in manifest2:
1503 1503 crev = manifest2[cfname]
1504 1504 newfparent = fparent1
1505 1505
1506 1506 # Here, we used to search backwards through history to try to find
1507 1507 # where the file copy came from if the source of a copy was not in
1508 1508 # the parent directory. However, this doesn't actually make sense to
1509 1509 # do (what does a copy from something not in your working copy even
1510 1510 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1511 1511 # the user that copy information was dropped, so if they didn't
1512 1512 # expect this outcome it can be fixed, but this is the correct
1513 1513 # behavior in this circumstance.
1514 1514
1515 1515 if crev:
1516 1516 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1517 1517 meta["copy"] = cfname
1518 1518 meta["copyrev"] = hex(crev)
1519 1519 fparent1, fparent2 = nullid, newfparent
1520 1520 else:
1521 1521 self.ui.warn(_("warning: can't find ancestor for '%s' "
1522 1522 "copied from '%s'!\n") % (fname, cfname))
1523 1523
1524 1524 elif fparent1 == nullid:
1525 1525 fparent1, fparent2 = fparent2, nullid
1526 1526 elif fparent2 != nullid:
1527 1527 # is one parent an ancestor of the other?
1528 1528 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1529 1529 if fparent1 in fparentancestors:
1530 1530 fparent1, fparent2 = fparent2, nullid
1531 1531 elif fparent2 in fparentancestors:
1532 1532 fparent2 = nullid
1533 1533
1534 1534 # is the file changed?
1535 1535 text = fctx.data()
1536 1536 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1537 1537 changelist.append(fname)
1538 1538 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1539 1539 # are just the flags changed during merge?
1540 1540 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1541 1541 changelist.append(fname)
1542 1542
1543 1543 return fparent1
1544 1544
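# Editor's note: an illustrative sketch, not part of this changeset. For a
# rename of 'foo' to 'bar', _filecommit records the copy source in filelog
# metadata rather than in the parent pointers, roughly:
#
#   meta = {'copy': 'foo', 'copyrev': hex(manifest1['foo'])}
#   fparent1, fparent2 = nullid, newfparent  # nullid means "look up copy data"
#   flog.add(text, meta, tr, linkrev, fparent1, fparent2)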
1545 1545 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1546 1546 """check for commit arguments that aren't committable"""
1547 1547 if match.isexact() or match.prefix():
1548 1548 matched = set(status.modified + status.added + status.removed)
1549 1549
1550 1550 for f in match.files():
1551 1551 f = self.dirstate.normalize(f)
1552 1552 if f == '.' or f in matched or f in wctx.substate:
1553 1553 continue
1554 1554 if f in status.deleted:
1555 1555 fail(f, _('file not found!'))
1556 1556 if f in vdirs: # visited directory
1557 1557 d = f + '/'
1558 1558 for mf in matched:
1559 1559 if mf.startswith(d):
1560 1560 break
1561 1561 else:
1562 1562 fail(f, _("no match under directory!"))
1563 1563 elif f not in self.dirstate:
1564 1564 fail(f, _("file not tracked!"))
1565 1565
1566 1566 @unfilteredmethod
1567 1567 def commit(self, text="", user=None, date=None, match=None, force=False,
1568 1568 editor=False, extra=None):
1569 1569 """Add a new revision to current repository.
1570 1570
1571 1571 Revision information is gathered from the working directory,
1572 1572 match can be used to filter the committed files. If editor is
1573 1573 supplied, it is called to get a commit message.
1574 1574 """
1575 1575 if extra is None:
1576 1576 extra = {}
1577 1577
1578 1578 def fail(f, msg):
1579 1579 raise error.Abort('%s: %s' % (f, msg))
1580 1580
1581 1581 if not match:
1582 1582 match = matchmod.always(self.root, '')
1583 1583
1584 1584 if not force:
1585 1585 vdirs = []
1586 1586 match.explicitdir = vdirs.append
1587 1587 match.bad = fail
1588 1588
1589 1589 wlock = lock = tr = None
1590 1590 try:
1591 1591 wlock = self.wlock()
1592 1592 lock = self.lock() # for recent changelog (see issue4368)
1593 1593
1594 1594 wctx = self[None]
1595 1595 merge = len(wctx.parents()) > 1
1596 1596
1597 1597 if not force and merge and not match.always():
1598 1598 raise error.Abort(_('cannot partially commit a merge '
1599 1599 '(do not specify files or patterns)'))
1600 1600
1601 1601 status = self.status(match=match, clean=force)
1602 1602 if force:
1603 1603 status.modified.extend(status.clean) # mq may commit clean files
1604 1604
1605 1605 # check subrepos
1606 1606 subs = []
1607 1607 commitsubs = set()
1608 1608 newstate = wctx.substate.copy()
1609 1609 # only manage subrepos and .hgsubstate if .hgsub is present
1610 1610 if '.hgsub' in wctx:
1611 1611 # we'll decide whether to track this ourselves, thanks
1612 1612 for c in status.modified, status.added, status.removed:
1613 1613 if '.hgsubstate' in c:
1614 1614 c.remove('.hgsubstate')
1615 1615
1616 1616 # compare current state to last committed state
1617 1617 # build new substate based on last committed state
1618 1618 oldstate = wctx.p1().substate
1619 1619 for s in sorted(newstate.keys()):
1620 1620 if not match(s):
1621 1621 # ignore working copy, use old state if present
1622 1622 if s in oldstate:
1623 1623 newstate[s] = oldstate[s]
1624 1624 continue
1625 1625 if not force:
1626 1626 raise error.Abort(
1627 1627 _("commit with new subrepo %s excluded") % s)
1628 1628 dirtyreason = wctx.sub(s).dirtyreason(True)
1629 1629 if dirtyreason:
1630 1630 if not self.ui.configbool('ui', 'commitsubrepos'):
1631 1631 raise error.Abort(dirtyreason,
1632 1632 hint=_("use --subrepos for recursive commit"))
1633 1633 subs.append(s)
1634 1634 commitsubs.add(s)
1635 1635 else:
1636 1636 bs = wctx.sub(s).basestate()
1637 1637 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1638 1638 if oldstate.get(s, (None, None, None))[1] != bs:
1639 1639 subs.append(s)
1640 1640
1641 1641 # check for removed subrepos
1642 1642 for p in wctx.parents():
1643 1643 r = [s for s in p.substate if s not in newstate]
1644 1644 subs += [s for s in r if match(s)]
1645 1645 if subs:
1646 1646 if (not match('.hgsub') and
1647 1647 '.hgsub' in (wctx.modified() + wctx.added())):
1648 1648 raise error.Abort(
1649 1649 _("can't commit subrepos without .hgsub"))
1650 1650 status.modified.insert(0, '.hgsubstate')
1651 1651
1652 1652 elif '.hgsub' in status.removed:
1653 1653 # clean up .hgsubstate when .hgsub is removed
1654 1654 if ('.hgsubstate' in wctx and
1655 1655 '.hgsubstate' not in (status.modified + status.added +
1656 1656 status.removed)):
1657 1657 status.removed.insert(0, '.hgsubstate')
1658 1658
1659 1659 # make sure all explicit patterns are matched
1660 1660 if not force:
1661 1661 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1662 1662
1663 1663 cctx = context.workingcommitctx(self, status,
1664 1664 text, user, date, extra)
1665 1665
1666 1666 # internal config: ui.allowemptycommit
1667 1667 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1668 1668 or extra.get('close') or merge or cctx.files()
1669 1669 or self.ui.configbool('ui', 'allowemptycommit'))
1670 1670 if not allowemptycommit:
1671 1671 return None
1672 1672
1673 1673 if merge and cctx.deleted():
1674 1674 raise error.Abort(_("cannot commit merge with missing files"))
1675 1675
1676 1676 ms = mergemod.mergestate.read(self)
1677 1677 mergeutil.checkunresolved(ms)
1678 1678
1679 1679 if editor:
1680 1680 cctx._text = editor(self, cctx, subs)
1681 1681 edited = (text != cctx._text)
1682 1682
1683 1683 # Save commit message in case this transaction gets rolled back
1684 1684 # (e.g. by a pretxncommit hook). Leave the content alone on
1685 1685 # the assumption that the user will use the same editor again.
1686 1686 msgfn = self.savecommitmessage(cctx._text)
1687 1687
1688 1688 # commit subs and write new state
1689 1689 if subs:
1690 1690 for s in sorted(commitsubs):
1691 1691 sub = wctx.sub(s)
1692 1692 self.ui.status(_('committing subrepository %s\n') %
1693 1693 subrepo.subrelpath(sub))
1694 1694 sr = sub.commit(cctx._text, user, date)
1695 1695 newstate[s] = (newstate[s][0], sr)
1696 1696 subrepo.writestate(self, newstate)
1697 1697
1698 1698 p1, p2 = self.dirstate.parents()
1699 1699 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1700 1700 try:
1701 1701 self.hook("precommit", throw=True, parent1=hookp1,
1702 1702 parent2=hookp2)
1703 1703 tr = self.transaction('commit')
1704 1704 ret = self.commitctx(cctx, True)
1705 1705 except: # re-raises
1706 1706 if edited:
1707 1707 self.ui.write(
1708 1708 _('note: commit message saved in %s\n') % msgfn)
1709 1709 raise
1710 1710 # update bookmarks, dirstate and mergestate
1711 1711 bookmarks.update(self, [p1, p2], ret)
1712 1712 cctx.markcommitted(ret)
1713 1713 ms.reset()
1714 1714 tr.close()
1715 1715
1716 1716 finally:
1717 1717 lockmod.release(tr, lock, wlock)
1718 1718
1719 1719 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1720 1720 # hack for commands that use a temporary commit (eg: histedit):
1721 1721 # the temporary commit may have been stripped before the hook runs
1722 1722 if self.changelog.hasnode(ret):
1723 1723 self.hook("commit", node=node, parent1=parent1,
1724 1724 parent2=parent2)
1725 1725 self._afterlock(commithook)
1726 1726 return ret
1727 1727
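# Editor's note: a minimal programmatic-usage sketch, not part of this
# changeset; 'repo' is assumed to be an already-open localrepository with
# pending working-directory changes:
#
#   node = repo.commit(text='example message',
#                      user='alice <alice@example.com>')
#   if node is None:
#       repo.ui.status('nothing changed\n')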
1728 1728 @unfilteredmethod
1729 1729 def commitctx(self, ctx, error=False):
1730 1730 """Add a new revision to current repository.
1731 1731 Revision information is passed via the context argument.
1732 1732 """
1733 1733
1734 1734 tr = None
1735 1735 p1, p2 = ctx.p1(), ctx.p2()
1736 1736 user = ctx.user()
1737 1737
1738 1738 lock = self.lock()
1739 1739 try:
1740 1740 tr = self.transaction("commit")
1741 1741 trp = weakref.proxy(tr)
1742 1742
1743 1743 if ctx.manifestnode():
1744 1744 # reuse an existing manifest revision
1745 1745 mn = ctx.manifestnode()
1746 1746 files = ctx.files()
1747 1747 elif ctx.files():
1748 1748 m1ctx = p1.manifestctx()
1749 1749 m2ctx = p2.manifestctx()
1750 1750 mctx = m1ctx.copy()
1751 1751
1752 1752 m = mctx.read()
1753 1753 m1 = m1ctx.read()
1754 1754 m2 = m2ctx.read()
1755 1755
1756 1756 # check in files
1757 1757 added = []
1758 1758 changed = []
1759 1759 removed = list(ctx.removed())
1760 1760 linkrev = len(self)
1761 1761 self.ui.note(_("committing files:\n"))
1762 1762 for f in sorted(ctx.modified() + ctx.added()):
1763 1763 self.ui.note(f + "\n")
1764 1764 try:
1765 1765 fctx = ctx[f]
1766 1766 if fctx is None:
1767 1767 removed.append(f)
1768 1768 else:
1769 1769 added.append(f)
1770 1770 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1771 1771 trp, changed)
1772 1772 m.setflag(f, fctx.flags())
1773 1773 except OSError as inst:
1774 1774 self.ui.warn(_("trouble committing %s!\n") % f)
1775 1775 raise
1776 1776 except IOError as inst:
1777 1777 errcode = getattr(inst, 'errno', errno.ENOENT)
1778 1778 if error or errcode and errcode != errno.ENOENT:
1779 1779 self.ui.warn(_("trouble committing %s!\n") % f)
1780 1780 raise
1781 1781
1782 1782 # update manifest
1783 1783 self.ui.note(_("committing manifest\n"))
1784 1784 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1785 1785 drop = [f for f in removed if f in m]
1786 1786 for f in drop:
1787 1787 del m[f]
1788 1788 mn = mctx.write(trp, linkrev,
1789 1789 p1.manifestnode(), p2.manifestnode(),
1790 1790 added, drop)
1791 1791 files = changed + removed
1792 1792 else:
1793 1793 mn = p1.manifestnode()
1794 1794 files = []
1795 1795
1796 1796 # update changelog
1797 1797 self.ui.note(_("committing changelog\n"))
1798 1798 self.changelog.delayupdate(tr)
1799 1799 n = self.changelog.add(mn, files, ctx.description(),
1800 1800 trp, p1.node(), p2.node(),
1801 1801 user, ctx.date(), ctx.extra().copy())
1802 1802 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1803 1803 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1804 1804 parent2=xp2)
1805 1805 # set the new commit in its proper phase
1806 1806 targetphase = subrepo.newcommitphase(self.ui, ctx)
1807 1807 if targetphase:
1808 1808 # retract boundary does not alter the parent changeset.
1809 1809 # if a parent has a higher phase, the resulting phase will
1810 1810 # be compliant anyway
1811 1811 #
1812 1812 # if minimal phase was 0 we don't need to retract anything
1813 1813 phases.retractboundary(self, tr, targetphase, [n])
1814 1814 tr.close()
1815 1815 return n
1816 1816 finally:
1817 1817 if tr:
1818 1818 tr.release()
1819 1819 lock.release()
1820 1820
1821 1821 @unfilteredmethod
1822 1822 def destroying(self):
1823 1823 '''Inform the repository that nodes are about to be destroyed.
1824 1824 Intended for use by strip and rollback, so there's a common
1825 1825 place for anything that has to be done before destroying history.
1826 1826
1827 1827 This is mostly useful for saving state that is in memory and waiting
1828 1828 to be flushed when the current lock is released. Because a call to
1829 1829 destroyed is imminent, the repo will be invalidated, causing those
1830 1830 changes to either stay in memory (waiting for the next unlock) or
1831 1831 vanish completely.
1832 1832 '''
1833 1833 # When using the same lock to commit and strip, the phasecache is left
1834 1834 # dirty after committing. Then when we strip, the repo is invalidated,
1835 1835 # causing those changes to disappear.
1836 1836 if '_phasecache' in vars(self):
1837 1837 self._phasecache.write()
1838 1838
1839 1839 @unfilteredmethod
1840 1840 def destroyed(self):
1841 1841 '''Inform the repository that nodes have been destroyed.
1842 1842 Intended for use by strip and rollback, so there's a common
1843 1843 place for anything that has to be done after destroying history.
1844 1844 '''
1845 1845 # When one tries to:
1846 1846 # 1) destroy nodes thus calling this method (e.g. strip)
1847 1847 # 2) use phasecache somewhere (e.g. commit)
1848 1848 #
1849 1849 # then 2) will fail because the phasecache contains nodes that were
1850 1850 # removed. We can either remove phasecache from the filecache,
1851 1851 # causing it to reload next time it is accessed, or simply filter
1852 1852 # the removed nodes now and write the updated cache.
1853 1853 self._phasecache.filterunknown(self)
1854 1854 self._phasecache.write()
1855 1855
1856 1856 # refresh all repository caches
1857 1857 self.updatecaches()
1858 1858
1859 1859 # Ensure the persistent tag cache is updated. Doing it now
1860 1860 # means that the tag cache only has to worry about destroyed
1861 1861 # heads immediately after a strip/rollback. That in turn
1862 1862 # guarantees that "cachetip == currenttip" (comparing both rev
1863 1863 # and node) always means no nodes have been added or destroyed.
1864 1864
1865 1865 # XXX this is suboptimal when qrefresh'ing: we strip the current
1866 1866 # head, refresh the tag cache, then immediately add a new head.
1867 1867 # But I think doing it this way is necessary for the "instant
1868 1868 # tag cache retrieval" case to work.
1869 1869 self.invalidate()
1870 1870
1871 1871 def walk(self, match, node=None):
1872 1872 '''
1873 1873 walk recursively through the directory tree or a given
1874 1874 changeset, finding all files matched by the match
1875 1875 function
1876 1876 '''
1877 1877 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
1878 1878 return self[node].walk(match)
1879 1879
1880 1880 def status(self, node1='.', node2=None, match=None,
1881 1881 ignored=False, clean=False, unknown=False,
1882 1882 listsubrepos=False):
1883 1883 '''a convenience method that calls node1.status(node2)'''
1884 1884 return self[node1].status(node2, match, ignored, clean, unknown,
1885 1885 listsubrepos)
1886 1886
1887 1887 def heads(self, start=None):
1888 1888 if start is None:
1889 1889 cl = self.changelog
1890 1890 headrevs = reversed(cl.headrevs())
1891 1891 return [cl.node(rev) for rev in headrevs]
1892 1892
1893 1893 heads = self.changelog.heads(start)
1894 1894 # sort the output in rev descending order
1895 1895 return sorted(heads, key=self.changelog.rev, reverse=True)
1896 1896
1897 1897 def branchheads(self, branch=None, start=None, closed=False):
1898 1898 '''return a (possibly filtered) list of heads for the given branch
1899 1899
1900 1900 Heads are returned in topological order, from newest to oldest.
1901 1901 If branch is None, use the dirstate branch.
1902 1902 If start is not None, return only heads reachable from start.
1903 1903 If closed is True, return heads that are marked as closed as well.
1904 1904 '''
1905 1905 if branch is None:
1906 1906 branch = self[None].branch()
1907 1907 branches = self.branchmap()
1908 1908 if branch not in branches:
1909 1909 return []
1910 1910 # the cache returns heads ordered lowest to highest
1911 1911 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1912 1912 if start is not None:
1913 1913 # filter out the heads that cannot be reached from startrev
1914 1914 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1915 1915 bheads = [h for h in bheads if h in fbheads]
1916 1916 return bheads
1917 1917
1918 1918 def branches(self, nodes):
1919 1919 if not nodes:
1920 1920 nodes = [self.changelog.tip()]
1921 1921 b = []
1922 1922 for n in nodes:
1923 1923 t = n
1924 1924 while True:
1925 1925 p = self.changelog.parents(n)
1926 1926 if p[1] != nullid or p[0] == nullid:
1927 1927 b.append((t, n, p[0], p[1]))
1928 1928 break
1929 1929 n = p[0]
1930 1930 return b
1931 1931
1932 1932 def between(self, pairs):
1933 1933 r = []
1934 1934
1935 1935 for top, bottom in pairs:
1936 1936 n, l, i = top, [], 0
1937 1937 f = 1
1938 1938
1939 1939 while n != bottom and n != nullid:
1940 1940 p = self.changelog.parents(n)[0]
1941 1941 if i == f:
1942 1942 l.append(n)
1943 1943 f = f * 2
1944 1944 n = p
1945 1945 i += 1
1946 1946
1947 1947 r.append(l)
1948 1948
1949 1949 return r
1950 1950
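# Editor's note: a worked illustration, not part of this changeset. For a
# (top, bottom) pair joined by a chain of first parents, the loop above
# samples the ancestors of top at power-of-two distances (append when
# i == f, with f doubling after each hit). Tracing the same logic:
#
#   distances = []
#   i, f = 0, 1
#   while i <= 10:          # a chain of ten ancestors below top
#       if i == f:
#           distances.append(i)
#           f *= 2
#       i += 1
#   # distances == [1, 2, 4, 8]: the nodes 1, 2, 4 and 8 steps below top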
1951 1951 def checkpush(self, pushop):
1952 1952 """Extensions can override this function if additional checks have
1953 1953 to be performed before pushing, or call it if they override the push
1954 1954 command.
1955 1955 """
1956 1956 pass
1957 1957
1958 1958 @unfilteredpropertycache
1959 1959 def prepushoutgoinghooks(self):
1960 1960 """Return util.hooks consists of a pushop with repo, remote, outgoing
1961 1961 methods, which are called before pushing changesets.
1962 1962 """
1963 1963 return util.hooks()
1964 1964
1965 1965 def pushkey(self, namespace, key, old, new):
1966 1966 try:
1967 1967 tr = self.currenttransaction()
1968 1968 hookargs = {}
1969 1969 if tr is not None:
1970 1970 hookargs.update(tr.hookargs)
1971 1971 hookargs['namespace'] = namespace
1972 1972 hookargs['key'] = key
1973 1973 hookargs['old'] = old
1974 1974 hookargs['new'] = new
1975 1975 self.hook('prepushkey', throw=True, **hookargs)
1976 1976 except error.HookAbort as exc:
1977 1977 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1978 1978 if exc.hint:
1979 1979 self.ui.write_err(_("(%s)\n") % exc.hint)
1980 1980 return False
1981 1981 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1982 1982 ret = pushkey.push(self, namespace, key, old, new)
1983 1983 def runhook():
1984 1984 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1985 1985 ret=ret)
1986 1986 self._afterlock(runhook)
1987 1987 return ret
1988 1988
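# Editor's note: a hedged illustration, not part of this changeset. The
# prepushkey hook above receives its keyword arguments as HG_* environment
# variables, so an hgrc entry like the following would reject every bookmark
# push (a nonzero exit from a prepushkey hook aborts the pushkey):
#
#   [hooks]
#   prepushkey.nobookmarks = test "$HG_NAMESPACE" != bookmarks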
1989 1989 def listkeys(self, namespace):
1990 1990 self.hook('prelistkeys', throw=True, namespace=namespace)
1991 1991 self.ui.debug('listing keys for "%s"\n' % namespace)
1992 1992 values = pushkey.list(self, namespace)
1993 1993 self.hook('listkeys', namespace=namespace, values=values)
1994 1994 return values
1995 1995
1996 1996 def debugwireargs(self, one, two, three=None, four=None, five=None):
1997 1997 '''used to test argument passing over the wire'''
1998 1998 return "%s %s %s %s %s" % (one, two, three, four, five)
1999 1999
2000 2000 def savecommitmessage(self, text):
2001 2001 fp = self.vfs('last-message.txt', 'wb')
2002 2002 try:
2003 2003 fp.write(text)
2004 2004 finally:
2005 2005 fp.close()
2006 2006 return self.pathto(fp.name[len(self.root) + 1:])
2007 2007
2008 2008 # used to avoid circular references so destructors work
2009 2009 def aftertrans(files):
2010 2010 renamefiles = [tuple(t) for t in files]
2011 2011 def a():
2012 2012 for vfs, src, dest in renamefiles:
2013 2013 # if src and dest refer to the same file, vfs.rename is a no-op,
2014 2014 # leaving both src and dest on disk. delete dest to make sure
2015 2015 # the rename couldn't be such a no-op.
2016 2016 vfs.tryunlink(dest)
2017 2017 try:
2018 2018 vfs.rename(src, dest)
2019 2019 except OSError: # journal file does not yet exist
2020 2020 pass
2021 2021 return a
2022 2022
2023 2023 def undoname(fn):
2024 2024 base, name = os.path.split(fn)
2025 2025 assert name.startswith('journal')
2026 2026 return os.path.join(base, name.replace('journal', 'undo', 1))
2027 2027
2028 2028 def instance(ui, path, create):
2029 2029 return localrepository(ui, util.urllocalpath(path), create)
2030 2030
2031 2031 def islocal(path):
2032 2032 return True
2033 2033
2034 2034 def newreporequirements(repo):
2035 2035 """Determine the set of requirements for a new local repository.
2036 2036
2037 2037 Extensions can wrap this function to specify custom requirements for
2038 2038 new repositories.
2039 2039 """
2040 2040 ui = repo.ui
2041 2041 requirements = {'revlogv1'}
2042 2042 if ui.configbool('format', 'usestore', True):
2043 2043 requirements.add('store')
2044 2044 if ui.configbool('format', 'usefncache', True):
2045 2045 requirements.add('fncache')
2046 2046 if ui.configbool('format', 'dotencode', True):
2047 2047 requirements.add('dotencode')
2048 2048
2049 2049 compengine = ui.config('experimental', 'format.compression', 'zlib')
2050 2050 if compengine not in util.compengines:
2051 2051 raise error.Abort(_('compression engine %s defined by '
2052 2052 'experimental.format.compression not available') %
2053 2053 compengine,
2054 2054 hint=_('run "hg debuginstall" to list available '
2055 2055 'compression engines'))
2056 2056
2057 2057 # zlib is the historical default and doesn't need an explicit requirement.
2058 2058 if compengine != 'zlib':
2059 2059 requirements.add('exp-compression-%s' % compengine)
2060 2060
2061 2061 if scmutil.gdinitconfig(ui):
2062 2062 requirements.add('generaldelta')
2063 2063 if ui.configbool('experimental', 'treemanifest', False):
2064 2064 requirements.add('treemanifest')
2065 2065 if ui.configbool('experimental', 'manifestv2', False):
2066 2066 requirements.add('manifestv2')
2067 2067
2068 2068 revlogv2 = ui.config('experimental', 'revlogv2')
2069 2069 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2070 2070 requirements.remove('revlogv1')
2071 2071 # generaldelta is implied by revlogv2.
2072 2072 requirements.discard('generaldelta')
2073 2073 requirements.add(REVLOGV2_REQUIREMENT)
2074 2074
2075 2075 return requirements
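# Editor's note: a hedged illustration, not part of this changeset. With
# hg 4.2-era format defaults plus the config below, newreporequirements()
# would return roughly {'revlogv1', 'store', 'fncache', 'dotencode',
# 'generaldelta', 'exp-compression-zstd'}, assuming a zstd engine is
# available in util.compengines:
#
#   [experimental]
#   format.compression = zstd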
@@ -1,190 +1,191 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from __future__ import absolute_import
11 11
12 12 import errno
13 13 import os
14 14
15 15 from .i18n import _
16 16 from . import (
17 17 byterange,
18 18 changelog,
19 19 error,
20 20 localrepo,
21 21 manifest,
22 22 namespaces,
23 23 scmutil,
24 24 store,
25 25 url,
26 26 util,
27 27 vfs as vfsmod,
28 28 )
29 29
30 30 urlerr = util.urlerr
31 31 urlreq = util.urlreq
32 32
33 33 class httprangereader(object):
34 34 def __init__(self, url, opener):
35 35 # we assume opener has HTTPRangeHandler
36 36 self.url = url
37 37 self.pos = 0
38 38 self.opener = opener
39 39 self.name = url
40 40
41 41 def __enter__(self):
42 42 return self
43 43
44 44 def __exit__(self, exc_type, exc_value, traceback):
45 45 self.close()
46 46
47 47 def seek(self, pos):
48 48 self.pos = pos
49 49 def read(self, bytes=None):
50 50 req = urlreq.request(self.url)
51 51 end = ''
52 52 if bytes:
53 53 end = self.pos + bytes - 1
54 54 if self.pos or end:
55 55 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
56 56
57 57 try:
58 58 f = self.opener.open(req)
59 59 data = f.read()
60 60 code = f.code
61 61 except urlerr.httperror as inst:
62 62 num = inst.code == 404 and errno.ENOENT or None
63 63 raise IOError(num, inst)
64 64 except urlerr.urlerror as inst:
65 65 raise IOError(None, inst.reason[1])
66 66
67 67 if code == 200:
68 68 # HTTPRangeHandler does nothing if remote does not support
69 69 # Range headers and returns the full entity. Let's slice it.
70 70 if bytes:
71 71 data = data[self.pos:self.pos + bytes]
72 72 else:
73 73 data = data[self.pos:]
74 74 elif bytes:
75 75 data = data[:bytes]
76 76 self.pos += len(data)
77 77 return data
78 78 def readlines(self):
79 79 return self.read().splitlines(True)
80 80 def __iter__(self):
81 81 return iter(self.readlines())
82 82 def close(self):
83 83 pass
84 84
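# Editor's note: a worked illustration, not part of this changeset; 'url'
# and 'opener' are assumed to come from build_opener() below. A call such
# as the following, with self.pos == 100, sends the header
# 'Range: bytes=100-109'; if the server ignores it and replies 200 with the
# full entity, the slicing above still returns the requested ten bytes:
#
#   with httprangereader(url, opener) as f:
#       f.seek(100)
#       data = f.read(10)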
85 85 def build_opener(ui, authinfo):
86 86 # urllib cannot handle URLs with embedded user or passwd
87 87 urlopener = url.opener(ui, authinfo)
88 88 urlopener.add_handler(byterange.HTTPRangeHandler())
89 89
90 90 class statichttpvfs(vfsmod.abstractvfs):
91 91 def __init__(self, base):
92 92 self.base = base
93 93
94 94 def __call__(self, path, mode='r', *args, **kw):
95 95 if mode not in ('r', 'rb'):
96 96 raise IOError('Permission denied')
97 97 f = "/".join((self.base, urlreq.quote(path)))
98 98 return httprangereader(f, urlopener)
99 99
100 100 def join(self, path):
101 101 if path:
102 102 return os.path.join(self.base, path)
103 103 else:
104 104 return self.base
105 105
106 106 return statichttpvfs
107 107
108 108 class statichttppeer(localrepo.localpeer):
109 109 def local(self):
110 110 return None
111 111 def canpush(self):
112 112 return False
113 113
114 114 class statichttprepository(localrepo.localrepository):
115 115 supported = localrepo.localrepository._basesupported
116 116
117 117 def __init__(self, ui, path):
118 118 self._url = path
119 119 self.ui = ui
120 120
121 121 self.root = path
122 122 u = util.url(path.rstrip('/') + "/.hg")
123 123 self.path, authinfo = u.authinfo()
124 124
125 125 vfsclass = build_opener(ui, authinfo)
126 126 self.vfs = vfsclass(self.path)
127 127 self._phasedefaults = []
128 128
129 129 self.names = namespaces.namespaces()
130 self.filtername = None
130 131
131 132 try:
132 133 requirements = scmutil.readrequires(self.vfs, self.supported)
133 134 except IOError as inst:
134 135 if inst.errno != errno.ENOENT:
135 136 raise
136 137 requirements = set()
137 138
138 139 # check if it is a non-empty old-style repository
139 140 try:
140 141 fp = self.vfs("00changelog.i")
141 142 fp.read(1)
142 143 fp.close()
143 144 except IOError as inst:
144 145 if inst.errno != errno.ENOENT:
145 146 raise
146 147 # we do not care about empty old-style repositories here
147 148 msg = _("'%s' does not appear to be an hg repository") % path
148 149 raise error.RepoError(msg)
149 150
150 151 # setup store
151 152 self.store = store.store(requirements, self.path, vfsclass)
152 153 self.spath = self.store.path
153 154 self.svfs = self.store.opener
154 155 self.sjoin = self.store.join
155 156 self._filecache = {}
156 157 self.requirements = requirements
157 158
158 159 self.manifestlog = manifest.manifestlog(self.svfs, self)
159 160 self.changelog = changelog.changelog(self.svfs)
160 161 self._tags = None
161 162 self.nodetagscache = None
162 163 self._branchcaches = {}
163 164 self._revbranchcache = None
164 165 self.encodepats = None
165 166 self.decodepats = None
166 167 self._transref = None
167 168
168 169 def _restrictcapabilities(self, caps):
169 170 caps = super(statichttprepository, self)._restrictcapabilities(caps)
170 171 return caps.difference(["pushkey"])
171 172
172 173 def url(self):
173 174 return self._url
174 175
175 176 def local(self):
176 177 return False
177 178
178 179 def peer(self):
179 180 return statichttppeer(self)
180 181
181 182 def lock(self, wait=True):
182 183 raise error.Abort(_('cannot lock static-http repository'))
183 184
184 185 def _writecaches(self):
185 186 pass # statichttprepository are read only
186 187
187 188 def instance(ui, path, create):
188 189 if create:
189 190 raise error.Abort(_('cannot create new static-http repository'))
190 191 return statichttprepository(ui, path[7:])
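# Editor's note: a usage illustration, not part of this changeset. A
# repository exported by any plain web server can be read through this
# class without hgweb, e.g.:
#
#   hg clone static-http://example.com/repos/project
#
# The client fetches the files under .hg/ directly, using the HTTP Range
# machinery in httprangereader above; pushing is impossible because lock()
# unconditionally aborts.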