localrepo: delete localrepo.manifest...
Durham Goode
r30376:f84fc6a9 default
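This changeset deletes the legacy `localrepo.manifest` shim property; callers reach the manifest through the cached `manifestlog` instead. A minimal sketch of the migration, assuming an open `repo` and using only names visible in the diff below (illustrative, not part of the change):

    # before: the shim property, deleted below
    #     m = repo.manifest              # returned manifestlog._oldmanifest
    # after: go through the manifestlog explicitly
    mfl = repo.manifestlog             # cached via @storecache('00manifest.i')
    m = mfl._oldmanifest               # the flat manifest the property exposed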
@@ -1,2004 +1,2000 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 urlerr = util.urlerr
62 62 urlreq = util.urlreq
63 63
64 64 class repofilecache(scmutil.filecache):
65 65 """All filecache usage on repo are done for logic that should be unfiltered
66 66 """
67 67
68 68 def __get__(self, repo, type=None):
69 69 if repo is None:
70 70 return self
71 71 return super(repofilecache, self).__get__(repo.unfiltered(), type)
72 72 def __set__(self, repo, value):
73 73 return super(repofilecache, self).__set__(repo.unfiltered(), value)
74 74 def __delete__(self, repo):
75 75 return super(repofilecache, self).__delete__(repo.unfiltered())
76 76
77 77 class storecache(repofilecache):
78 78 """filecache for files in the store"""
79 79 def join(self, obj, fname):
80 80 return obj.sjoin(fname)
81 81
82 82 class unfilteredpropertycache(util.propertycache):
83 83 """propertycache that apply to unfiltered repo only"""
84 84
85 85 def __get__(self, repo, type=None):
86 86 unfi = repo.unfiltered()
87 87 if unfi is repo:
88 88 return super(unfilteredpropertycache, self).__get__(unfi)
89 89 return getattr(unfi, self.name)
90 90
91 91 class filteredpropertycache(util.propertycache):
92 92 """propertycache that must take filtering in account"""
93 93
94 94 def cachevalue(self, obj, value):
95 95 object.__setattr__(obj, self.name, value)
96 96
97 97
98 98 def hasunfilteredcache(repo, name):
99 99 """check if a repo has an unfilteredpropertycache value for <name>"""
100 100 return name in vars(repo.unfiltered())
101 101
102 102 def unfilteredmethod(orig):
103 103 """decorate method that always need to be run on unfiltered version"""
104 104 def wrapper(repo, *args, **kwargs):
105 105 return orig(repo.unfiltered(), *args, **kwargs)
106 106 return wrapper
107 107
108 108 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
109 109 'unbundle'))
110 110 legacycaps = moderncaps.union(set(['changegroupsubset']))
111 111
112 112 class localpeer(peer.peerrepository):
113 113 '''peer for a local repo; reflects only the most recent API'''
114 114
115 115 def __init__(self, repo, caps=moderncaps):
116 116 peer.peerrepository.__init__(self)
117 117 self._repo = repo.filtered('served')
118 118 self.ui = repo.ui
119 119 self._caps = repo._restrictcapabilities(caps)
120 120 self.requirements = repo.requirements
121 121 self.supportedformats = repo.supportedformats
122 122
123 123 def close(self):
124 124 self._repo.close()
125 125
126 126 def _capabilities(self):
127 127 return self._caps
128 128
129 129 def local(self):
130 130 return self._repo
131 131
132 132 def canpush(self):
133 133 return True
134 134
135 135 def url(self):
136 136 return self._repo.url()
137 137
138 138 def lookup(self, key):
139 139 return self._repo.lookup(key)
140 140
141 141 def branchmap(self):
142 142 return self._repo.branchmap()
143 143
144 144 def heads(self):
145 145 return self._repo.heads()
146 146
147 147 def known(self, nodes):
148 148 return self._repo.known(nodes)
149 149
150 150 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
151 151 **kwargs):
152 152 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
153 153 common=common, bundlecaps=bundlecaps,
154 154 **kwargs)
155 155 cb = util.chunkbuffer(chunks)
156 156
157 157 if bundlecaps is not None and 'HG20' in bundlecaps:
158 158 # When requesting a bundle2, getbundle returns a stream to make the
159 159 # wire-level function happier. We need to build a proper object
160 160 # from it in the local peer.
161 161 return bundle2.getunbundler(self.ui, cb)
162 162 else:
163 163 return changegroup.getunbundler('01', cb, None)
164 164
165 165 # TODO We might want to move the next two calls into legacypeer and add
166 166 # unbundle instead.
167 167
168 168 def unbundle(self, cg, heads, url):
169 169 """apply a bundle on a repo
170 170
171 171 This function handles the repo locking itself."""
172 172 try:
173 173 try:
174 174 cg = exchange.readbundle(self.ui, cg, None)
175 175 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
176 176 if util.safehasattr(ret, 'getchunks'):
177 177 # This is a bundle20 object, turn it into an unbundler.
178 178 # This little dance should be dropped eventually when the
179 179 # API is finally improved.
180 180 stream = util.chunkbuffer(ret.getchunks())
181 181 ret = bundle2.getunbundler(self.ui, stream)
182 182 return ret
183 183 except Exception as exc:
184 184 # If the exception contains output salvaged from a bundle2
185 185 # reply, we need to make sure it is printed before continuing
186 186 # to fail. So we build a bundle2 with such output and consume
187 187 # it directly.
188 188 #
189 189 # This is not very elegant but allows a "simple" solution for
190 190 # issue4594
191 191 output = getattr(exc, '_bundle2salvagedoutput', ())
192 192 if output:
193 193 bundler = bundle2.bundle20(self._repo.ui)
194 194 for out in output:
195 195 bundler.addpart(out)
196 196 stream = util.chunkbuffer(bundler.getchunks())
197 197 b = bundle2.getunbundler(self.ui, stream)
198 198 bundle2.processbundle(self._repo, b)
199 199 raise
200 200 except error.PushRaced as exc:
201 201 raise error.ResponseError(_('push failed:'), str(exc))
202 202
203 203 def lock(self):
204 204 return self._repo.lock()
205 205
206 206 def addchangegroup(self, cg, source, url):
207 207 return cg.apply(self._repo, source, url)
208 208
209 209 def pushkey(self, namespace, key, old, new):
210 210 return self._repo.pushkey(namespace, key, old, new)
211 211
212 212 def listkeys(self, namespace):
213 213 return self._repo.listkeys(namespace)
214 214
215 215 def debugwireargs(self, one, two, three=None, four=None, five=None):
216 216 '''used to test argument passing over the wire'''
217 217 return "%s %s %s %s %s" % (one, two, three, four, five)
218 218
219 219 class locallegacypeer(localpeer):
220 220 '''peer extension which implements legacy methods too; used for tests with
221 221 restricted capabilities'''
222 222
223 223 def __init__(self, repo):
224 224 localpeer.__init__(self, repo, caps=legacycaps)
225 225
226 226 def branches(self, nodes):
227 227 return self._repo.branches(nodes)
228 228
229 229 def between(self, pairs):
230 230 return self._repo.between(pairs)
231 231
232 232 def changegroup(self, basenodes, source):
233 233 return changegroup.changegroup(self._repo, basenodes, source)
234 234
235 235 def changegroupsubset(self, bases, heads, source):
236 236 return changegroup.changegroupsubset(self._repo, bases, heads, source)
237 237
238 238 class localrepository(object):
239 239
240 240 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
241 241 'manifestv2'))
242 242 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
243 243 'dotencode'))
244 244 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
245 245 filtername = None
246 246
247 247 # a list of (ui, featureset) functions.
248 248 # only functions defined in module of enabled extensions are invoked
249 249 featuresetupfuncs = set()
250 250
251 251 def __init__(self, baseui, path=None, create=False):
252 252 self.requirements = set()
253 253 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
254 254 self.wopener = self.wvfs
255 255 self.root = self.wvfs.base
256 256 self.path = self.wvfs.join(".hg")
257 257 self.origroot = path
258 258 self.auditor = pathutil.pathauditor(self.root, self._checknested)
259 259 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
260 260 realfs=False)
261 261 self.vfs = scmutil.vfs(self.path)
262 262 self.opener = self.vfs
263 263 self.baseui = baseui
264 264 self.ui = baseui.copy()
265 265 self.ui.copy = baseui.copy # prevent copying repo configuration
266 266 # A list of callbacks to shape the phase if no data were found.
267 267 # Callbacks are in the form: func(repo, roots) --> processed root.
268 268 # This list is to be filled by extensions during repo setup.
269 269 self._phasedefaults = []
270 270 try:
271 271 self.ui.readconfig(self.join("hgrc"), self.root)
272 272 extensions.loadall(self.ui)
273 273 except IOError:
274 274 pass
275 275
276 276 if self.featuresetupfuncs:
277 277 self.supported = set(self._basesupported) # use private copy
278 278 extmods = set(m.__name__ for n, m
279 279 in extensions.extensions(self.ui))
280 280 for setupfunc in self.featuresetupfuncs:
281 281 if setupfunc.__module__ in extmods:
282 282 setupfunc(self.ui, self.supported)
283 283 else:
284 284 self.supported = self._basesupported
285 285
286 286 if not self.vfs.isdir():
287 287 if create:
288 288 self.requirements = newreporequirements(self)
289 289
290 290 if not self.wvfs.exists():
291 291 self.wvfs.makedirs()
292 292 self.vfs.makedir(notindexed=True)
293 293
294 294 if 'store' in self.requirements:
295 295 self.vfs.mkdir("store")
296 296
297 297 # create an invalid changelog
298 298 self.vfs.append(
299 299 "00changelog.i",
300 300 '\0\0\0\2' # represents revlogv2
301 301 ' dummy changelog to prevent using the old repo layout'
302 302 )
303 303 else:
304 304 raise error.RepoError(_("repository %s not found") % path)
305 305 elif create:
306 306 raise error.RepoError(_("repository %s already exists") % path)
307 307 else:
308 308 try:
309 309 self.requirements = scmutil.readrequires(
310 310 self.vfs, self.supported)
311 311 except IOError as inst:
312 312 if inst.errno != errno.ENOENT:
313 313 raise
314 314
315 315 self.sharedpath = self.path
316 316 try:
317 317 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
318 318 realpath=True)
319 319 s = vfs.base
320 320 if not vfs.exists():
321 321 raise error.RepoError(
322 322 _('.hg/sharedpath points to nonexistent directory %s') % s)
323 323 self.sharedpath = s
324 324 except IOError as inst:
325 325 if inst.errno != errno.ENOENT:
326 326 raise
327 327
328 328 self.store = store.store(
329 329 self.requirements, self.sharedpath, scmutil.vfs)
330 330 self.spath = self.store.path
331 331 self.svfs = self.store.vfs
332 332 self.sjoin = self.store.join
333 333 self.vfs.createmode = self.store.createmode
334 334 self._applyopenerreqs()
335 335 if create:
336 336 self._writerequirements()
337 337
338 338 self._dirstatevalidatewarned = False
339 339
340 340 self._branchcaches = {}
341 341 self._revbranchcache = None
342 342 self.filterpats = {}
343 343 self._datafilters = {}
344 344 self._transref = self._lockref = self._wlockref = None
345 345
346 346 # A cache for various files under .hg/ that tracks file changes,
347 347 # (used by the filecache decorator)
348 348 #
349 349 # Maps a property name to its util.filecacheentry
350 350 self._filecache = {}
351 351
352 352 # holds sets of revisions to be filtered
353 353 # should be cleared when something might have changed the filter value:
354 354 # - new changesets,
355 355 # - phase change,
356 356 # - new obsolescence marker,
357 357 # - working directory parent change,
358 358 # - bookmark changes
359 359 self.filteredrevcache = {}
360 360
361 361 # generic mapping between names and nodes
362 362 self.names = namespaces.namespaces()
363 363
364 364 def close(self):
365 365 self._writecaches()
366 366
367 367 def _writecaches(self):
368 368 if self._revbranchcache:
369 369 self._revbranchcache.write()
370 370
371 371 def _restrictcapabilities(self, caps):
372 372 if self.ui.configbool('experimental', 'bundle2-advertise', True):
373 373 caps = set(caps)
374 374 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
375 375 caps.add('bundle2=' + urlreq.quote(capsblob))
376 376 return caps
377 377
378 378 def _applyopenerreqs(self):
379 379 self.svfs.options = dict((r, 1) for r in self.requirements
380 380 if r in self.openerreqs)
381 381 # experimental config: format.chunkcachesize
382 382 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
383 383 if chunkcachesize is not None:
384 384 self.svfs.options['chunkcachesize'] = chunkcachesize
385 385 # experimental config: format.maxchainlen
386 386 maxchainlen = self.ui.configint('format', 'maxchainlen')
387 387 if maxchainlen is not None:
388 388 self.svfs.options['maxchainlen'] = maxchainlen
389 389 # experimental config: format.manifestcachesize
390 390 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
391 391 if manifestcachesize is not None:
392 392 self.svfs.options['manifestcachesize'] = manifestcachesize
393 393 # experimental config: format.aggressivemergedeltas
394 394 aggressivemergedeltas = self.ui.configbool('format',
395 395 'aggressivemergedeltas', False)
396 396 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
397 397 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
398 398
399 399 def _writerequirements(self):
400 400 scmutil.writerequires(self.vfs, self.requirements)
401 401
402 402 def _checknested(self, path):
403 403 """Determine if path is a legal nested repository."""
404 404 if not path.startswith(self.root):
405 405 return False
406 406 subpath = path[len(self.root) + 1:]
407 407 normsubpath = util.pconvert(subpath)
408 408
409 409 # XXX: Checking against the current working copy is wrong in
410 410 # the sense that it can reject things like
411 411 #
412 412 # $ hg cat -r 10 sub/x.txt
413 413 #
414 414 # if sub/ is no longer a subrepository in the working copy
415 415 # parent revision.
416 416 #
417 417 # However, it can of course also allow things that would have
418 418 # been rejected before, such as the above cat command if sub/
419 419 # is a subrepository now, but was a normal directory before.
420 420 # The old path auditor would have rejected by mistake since it
421 421 # panics when it sees sub/.hg/.
422 422 #
423 423 # All in all, checking against the working copy seems sensible
424 424 # since we want to prevent access to nested repositories on
425 425 # the filesystem *now*.
426 426 ctx = self[None]
427 427 parts = util.splitpath(subpath)
428 428 while parts:
429 429 prefix = '/'.join(parts)
430 430 if prefix in ctx.substate:
431 431 if prefix == normsubpath:
432 432 return True
433 433 else:
434 434 sub = ctx.sub(prefix)
435 435 return sub.checknested(subpath[len(prefix) + 1:])
436 436 else:
437 437 parts.pop()
438 438 return False
439 439
440 440 def peer(self):
441 441 return localpeer(self) # not cached to avoid reference cycle
442 442
443 443 def unfiltered(self):
444 444 """Return unfiltered version of the repository
445 445
446 446 Intended to be overwritten by filtered repo."""
447 447 return self
448 448
449 449 def filtered(self, name):
450 450 """Return a filtered version of a repository"""
451 451 # build a new class with the mixin and the current class
452 452 # (possibly subclass of the repo)
453 453 class proxycls(repoview.repoview, self.unfiltered().__class__):
454 454 pass
455 455 return proxycls(self, name)
456 456
457 457 @repofilecache('bookmarks', 'bookmarks.current')
458 458 def _bookmarks(self):
459 459 return bookmarks.bmstore(self)
460 460
461 461 @property
462 462 def _activebookmark(self):
463 463 return self._bookmarks.active
464 464
465 465 def bookmarkheads(self, bookmark):
466 466 name = bookmark.split('@', 1)[0]
467 467 heads = []
468 468 for mark, n in self._bookmarks.iteritems():
469 469 if mark.split('@', 1)[0] == name:
470 470 heads.append(n)
471 471 return heads
472 472
473 473 # _phaserevs and _phasesets depend on the changelog. What we need is to
474 474 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
475 475 # can't be easily expressed in the filecache mechanism.
476 476 @storecache('phaseroots', '00changelog.i')
477 477 def _phasecache(self):
478 478 return phases.phasecache(self, self._phasedefaults)
479 479
480 480 @storecache('obsstore')
481 481 def obsstore(self):
482 482 # read default format for new obsstore.
483 483 # developer config: format.obsstore-version
484 484 defaultformat = self.ui.configint('format', 'obsstore-version', None)
485 485 # rely on obsstore class default when possible.
486 486 kwargs = {}
487 487 if defaultformat is not None:
488 488 kwargs['defaultformat'] = defaultformat
489 489 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
490 490 store = obsolete.obsstore(self.svfs, readonly=readonly,
491 491 **kwargs)
492 492 if store and readonly:
493 493 self.ui.warn(
494 494 _('obsolete feature not enabled but %i markers found!\n')
495 495 % len(list(store)))
496 496 return store
497 497
498 498 @storecache('00changelog.i')
499 499 def changelog(self):
500 500 c = changelog.changelog(self.svfs)
501 501 if 'HG_PENDING' in os.environ:
502 502 p = os.environ['HG_PENDING']
503 503 if p.startswith(self.root):
504 504 c.readpending('00changelog.i.a')
505 505 return c
506 506
507 @property
508 def manifest(self):
509 return self.manifestlog._oldmanifest
510
511 507 def _constructmanifest(self):
512 508 # This is a temporary function while we migrate from manifest to
513 509 # manifestlog. It allows bundlerepo and unionrepo to intercept the
514 510 # manifest creation.
515 511 return manifest.manifest(self.svfs)
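# Illustration only (a hypothetical sketch, not part of this change):
# the hook above lets derived repo classes such as bundlerepo and
# unionrepo intercept manifest creation by overriding it, e.g.:
#
#     class bundlerepository(localrepository):
#         def _constructmanifest(self):
#             # wire the bundle's manifest data in here (sketch)
#             return manifest.manifest(self.svfs)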
516 512
517 513 @storecache('00manifest.i')
518 514 def manifestlog(self):
519 515 return manifest.manifestlog(self.svfs, self)
520 516
521 517 @repofilecache('dirstate')
522 518 def dirstate(self):
523 519 return dirstate.dirstate(self.vfs, self.ui, self.root,
524 520 self._dirstatevalidate)
525 521
526 522 def _dirstatevalidate(self, node):
527 523 try:
528 524 self.changelog.rev(node)
529 525 return node
530 526 except error.LookupError:
531 527 if not self._dirstatevalidatewarned:
532 528 self._dirstatevalidatewarned = True
533 529 self.ui.warn(_("warning: ignoring unknown"
534 530 " working parent %s!\n") % short(node))
535 531 return nullid
536 532
537 533 def __getitem__(self, changeid):
538 534 if changeid is None or changeid == wdirrev:
539 535 return context.workingctx(self)
540 536 if isinstance(changeid, slice):
541 537 return [context.changectx(self, i)
542 538 for i in xrange(*changeid.indices(len(self)))
543 539 if i not in self.changelog.filteredrevs]
544 540 return context.changectx(self, changeid)
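# Illustrative lookups through __getitem__ above (sketch): repo[None]
# yields a workingctx; repo['tip'] or repo[0] yields a changectx; a
# slice such as repo[0:3] yields a list of changectx, skipping
# filtered revisions as implemented above.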
545 541
546 542 def __contains__(self, changeid):
547 543 try:
548 544 self[changeid]
549 545 return True
550 546 except error.RepoLookupError:
551 547 return False
552 548
553 549 def __nonzero__(self):
554 550 return True
555 551
556 552 def __len__(self):
557 553 return len(self.changelog)
558 554
559 555 def __iter__(self):
560 556 return iter(self.changelog)
561 557
562 558 def revs(self, expr, *args):
563 559 '''Find revisions matching a revset.
564 560
565 561 The revset is specified as a string ``expr`` that may contain
566 562 %-formatting to escape certain types. See ``revset.formatspec``.
567 563
568 564 Revset aliases from the configuration are not expanded. To expand
569 565 user aliases, consider calling ``scmutil.revrange()``.
570 566
571 567 Returns a revset.abstractsmartset, which is a list-like interface
572 568 that contains integer revisions.
573 569 '''
574 570 expr = revset.formatspec(expr, *args)
575 571 m = revset.match(None, expr)
576 572 return m(self)
577 573
578 574 def set(self, expr, *args):
579 575 '''Find revisions matching a revset and emit changectx instances.
580 576
581 577 This is a convenience wrapper around ``revs()`` that iterates the
582 578 result and is a generator of changectx instances.
583 579
584 580 Revset aliases from the configuration are not expanded. To expand
585 581 user aliases, consider calling ``scmutil.revrange()``.
586 582 '''
587 583 for r in self.revs(expr, *args):
588 584 yield self[r]
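# Illustrative usage of revs() and set() above (sketch): %-escapes are
# expanded by revset.formatspec, and config aliases are not expanded,
# per the docstrings.
#
#     rs = repo.revs('branch(%s)', 'default')    # smartset of ints
#     for ctx in repo.set('parents(%d)', 5):     # changectx generator
#         pass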
589 585
590 586 def url(self):
591 587 return 'file:' + self.root
592 588
593 589 def hook(self, name, throw=False, **args):
594 590 """Call a hook, passing this repo instance.
595 591
596 592 This is a convenience method to aid invoking hooks. Extensions likely
597 593 won't call this unless they have registered a custom hook or are
598 594 replacing code that is expected to call a hook.
599 595 """
600 596 return hook.hook(self.ui, self, name, throw, **args)
601 597
602 598 @unfilteredmethod
603 599 def _tag(self, names, node, message, local, user, date, extra=None,
604 600 editor=False):
605 601 if isinstance(names, str):
606 602 names = (names,)
607 603
608 604 branches = self.branchmap()
609 605 for name in names:
610 606 self.hook('pretag', throw=True, node=hex(node), tag=name,
611 607 local=local)
612 608 if name in branches:
613 609 self.ui.warn(_("warning: tag %s conflicts with existing"
614 610 " branch name\n") % name)
615 611
616 612 def writetags(fp, names, munge, prevtags):
617 613 fp.seek(0, 2)
618 614 if prevtags and prevtags[-1] != '\n':
619 615 fp.write('\n')
620 616 for name in names:
621 617 if munge:
622 618 m = munge(name)
623 619 else:
624 620 m = name
625 621
626 622 if (self._tagscache.tagtypes and
627 623 name in self._tagscache.tagtypes):
628 624 old = self.tags().get(name, nullid)
629 625 fp.write('%s %s\n' % (hex(old), m))
630 626 fp.write('%s %s\n' % (hex(node), m))
631 627 fp.close()
632 628
633 629 prevtags = ''
634 630 if local:
635 631 try:
636 632 fp = self.vfs('localtags', 'r+')
637 633 except IOError:
638 634 fp = self.vfs('localtags', 'a')
639 635 else:
640 636 prevtags = fp.read()
641 637
642 638 # local tags are stored in the current charset
643 639 writetags(fp, names, None, prevtags)
644 640 for name in names:
645 641 self.hook('tag', node=hex(node), tag=name, local=local)
646 642 return
647 643
648 644 try:
649 645 fp = self.wfile('.hgtags', 'rb+')
650 646 except IOError as e:
651 647 if e.errno != errno.ENOENT:
652 648 raise
653 649 fp = self.wfile('.hgtags', 'ab')
654 650 else:
655 651 prevtags = fp.read()
656 652
657 653 # committed tags are stored in UTF-8
658 654 writetags(fp, names, encoding.fromlocal, prevtags)
659 655
660 656 fp.close()
661 657
662 658 self.invalidatecaches()
663 659
664 660 if '.hgtags' not in self.dirstate:
665 661 self[None].add(['.hgtags'])
666 662
667 663 m = matchmod.exact(self.root, '', ['.hgtags'])
668 664 tagnode = self.commit(message, user, date, extra=extra, match=m,
669 665 editor=editor)
670 666
671 667 for name in names:
672 668 self.hook('tag', node=hex(node), tag=name, local=local)
673 669
674 670 return tagnode
675 671
676 672 def tag(self, names, node, message, local, user, date, editor=False):
677 673 '''tag a revision with one or more symbolic names.
678 674
679 675 names is a list of strings or, when adding a single tag, names may be a
680 676 string.
681 677
682 678 if local is True, the tags are stored in a per-repository file.
683 679 otherwise, they are stored in the .hgtags file, and a new
684 680 changeset is committed with the change.
685 681
686 682 keyword arguments:
687 683
688 684 local: whether to store tags in non-version-controlled file
689 685 (default False)
690 686
691 687 message: commit message to use if committing
692 688
693 689 user: name of user to use if committing
694 690
695 691 date: date tuple to use if committing'''
696 692
697 693 if not local:
698 694 m = matchmod.exact(self.root, '', ['.hgtags'])
699 695 if any(self.status(match=m, unknown=True, ignored=True)):
700 696 raise error.Abort(_('working copy of .hgtags is changed'),
701 697 hint=_('please commit .hgtags manually'))
702 698
703 699 self.tags() # instantiate the cache
704 700 self._tag(names, node, message, local, user, date, editor=editor)
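# Example call for tag() above (illustrative sketch): add a global
# tag to the tip; commit() supplies defaults for user and date.
#
#     repo.tag(['v1.0'], repo['tip'].node(), 'Added tag v1.0',
#              local=False, user=None, date=None)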
705 701
706 702 @filteredpropertycache
707 703 def _tagscache(self):
708 704 '''Returns a tagscache object that contains various tags-related
709 705 caches.'''
710 706
711 707 # This simplifies its cache management by having one decorated
712 708 # function (this one) and the rest simply fetch things from it.
713 709 class tagscache(object):
714 710 def __init__(self):
715 711 # These two define the set of tags for this repository. tags
716 712 # maps tag name to node; tagtypes maps tag name to 'global' or
717 713 # 'local'. (Global tags are defined by .hgtags across all
718 714 # heads, and local tags are defined in .hg/localtags.)
719 715 # They constitute the in-memory cache of tags.
720 716 self.tags = self.tagtypes = None
721 717
722 718 self.nodetagscache = self.tagslist = None
723 719
724 720 cache = tagscache()
725 721 cache.tags, cache.tagtypes = self._findtags()
726 722
727 723 return cache
728 724
729 725 def tags(self):
730 726 '''return a mapping of tag to node'''
731 727 t = {}
732 728 if self.changelog.filteredrevs:
733 729 tags, tt = self._findtags()
734 730 else:
735 731 tags = self._tagscache.tags
736 732 for k, v in tags.iteritems():
737 733 try:
738 734 # ignore tags to unknown nodes
739 735 self.changelog.rev(v)
740 736 t[k] = v
741 737 except (error.LookupError, ValueError):
742 738 pass
743 739 return t
744 740
745 741 def _findtags(self):
746 742 '''Do the hard work of finding tags. Return a pair of dicts
747 743 (tags, tagtypes) where tags maps tag name to node, and tagtypes
748 744 maps tag name to a string like \'global\' or \'local\'.
749 745 Subclasses or extensions are free to add their own tags, but
750 746 should be aware that the returned dicts will be retained for the
751 747 duration of the localrepo object.'''
752 748
753 749 # XXX what tagtype should subclasses/extensions use? Currently
754 750 # mq and bookmarks add tags, but do not set the tagtype at all.
755 751 # Should each extension invent its own tag type? Should there
756 752 # be one tagtype for all such "virtual" tags? Or is the status
757 753 # quo fine?
758 754
759 755 alltags = {} # map tag name to (node, hist)
760 756 tagtypes = {}
761 757
762 758 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
763 759 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
764 760
765 761 # Build the return dicts. Have to re-encode tag names because
766 762 # the tags module always uses UTF-8 (in order not to lose info
767 763 # writing to the cache), but the rest of Mercurial wants them in
768 764 # local encoding.
769 765 tags = {}
770 766 for (name, (node, hist)) in alltags.iteritems():
771 767 if node != nullid:
772 768 tags[encoding.tolocal(name)] = node
773 769 tags['tip'] = self.changelog.tip()
774 770 tagtypes = dict([(encoding.tolocal(name), value)
775 771 for (name, value) in tagtypes.iteritems()])
776 772 return (tags, tagtypes)
777 773
778 774 def tagtype(self, tagname):
779 775 '''
780 776 return the type of the given tag. result can be:
781 777
782 778 'local' : a local tag
783 779 'global' : a global tag
784 780 None : tag does not exist
785 781 '''
786 782
787 783 return self._tagscache.tagtypes.get(tagname)
788 784
789 785 def tagslist(self):
790 786 '''return a list of tags ordered by revision'''
791 787 if not self._tagscache.tagslist:
792 788 l = []
793 789 for t, n in self.tags().iteritems():
794 790 l.append((self.changelog.rev(n), t, n))
795 791 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
796 792
797 793 return self._tagscache.tagslist
798 794
799 795 def nodetags(self, node):
800 796 '''return the tags associated with a node'''
801 797 if not self._tagscache.nodetagscache:
802 798 nodetagscache = {}
803 799 for t, n in self._tagscache.tags.iteritems():
804 800 nodetagscache.setdefault(n, []).append(t)
805 801 for tags in nodetagscache.itervalues():
806 802 tags.sort()
807 803 self._tagscache.nodetagscache = nodetagscache
808 804 return self._tagscache.nodetagscache.get(node, [])
809 805
810 806 def nodebookmarks(self, node):
811 807 """return the list of bookmarks pointing to the specified node"""
812 808 marks = []
813 809 for bookmark, n in self._bookmarks.iteritems():
814 810 if n == node:
815 811 marks.append(bookmark)
816 812 return sorted(marks)
817 813
818 814 def branchmap(self):
819 815 '''returns a dictionary {branch: [branchheads]} with branchheads
820 816 ordered by increasing revision number'''
821 817 branchmap.updatecache(self)
822 818 return self._branchcaches[self.filtername]
823 819
824 820 @unfilteredmethod
825 821 def revbranchcache(self):
826 822 if not self._revbranchcache:
827 823 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
828 824 return self._revbranchcache
829 825
830 826 def branchtip(self, branch, ignoremissing=False):
831 827 '''return the tip node for a given branch
832 828
833 829 If ignoremissing is True, then this method will not raise an error.
834 830 This is helpful for callers that only expect None for a missing branch
835 831 (e.g. namespace).
836 832
837 833 '''
838 834 try:
839 835 return self.branchmap().branchtip(branch)
840 836 except KeyError:
841 837 if not ignoremissing:
842 838 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
843 839 else:
844 840 pass
845 841
846 842 def lookup(self, key):
847 843 return self[key].node()
848 844
849 845 def lookupbranch(self, key, remote=None):
850 846 repo = remote or self
851 847 if key in repo.branchmap():
852 848 return key
853 849
854 850 repo = (remote and remote.local()) and remote or self
855 851 return repo[key].branch()
856 852
857 853 def known(self, nodes):
858 854 cl = self.changelog
859 855 nm = cl.nodemap
860 856 filtered = cl.filteredrevs
861 857 result = []
862 858 for n in nodes:
863 859 r = nm.get(n)
864 860 resp = not (r is None or r in filtered)
865 861 result.append(resp)
866 862 return result
867 863
868 864 def local(self):
869 865 return self
870 866
871 867 def publishing(self):
872 868 # it's safe (and desirable) to trust the publish flag unconditionally
873 869 # so that we don't finalize changes shared between users via ssh or nfs
874 870 return self.ui.configbool('phases', 'publish', True, untrusted=True)
875 871
876 872 def cancopy(self):
877 873 # so statichttprepo's override of local() works
878 874 if not self.local():
879 875 return False
880 876 if not self.publishing():
881 877 return True
882 878 # if publishing we can't copy if there is filtered content
883 879 return not self.filtered('visible').changelog.filteredrevs
884 880
885 881 def shared(self):
886 882 '''the type of shared repository (None if not shared)'''
887 883 if self.sharedpath != self.path:
888 884 return 'store'
889 885 return None
890 886
891 887 def join(self, f, *insidef):
892 888 return self.vfs.join(os.path.join(f, *insidef))
893 889
894 890 def wjoin(self, f, *insidef):
895 891 return self.vfs.reljoin(self.root, f, *insidef)
896 892
897 893 def file(self, f):
898 894 if f[0] == '/':
899 895 f = f[1:]
900 896 return filelog.filelog(self.svfs, f)
901 897
902 898 def changectx(self, changeid):
903 899 return self[changeid]
904 900
905 901 def setparents(self, p1, p2=nullid):
906 902 self.dirstate.beginparentchange()
907 903 copies = self.dirstate.setparents(p1, p2)
908 904 pctx = self[p1]
909 905 if copies:
910 906 # Adjust copy records; the dirstate cannot do it, as it
911 907 # requires access to the parents' manifests. Preserve them
912 908 # only for entries added to the first parent.
913 909 for f in copies:
914 910 if f not in pctx and copies[f] in pctx:
915 911 self.dirstate.copy(copies[f], f)
916 912 if p2 == nullid:
917 913 for f, s in sorted(self.dirstate.copies().items()):
918 914 if f not in pctx and s not in pctx:
919 915 self.dirstate.copy(None, f)
920 916 self.dirstate.endparentchange()
921 917
922 918 def filectx(self, path, changeid=None, fileid=None):
923 919 """changeid can be a changeset revision, node, or tag.
924 920 fileid can be a file revision or node."""
925 921 return context.filectx(self, path, changeid, fileid)
926 922
927 923 def getcwd(self):
928 924 return self.dirstate.getcwd()
929 925
930 926 def pathto(self, f, cwd=None):
931 927 return self.dirstate.pathto(f, cwd)
932 928
933 929 def wfile(self, f, mode='r'):
934 930 return self.wvfs(f, mode)
935 931
936 932 def _link(self, f):
937 933 return self.wvfs.islink(f)
938 934
939 935 def _loadfilter(self, filter):
940 936 if filter not in self.filterpats:
941 937 l = []
942 938 for pat, cmd in self.ui.configitems(filter):
943 939 if cmd == '!':
944 940 continue
945 941 mf = matchmod.match(self.root, '', [pat])
946 942 fn = None
947 943 params = cmd
948 944 for name, filterfn in self._datafilters.iteritems():
949 945 if cmd.startswith(name):
950 946 fn = filterfn
951 947 params = cmd[len(name):].lstrip()
952 948 break
953 949 if not fn:
954 950 fn = lambda s, c, **kwargs: util.filter(s, c)
955 951 # Wrap old filters not supporting keyword arguments
956 952 if not inspect.getargspec(fn)[2]:
957 953 oldfn = fn
958 954 fn = lambda s, c, **kwargs: oldfn(s, c)
959 955 l.append((mf, fn, params))
960 956 self.filterpats[filter] = l
961 957 return self.filterpats[filter]
962 958
963 959 def _filter(self, filterpats, filename, data):
964 960 for mf, fn, cmd in filterpats:
965 961 if mf(filename):
966 962 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
967 963 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
968 964 break
969 965
970 966 return data
971 967
972 968 @unfilteredpropertycache
973 969 def _encodefilterpats(self):
974 970 return self._loadfilter('encode')
975 971
976 972 @unfilteredpropertycache
977 973 def _decodefilterpats(self):
978 974 return self._loadfilter('decode')
979 975
980 976 def adddatafilter(self, name, filter):
981 977 self._datafilters[name] = filter
982 978
983 979 def wread(self, filename):
984 980 if self._link(filename):
985 981 data = self.wvfs.readlink(filename)
986 982 else:
987 983 data = self.wvfs.read(filename)
988 984 return self._filter(self._encodefilterpats, filename, data)
989 985
990 986 def wwrite(self, filename, data, flags, backgroundclose=False):
991 987 """write ``data`` into ``filename`` in the working directory
992 988
993 989 This returns the length of the written (maybe decoded) data.
994 990 """
995 991 data = self._filter(self._decodefilterpats, filename, data)
996 992 if 'l' in flags:
997 993 self.wvfs.symlink(data, filename)
998 994 else:
999 995 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1000 996 if 'x' in flags:
1001 997 self.wvfs.setflags(filename, False, True)
1002 998 return len(data)
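# Example for wwrite() above (illustrative sketch): the flags string
# selects the file type; '' writes a regular file, 'l' a symlink,
# 'x' an executable, per the branches above.
#
#     n = repo.wwrite('foo.txt', 'data\n', '')   # returns bytes written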
1003 999
1004 1000 def wwritedata(self, filename, data):
1005 1001 return self._filter(self._decodefilterpats, filename, data)
1006 1002
1007 1003 def currenttransaction(self):
1008 1004 """return the current transaction or None if non exists"""
1009 1005 if self._transref:
1010 1006 tr = self._transref()
1011 1007 else:
1012 1008 tr = None
1013 1009
1014 1010 if tr and tr.running():
1015 1011 return tr
1016 1012 return None
1017 1013
1018 1014 def transaction(self, desc, report=None):
1019 1015 if (self.ui.configbool('devel', 'all-warnings')
1020 1016 or self.ui.configbool('devel', 'check-locks')):
1021 1017 if self._currentlock(self._lockref) is None:
1022 1018 raise RuntimeError('programming error: transaction requires '
1023 1019 'locking')
1024 1020 tr = self.currenttransaction()
1025 1021 if tr is not None:
1026 1022 return tr.nest()
1027 1023
1028 1024 # abort here if the journal already exists
1029 1025 if self.svfs.exists("journal"):
1030 1026 raise error.RepoError(
1031 1027 _("abandoned transaction found"),
1032 1028 hint=_("run 'hg recover' to clean up transaction"))
1033 1029
1034 1030 idbase = "%.40f#%f" % (random.random(), time.time())
1035 1031 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1036 1032 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1037 1033
1038 1034 self._writejournal(desc)
1039 1035 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1040 1036 if report:
1041 1037 rp = report
1042 1038 else:
1043 1039 rp = self.ui.warn
1044 1040 vfsmap = {'plain': self.vfs} # root of .hg/
1045 1041 # we must avoid a cyclic reference between repo and transaction.
1046 1042 reporef = weakref.ref(self)
1047 1043 def validate(tr):
1048 1044 """will run pre-closing hooks"""
1049 1045 reporef().hook('pretxnclose', throw=True,
1050 1046 txnname=desc, **tr.hookargs)
1051 1047 def releasefn(tr, success):
1052 1048 repo = reporef()
1053 1049 if success:
1054 1050 # this should be explicitly invoked here, because
1055 1051 # in-memory changes aren't written out when closing the
1056 1052 # transaction, if tr.addfilegenerator (via
1057 1053 # dirstate.write or so) isn't invoked while the
1058 1054 # transaction is running
1059 1055 repo.dirstate.write(None)
1060 1056 else:
1061 1057 # discard all changes (including ones already written
1062 1058 # out) in this transaction
1063 1059 repo.dirstate.restorebackup(None, prefix='journal.')
1064 1060
1065 1061 repo.invalidate(clearfilecache=True)
1066 1062
1067 1063 tr = transaction.transaction(rp, self.svfs, vfsmap,
1068 1064 "journal",
1069 1065 "undo",
1070 1066 aftertrans(renames),
1071 1067 self.store.createmode,
1072 1068 validator=validate,
1073 1069 releasefn=releasefn)
1074 1070
1075 1071 tr.hookargs['txnid'] = txnid
1076 1072 # note: writing the fncache only during finalize means that the file is
1077 1073 # outdated when running hooks. As the fncache is used for streaming clone,
1078 1074 # this is not expected to break anything that happens during the hooks.
1079 1075 tr.addfinalize('flush-fncache', self.store.write)
1080 1076 def txnclosehook(tr2):
1081 1077 """To be run if transaction is successful, will schedule a hook run
1082 1078 """
1083 1079 # Don't reference tr2 in hook() so we don't hold a reference.
1084 1080 # This reduces memory consumption when there are multiple
1085 1081 # transactions per lock. This can likely go away if issue5045
1086 1082 # fixes the function accumulation.
1087 1083 hookargs = tr2.hookargs
1088 1084
1089 1085 def hook():
1090 1086 reporef().hook('txnclose', throw=False, txnname=desc,
1091 1087 **hookargs)
1092 1088 reporef()._afterlock(hook)
1093 1089 tr.addfinalize('txnclose-hook', txnclosehook)
1094 1090 def txnaborthook(tr2):
1095 1091 """To be run if transaction is aborted
1096 1092 """
1097 1093 reporef().hook('txnabort', throw=False, txnname=desc,
1098 1094 **tr2.hookargs)
1099 1095 tr.addabort('txnabort-hook', txnaborthook)
1100 1096 # avoid eager cache invalidation. in-memory data should be identical
1101 1097 # to stored data if the transaction has no error.
1102 1098 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1103 1099 self._transref = weakref.ref(tr)
1104 1100 return tr
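# Typical calling pattern for transaction() above (illustrative
# sketch): the devel checks require the store lock to be held first,
# and the lock supports the context-manager protocol (as recover()
# below demonstrates with 'with self.lock():').
#
#     with repo.lock():
#         tr = repo.transaction('my-operation')
#         try:
#             ...              # write to the store
#             tr.close()       # pretxnclose runs via the validator
#         finally:
#             tr.release()     # txnabort hook fires if not closed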
1105 1101
1106 1102 def _journalfiles(self):
1107 1103 return ((self.svfs, 'journal'),
1108 1104 (self.vfs, 'journal.dirstate'),
1109 1105 (self.vfs, 'journal.branch'),
1110 1106 (self.vfs, 'journal.desc'),
1111 1107 (self.vfs, 'journal.bookmarks'),
1112 1108 (self.svfs, 'journal.phaseroots'))
1113 1109
1114 1110 def undofiles(self):
1115 1111 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1116 1112
1117 1113 def _writejournal(self, desc):
1118 1114 self.dirstate.savebackup(None, prefix='journal.')
1119 1115 self.vfs.write("journal.branch",
1120 1116 encoding.fromlocal(self.dirstate.branch()))
1121 1117 self.vfs.write("journal.desc",
1122 1118 "%d\n%s\n" % (len(self), desc))
1123 1119 self.vfs.write("journal.bookmarks",
1124 1120 self.vfs.tryread("bookmarks"))
1125 1121 self.svfs.write("journal.phaseroots",
1126 1122 self.svfs.tryread("phaseroots"))
1127 1123
1128 1124 def recover(self):
1129 1125 with self.lock():
1130 1126 if self.svfs.exists("journal"):
1131 1127 self.ui.status(_("rolling back interrupted transaction\n"))
1132 1128 vfsmap = {'': self.svfs,
1133 1129 'plain': self.vfs,}
1134 1130 transaction.rollback(self.svfs, vfsmap, "journal",
1135 1131 self.ui.warn)
1136 1132 self.invalidate()
1137 1133 return True
1138 1134 else:
1139 1135 self.ui.warn(_("no interrupted transaction available\n"))
1140 1136 return False
1141 1137
1142 1138 def rollback(self, dryrun=False, force=False):
1143 1139 wlock = lock = dsguard = None
1144 1140 try:
1145 1141 wlock = self.wlock()
1146 1142 lock = self.lock()
1147 1143 if self.svfs.exists("undo"):
1148 1144 dsguard = cmdutil.dirstateguard(self, 'rollback')
1149 1145
1150 1146 return self._rollback(dryrun, force, dsguard)
1151 1147 else:
1152 1148 self.ui.warn(_("no rollback information available\n"))
1153 1149 return 1
1154 1150 finally:
1155 1151 release(dsguard, lock, wlock)
1156 1152
1157 1153 @unfilteredmethod # Until we get smarter cache management
1158 1154 def _rollback(self, dryrun, force, dsguard):
1159 1155 ui = self.ui
1160 1156 try:
1161 1157 args = self.vfs.read('undo.desc').splitlines()
1162 1158 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1163 1159 if len(args) >= 3:
1164 1160 detail = args[2]
1165 1161 oldtip = oldlen - 1
1166 1162
1167 1163 if detail and ui.verbose:
1168 1164 msg = (_('repository tip rolled back to revision %s'
1169 1165 ' (undo %s: %s)\n')
1170 1166 % (oldtip, desc, detail))
1171 1167 else:
1172 1168 msg = (_('repository tip rolled back to revision %s'
1173 1169 ' (undo %s)\n')
1174 1170 % (oldtip, desc))
1175 1171 except IOError:
1176 1172 msg = _('rolling back unknown transaction\n')
1177 1173 desc = None
1178 1174
1179 1175 if not force and self['.'] != self['tip'] and desc == 'commit':
1180 1176 raise error.Abort(
1181 1177 _('rollback of last commit while not checked out '
1182 1178 'may lose data'), hint=_('use -f to force'))
1183 1179
1184 1180 ui.status(msg)
1185 1181 if dryrun:
1186 1182 return 0
1187 1183
1188 1184 parents = self.dirstate.parents()
1189 1185 self.destroying()
1190 1186 vfsmap = {'plain': self.vfs, '': self.svfs}
1191 1187 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1192 1188 if self.vfs.exists('undo.bookmarks'):
1193 1189 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1194 1190 if self.svfs.exists('undo.phaseroots'):
1195 1191 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1196 1192 self.invalidate()
1197 1193
1198 1194 parentgone = (parents[0] not in self.changelog.nodemap or
1199 1195 parents[1] not in self.changelog.nodemap)
1200 1196 if parentgone:
1201 1197 # prevent dirstateguard from overwriting already restored one
1202 1198 dsguard.close()
1203 1199
1204 1200 self.dirstate.restorebackup(None, prefix='undo.')
1205 1201 try:
1206 1202 branch = self.vfs.read('undo.branch')
1207 1203 self.dirstate.setbranch(encoding.tolocal(branch))
1208 1204 except IOError:
1209 1205 ui.warn(_('named branch could not be reset: '
1210 1206 'current branch is still \'%s\'\n')
1211 1207 % self.dirstate.branch())
1212 1208
1213 1209 parents = tuple([p.rev() for p in self[None].parents()])
1214 1210 if len(parents) > 1:
1215 1211 ui.status(_('working directory now based on '
1216 1212 'revisions %d and %d\n') % parents)
1217 1213 else:
1218 1214 ui.status(_('working directory now based on '
1219 1215 'revision %d\n') % parents)
1220 1216 mergemod.mergestate.clean(self, self['.'].node())
1221 1217
1222 1218 # TODO: if we know which new heads may result from this rollback, pass
1223 1219 # them to destroy(), which will prevent the branchhead cache from being
1224 1220 # invalidated.
1225 1221 self.destroyed()
1226 1222 return 0
1227 1223
1228 1224 def invalidatecaches(self):
1229 1225
1230 1226 if '_tagscache' in vars(self):
1231 1227 # can't use delattr on proxy
1232 1228 del self.__dict__['_tagscache']
1233 1229
1234 1230 self.unfiltered()._branchcaches.clear()
1235 1231 self.invalidatevolatilesets()
1236 1232
1237 1233 def invalidatevolatilesets(self):
1238 1234 self.filteredrevcache.clear()
1239 1235 obsolete.clearobscaches(self)
1240 1236
1241 1237 def invalidatedirstate(self):
1242 1238 '''Invalidates the dirstate, causing the next call to dirstate
1243 1239 to check if it was modified since the last time it was read,
1244 1240 rereading it if it has.
1245 1241
1246 1242 This differs from dirstate.invalidate() in that it doesn't always
1247 1243 reread the dirstate. Use dirstate.invalidate() if you want to
1248 1244 explicitly read the dirstate again (i.e. restoring it to a previous
1249 1245 known good state).'''
1250 1246 if hasunfilteredcache(self, 'dirstate'):
1251 1247 for k in self.dirstate._filecache:
1252 1248 try:
1253 1249 delattr(self.dirstate, k)
1254 1250 except AttributeError:
1255 1251 pass
1256 1252 delattr(self.unfiltered(), 'dirstate')
1257 1253
1258 1254 def invalidate(self, clearfilecache=False):
1259 1255 '''Invalidates both store and non-store parts other than dirstate
1260 1256
1261 1257 If a transaction is running, invalidation of store is omitted,
1262 1258 because discarding in-memory changes might cause inconsistency
1263 1259 (e.g. an incomplete fncache causes unintentional failure, but
1264 1260 a redundant one doesn't).
1265 1261 '''
1266 1262 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1267 1263 for k in self._filecache.keys():
1268 1264 # dirstate is invalidated separately in invalidatedirstate()
1269 1265 if k == 'dirstate':
1270 1266 continue
1271 1267
1272 1268 if clearfilecache:
1273 1269 del self._filecache[k]
1274 1270 try:
1275 1271 delattr(unfiltered, k)
1276 1272 except AttributeError:
1277 1273 pass
1278 1274 self.invalidatecaches()
1279 1275 if not self.currenttransaction():
1280 1276 # TODO: Changing contents of store outside transaction
1281 1277 # causes inconsistency. We should make in-memory store
1282 1278 # changes detectable, and abort if changed.
1283 1279 self.store.invalidatecaches()
1284 1280
1285 1281 def invalidateall(self):
1286 1282 '''Fully invalidates both store and non-store parts, causing the
1287 1283 subsequent operation to reread any outside changes.'''
1288 1284 # extension should hook this to invalidate its caches
1289 1285 self.invalidate()
1290 1286 self.invalidatedirstate()
1291 1287
1292 1288 @unfilteredmethod
1293 1289 def _refreshfilecachestats(self, tr):
1294 1290 """Reload stats of cached files so that they are flagged as valid"""
1295 1291 for k, ce in self._filecache.items():
1296 1292 if k == 'dirstate' or k not in self.__dict__:
1297 1293 continue
1298 1294 ce.refresh()
1299 1295
1300 1296 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1301 1297 inheritchecker=None, parentenvvar=None):
1302 1298 parentlock = None
1303 1299 # the contents of parentenvvar are used by the underlying lock to
1304 1300 # determine whether it can be inherited
1305 1301 if parentenvvar is not None:
1306 1302 parentlock = os.environ.get(parentenvvar)
1307 1303 try:
1308 1304 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1309 1305 acquirefn=acquirefn, desc=desc,
1310 1306 inheritchecker=inheritchecker,
1311 1307 parentlock=parentlock)
1312 1308 except error.LockHeld as inst:
1313 1309 if not wait:
1314 1310 raise
1315 1311 # show more details for new-style locks
1316 1312 if ':' in inst.locker:
1317 1313 host, pid = inst.locker.split(":", 1)
1318 1314 self.ui.warn(
1319 1315 _("waiting for lock on %s held by process %r "
1320 1316 "on host %r\n") % (desc, pid, host))
1321 1317 else:
1322 1318 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1323 1319 (desc, inst.locker))
1324 1320 # default to 600 seconds timeout
1325 1321 l = lockmod.lock(vfs, lockname,
1326 1322 int(self.ui.config("ui", "timeout", "600")),
1327 1323 releasefn=releasefn, acquirefn=acquirefn,
1328 1324 desc=desc)
1329 1325 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1330 1326 return l
1331 1327
1332 1328 def _afterlock(self, callback):
1333 1329 """add a callback to be run when the repository is fully unlocked
1334 1330
1335 1331 The callback will be executed when the outermost lock is released
1336 1332 (with wlock being higher level than 'lock')."""
1337 1333 for ref in (self._wlockref, self._lockref):
1338 1334 l = ref and ref()
1339 1335 if l and l.held:
1340 1336 l.postrelease.append(callback)
1341 1337 break
1342 1338 else: # no lock has been found.
1343 1339 callback()
1344 1340
1345 1341 def lock(self, wait=True):
1346 1342 '''Lock the repository store (.hg/store) and return a weak reference
1347 1343 to the lock. Use this before modifying the store (e.g. committing or
1348 1344 stripping). If you are opening a transaction, get a lock as well.
1349 1345
1350 1346 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1351 1347 'wlock' first to avoid a dead-lock hazard.'''
1352 1348 l = self._currentlock(self._lockref)
1353 1349 if l is not None:
1354 1350 l.lock()
1355 1351 return l
1356 1352
1357 1353 l = self._lock(self.svfs, "lock", wait, None,
1358 1354 self.invalidate, _('repository %s') % self.origroot)
1359 1355 self._lockref = weakref.ref(l)
1360 1356 return l
1361 1357
1362 1358 def _wlockchecktransaction(self):
1363 1359 if self.currenttransaction() is not None:
1364 1360 raise error.LockInheritanceContractViolation(
1365 1361 'wlock cannot be inherited in the middle of a transaction')
1366 1362
1367 1363 def wlock(self, wait=True):
1368 1364 '''Lock the non-store parts of the repository (everything under
1369 1365 .hg except .hg/store) and return a weak reference to the lock.
1370 1366
1371 1367 Use this before modifying files in .hg.
1372 1368
1373 1369 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1374 1370 'wlock' first to avoid a dead-lock hazard.'''
1375 1371 l = self._wlockref and self._wlockref()
1376 1372 if l is not None and l.held:
1377 1373 l.lock()
1378 1374 return l
1379 1375
1380 1376 # We do not need to check for non-waiting lock acquisition. Such
1381 1377 # an acquisition would not cause a dead-lock, as it would just fail.
1382 1378 if wait and (self.ui.configbool('devel', 'all-warnings')
1383 1379 or self.ui.configbool('devel', 'check-locks')):
1384 1380 if self._currentlock(self._lockref) is not None:
1385 1381 self.ui.develwarn('"wlock" acquired after "lock"')
1386 1382
1387 1383 def unlock():
1388 1384 if self.dirstate.pendingparentchange():
1389 1385 self.dirstate.invalidate()
1390 1386 else:
1391 1387 self.dirstate.write(None)
1392 1388
1393 1389 self._filecache['dirstate'].refresh()
1394 1390
1395 1391 l = self._lock(self.vfs, "wlock", wait, unlock,
1396 1392 self.invalidatedirstate, _('working directory of %s') %
1397 1393 self.origroot,
1398 1394 inheritchecker=self._wlockchecktransaction,
1399 1395 parentenvvar='HG_WLOCK_LOCKER')
1400 1396 self._wlockref = weakref.ref(l)
1401 1397 return l
1402 1398
1403 1399 def _currentlock(self, lockref):
1404 1400 """Returns the lock if it's held, or None if it's not."""
1405 1401 if lockref is None:
1406 1402 return None
1407 1403 l = lockref()
1408 1404 if l is None or not l.held:
1409 1405 return None
1410 1406 return l
1411 1407
1412 1408 def currentwlock(self):
1413 1409 """Returns the wlock if it's held, or None if it's not."""
1414 1410 return self._currentlock(self._wlockref)
1415 1411
1416 1412 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1417 1413 """
1418 1414 commit an individual file as part of a larger transaction
1419 1415 """
1420 1416
1421 1417 fname = fctx.path()
1422 1418 fparent1 = manifest1.get(fname, nullid)
1423 1419 fparent2 = manifest2.get(fname, nullid)
1424 1420 if isinstance(fctx, context.filectx):
1425 1421 node = fctx.filenode()
1426 1422 if node in [fparent1, fparent2]:
1427 1423 self.ui.debug('reusing %s filelog entry\n' % fname)
1428 1424 if manifest1.flags(fname) != fctx.flags():
1429 1425 changelist.append(fname)
1430 1426 return node
1431 1427
1432 1428 flog = self.file(fname)
1433 1429 meta = {}
1434 1430 copy = fctx.renamed()
1435 1431 if copy and copy[0] != fname:
1436 1432 # Mark the new revision of this file as a copy of another
1437 1433 # file. This copy data will effectively act as a parent
1438 1434 # of this new revision. If this is a merge, the first
1439 1435 # parent will be the nullid (meaning "look up the copy data")
1440 1436 # and the second one will be the other parent. For example:
1441 1437 #
1442 1438 # 0 --- 1 --- 3 rev1 changes file foo
1443 1439 # \ / rev2 renames foo to bar and changes it
1444 1440 # \- 2 -/ rev3 should have bar with all changes and
1445 1441 # should record that bar descends from
1446 1442 # bar in rev2 and foo in rev1
1447 1443 #
1448 1444 # this allows this merge to succeed:
1449 1445 #
1450 1446 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1451 1447 # \ / merging rev3 and rev4 should use bar@rev2
1452 1448 # \- 2 --- 4 as the merge base
1453 1449 #
1454 1450
1455 1451 cfname = copy[0]
1456 1452 crev = manifest1.get(cfname)
1457 1453 newfparent = fparent2
1458 1454
1459 1455 if manifest2: # branch merge
1460 1456 if fparent2 == nullid or crev is None: # copied on remote side
1461 1457 if cfname in manifest2:
1462 1458 crev = manifest2[cfname]
1463 1459 newfparent = fparent1
1464 1460
1465 1461 # Here, we used to search backwards through history to try to find
1466 1462 # where the file copy came from if the source of a copy was not in
1467 1463 # the parent directory. However, this doesn't actually make sense to
1468 1464 # do (what does a copy from something not in your working copy even
1469 1465 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1470 1466 # the user that copy information was dropped, so if they didn't
1471 1467 # expect this outcome it can be fixed, but this is the correct
1472 1468 # behavior in this circumstance.
1473 1469
1474 1470 if crev:
1475 1471 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1476 1472 meta["copy"] = cfname
1477 1473 meta["copyrev"] = hex(crev)
1478 1474 fparent1, fparent2 = nullid, newfparent
1479 1475 else:
1480 1476 self.ui.warn(_("warning: can't find ancestor for '%s' "
1481 1477 "copied from '%s'!\n") % (fname, cfname))
1482 1478
1483 1479 elif fparent1 == nullid:
1484 1480 fparent1, fparent2 = fparent2, nullid
1485 1481 elif fparent2 != nullid:
1486 1482 # is one parent an ancestor of the other?
1487 1483 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1488 1484 if fparent1 in fparentancestors:
1489 1485 fparent1, fparent2 = fparent2, nullid
1490 1486 elif fparent2 in fparentancestors:
1491 1487 fparent2 = nullid
1492 1488
1493 1489 # is the file changed?
1494 1490 text = fctx.data()
1495 1491 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1496 1492 changelist.append(fname)
1497 1493 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1498 1494 # are just the flags changed during merge?
1499 1495 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1500 1496 changelist.append(fname)
1501 1497
1502 1498 return fparent1
1503 1499
1504 1500 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1505 1501 """check for commit arguments that aren't committable"""
1506 1502 if match.isexact() or match.prefix():
1507 1503 matched = set(status.modified + status.added + status.removed)
1508 1504
1509 1505 for f in match.files():
1510 1506 f = self.dirstate.normalize(f)
1511 1507 if f == '.' or f in matched or f in wctx.substate:
1512 1508 continue
1513 1509 if f in status.deleted:
1514 1510 fail(f, _('file not found!'))
1515 1511 if f in vdirs: # visited directory
1516 1512 d = f + '/'
1517 1513 for mf in matched:
1518 1514 if mf.startswith(d):
1519 1515 break
1520 1516 else:
1521 1517 fail(f, _("no match under directory!"))
1522 1518 elif f not in self.dirstate:
1523 1519 fail(f, _("file not tracked!"))
1524 1520
1525 1521 @unfilteredmethod
1526 1522 def commit(self, text="", user=None, date=None, match=None, force=False,
1527 1523 editor=False, extra=None):
1528 1524 """Add a new revision to current repository.
1529 1525
1530 1526 Revision information is gathered from the working directory,
1531 1527 match can be used to filter the committed files. If editor is
1532 1528 supplied, it is called to get a commit message.
1533 1529 """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            cmdutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """
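        # Normally reached via commit() above with a workingcommitctx; an
        # in-memory commit is also possible with a context.memctx, along
        # these lines (illustrative sketch; p1node/p2node and the file
        # contents are hypothetical):
        #
        #     def getfilectx(repo, memctx, path):
        #         return context.memfilectx(repo, path, 'new contents\n')
        #     mctx = context.memctx(repo, (p1node, p2node), 'a message',
        #                           ['a/file.txt'], getfilectx)
        #     node = repo.commitctx(mctx)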

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
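        # For example (illustrative), the newest head of the current
        # branch could be fetched with:
        #
        #     tipmost = repo.branchheads()[0]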
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
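        # Walk first parents from each starting node until a merge or the
        # root is reached, collecting (start, end-of-run, p1, p2) tuples
        # that describe each linear run; apparently kept for the legacy
        # wire protocol.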
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
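        # For each (top, bottom) pair, walk first parents from top towards
        # bottom and sample nodes at exponentially growing distances
        # (1, 2, 4, 8, ...); used by the legacy discovery wire protocol to
        # narrow down common ancestors cheaply.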
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()
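    # An extension would typically register such a hook along these lines
    # (illustrative sketch; 'myext' and checkoutgoing are hypothetical):
    #
    #     def checkoutgoing(pushop):
    #         pushop.repo.ui.note('pushing %d changesets\n'
    #                             % len(pushop.outgoing.missing))
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)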

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
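    # Well-known pushkey namespaces include 'bookmarks' and 'phases'; a
    # bookmark could, for instance (illustrative sketch, newnode is
    # hypothetical, '' marks a previously absent key), be created with:
    #
    #     repo.pushkey('bookmarks', 'mybook', '', hex(newnode))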

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
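    # For instance (illustrative sketch), an extension could add its own
    # requirement by wrapping this function:
    #
    #     def wrapped(orig, repo):
    #         reqs = orig(repo)
    #         reqs.add('myext-requirement')
    #         return reqs
    #     extensions.wrapfunction(localrepo, 'newreporequirements', wrapped)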
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements