localrepo: improve vfs documentation...
Ryan McElroy
r31536:48b9c9ca default
@@ -1,2085 +1,2087
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repoview,
54 54 revset,
55 55 revsetlang,
56 56 scmutil,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
66 66 release = lockmod.release
67 67 urlerr = util.urlerr
68 68 urlreq = util.urlreq
69 69
70 70 class repofilecache(scmutil.filecache):
71 71 """All filecache usage on repo are done for logic that should be unfiltered
72 72 """
73 73
74 74 def join(self, obj, fname):
75 75 return obj.vfs.join(fname)
76 76 def __get__(self, repo, type=None):
77 77 if repo is None:
78 78 return self
79 79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
80 80 def __set__(self, repo, value):
81 81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
82 82 def __delete__(self, repo):
83 83 return super(repofilecache, self).__delete__(repo.unfiltered())
84 84
85 85 class storecache(repofilecache):
86 86 """filecache for files in the store"""
87 87 def join(self, obj, fname):
88 88 return obj.sjoin(fname)
89 89
90 90 class unfilteredpropertycache(util.propertycache):
91 91 """propertycache that apply to unfiltered repo only"""
92 92
93 93 def __get__(self, repo, type=None):
94 94 unfi = repo.unfiltered()
95 95 if unfi is repo:
96 96 return super(unfilteredpropertycache, self).__get__(unfi)
97 97 return getattr(unfi, self.name)
98 98
99 99 class filteredpropertycache(util.propertycache):
100 100 """propertycache that must take filtering in account"""
101 101
102 102 def cachevalue(self, obj, value):
103 103 object.__setattr__(obj, self.name, value)
104 104
105 105
106 106 def hasunfilteredcache(repo, name):
107 107 """check if a repo has an unfilteredpropertycache value for <name>"""
108 108 return name in vars(repo.unfiltered())
109 109
110 110 def unfilteredmethod(orig):
111 111 """decorate method that always need to be run on unfiltered version"""
112 112 def wrapper(repo, *args, **kwargs):
113 113 return orig(repo.unfiltered(), *args, **kwargs)
114 114 return wrapper
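# Usage sketch: a method decorated this way always operates on the
# unfiltered repository, even when invoked on a filtered repoview, e.g.
#
#     @unfilteredmethod
#     def revbranchcache(self):
#         ...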
115 115
116 116 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 117 'unbundle'))
118 118 legacycaps = moderncaps.union(set(['changegroupsubset']))
119 119
120 120 class localpeer(peer.peerrepository):
121 121 '''peer for a local repo; reflects only the most recent API'''
122 122
123 123 def __init__(self, repo, caps=None):
124 124 if caps is None:
125 125 caps = moderncaps.copy()
126 126 peer.peerrepository.__init__(self)
127 127 self._repo = repo.filtered('served')
128 128 self.ui = repo.ui
129 129 self._caps = repo._restrictcapabilities(caps)
130 130 self.requirements = repo.requirements
131 131 self.supportedformats = repo.supportedformats
132 132
133 133 def close(self):
134 134 self._repo.close()
135 135
136 136 def _capabilities(self):
137 137 return self._caps
138 138
139 139 def local(self):
140 140 return self._repo
141 141
142 142 def canpush(self):
143 143 return True
144 144
145 145 def url(self):
146 146 return self._repo.url()
147 147
148 148 def lookup(self, key):
149 149 return self._repo.lookup(key)
150 150
151 151 def branchmap(self):
152 152 return self._repo.branchmap()
153 153
154 154 def heads(self):
155 155 return self._repo.heads()
156 156
157 157 def known(self, nodes):
158 158 return self._repo.known(nodes)
159 159
160 160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 161 **kwargs):
162 162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 163 common=common, bundlecaps=bundlecaps,
164 164 **kwargs)
165 165 cb = util.chunkbuffer(chunks)
166 166
167 167 if bundlecaps is not None and 'HG20' in bundlecaps:
168 168 # When requesting a bundle2, getbundle returns a stream to make the
169 169 # wire level function happier. We need to build a proper object
170 170 # from it in the local peer.
171 171 return bundle2.getunbundler(self.ui, cb)
172 172 else:
173 173 return changegroup.getunbundler('01', cb, None)
174 174
175 175 # TODO We might want to move the next two calls into legacypeer and add
176 176 # unbundle instead.
177 177
178 178 def unbundle(self, cg, heads, url):
179 179 """apply a bundle on a repo
180 180
181 181 This function handles the repo locking itself."""
182 182 try:
183 183 try:
184 184 cg = exchange.readbundle(self.ui, cg, None)
185 185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 186 if util.safehasattr(ret, 'getchunks'):
187 187 # This is a bundle20 object, turn it into an unbundler.
188 188 # This little dance should be dropped eventually when the
189 189 # API is finally improved.
190 190 stream = util.chunkbuffer(ret.getchunks())
191 191 ret = bundle2.getunbundler(self.ui, stream)
192 192 return ret
193 193 except Exception as exc:
194 194 # If the exception contains output salvaged from a bundle2
195 195 # reply, we need to make sure it is printed before continuing
196 196 # to fail. So we build a bundle2 with such output and consume
197 197 # it directly.
198 198 #
199 199 # This is not very elegant but allows a "simple" solution for
200 200 # issue4594
201 201 output = getattr(exc, '_bundle2salvagedoutput', ())
202 202 if output:
203 203 bundler = bundle2.bundle20(self._repo.ui)
204 204 for out in output:
205 205 bundler.addpart(out)
206 206 stream = util.chunkbuffer(bundler.getchunks())
207 207 b = bundle2.getunbundler(self.ui, stream)
208 208 bundle2.processbundle(self._repo, b)
209 209 raise
210 210 except error.PushRaced as exc:
211 211 raise error.ResponseError(_('push failed:'), str(exc))
212 212
213 213 def lock(self):
214 214 return self._repo.lock()
215 215
216 216 def addchangegroup(self, cg, source, url):
217 217 return cg.apply(self._repo, source, url)
218 218
219 219 def pushkey(self, namespace, key, old, new):
220 220 return self._repo.pushkey(namespace, key, old, new)
221 221
222 222 def listkeys(self, namespace):
223 223 return self._repo.listkeys(namespace)
224 224
225 225 def debugwireargs(self, one, two, three=None, four=None, five=None):
226 226 '''used to test argument passing over the wire'''
227 227 return "%s %s %s %s %s" % (one, two, three, four, five)
228 228
229 229 class locallegacypeer(localpeer):
230 230 '''peer extension which implements legacy methods too; used for tests with
231 231 restricted capabilities'''
232 232
233 233 def __init__(self, repo):
234 234 localpeer.__init__(self, repo, caps=legacycaps)
235 235
236 236 def branches(self, nodes):
237 237 return self._repo.branches(nodes)
238 238
239 239 def between(self, pairs):
240 240 return self._repo.between(pairs)
241 241
242 242 def changegroup(self, basenodes, source):
243 243 return changegroup.changegroup(self._repo, basenodes, source)
244 244
245 245 def changegroupsubset(self, bases, heads, source):
246 246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
247 247
248 248 class localrepository(object):
249 249
250 250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
251 251 'manifestv2'))
252 252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
253 253 'relshared', 'dotencode'))
254 254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
255 255 filtername = None
256 256
257 257 # a list of (ui, featureset) functions.
258 258 # only functions defined in module of enabled extensions are invoked
259 259 featuresetupfuncs = set()
260 260
261 261 def __init__(self, baseui, path, create=False):
262 262 self.requirements = set()
263 # vfs to access the working copy
263 # wvfs: rooted at the repository root, used to access the working copy
264 264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 # vfs to access the content of the repository
265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 266 self.vfs = None
267 # vfs to access the store part of the repository
267 # svfs: usually rooted at .hg/store, used to access repository history
268 # If this is a shared repository, this vfs may point to another
269 # repository's .hg/store directory.
268 270 self.svfs = None
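# For example, once initialized below, the three vfs instances of an
# unshared repository at /repo (path illustrative) resolve like this:
#   self.wvfs.join('foo')           -> /repo/foo
#   self.vfs.join('hgrc')           -> /repo/.hg/hgrc
#   self.svfs.join('00changelog.i') -> /repo/.hg/store/00changelog.i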
269 271 self.root = self.wvfs.base
270 272 self.path = self.wvfs.join(".hg")
271 273 self.origroot = path
272 274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
273 275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
274 276 realfs=False)
275 277 self.vfs = vfsmod.vfs(self.path)
276 278 self.baseui = baseui
277 279 self.ui = baseui.copy()
278 280 self.ui.copy = baseui.copy # prevent copying repo configuration
279 281 # A list of callbacks to shape the phases if no data were found.
280 282 # Callbacks are in the form: func(repo, roots) --> processed root.
281 283 # This list is to be filled by extensions during repo setup
282 284 self._phasedefaults = []
283 285 try:
284 286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
285 287 self._loadextensions()
286 288 except IOError:
287 289 pass
288 290
289 291 if self.featuresetupfuncs:
290 292 self.supported = set(self._basesupported) # use private copy
291 293 extmods = set(m.__name__ for n, m
292 294 in extensions.extensions(self.ui))
293 295 for setupfunc in self.featuresetupfuncs:
294 296 if setupfunc.__module__ in extmods:
295 297 setupfunc(self.ui, self.supported)
296 298 else:
297 299 self.supported = self._basesupported
298 300 color.setup(self.ui)
299 301
300 302 # Add compression engines.
301 303 for name in util.compengines:
302 304 engine = util.compengines[name]
303 305 if engine.revlogheader():
304 306 self.supported.add('exp-compression-%s' % name)
305 307
306 308 if not self.vfs.isdir():
307 309 if create:
308 310 self.requirements = newreporequirements(self)
309 311
310 312 if not self.wvfs.exists():
311 313 self.wvfs.makedirs()
312 314 self.vfs.makedir(notindexed=True)
313 315
314 316 if 'store' in self.requirements:
315 317 self.vfs.mkdir("store")
316 318
317 319 # create an invalid changelog
318 320 self.vfs.append(
319 321 "00changelog.i",
320 322 '\0\0\0\2' # represents revlogv2
321 323 ' dummy changelog to prevent using the old repo layout'
322 324 )
323 325 else:
324 326 raise error.RepoError(_("repository %s not found") % path)
325 327 elif create:
326 328 raise error.RepoError(_("repository %s already exists") % path)
327 329 else:
328 330 try:
329 331 self.requirements = scmutil.readrequires(
330 332 self.vfs, self.supported)
331 333 except IOError as inst:
332 334 if inst.errno != errno.ENOENT:
333 335 raise
334 336
335 337 self.sharedpath = self.path
336 338 try:
337 339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
338 340 if 'relshared' in self.requirements:
339 341 sharedpath = self.vfs.join(sharedpath)
340 342 vfs = vfsmod.vfs(sharedpath, realpath=True)
341 343 s = vfs.base
342 344 if not vfs.exists():
343 345 raise error.RepoError(
344 346 _('.hg/sharedpath points to nonexistent directory %s') % s)
345 347 self.sharedpath = s
346 348 except IOError as inst:
347 349 if inst.errno != errno.ENOENT:
348 350 raise
349 351
350 352 self.store = store.store(
351 353 self.requirements, self.sharedpath, vfsmod.vfs)
352 354 self.spath = self.store.path
353 355 self.svfs = self.store.vfs
354 356 self.sjoin = self.store.join
355 357 self.vfs.createmode = self.store.createmode
356 358 self._applyopenerreqs()
357 359 if create:
358 360 self._writerequirements()
359 361
360 362 self._dirstatevalidatewarned = False
361 363
362 364 self._branchcaches = {}
363 365 self._revbranchcache = None
364 366 self.filterpats = {}
365 367 self._datafilters = {}
366 368 self._transref = self._lockref = self._wlockref = None
367 369
368 370 # A cache for various files under .hg/ that tracks file changes,
369 371 # (used by the filecache decorator)
370 372 #
371 373 # Maps a property name to its util.filecacheentry
372 374 self._filecache = {}
373 375
374 376 # hold sets of revision to be filtered
375 377 # should be cleared when something might have changed the filter value:
376 378 # - new changesets,
377 379 # - phase change,
378 380 # - new obsolescence marker,
379 381 # - working directory parent change,
380 382 # - bookmark changes
381 383 self.filteredrevcache = {}
382 384
383 385 # generic mapping between names and nodes
384 386 self.names = namespaces.namespaces()
385 387
386 388 @property
387 389 def wopener(self):
388 390 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
389 391 return self.wvfs
390 392
391 393 @property
392 394 def opener(self):
393 395 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
394 396 return self.vfs
395 397
396 398 def close(self):
397 399 self._writecaches()
398 400
399 401 def _loadextensions(self):
400 402 extensions.loadall(self.ui)
401 403
402 404 def _writecaches(self):
403 405 if self._revbranchcache:
404 406 self._revbranchcache.write()
405 407
406 408 def _restrictcapabilities(self, caps):
407 409 if self.ui.configbool('experimental', 'bundle2-advertise', True):
408 410 caps = set(caps)
409 411 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
410 412 caps.add('bundle2=' + urlreq.quote(capsblob))
411 413 return caps
412 414
413 415 def _applyopenerreqs(self):
414 416 self.svfs.options = dict((r, 1) for r in self.requirements
415 417 if r in self.openerreqs)
416 418 # experimental config: format.chunkcachesize
417 419 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
418 420 if chunkcachesize is not None:
419 421 self.svfs.options['chunkcachesize'] = chunkcachesize
420 422 # experimental config: format.maxchainlen
421 423 maxchainlen = self.ui.configint('format', 'maxchainlen')
422 424 if maxchainlen is not None:
423 425 self.svfs.options['maxchainlen'] = maxchainlen
424 426 # experimental config: format.manifestcachesize
425 427 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
426 428 if manifestcachesize is not None:
427 429 self.svfs.options['manifestcachesize'] = manifestcachesize
428 430 # experimental config: format.aggressivemergedeltas
429 431 aggressivemergedeltas = self.ui.configbool('format',
430 432 'aggressivemergedeltas', False)
431 433 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
432 434 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
433 435
434 436 for r in self.requirements:
435 437 if r.startswith('exp-compression-'):
436 438 self.svfs.options['compengine'] = r[len('exp-compression-'):]
437 439
438 440 def _writerequirements(self):
439 441 scmutil.writerequires(self.vfs, self.requirements)
440 442
441 443 def _checknested(self, path):
442 444 """Determine if path is a legal nested repository."""
443 445 if not path.startswith(self.root):
444 446 return False
445 447 subpath = path[len(self.root) + 1:]
446 448 normsubpath = util.pconvert(subpath)
447 449
448 450 # XXX: Checking against the current working copy is wrong in
449 451 # the sense that it can reject things like
450 452 #
451 453 # $ hg cat -r 10 sub/x.txt
452 454 #
453 455 # if sub/ is no longer a subrepository in the working copy
454 456 # parent revision.
455 457 #
456 458 # However, it can of course also allow things that would have
457 459 # been rejected before, such as the above cat command if sub/
458 460 # is a subrepository now, but was a normal directory before.
459 461 # The old path auditor would have rejected by mistake since it
460 462 # panics when it sees sub/.hg/.
461 463 #
462 464 # All in all, checking against the working copy seems sensible
463 465 # since we want to prevent access to nested repositories on
464 466 # the filesystem *now*.
465 467 ctx = self[None]
466 468 parts = util.splitpath(subpath)
467 469 while parts:
468 470 prefix = '/'.join(parts)
469 471 if prefix in ctx.substate:
470 472 if prefix == normsubpath:
471 473 return True
472 474 else:
473 475 sub = ctx.sub(prefix)
474 476 return sub.checknested(subpath[len(prefix) + 1:])
475 477 else:
476 478 parts.pop()
477 479 return False
478 480
479 481 def peer(self):
480 482 return localpeer(self) # not cached to avoid reference cycle
481 483
482 484 def unfiltered(self):
483 485 """Return unfiltered version of the repository
484 486
485 487 Intended to be overwritten by filtered repo."""
486 488 return self
487 489
488 490 def filtered(self, name):
489 491 """Return a filtered version of a repository"""
490 492 # build a new class with the mixin and the current class
491 493 # (possibly subclass of the repo)
492 494 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
493 495 pass
494 496 return filteredrepo(self, name)
495 497
496 498 @repofilecache('bookmarks', 'bookmarks.current')
497 499 def _bookmarks(self):
498 500 return bookmarks.bmstore(self)
499 501
500 502 @property
501 503 def _activebookmark(self):
502 504 return self._bookmarks.active
503 505
504 506 def bookmarkheads(self, bookmark):
505 507 name = bookmark.split('@', 1)[0]
506 508 heads = []
507 509 for mark, n in self._bookmarks.iteritems():
508 510 if mark.split('@', 1)[0] == name:
509 511 heads.append(n)
510 512 return heads
511 513
512 514 # _phaserevs and _phasesets depend on changelog. what we need is to
513 515 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
514 516 # can't be easily expressed in filecache mechanism.
515 517 @storecache('phaseroots', '00changelog.i')
516 518 def _phasecache(self):
517 519 return phases.phasecache(self, self._phasedefaults)
518 520
519 521 @storecache('obsstore')
520 522 def obsstore(self):
521 523 # read default format for new obsstore.
522 524 # developer config: format.obsstore-version
523 525 defaultformat = self.ui.configint('format', 'obsstore-version', None)
524 526 # rely on obsstore class default when possible.
525 527 kwargs = {}
526 528 if defaultformat is not None:
527 529 kwargs['defaultformat'] = defaultformat
528 530 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
529 531 store = obsolete.obsstore(self.svfs, readonly=readonly,
530 532 **kwargs)
531 533 if store and readonly:
532 534 self.ui.warn(
533 535 _('obsolete feature not enabled but %i markers found!\n')
534 536 % len(list(store)))
535 537 return store
536 538
537 539 @storecache('00changelog.i')
538 540 def changelog(self):
539 541 c = changelog.changelog(self.svfs)
540 542 if txnutil.mayhavepending(self.root):
541 543 c.readpending('00changelog.i.a')
542 544 return c
543 545
544 546 def _constructmanifest(self):
545 547 # This is a temporary function while we migrate from manifest to
546 548 # manifestlog. It allows bundlerepo and unionrepo to intercept the
547 549 # manifest creation.
548 550 return manifest.manifestrevlog(self.svfs)
549 551
550 552 @storecache('00manifest.i')
551 553 def manifestlog(self):
552 554 return manifest.manifestlog(self.svfs, self)
553 555
554 556 @repofilecache('dirstate')
555 557 def dirstate(self):
556 558 return dirstate.dirstate(self.vfs, self.ui, self.root,
557 559 self._dirstatevalidate)
558 560
559 561 def _dirstatevalidate(self, node):
560 562 try:
561 563 self.changelog.rev(node)
562 564 return node
563 565 except error.LookupError:
564 566 if not self._dirstatevalidatewarned:
565 567 self._dirstatevalidatewarned = True
566 568 self.ui.warn(_("warning: ignoring unknown"
567 569 " working parent %s!\n") % short(node))
568 570 return nullid
569 571
570 572 def __getitem__(self, changeid):
571 573 if changeid is None or changeid == wdirrev:
572 574 return context.workingctx(self)
573 575 if isinstance(changeid, slice):
574 576 return [context.changectx(self, i)
575 577 for i in xrange(*changeid.indices(len(self)))
576 578 if i not in self.changelog.filteredrevs]
577 579 return context.changectx(self, changeid)
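# A sketch of the indexing protocol implemented above:
#   repo[None]    -> workingctx for the working directory
#   repo['tip']   -> changectx looked up by name
#   repo[0:3]     -> [changectx, ...] with filtered revisions skipped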
578 580
579 581 def __contains__(self, changeid):
580 582 try:
581 583 self[changeid]
582 584 return True
583 585 except error.RepoLookupError:
584 586 return False
585 587
586 588 def __nonzero__(self):
587 589 return True
588 590
589 591 __bool__ = __nonzero__
590 592
591 593 def __len__(self):
592 594 return len(self.changelog)
593 595
594 596 def __iter__(self):
595 597 return iter(self.changelog)
596 598
597 599 def revs(self, expr, *args):
598 600 '''Find revisions matching a revset.
599 601
600 602 The revset is specified as a string ``expr`` that may contain
601 603 %-formatting to escape certain types. See ``revsetlang.formatspec``.
602 604
603 605 Revset aliases from the configuration are not expanded. To expand
604 606 user aliases, consider calling ``scmutil.revrange()`` or
605 607 ``repo.anyrevs([expr], user=True)``.
606 608
607 609 Returns a revset.abstractsmartset, which is a list-like interface
608 610 that contains integer revisions.
609 611 '''
610 612 expr = revsetlang.formatspec(expr, *args)
611 613 m = revset.match(None, expr)
612 614 return m(self)
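# Minimal usage sketch (revset and argument illustrative):
#   for rev in repo.revs('ancestors(%d)', 42):
#       ...  # rev is an integer revision number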
613 615
614 616 def set(self, expr, *args):
615 617 '''Find revisions matching a revset and emit changectx instances.
616 618
617 619 This is a convenience wrapper around ``revs()`` that iterates the
618 620 result and is a generator of changectx instances.
619 621
620 622 Revset aliases from the configuration are not expanded. To expand
621 623 user aliases, consider calling ``scmutil.revrange()``.
622 624 '''
623 625 for r in self.revs(expr, *args):
624 626 yield self[r]
625 627
626 628 def anyrevs(self, specs, user=False):
627 629 '''Find revisions matching one of the given revsets.
628 630
629 631 Revset aliases from the configuration are not expanded by default. To
630 632 expand user aliases, specify ``user=True``.
631 633 '''
632 634 if user:
633 635 m = revset.matchany(self.ui, specs, repo=self)
634 636 else:
635 637 m = revset.matchany(None, specs)
636 638 return m(self)
637 639
638 640 def url(self):
639 641 return 'file:' + self.root
640 642
641 643 def hook(self, name, throw=False, **args):
642 644 """Call a hook, passing this repo instance.
643 645
644 646 This a convenience method to aid invoking hooks. Extensions likely
645 647 won't call this unless they have registered a custom hook or are
646 648 replacing code that is expected to call a hook.
647 649 """
648 650 return hook.hook(self.ui, self, name, throw, **args)
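# Caller sketch (hook name hypothetical): an extension that registered a
# 'myext-update' hook could trigger it with
#   repo.hook('myext-update', throw=False, node=hex(node))
# External hooks see the keyword arguments as HG_* environment variables.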
649 651
650 652 @unfilteredmethod
651 653 def _tag(self, names, node, message, local, user, date, extra=None,
652 654 editor=False):
653 655 if isinstance(names, str):
654 656 names = (names,)
655 657
656 658 branches = self.branchmap()
657 659 for name in names:
658 660 self.hook('pretag', throw=True, node=hex(node), tag=name,
659 661 local=local)
660 662 if name in branches:
661 663 self.ui.warn(_("warning: tag %s conflicts with existing"
662 664 " branch name\n") % name)
663 665
664 666 def writetags(fp, names, munge, prevtags):
665 667 fp.seek(0, 2)
666 668 if prevtags and prevtags[-1] != '\n':
667 669 fp.write('\n')
668 670 for name in names:
669 671 if munge:
670 672 m = munge(name)
671 673 else:
672 674 m = name
673 675
674 676 if (self._tagscache.tagtypes and
675 677 name in self._tagscache.tagtypes):
676 678 old = self.tags().get(name, nullid)
677 679 fp.write('%s %s\n' % (hex(old), m))
678 680 fp.write('%s %s\n' % (hex(node), m))
679 681 fp.close()
680 682
681 683 prevtags = ''
682 684 if local:
683 685 try:
684 686 fp = self.vfs('localtags', 'r+')
685 687 except IOError:
686 688 fp = self.vfs('localtags', 'a')
687 689 else:
688 690 prevtags = fp.read()
689 691
690 692 # local tags are stored in the current charset
691 693 writetags(fp, names, None, prevtags)
692 694 for name in names:
693 695 self.hook('tag', node=hex(node), tag=name, local=local)
694 696 return
695 697
696 698 try:
697 699 fp = self.wvfs('.hgtags', 'rb+')
698 700 except IOError as e:
699 701 if e.errno != errno.ENOENT:
700 702 raise
701 703 fp = self.wvfs('.hgtags', 'ab')
702 704 else:
703 705 prevtags = fp.read()
704 706
705 707 # committed tags are stored in UTF-8
706 708 writetags(fp, names, encoding.fromlocal, prevtags)
707 709
708 710 fp.close()
709 711
710 712 self.invalidatecaches()
711 713
712 714 if '.hgtags' not in self.dirstate:
713 715 self[None].add(['.hgtags'])
714 716
715 717 m = matchmod.exact(self.root, '', ['.hgtags'])
716 718 tagnode = self.commit(message, user, date, extra=extra, match=m,
717 719 editor=editor)
718 720
719 721 for name in names:
720 722 self.hook('tag', node=hex(node), tag=name, local=local)
721 723
722 724 return tagnode
723 725
724 726 def tag(self, names, node, message, local, user, date, editor=False):
725 727 '''tag a revision with one or more symbolic names.
726 728
727 729 names is a list of strings or, when adding a single tag, names may be a
728 730 string.
729 731
730 732 if local is True, the tags are stored in a per-repository file.
731 733 otherwise, they are stored in the .hgtags file, and a new
732 734 changeset is committed with the change.
733 735
734 736 keyword arguments:
735 737
736 738 local: whether to store tags in non-version-controlled file
737 739 (default False)
738 740
739 741 message: commit message to use if committing
740 742
741 743 user: name of user to use if committing
742 744
743 745 date: date tuple to use if committing'''
744 746
745 747 if not local:
746 748 m = matchmod.exact(self.root, '', ['.hgtags'])
747 749 if any(self.status(match=m, unknown=True, ignored=True)):
748 750 raise error.Abort(_('working copy of .hgtags is changed'),
749 751 hint=_('please commit .hgtags manually'))
750 752
751 753 self.tags() # instantiate the cache
752 754 self._tag(names, node, message, local, user, date, editor=editor)
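# Caller sketch (all values illustrative):
#   repo.tag(['v1.0'], repo['.'].node(), 'Added tag v1.0', False,
#            'user@example.com', None)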
753 755
754 756 @filteredpropertycache
755 757 def _tagscache(self):
756 758 '''Returns a tagscache object that contains various tag-related
757 759 caches.'''
758 760
759 761 # This simplifies its cache management by having one decorated
760 762 # function (this one) and the rest simply fetch things from it.
761 763 class tagscache(object):
762 764 def __init__(self):
763 765 # These two define the set of tags for this repository. tags
764 766 # maps tag name to node; tagtypes maps tag name to 'global' or
765 767 # 'local'. (Global tags are defined by .hgtags across all
766 768 # heads, and local tags are defined in .hg/localtags.)
767 769 # They constitute the in-memory cache of tags.
768 770 self.tags = self.tagtypes = None
769 771
770 772 self.nodetagscache = self.tagslist = None
771 773
772 774 cache = tagscache()
773 775 cache.tags, cache.tagtypes = self._findtags()
774 776
775 777 return cache
776 778
777 779 def tags(self):
778 780 '''return a mapping of tag to node'''
779 781 t = {}
780 782 if self.changelog.filteredrevs:
781 783 tags, tt = self._findtags()
782 784 else:
783 785 tags = self._tagscache.tags
784 786 for k, v in tags.iteritems():
785 787 try:
786 788 # ignore tags to unknown nodes
787 789 self.changelog.rev(v)
788 790 t[k] = v
789 791 except (error.LookupError, ValueError):
790 792 pass
791 793 return t
792 794
793 795 def _findtags(self):
794 796 '''Do the hard work of finding tags. Return a pair of dicts
795 797 (tags, tagtypes) where tags maps tag name to node, and tagtypes
796 798 maps tag name to a string like \'global\' or \'local\'.
797 799 Subclasses or extensions are free to add their own tags, but
798 800 should be aware that the returned dicts will be retained for the
799 801 duration of the localrepo object.'''
800 802
801 803 # XXX what tagtype should subclasses/extensions use? Currently
802 804 # mq and bookmarks add tags, but do not set the tagtype at all.
803 805 # Should each extension invent its own tag type? Should there
804 806 # be one tagtype for all such "virtual" tags? Or is the status
805 807 # quo fine?
806 808
807 809 alltags = {} # map tag name to (node, hist)
808 810 tagtypes = {}
809 811
810 812 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
811 813 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
812 814
813 815 # Build the return dicts. Have to re-encode tag names because
814 816 # the tags module always uses UTF-8 (in order not to lose info
815 817 # writing to the cache), but the rest of Mercurial wants them in
816 818 # local encoding.
817 819 tags = {}
818 820 for (name, (node, hist)) in alltags.iteritems():
819 821 if node != nullid:
820 822 tags[encoding.tolocal(name)] = node
821 823 tags['tip'] = self.changelog.tip()
822 824 tagtypes = dict([(encoding.tolocal(name), value)
823 825 for (name, value) in tagtypes.iteritems()])
824 826 return (tags, tagtypes)
825 827
826 828 def tagtype(self, tagname):
827 829 '''
828 830 return the type of the given tag. result can be:
829 831
830 832 'local' : a local tag
831 833 'global' : a global tag
832 834 None : tag does not exist
833 835 '''
834 836
835 837 return self._tagscache.tagtypes.get(tagname)
836 838
837 839 def tagslist(self):
838 840 '''return a list of tags ordered by revision'''
839 841 if not self._tagscache.tagslist:
840 842 l = []
841 843 for t, n in self.tags().iteritems():
842 844 l.append((self.changelog.rev(n), t, n))
843 845 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
844 846
845 847 return self._tagscache.tagslist
846 848
847 849 def nodetags(self, node):
848 850 '''return the tags associated with a node'''
849 851 if not self._tagscache.nodetagscache:
850 852 nodetagscache = {}
851 853 for t, n in self._tagscache.tags.iteritems():
852 854 nodetagscache.setdefault(n, []).append(t)
853 855 for tags in nodetagscache.itervalues():
854 856 tags.sort()
855 857 self._tagscache.nodetagscache = nodetagscache
856 858 return self._tagscache.nodetagscache.get(node, [])
857 859
858 860 def nodebookmarks(self, node):
859 861 """return the list of bookmarks pointing to the specified node"""
860 862 marks = []
861 863 for bookmark, n in self._bookmarks.iteritems():
862 864 if n == node:
863 865 marks.append(bookmark)
864 866 return sorted(marks)
865 867
866 868 def branchmap(self):
867 869 '''returns a dictionary {branch: [branchheads]} with branchheads
868 870 ordered by increasing revision number'''
869 871 branchmap.updatecache(self)
870 872 return self._branchcaches[self.filtername]
871 873
872 874 @unfilteredmethod
873 875 def revbranchcache(self):
874 876 if not self._revbranchcache:
875 877 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
876 878 return self._revbranchcache
877 879
878 880 def branchtip(self, branch, ignoremissing=False):
879 881 '''return the tip node for a given branch
880 882
881 883 If ignoremissing is True, then this method will not raise an error.
882 884 This is helpful for callers that only expect None for a missing branch
883 885 (e.g. namespace).
884 886
885 887 '''
886 888 try:
887 889 return self.branchmap().branchtip(branch)
888 890 except KeyError:
889 891 if not ignoremissing:
890 892 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
891 893 else:
892 894 pass
893 895
894 896 def lookup(self, key):
895 897 return self[key].node()
896 898
897 899 def lookupbranch(self, key, remote=None):
898 900 repo = remote or self
899 901 if key in repo.branchmap():
900 902 return key
901 903
902 904 repo = (remote and remote.local()) and remote or self
903 905 return repo[key].branch()
904 906
905 907 def known(self, nodes):
906 908 cl = self.changelog
907 909 nm = cl.nodemap
908 910 filtered = cl.filteredrevs
909 911 result = []
910 912 for n in nodes:
911 913 r = nm.get(n)
912 914 resp = not (r is None or r in filtered)
913 915 result.append(resp)
914 916 return result
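# Sketch: discovery uses this to ask which nodes the repo has, e.g.
#   repo.known([n1, n2]) -> [True, False]   # n2 unknown or filtered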
915 917
916 918 def local(self):
917 919 return self
918 920
919 921 def publishing(self):
920 922 # it's safe (and desirable) to trust the publish flag unconditionally
921 923 # so that we don't finalize changes shared between users via ssh or nfs
922 924 return self.ui.configbool('phases', 'publish', True, untrusted=True)
923 925
924 926 def cancopy(self):
925 927 # so statichttprepo's override of local() works
926 928 if not self.local():
927 929 return False
928 930 if not self.publishing():
929 931 return True
930 932 # if publishing we can't copy if there is filtered content
931 933 return not self.filtered('visible').changelog.filteredrevs
932 934
933 935 def shared(self):
934 936 '''the type of shared repository (None if not shared)'''
935 937 if self.sharedpath != self.path:
936 938 return 'store'
937 939 return None
938 940
939 941 def join(self, f, *insidef):
940 942 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
941 943 return self.vfs.join(os.path.join(f, *insidef))
942 944
943 945 def wjoin(self, f, *insidef):
944 946 return self.vfs.reljoin(self.root, f, *insidef)
945 947
946 948 def file(self, f):
947 949 if f[0] == '/':
948 950 f = f[1:]
949 951 return filelog.filelog(self.svfs, f)
950 952
951 953 def changectx(self, changeid):
952 954 return self[changeid]
953 955
954 956 def setparents(self, p1, p2=nullid):
955 957 self.dirstate.beginparentchange()
956 958 copies = self.dirstate.setparents(p1, p2)
957 959 pctx = self[p1]
958 960 if copies:
959 961 # Adjust copy records, the dirstate cannot do it, it
960 962 # requires access to parents manifests. Preserve them
961 963 # only for entries added to first parent.
962 964 for f in copies:
963 965 if f not in pctx and copies[f] in pctx:
964 966 self.dirstate.copy(copies[f], f)
965 967 if p2 == nullid:
966 968 for f, s in sorted(self.dirstate.copies().items()):
967 969 if f not in pctx and s not in pctx:
968 970 self.dirstate.copy(None, f)
969 971 self.dirstate.endparentchange()
970 972
971 973 def filectx(self, path, changeid=None, fileid=None):
972 974 """changeid can be a changeset revision, node, or tag.
973 975 fileid can be a file revision or node."""
974 976 return context.filectx(self, path, changeid, fileid)
975 977
976 978 def getcwd(self):
977 979 return self.dirstate.getcwd()
978 980
979 981 def pathto(self, f, cwd=None):
980 982 return self.dirstate.pathto(f, cwd)
981 983
982 984 def wfile(self, f, mode='r'):
983 985 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
984 986 return self.wvfs(f, mode)
985 987
986 988 def _link(self, f):
987 989 self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
988 990 '4.0')
989 991 return self.wvfs.islink(f)
990 992
991 993 def _loadfilter(self, filter):
992 994 if filter not in self.filterpats:
993 995 l = []
994 996 for pat, cmd in self.ui.configitems(filter):
995 997 if cmd == '!':
996 998 continue
997 999 mf = matchmod.match(self.root, '', [pat])
998 1000 fn = None
999 1001 params = cmd
1000 1002 for name, filterfn in self._datafilters.iteritems():
1001 1003 if cmd.startswith(name):
1002 1004 fn = filterfn
1003 1005 params = cmd[len(name):].lstrip()
1004 1006 break
1005 1007 if not fn:
1006 1008 fn = lambda s, c, **kwargs: util.filter(s, c)
1007 1009 # Wrap old filters not supporting keyword arguments
1008 1010 if not inspect.getargspec(fn)[2]:
1009 1011 oldfn = fn
1010 1012 fn = lambda s, c, **kwargs: oldfn(s, c)
1011 1013 l.append((mf, fn, params))
1012 1014 self.filterpats[filter] = l
1013 1015 return self.filterpats[filter]
1014 1016
1015 1017 def _filter(self, filterpats, filename, data):
1016 1018 for mf, fn, cmd in filterpats:
1017 1019 if mf(filename):
1018 1020 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1019 1021 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1020 1022 break
1021 1023
1022 1024 return data
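# The patterns come from the correspondingly named hgrc section; an
# illustrative configuration (filter commands hypothetical):
#   [encode]
#   *.txt = tempfile: dos2unix INFILE OUTFILE
#   [decode]
#   *.txt = tempfile: unix2dos INFILE OUTFILE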
1023 1025
1024 1026 @unfilteredpropertycache
1025 1027 def _encodefilterpats(self):
1026 1028 return self._loadfilter('encode')
1027 1029
1028 1030 @unfilteredpropertycache
1029 1031 def _decodefilterpats(self):
1030 1032 return self._loadfilter('decode')
1031 1033
1032 1034 def adddatafilter(self, name, filter):
1033 1035 self._datafilters[name] = filter
1034 1036
1035 1037 def wread(self, filename):
1036 1038 if self.wvfs.islink(filename):
1037 1039 data = self.wvfs.readlink(filename)
1038 1040 else:
1039 1041 data = self.wvfs.read(filename)
1040 1042 return self._filter(self._encodefilterpats, filename, data)
1041 1043
1042 1044 def wwrite(self, filename, data, flags, backgroundclose=False):
1043 1045 """write ``data`` into ``filename`` in the working directory
1044 1046
1045 1047 This returns the length of the written (possibly decoded) data.
1046 1048 """
1047 1049 data = self._filter(self._decodefilterpats, filename, data)
1048 1050 if 'l' in flags:
1049 1051 self.wvfs.symlink(data, filename)
1050 1052 else:
1051 1053 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1052 1054 if 'x' in flags:
1053 1055 self.wvfs.setflags(filename, False, True)
1054 1056 return len(data)
1055 1057
1056 1058 def wwritedata(self, filename, data):
1057 1059 return self._filter(self._decodefilterpats, filename, data)
1058 1060
1059 1061 def currenttransaction(self):
1060 1062 """return the current transaction or None if non exists"""
1061 1063 if self._transref:
1062 1064 tr = self._transref()
1063 1065 else:
1064 1066 tr = None
1065 1067
1066 1068 if tr and tr.running():
1067 1069 return tr
1068 1070 return None
1069 1071
1070 1072 def transaction(self, desc, report=None):
1071 1073 if (self.ui.configbool('devel', 'all-warnings')
1072 1074 or self.ui.configbool('devel', 'check-locks')):
1073 1075 if self._currentlock(self._lockref) is None:
1074 1076 raise error.ProgrammingError('transaction requires locking')
1075 1077 tr = self.currenttransaction()
1076 1078 if tr is not None:
1077 1079 return tr.nest()
1078 1080
1079 1081 # abort here if the journal already exists
1080 1082 if self.svfs.exists("journal"):
1081 1083 raise error.RepoError(
1082 1084 _("abandoned transaction found"),
1083 1085 hint=_("run 'hg recover' to clean up transaction"))
1084 1086
1085 1087 idbase = "%.40f#%f" % (random.random(), time.time())
1086 1088 ha = hex(hashlib.sha1(idbase).digest())
1087 1089 txnid = 'TXN:' + ha
1088 1090 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1089 1091
1090 1092 self._writejournal(desc)
1091 1093 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1092 1094 if report:
1093 1095 rp = report
1094 1096 else:
1095 1097 rp = self.ui.warn
1096 1098 vfsmap = {'plain': self.vfs} # root of .hg/
1097 1099 # we must avoid cyclic reference between repo and transaction.
1098 1100 reporef = weakref.ref(self)
1099 1101 def validate(tr):
1100 1102 """will run pre-closing hooks"""
1101 1103 reporef().hook('pretxnclose', throw=True,
1102 1104 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1103 1105 def releasefn(tr, success):
1104 1106 repo = reporef()
1105 1107 if success:
1106 1108 # this should be explicitly invoked here, because
1107 1109 # in-memory changes aren't written out when closing the
1108 1110 # transaction, if tr.addfilegenerator (via
1109 1111 # dirstate.write or so) isn't invoked while the
1110 1112 # transaction is running
1111 1113 repo.dirstate.write(None)
1112 1114 else:
1113 1115 # discard all changes (including ones already written
1114 1116 # out) in this transaction
1115 1117 repo.dirstate.restorebackup(None, prefix='journal.')
1116 1118
1117 1119 repo.invalidate(clearfilecache=True)
1118 1120
1119 1121 tr = transaction.transaction(rp, self.svfs, vfsmap,
1120 1122 "journal",
1121 1123 "undo",
1122 1124 aftertrans(renames),
1123 1125 self.store.createmode,
1124 1126 validator=validate,
1125 1127 releasefn=releasefn)
1126 1128
1127 1129 tr.hookargs['txnid'] = txnid
1128 1130 # note: writing the fncache only during finalize means that the file is
1129 1131 # outdated when running hooks. As fncache is used for streaming clone,
1130 1132 # this is not expected to break anything that happens during the hooks.
1131 1133 tr.addfinalize('flush-fncache', self.store.write)
1132 1134 def txnclosehook(tr2):
1133 1135 """To be run if transaction is successful, will schedule a hook run
1134 1136 """
1135 1137 # Don't reference tr2 in hook() so we don't hold a reference.
1136 1138 # This reduces memory consumption when there are multiple
1137 1139 # transactions per lock. This can likely go away if issue5045
1138 1140 # fixes the function accumulation.
1139 1141 hookargs = tr2.hookargs
1140 1142
1141 1143 def hook():
1142 1144 reporef().hook('txnclose', throw=False, txnname=desc,
1143 1145 **pycompat.strkwargs(hookargs))
1144 1146 reporef()._afterlock(hook)
1145 1147 tr.addfinalize('txnclose-hook', txnclosehook)
1146 1148 def txnaborthook(tr2):
1147 1149 """To be run if transaction is aborted
1148 1150 """
1149 1151 reporef().hook('txnabort', throw=False, txnname=desc,
1150 1152 **tr2.hookargs)
1151 1153 tr.addabort('txnabort-hook', txnaborthook)
1152 1154 # avoid eager cache invalidation. in-memory data should be identical
1153 1155 # to stored data if transaction has no error.
1154 1156 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1155 1157 self._transref = weakref.ref(tr)
1156 1158 return tr
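# Typical caller sketch (description string illustrative); the store lock
# must already be held, per the devel check above:
#   tr = repo.transaction('my-operation')
#   try:
#       ...  # write store data through tr
#       tr.close()
#   finally:
#       tr.release()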
1157 1159
1158 1160 def _journalfiles(self):
1159 1161 return ((self.svfs, 'journal'),
1160 1162 (self.vfs, 'journal.dirstate'),
1161 1163 (self.vfs, 'journal.branch'),
1162 1164 (self.vfs, 'journal.desc'),
1163 1165 (self.vfs, 'journal.bookmarks'),
1164 1166 (self.svfs, 'journal.phaseroots'))
1165 1167
1166 1168 def undofiles(self):
1167 1169 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1168 1170
1169 1171 def _writejournal(self, desc):
1170 1172 self.dirstate.savebackup(None, prefix='journal.')
1171 1173 self.vfs.write("journal.branch",
1172 1174 encoding.fromlocal(self.dirstate.branch()))
1173 1175 self.vfs.write("journal.desc",
1174 1176 "%d\n%s\n" % (len(self), desc))
1175 1177 self.vfs.write("journal.bookmarks",
1176 1178 self.vfs.tryread("bookmarks"))
1177 1179 self.svfs.write("journal.phaseroots",
1178 1180 self.svfs.tryread("phaseroots"))
1179 1181
1180 1182 def recover(self):
1181 1183 with self.lock():
1182 1184 if self.svfs.exists("journal"):
1183 1185 self.ui.status(_("rolling back interrupted transaction\n"))
1184 1186 vfsmap = {'': self.svfs,
1185 1187 'plain': self.vfs,}
1186 1188 transaction.rollback(self.svfs, vfsmap, "journal",
1187 1189 self.ui.warn)
1188 1190 self.invalidate()
1189 1191 return True
1190 1192 else:
1191 1193 self.ui.warn(_("no interrupted transaction available\n"))
1192 1194 return False
1193 1195
1194 1196 def rollback(self, dryrun=False, force=False):
1195 1197 wlock = lock = dsguard = None
1196 1198 try:
1197 1199 wlock = self.wlock()
1198 1200 lock = self.lock()
1199 1201 if self.svfs.exists("undo"):
1200 1202 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1201 1203
1202 1204 return self._rollback(dryrun, force, dsguard)
1203 1205 else:
1204 1206 self.ui.warn(_("no rollback information available\n"))
1205 1207 return 1
1206 1208 finally:
1207 1209 release(dsguard, lock, wlock)
1208 1210
1209 1211 @unfilteredmethod # Until we get smarter cache management
1210 1212 def _rollback(self, dryrun, force, dsguard):
1211 1213 ui = self.ui
1212 1214 try:
1213 1215 args = self.vfs.read('undo.desc').splitlines()
1214 1216 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1215 1217 if len(args) >= 3:
1216 1218 detail = args[2]
1217 1219 oldtip = oldlen - 1
1218 1220
1219 1221 if detail and ui.verbose:
1220 1222 msg = (_('repository tip rolled back to revision %s'
1221 1223 ' (undo %s: %s)\n')
1222 1224 % (oldtip, desc, detail))
1223 1225 else:
1224 1226 msg = (_('repository tip rolled back to revision %s'
1225 1227 ' (undo %s)\n')
1226 1228 % (oldtip, desc))
1227 1229 except IOError:
1228 1230 msg = _('rolling back unknown transaction\n')
1229 1231 desc = None
1230 1232
1231 1233 if not force and self['.'] != self['tip'] and desc == 'commit':
1232 1234 raise error.Abort(
1233 1235 _('rollback of last commit while not checked out '
1234 1236 'may lose data'), hint=_('use -f to force'))
1235 1237
1236 1238 ui.status(msg)
1237 1239 if dryrun:
1238 1240 return 0
1239 1241
1240 1242 parents = self.dirstate.parents()
1241 1243 self.destroying()
1242 1244 vfsmap = {'plain': self.vfs, '': self.svfs}
1243 1245 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1244 1246 if self.vfs.exists('undo.bookmarks'):
1245 1247 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1246 1248 if self.svfs.exists('undo.phaseroots'):
1247 1249 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1248 1250 self.invalidate()
1249 1251
1250 1252 parentgone = (parents[0] not in self.changelog.nodemap or
1251 1253 parents[1] not in self.changelog.nodemap)
1252 1254 if parentgone:
1253 1255 # prevent dirstateguard from overwriting already restored one
1254 1256 dsguard.close()
1255 1257
1256 1258 self.dirstate.restorebackup(None, prefix='undo.')
1257 1259 try:
1258 1260 branch = self.vfs.read('undo.branch')
1259 1261 self.dirstate.setbranch(encoding.tolocal(branch))
1260 1262 except IOError:
1261 1263 ui.warn(_('named branch could not be reset: '
1262 1264 'current branch is still \'%s\'\n')
1263 1265 % self.dirstate.branch())
1264 1266
1265 1267 parents = tuple([p.rev() for p in self[None].parents()])
1266 1268 if len(parents) > 1:
1267 1269 ui.status(_('working directory now based on '
1268 1270 'revisions %d and %d\n') % parents)
1269 1271 else:
1270 1272 ui.status(_('working directory now based on '
1271 1273 'revision %d\n') % parents)
1272 1274 mergemod.mergestate.clean(self, self['.'].node())
1273 1275
1274 1276 # TODO: if we know which new heads may result from this rollback, pass
1275 1277 # them to destroy(), which will prevent the branchhead cache from being
1276 1278 # invalidated.
1277 1279 self.destroyed()
1278 1280 return 0
1279 1281
1280 1282 def invalidatecaches(self):
1281 1283
1282 1284 if '_tagscache' in vars(self):
1283 1285 # can't use delattr on proxy
1284 1286 del self.__dict__['_tagscache']
1285 1287
1286 1288 self.unfiltered()._branchcaches.clear()
1287 1289 self.invalidatevolatilesets()
1288 1290
1289 1291 def invalidatevolatilesets(self):
1290 1292 self.filteredrevcache.clear()
1291 1293 obsolete.clearobscaches(self)
1292 1294
1293 1295 def invalidatedirstate(self):
1294 1296 '''Invalidates the dirstate, causing the next call to dirstate
1295 1297 to check if it was modified since the last time it was read,
1296 1298 rereading it if it has.
1297 1299
1298 1300 This is different from dirstate.invalidate() in that it doesn't always
1299 1301 reread the dirstate. Use dirstate.invalidate() if you want to
1300 1302 explicitly read the dirstate again (i.e. restoring it to a previous
1301 1303 known good state).'''
1302 1304 if hasunfilteredcache(self, 'dirstate'):
1303 1305 for k in self.dirstate._filecache:
1304 1306 try:
1305 1307 delattr(self.dirstate, k)
1306 1308 except AttributeError:
1307 1309 pass
1308 1310 delattr(self.unfiltered(), 'dirstate')
1309 1311
1310 1312 def invalidate(self, clearfilecache=False):
1311 1313 '''Invalidates both store and non-store parts other than dirstate
1312 1314
1313 1315 If a transaction is running, invalidation of store is omitted,
1314 1316 because discarding in-memory changes might cause inconsistency
1315 1317 (e.g. incomplete fncache causes unintentional failure, but
1316 1318 redundant one doesn't).
1317 1319 '''
1318 1320 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1319 1321 for k in list(self._filecache.keys()):
1320 1322 # dirstate is invalidated separately in invalidatedirstate()
1321 1323 if k == 'dirstate':
1322 1324 continue
1323 1325
1324 1326 if clearfilecache:
1325 1327 del self._filecache[k]
1326 1328 try:
1327 1329 delattr(unfiltered, k)
1328 1330 except AttributeError:
1329 1331 pass
1330 1332 self.invalidatecaches()
1331 1333 if not self.currenttransaction():
1332 1334 # TODO: Changing contents of store outside transaction
1333 1335 # causes inconsistency. We should make in-memory store
1334 1336 # changes detectable, and abort if changed.
1335 1337 self.store.invalidatecaches()
1336 1338
1337 1339 def invalidateall(self):
1338 1340 '''Fully invalidates both store and non-store parts, causing the
1339 1341 subsequent operation to reread any outside changes.'''
1340 1342 # extension should hook this to invalidate its caches
1341 1343 self.invalidate()
1342 1344 self.invalidatedirstate()
1343 1345
1344 1346 @unfilteredmethod
1345 1347 def _refreshfilecachestats(self, tr):
1346 1348 """Reload stats of cached files so that they are flagged as valid"""
1347 1349 for k, ce in self._filecache.items():
1348 1350 if k == 'dirstate' or k not in self.__dict__:
1349 1351 continue
1350 1352 ce.refresh()
1351 1353
1352 1354 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1353 1355 inheritchecker=None, parentenvvar=None):
1354 1356 parentlock = None
1355 1357 # the contents of parentenvvar are used by the underlying lock to
1356 1358 # determine whether it can be inherited
1357 1359 if parentenvvar is not None:
1358 1360 parentlock = encoding.environ.get(parentenvvar)
1359 1361 try:
1360 1362 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1361 1363 acquirefn=acquirefn, desc=desc,
1362 1364 inheritchecker=inheritchecker,
1363 1365 parentlock=parentlock)
1364 1366 except error.LockHeld as inst:
1365 1367 if not wait:
1366 1368 raise
1367 1369 # show more details for new-style locks
1368 1370 if ':' in inst.locker:
1369 1371 host, pid = inst.locker.split(":", 1)
1370 1372 self.ui.warn(
1371 1373 _("waiting for lock on %s held by process %r "
1372 1374 "on host %r\n") % (desc, pid, host))
1373 1375 else:
1374 1376 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1375 1377 (desc, inst.locker))
1376 1378 # default to 600 seconds timeout
1377 1379 l = lockmod.lock(vfs, lockname,
1378 1380 int(self.ui.config("ui", "timeout", "600")),
1379 1381 releasefn=releasefn, acquirefn=acquirefn,
1380 1382 desc=desc)
1381 1383 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1382 1384 return l
1383 1385
1384 1386 def _afterlock(self, callback):
1385 1387 """add a callback to be run when the repository is fully unlocked
1386 1388
1387 1389 The callback will be executed when the outermost lock is released
1388 1390 (with wlock being higher level than 'lock')."""
1389 1391 for ref in (self._wlockref, self._lockref):
1390 1392 l = ref and ref()
1391 1393 if l and l.held:
1392 1394 l.postrelease.append(callback)
1393 1395 break
1394 1396 else: # no lock has been found.
1395 1397 callback()
1396 1398
1397 1399 def lock(self, wait=True):
1398 1400 '''Lock the repository store (.hg/store) and return a weak reference
1399 1401 to the lock. Use this before modifying the store (e.g. committing or
1400 1402 stripping). If you are opening a transaction, get a lock as well.
1401 1403
1402 1404 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1403 1405 'wlock' first to avoid a dead-lock hazard.'''
1404 1406 l = self._currentlock(self._lockref)
1405 1407 if l is not None:
1406 1408 l.lock()
1407 1409 return l
1408 1410
1409 1411 l = self._lock(self.svfs, "lock", wait, None,
1410 1412 self.invalidate, _('repository %s') % self.origroot)
1411 1413 self._lockref = weakref.ref(l)
1412 1414 return l
1413 1415
1414 1416 def _wlockchecktransaction(self):
1415 1417 if self.currenttransaction() is not None:
1416 1418 raise error.LockInheritanceContractViolation(
1417 1419 'wlock cannot be inherited in the middle of a transaction')
1418 1420
1419 1421 def wlock(self, wait=True):
1420 1422 '''Lock the non-store parts of the repository (everything under
1421 1423 .hg except .hg/store) and return a weak reference to the lock.
1422 1424
1423 1425 Use this before modifying files in .hg.
1424 1426
1425 1427 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1426 1428 'wlock' first to avoid a dead-lock hazard.'''
1427 1429 l = self._wlockref and self._wlockref()
1428 1430 if l is not None and l.held:
1429 1431 l.lock()
1430 1432 return l
1431 1433
1432 1434 # We do not need to check for non-waiting lock acquisition. Such
1433 1435 # acquisition would not cause a deadlock, as it would just fail.
1434 1436 if wait and (self.ui.configbool('devel', 'all-warnings')
1435 1437 or self.ui.configbool('devel', 'check-locks')):
1436 1438 if self._currentlock(self._lockref) is not None:
1437 1439 self.ui.develwarn('"wlock" acquired after "lock"')
1438 1440
1439 1441 def unlock():
1440 1442 if self.dirstate.pendingparentchange():
1441 1443 self.dirstate.invalidate()
1442 1444 else:
1443 1445 self.dirstate.write(None)
1444 1446
1445 1447 self._filecache['dirstate'].refresh()
1446 1448
1447 1449 l = self._lock(self.vfs, "wlock", wait, unlock,
1448 1450 self.invalidatedirstate, _('working directory of %s') %
1449 1451 self.origroot,
1450 1452 inheritchecker=self._wlockchecktransaction,
1451 1453 parentenvvar='HG_WLOCK_LOCKER')
1452 1454 self._wlockref = weakref.ref(l)
1453 1455 return l
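# Lock-ordering sketch per the docstrings above: always take 'wlock'
# before 'lock' to avoid the deadlock hazard (locks are context managers):
#   with repo.wlock():
#       with repo.lock():
#           ...  # safe to modify working copy and store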
1454 1456
1455 1457 def _currentlock(self, lockref):
1456 1458 """Returns the lock if it's held, or None if it's not."""
1457 1459 if lockref is None:
1458 1460 return None
1459 1461 l = lockref()
1460 1462 if l is None or not l.held:
1461 1463 return None
1462 1464 return l
1463 1465
1464 1466 def currentwlock(self):
1465 1467 """Returns the wlock if it's held, or None if it's not."""
1466 1468 return self._currentlock(self._wlockref)
1467 1469
1468 1470 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1469 1471 """
1470 1472 commit an individual file as part of a larger transaction
1471 1473 """
1472 1474
1473 1475 fname = fctx.path()
1474 1476 fparent1 = manifest1.get(fname, nullid)
1475 1477 fparent2 = manifest2.get(fname, nullid)
1476 1478 if isinstance(fctx, context.filectx):
1477 1479 node = fctx.filenode()
1478 1480 if node in [fparent1, fparent2]:
1479 1481 self.ui.debug('reusing %s filelog entry\n' % fname)
1480 1482 if manifest1.flags(fname) != fctx.flags():
1481 1483 changelist.append(fname)
1482 1484 return node
1483 1485
1484 1486 flog = self.file(fname)
1485 1487 meta = {}
1486 1488 copy = fctx.renamed()
1487 1489 if copy and copy[0] != fname:
1488 1490 # Mark the new revision of this file as a copy of another
1489 1491 # file. This copy data will effectively act as a parent
1490 1492 # of this new revision. If this is a merge, the first
1491 1493 # parent will be the nullid (meaning "look up the copy data")
1492 1494 # and the second one will be the other parent. For example:
1493 1495 #
1494 1496 # 0 --- 1 --- 3 rev1 changes file foo
1495 1497 # \ / rev2 renames foo to bar and changes it
1496 1498 # \- 2 -/ rev3 should have bar with all changes and
1497 1499 # should record that bar descends from
1498 1500 # bar in rev2 and foo in rev1
1499 1501 #
1500 1502 # this allows this merge to succeed:
1501 1503 #
1502 1504 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1503 1505 # \ / merging rev3 and rev4 should use bar@rev2
1504 1506 # \- 2 --- 4 as the merge base
1505 1507 #
1506 1508
1507 1509 cfname = copy[0]
1508 1510 crev = manifest1.get(cfname)
1509 1511 newfparent = fparent2
1510 1512
1511 1513 if manifest2: # branch merge
1512 1514 if fparent2 == nullid or crev is None: # copied on remote side
1513 1515 if cfname in manifest2:
1514 1516 crev = manifest2[cfname]
1515 1517 newfparent = fparent1
1516 1518
1517 1519 # Here, we used to search backwards through history to try to find
1518 1520 # where the file copy came from if the source of a copy was not in
1519 1521 # the parent directory. However, this doesn't actually make sense to
1520 1522 # do (what does a copy from something not in your working copy even
1521 1523 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1522 1524 # the user that copy information was dropped, so if they didn't
1523 1525 # expect this outcome it can be fixed, but this is the correct
1524 1526 # behavior in this circumstance.
1525 1527
            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

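    # Illustrative effect of the checks above (added comment; the session
    # and file name are hypothetical):
    #
    #   $ hg commit untracked.txt
    #   abort: untracked.txt: file not tracked!
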
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message. Returns the node
        of the new revision, or None if nothing was committed.
        """
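
        # A minimal usage sketch (added comment; the caller code below is
        # hypothetical):
        #
        #   node = repo.commit(text='fix bug', user='me <me@example.com>')
        #   if node is None:
        #       repo.ui.status('nothing changed\n')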
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

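    # Sketch (added comment; illustrative only): commitctx can also be fed an
    # in-memory context such as context.memctx to create a commit without
    # touching the working directory; the file name and content below are
    # hypothetical:
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'content\n')
    #   mctx = context.memctx(repo, (p1node, p2node), 'commit message',
    #                         ['a.txt'], getfilectx, user, date)
    #   node = repo.commitctx(mctx)
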
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branch cache collaboration, this is done from the nearest
        # filtered subset and is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
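        # Added descriptive comment (hedged summary of the code below): for
        # each (top, bottom) pair, walk the first-parent chain from top
        # towards bottom, recording nodes at exponentially growing distances
        # (1, 2, 4, 8, ...); historically this sampling backed the legacy
        # 'between' wire-protocol command used to narrow down revision ranges.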
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

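    # Sketch (hypothetical extension code, not part of this module): an
    # extension could add its own pre-push checks by wrapping checkpush:
    #
    #   from mercurial import error, extensions, localrepo
    #
    #   def _checkpush(orig, self, pushop):
    #       orig(self, pushop)
    #       if pushop.force:
    #           raise error.Abort('forced pushes are disabled here')
    #
    #   def uisetup(ui):
    #       extensions.wrapfunction(localrepo.localrepository, 'checkpush',
    #                               _checkpush)
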
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose registered functions are called
        with a pushop (carrying repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

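    # Usage sketch (added comment; 'myext' and the callback below are
    # hypothetical):
    #
    #   def _prepush(pushop):
    #       # pushop.repo, pushop.remote and pushop.outgoing are available
    #       pass
    #
    #   repo.prepushoutgoinghooks.add('myext', _prepush)
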
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

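    # Illustrative use of the pushkey protocol (added comment; the bookmark
    # name and node are hypothetical; keys and values are plain strings,
    # here hex node ids for the 'bookmarks' namespace):
    #
    #   marks = repo.listkeys('bookmarks')   # {bookmark name: hex node}
    #   repo.pushkey('bookmarks', 'feature-x',
    #                marks.get('feature-x', ''), hex(newnode))
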
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                # if src and dest refer to the same file, vfs.rename is a
                # no-op, leaving both src and dest on disk. delete dest to
                # make sure the rename couldn't be such a no-op.
                vfs.unlink(dest)
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

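# Added note (hedged): the transaction machinery typically receives the
# callback returned by aftertrans as its 'after' hook, so that when the
# transaction closes, journal files are renamed to their undo counterparts,
# roughly:
#
#   tr = transaction.transaction(..., "journal", "undo", aftertrans(renames),
#                                ...)
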
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
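    # e.g. (illustrative): 'journal.dirstate' -> 'undo.dirstate'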
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
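
# Sketch (hypothetical extension code, not part of this module): an extension
# could wrap newreporequirements to add its own requirement to repositories
# it creates; the config knob and requirement name below are made up:
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       requirements = orig(repo)
#       if repo.ui.configbool('myext', 'enabled', False):
#           requirements.add('exp-myext-feature')
#       return requirements
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)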