localrepo: don't use mutable default argument value
Pierre-Yves David
r31412:ecc87acb default
@@ -1,2076 +1,2078 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 vfs as vfsmod,
63 63 )
64 64
65 65 release = lockmod.release
66 66 urlerr = util.urlerr
67 67 urlreq = util.urlreq
68 68
69 69 class repofilecache(scmutil.filecache):
70 70 """All filecache usage on repo are done for logic that should be unfiltered
71 71 """
72 72
73 73 def join(self, obj, fname):
74 74 return obj.vfs.join(fname)
75 75 def __get__(self, repo, type=None):
76 76 if repo is None:
77 77 return self
78 78 return super(repofilecache, self).__get__(repo.unfiltered(), type)
79 79 def __set__(self, repo, value):
80 80 return super(repofilecache, self).__set__(repo.unfiltered(), value)
81 81 def __delete__(self, repo):
82 82 return super(repofilecache, self).__delete__(repo.unfiltered())
83 83
84 84 class storecache(repofilecache):
85 85 """filecache for files in the store"""
86 86 def join(self, obj, fname):
87 87 return obj.sjoin(fname)
88 88
89 89 class unfilteredpropertycache(util.propertycache):
90 90 """propertycache that apply to unfiltered repo only"""
91 91
92 92 def __get__(self, repo, type=None):
93 93 unfi = repo.unfiltered()
94 94 if unfi is repo:
95 95 return super(unfilteredpropertycache, self).__get__(unfi)
96 96 return getattr(unfi, self.name)
97 97
98 98 class filteredpropertycache(util.propertycache):
99 99 """propertycache that must take filtering in account"""
100 100
101 101 def cachevalue(self, obj, value):
102 102 object.__setattr__(obj, self.name, value)
103 103
104 104
105 105 def hasunfilteredcache(repo, name):
106 106 """check if a repo has an unfilteredpropertycache value for <name>"""
107 107 return name in vars(repo.unfiltered())
108 108
109 109 def unfilteredmethod(orig):
110 110 """decorate method that always need to be run on unfiltered version"""
111 111 def wrapper(repo, *args, **kwargs):
112 112 return orig(repo.unfiltered(), *args, **kwargs)
113 113 return wrapper
114 114
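# A minimal sketch of the descriptor pattern the caches above rely on
# (an assumption about util.propertycache's shape, not a verbatim copy):
# compute the value once on first access, then cache it on the instance
# so later lookups bypass the descriptor.
#
#     class propertycache(object):
#         def __init__(self, func):
#             self.func = func
#             self.name = func.__name__
#         def __get__(self, obj, type=None):
#             result = self.func(obj)
#             self.cachevalue(obj, result)
#             return result
#         def cachevalue(self, obj, value):
#             # non-data descriptor: the cached instance attribute
#             # shadows the descriptor on subsequent lookups
#             obj.__dict__[self.name] = value
#
# filteredpropertycache above overrides cachevalue, and
# unfilteredpropertycache redirects the lookup to the unfiltered repo.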
115 115 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
116 116 'unbundle'))
117 117 legacycaps = moderncaps.union(set(['changegroupsubset']))
118 118
119 119 class localpeer(peer.peerrepository):
120 120 '''peer for a local repo; reflects only the most recent API'''
121 121
122 def __init__(self, repo, caps=moderncaps):
122 def __init__(self, repo, caps=None):
123 if caps is None:
124 caps = moderncaps.copy()
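        # Why the guard above (the point of this changeset): a mutable
        # default such as ``caps=moderncaps`` is evaluated once, at
        # function definition time, so every call would share the single
        # moderncaps set and any mutation would leak across calls. A
        # minimal illustration of the pitfall:
        #
        #     def bad(caps=set()):
        #         caps.add('x')
        #         return caps
        #     bad() is bad()    # True: both calls mutated one shared set
        #
        # ``caps=None`` plus ``moderncaps.copy()`` gives each call its
        # own private set.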
123 125 peer.peerrepository.__init__(self)
124 126 self._repo = repo.filtered('served')
125 127 self.ui = repo.ui
126 128 self._caps = repo._restrictcapabilities(caps)
127 129 self.requirements = repo.requirements
128 130 self.supportedformats = repo.supportedformats
129 131
130 132 def close(self):
131 133 self._repo.close()
132 134
133 135 def _capabilities(self):
134 136 return self._caps
135 137
136 138 def local(self):
137 139 return self._repo
138 140
139 141 def canpush(self):
140 142 return True
141 143
142 144 def url(self):
143 145 return self._repo.url()
144 146
145 147 def lookup(self, key):
146 148 return self._repo.lookup(key)
147 149
148 150 def branchmap(self):
149 151 return self._repo.branchmap()
150 152
151 153 def heads(self):
152 154 return self._repo.heads()
153 155
154 156 def known(self, nodes):
155 157 return self._repo.known(nodes)
156 158
157 159 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
158 160 **kwargs):
159 161 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
160 162 common=common, bundlecaps=bundlecaps,
161 163 **kwargs)
162 164 cb = util.chunkbuffer(chunks)
163 165
164 166 if bundlecaps is not None and 'HG20' in bundlecaps:
165 167 # When requesting a bundle2, getbundle returns a stream to make the
166 168 # wire level function happier. We need to build a proper object
167 169 # from it in local peer.
168 170 return bundle2.getunbundler(self.ui, cb)
169 171 else:
170 172 return changegroup.getunbundler('01', cb, None)
171 173
172 174 # TODO We might want to move the next two calls into legacypeer and add
173 175 # unbundle instead.
174 176
175 177 def unbundle(self, cg, heads, url):
176 178 """apply a bundle on a repo
177 179
178 180 This function handles the repo locking itself."""
179 181 try:
180 182 try:
181 183 cg = exchange.readbundle(self.ui, cg, None)
182 184 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
183 185 if util.safehasattr(ret, 'getchunks'):
184 186 # This is a bundle20 object, turn it into an unbundler.
185 187 # This little dance should be dropped eventually when the
186 188 # API is finally improved.
187 189 stream = util.chunkbuffer(ret.getchunks())
188 190 ret = bundle2.getunbundler(self.ui, stream)
189 191 return ret
190 192 except Exception as exc:
191 193 # If the exception contains output salvaged from a bundle2
192 194 # reply, we need to make sure it is printed before continuing
193 195 # to fail. So we build a bundle2 with such output and consume
194 196 # it directly.
195 197 #
196 198 # This is not very elegant but allows a "simple" solution for
197 199 # issue4594
198 200 output = getattr(exc, '_bundle2salvagedoutput', ())
199 201 if output:
200 202 bundler = bundle2.bundle20(self._repo.ui)
201 203 for out in output:
202 204 bundler.addpart(out)
203 205 stream = util.chunkbuffer(bundler.getchunks())
204 206 b = bundle2.getunbundler(self.ui, stream)
205 207 bundle2.processbundle(self._repo, b)
206 208 raise
207 209 except error.PushRaced as exc:
208 210 raise error.ResponseError(_('push failed:'), str(exc))
209 211
210 212 def lock(self):
211 213 return self._repo.lock()
212 214
213 215 def addchangegroup(self, cg, source, url):
214 216 return cg.apply(self._repo, source, url)
215 217
216 218 def pushkey(self, namespace, key, old, new):
217 219 return self._repo.pushkey(namespace, key, old, new)
218 220
219 221 def listkeys(self, namespace):
220 222 return self._repo.listkeys(namespace)
221 223
222 224 def debugwireargs(self, one, two, three=None, four=None, five=None):
223 225 '''used to test argument passing over the wire'''
224 226 return "%s %s %s %s %s" % (one, two, three, four, five)
225 227
226 228 class locallegacypeer(localpeer):
227 229 '''peer extension which implements legacy methods too; used for tests with
228 230 restricted capabilities'''
229 231
230 232 def __init__(self, repo):
231 233 localpeer.__init__(self, repo, caps=legacycaps)
232 234
233 235 def branches(self, nodes):
234 236 return self._repo.branches(nodes)
235 237
236 238 def between(self, pairs):
237 239 return self._repo.between(pairs)
238 240
239 241 def changegroup(self, basenodes, source):
240 242 return changegroup.changegroup(self._repo, basenodes, source)
241 243
242 244 def changegroupsubset(self, bases, heads, source):
243 245 return changegroup.changegroupsubset(self._repo, bases, heads, source)
244 246
245 247 class localrepository(object):
246 248
247 249 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
248 250 'manifestv2'))
249 251 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
250 252 'relshared', 'dotencode'))
251 253 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
252 254 filtername = None
253 255
254 256 # a list of (ui, featureset) functions.
255 257 # only functions defined in module of enabled extensions are invoked
256 258 featuresetupfuncs = set()
257 259
258 260 def __init__(self, baseui, path, create=False):
259 261 self.requirements = set()
260 262 # vfs to access the working copy
261 263 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
262 264 # vfs to access the content of the repository
263 265 self.vfs = None
264 266 # vfs to access the store part of the repository
265 267 self.svfs = None
266 268 self.root = self.wvfs.base
267 269 self.path = self.wvfs.join(".hg")
268 270 self.origroot = path
269 271 self.auditor = pathutil.pathauditor(self.root, self._checknested)
270 272 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
271 273 realfs=False)
272 274 self.vfs = vfsmod.vfs(self.path)
273 275 self.baseui = baseui
274 276 self.ui = baseui.copy()
275 277 self.ui.copy = baseui.copy # prevent copying repo configuration
276 278 # A list of callbacks to shape the phase if no data were found.
277 279 # Callbacks are in the form: func(repo, roots) --> processed root.
278 280 # This list is to be filled by extensions during repo setup.
279 281 self._phasedefaults = []
280 282 try:
281 283 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
282 284 self._loadextensions()
283 285 except IOError:
284 286 pass
285 287
286 288 if self.featuresetupfuncs:
287 289 self.supported = set(self._basesupported) # use private copy
288 290 extmods = set(m.__name__ for n, m
289 291 in extensions.extensions(self.ui))
290 292 for setupfunc in self.featuresetupfuncs:
291 293 if setupfunc.__module__ in extmods:
292 294 setupfunc(self.ui, self.supported)
293 295 else:
294 296 self.supported = self._basesupported
295 297 color.setup(self.ui)
296 298
297 299 # Add compression engines.
298 300 for name in util.compengines:
299 301 engine = util.compengines[name]
300 302 if engine.revlogheader():
301 303 self.supported.add('exp-compression-%s' % name)
302 304
303 305 if not self.vfs.isdir():
304 306 if create:
305 307 self.requirements = newreporequirements(self)
306 308
307 309 if not self.wvfs.exists():
308 310 self.wvfs.makedirs()
309 311 self.vfs.makedir(notindexed=True)
310 312
311 313 if 'store' in self.requirements:
312 314 self.vfs.mkdir("store")
313 315
314 316 # create an invalid changelog
315 317 self.vfs.append(
316 318 "00changelog.i",
317 319 '\0\0\0\2' # represents revlogv2
318 320 ' dummy changelog to prevent using the old repo layout'
319 321 )
320 322 else:
321 323 raise error.RepoError(_("repository %s not found") % path)
322 324 elif create:
323 325 raise error.RepoError(_("repository %s already exists") % path)
324 326 else:
325 327 try:
326 328 self.requirements = scmutil.readrequires(
327 329 self.vfs, self.supported)
328 330 except IOError as inst:
329 331 if inst.errno != errno.ENOENT:
330 332 raise
331 333
332 334 self.sharedpath = self.path
333 335 try:
334 336 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
335 337 if 'relshared' in self.requirements:
336 338 sharedpath = self.vfs.join(sharedpath)
337 339 vfs = vfsmod.vfs(sharedpath, realpath=True)
338 340 s = vfs.base
339 341 if not vfs.exists():
340 342 raise error.RepoError(
341 343 _('.hg/sharedpath points to nonexistent directory %s') % s)
342 344 self.sharedpath = s
343 345 except IOError as inst:
344 346 if inst.errno != errno.ENOENT:
345 347 raise
346 348
347 349 self.store = store.store(
348 350 self.requirements, self.sharedpath, vfsmod.vfs)
349 351 self.spath = self.store.path
350 352 self.svfs = self.store.vfs
351 353 self.sjoin = self.store.join
352 354 self.vfs.createmode = self.store.createmode
353 355 self._applyopenerreqs()
354 356 if create:
355 357 self._writerequirements()
356 358
357 359 self._dirstatevalidatewarned = False
358 360
359 361 self._branchcaches = {}
360 362 self._revbranchcache = None
361 363 self.filterpats = {}
362 364 self._datafilters = {}
363 365 self._transref = self._lockref = self._wlockref = None
364 366
365 367 # A cache for various files under .hg/ that tracks file changes,
366 368 # (used by the filecache decorator)
367 369 #
368 370 # Maps a property name to its util.filecacheentry
369 371 self._filecache = {}
370 372
371 373 # hold sets of revisions to be filtered
372 374 # should be cleared when something might have changed the filter value:
373 375 # - new changesets,
374 376 # - phase change,
375 377 # - new obsolescence marker,
376 378 # - working directory parent change,
377 379 # - bookmark changes
378 380 self.filteredrevcache = {}
379 381
380 382 # generic mapping between names and nodes
381 383 self.names = namespaces.namespaces()
382 384
383 385 @property
384 386 def wopener(self):
385 387 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
386 388 return self.wvfs
387 389
388 390 @property
389 391 def opener(self):
390 392 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
391 393 return self.vfs
392 394
393 395 def close(self):
394 396 self._writecaches()
395 397
396 398 def _loadextensions(self):
397 399 extensions.loadall(self.ui)
398 400
399 401 def _writecaches(self):
400 402 if self._revbranchcache:
401 403 self._revbranchcache.write()
402 404
403 405 def _restrictcapabilities(self, caps):
404 406 if self.ui.configbool('experimental', 'bundle2-advertise', True):
405 407 caps = set(caps)
406 408 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
407 409 caps.add('bundle2=' + urlreq.quote(capsblob))
408 410 return caps
409 411
410 412 def _applyopenerreqs(self):
411 413 self.svfs.options = dict((r, 1) for r in self.requirements
412 414 if r in self.openerreqs)
413 415 # experimental config: format.chunkcachesize
414 416 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
415 417 if chunkcachesize is not None:
416 418 self.svfs.options['chunkcachesize'] = chunkcachesize
417 419 # experimental config: format.maxchainlen
418 420 maxchainlen = self.ui.configint('format', 'maxchainlen')
419 421 if maxchainlen is not None:
420 422 self.svfs.options['maxchainlen'] = maxchainlen
421 423 # experimental config: format.manifestcachesize
422 424 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
423 425 if manifestcachesize is not None:
424 426 self.svfs.options['manifestcachesize'] = manifestcachesize
425 427 # experimental config: format.aggressivemergedeltas
426 428 aggressivemergedeltas = self.ui.configbool('format',
427 429 'aggressivemergedeltas', False)
428 430 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
429 431 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
430 432
431 433 for r in self.requirements:
432 434 if r.startswith('exp-compression-'):
433 435 self.svfs.options['compengine'] = r[len('exp-compression-'):]
434 436
435 437 def _writerequirements(self):
436 438 scmutil.writerequires(self.vfs, self.requirements)
437 439
438 440 def _checknested(self, path):
439 441 """Determine if path is a legal nested repository."""
440 442 if not path.startswith(self.root):
441 443 return False
442 444 subpath = path[len(self.root) + 1:]
443 445 normsubpath = util.pconvert(subpath)
444 446
445 447 # XXX: Checking against the current working copy is wrong in
446 448 # the sense that it can reject things like
447 449 #
448 450 # $ hg cat -r 10 sub/x.txt
449 451 #
450 452 # if sub/ is no longer a subrepository in the working copy
451 453 # parent revision.
452 454 #
453 455 # However, it can of course also allow things that would have
454 456 # been rejected before, such as the above cat command if sub/
455 457 # is a subrepository now, but was a normal directory before.
456 458 # The old path auditor would have rejected by mistake since it
457 459 # panics when it sees sub/.hg/.
458 460 #
459 461 # All in all, checking against the working copy seems sensible
460 462 # since we want to prevent access to nested repositories on
461 463 # the filesystem *now*.
462 464 ctx = self[None]
463 465 parts = util.splitpath(subpath)
464 466 while parts:
465 467 prefix = '/'.join(parts)
466 468 if prefix in ctx.substate:
467 469 if prefix == normsubpath:
468 470 return True
469 471 else:
470 472 sub = ctx.sub(prefix)
471 473 return sub.checknested(subpath[len(prefix) + 1:])
472 474 else:
473 475 parts.pop()
474 476 return False
475 477
476 478 def peer(self):
477 479 return localpeer(self) # not cached to avoid reference cycle
478 480
479 481 def unfiltered(self):
480 482 """Return unfiltered version of the repository
481 483
482 484 Intended to be overwritten by filtered repo."""
483 485 return self
484 486
485 487 def filtered(self, name):
486 488 """Return a filtered version of a repository"""
487 489 # build a new class with the mixin and the current class
488 490 # (possibly subclass of the repo)
489 491 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
490 492 pass
491 493 return filteredrepo(self, name)
492 494
493 495 @repofilecache('bookmarks', 'bookmarks.current')
494 496 def _bookmarks(self):
495 497 return bookmarks.bmstore(self)
496 498
497 499 @property
498 500 def _activebookmark(self):
499 501 return self._bookmarks.active
500 502
501 503 def bookmarkheads(self, bookmark):
502 504 name = bookmark.split('@', 1)[0]
503 505 heads = []
504 506 for mark, n in self._bookmarks.iteritems():
505 507 if mark.split('@', 1)[0] == name:
506 508 heads.append(n)
507 509 return heads
508 510
509 511 # _phaserevs and _phasesets depend on changelog. What we need is to
510 512 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
511 513 # can't be easily expressed in the filecache mechanism.
512 514 @storecache('phaseroots', '00changelog.i')
513 515 def _phasecache(self):
514 516 return phases.phasecache(self, self._phasedefaults)
515 517
516 518 @storecache('obsstore')
517 519 def obsstore(self):
518 520 # read default format for new obsstore.
519 521 # developer config: format.obsstore-version
520 522 defaultformat = self.ui.configint('format', 'obsstore-version', None)
521 523 # rely on obsstore class default when possible.
522 524 kwargs = {}
523 525 if defaultformat is not None:
524 526 kwargs['defaultformat'] = defaultformat
525 527 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
526 528 store = obsolete.obsstore(self.svfs, readonly=readonly,
527 529 **kwargs)
528 530 if store and readonly:
529 531 self.ui.warn(
530 532 _('obsolete feature not enabled but %i markers found!\n')
531 533 % len(list(store)))
532 534 return store
533 535
534 536 @storecache('00changelog.i')
535 537 def changelog(self):
536 538 c = changelog.changelog(self.svfs)
537 539 if txnutil.mayhavepending(self.root):
538 540 c.readpending('00changelog.i.a')
539 541 return c
540 542
541 543 def _constructmanifest(self):
542 544 # This is a temporary function while we migrate from manifest to
543 545 # manifestlog. It allows bundlerepo and unionrepo to intercept the
544 546 # manifest creation.
545 547 return manifest.manifestrevlog(self.svfs)
546 548
547 549 @storecache('00manifest.i')
548 550 def manifestlog(self):
549 551 return manifest.manifestlog(self.svfs, self)
550 552
551 553 @repofilecache('dirstate')
552 554 def dirstate(self):
553 555 return dirstate.dirstate(self.vfs, self.ui, self.root,
554 556 self._dirstatevalidate)
555 557
556 558 def _dirstatevalidate(self, node):
557 559 try:
558 560 self.changelog.rev(node)
559 561 return node
560 562 except error.LookupError:
561 563 if not self._dirstatevalidatewarned:
562 564 self._dirstatevalidatewarned = True
563 565 self.ui.warn(_("warning: ignoring unknown"
564 566 " working parent %s!\n") % short(node))
565 567 return nullid
566 568
567 569 def __getitem__(self, changeid):
568 570 if changeid is None or changeid == wdirrev:
569 571 return context.workingctx(self)
570 572 if isinstance(changeid, slice):
571 573 return [context.changectx(self, i)
572 574 for i in xrange(*changeid.indices(len(self)))
573 575 if i not in self.changelog.filteredrevs]
574 576 return context.changectx(self, changeid)
575 577
576 578 def __contains__(self, changeid):
577 579 try:
578 580 self[changeid]
579 581 return True
580 582 except error.RepoLookupError:
581 583 return False
582 584
583 585 def __nonzero__(self):
584 586 return True
585 587
586 588 def __len__(self):
587 589 return len(self.changelog)
588 590
589 591 def __iter__(self):
590 592 return iter(self.changelog)
591 593
592 594 def revs(self, expr, *args):
593 595 '''Find revisions matching a revset.
594 596
595 597 The revset is specified as a string ``expr`` that may contain
596 598 %-formatting to escape certain types. See ``revsetlang.formatspec``.
597 599
598 600 Revset aliases from the configuration are not expanded. To expand
599 601 user aliases, consider calling ``scmutil.revrange()`` or
600 602 ``repo.anyrevs([expr], user=True)``.
601 603
602 604 Returns a revset.abstractsmartset, which is a list-like interface
603 605 that contains integer revisions.
604 606 '''
605 607 expr = revsetlang.formatspec(expr, *args)
606 608 m = revset.match(None, expr)
607 609 return m(self)
608 610
609 611 def set(self, expr, *args):
610 612 '''Find revisions matching a revset and emit changectx instances.
611 613
612 614 This is a convenience wrapper around ``revs()`` that iterates the
613 615 result and is a generator of changectx instances.
614 616
615 617 Revset aliases from the configuration are not expanded. To expand
616 618 user aliases, consider calling ``scmutil.revrange()``.
617 619 '''
618 620 for r in self.revs(expr, *args):
619 621 yield self[r]
620 622
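    # Example queries against revs() and set() above (a sketch; arguments
    # are escaped by type through revsetlang.formatspec):
    #
    #     repo.revs('%d::%d', 4, 7)                 # %d escapes an int rev
    #     for ctx in repo.set('branch(%s)', 'default'):
    #         pass                                  # set() yields changectx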
621 623 def anyrevs(self, specs, user=False):
622 624 '''Find revisions matching one of the given revsets.
623 625
624 626 Revset aliases from the configuration are not expanded by default. To
625 627 expand user aliases, specify ``user=True``.
626 628 '''
627 629 if user:
628 630 m = revset.matchany(self.ui, specs, repo=self)
629 631 else:
630 632 m = revset.matchany(None, specs)
631 633 return m(self)
632 634
633 635 def url(self):
634 636 return 'file:' + self.root
635 637
636 638 def hook(self, name, throw=False, **args):
637 639 """Call a hook, passing this repo instance.
638 640
639 641 This is a convenience method to aid invoking hooks. Extensions likely
640 642 won't call this unless they have registered a custom hook or are
641 643 replacing code that is expected to call a hook.
642 644 """
643 645 return hook.hook(self.ui, self, name, throw, **args)
644 646
645 647 @unfilteredmethod
646 648 def _tag(self, names, node, message, local, user, date, extra=None,
647 649 editor=False):
648 650 if isinstance(names, str):
649 651 names = (names,)
650 652
651 653 branches = self.branchmap()
652 654 for name in names:
653 655 self.hook('pretag', throw=True, node=hex(node), tag=name,
654 656 local=local)
655 657 if name in branches:
656 658 self.ui.warn(_("warning: tag %s conflicts with existing"
657 659 " branch name\n") % name)
658 660
659 661 def writetags(fp, names, munge, prevtags):
660 662 fp.seek(0, 2)
661 663 if prevtags and prevtags[-1] != '\n':
662 664 fp.write('\n')
663 665 for name in names:
664 666 if munge:
665 667 m = munge(name)
666 668 else:
667 669 m = name
668 670
669 671 if (self._tagscache.tagtypes and
670 672 name in self._tagscache.tagtypes):
671 673 old = self.tags().get(name, nullid)
672 674 fp.write('%s %s\n' % (hex(old), m))
673 675 fp.write('%s %s\n' % (hex(node), m))
674 676 fp.close()
675 677
676 678 prevtags = ''
677 679 if local:
678 680 try:
679 681 fp = self.vfs('localtags', 'r+')
680 682 except IOError:
681 683 fp = self.vfs('localtags', 'a')
682 684 else:
683 685 prevtags = fp.read()
684 686
685 687 # local tags are stored in the current charset
686 688 writetags(fp, names, None, prevtags)
687 689 for name in names:
688 690 self.hook('tag', node=hex(node), tag=name, local=local)
689 691 return
690 692
691 693 try:
692 694 fp = self.wfile('.hgtags', 'rb+')
693 695 except IOError as e:
694 696 if e.errno != errno.ENOENT:
695 697 raise
696 698 fp = self.wfile('.hgtags', 'ab')
697 699 else:
698 700 prevtags = fp.read()
699 701
700 702 # committed tags are stored in UTF-8
701 703 writetags(fp, names, encoding.fromlocal, prevtags)
702 704
703 705 fp.close()
704 706
705 707 self.invalidatecaches()
706 708
707 709 if '.hgtags' not in self.dirstate:
708 710 self[None].add(['.hgtags'])
709 711
710 712 m = matchmod.exact(self.root, '', ['.hgtags'])
711 713 tagnode = self.commit(message, user, date, extra=extra, match=m,
712 714 editor=editor)
713 715
714 716 for name in names:
715 717 self.hook('tag', node=hex(node), tag=name, local=local)
716 718
717 719 return tagnode
718 720
719 721 def tag(self, names, node, message, local, user, date, editor=False):
720 722 '''tag a revision with one or more symbolic names.
721 723
722 724 names is a list of strings or, when adding a single tag, names may be a
723 725 string.
724 726
725 727 if local is True, the tags are stored in a per-repository file.
726 728 otherwise, they are stored in the .hgtags file, and a new
727 729 changeset is committed with the change.
728 730
729 731 keyword arguments:
730 732
731 733 local: whether to store tags in non-version-controlled file
732 734 (default False)
733 735
734 736 message: commit message to use if committing
735 737
736 738 user: name of user to use if committing
737 739
738 740 date: date tuple to use if committing'''
739 741
740 742 if not local:
741 743 m = matchmod.exact(self.root, '', ['.hgtags'])
742 744 if any(self.status(match=m, unknown=True, ignored=True)):
743 745 raise error.Abort(_('working copy of .hgtags is changed'),
744 746 hint=_('please commit .hgtags manually'))
745 747
746 748 self.tags() # instantiate the cache
747 749 self._tag(names, node, message, local, user, date, editor=editor)
748 750
749 751 @filteredpropertycache
750 752 def _tagscache(self):
751 753 '''Returns a tagscache object that contains various tags related
752 754 caches.'''
753 755
754 756 # This simplifies its cache management by having one decorated
755 757 # function (this one) and the rest simply fetch things from it.
756 758 class tagscache(object):
757 759 def __init__(self):
758 760 # These two define the set of tags for this repository. tags
759 761 # maps tag name to node; tagtypes maps tag name to 'global' or
760 762 # 'local'. (Global tags are defined by .hgtags across all
761 763 # heads, and local tags are defined in .hg/localtags.)
762 764 # They constitute the in-memory cache of tags.
763 765 self.tags = self.tagtypes = None
764 766
765 767 self.nodetagscache = self.tagslist = None
766 768
767 769 cache = tagscache()
768 770 cache.tags, cache.tagtypes = self._findtags()
769 771
770 772 return cache
771 773
772 774 def tags(self):
773 775 '''return a mapping of tag to node'''
774 776 t = {}
775 777 if self.changelog.filteredrevs:
776 778 tags, tt = self._findtags()
777 779 else:
778 780 tags = self._tagscache.tags
779 781 for k, v in tags.iteritems():
780 782 try:
781 783 # ignore tags to unknown nodes
782 784 self.changelog.rev(v)
783 785 t[k] = v
784 786 except (error.LookupError, ValueError):
785 787 pass
786 788 return t
787 789
788 790 def _findtags(self):
789 791 '''Do the hard work of finding tags. Return a pair of dicts
790 792 (tags, tagtypes) where tags maps tag name to node, and tagtypes
791 793 maps tag name to a string like \'global\' or \'local\'.
792 794 Subclasses or extensions are free to add their own tags, but
793 795 should be aware that the returned dicts will be retained for the
794 796 duration of the localrepo object.'''
795 797
796 798 # XXX what tagtype should subclasses/extensions use? Currently
797 799 # mq and bookmarks add tags, but do not set the tagtype at all.
798 800 # Should each extension invent its own tag type? Should there
799 801 # be one tagtype for all such "virtual" tags? Or is the status
800 802 # quo fine?
801 803
802 804 alltags = {} # map tag name to (node, hist)
803 805 tagtypes = {}
804 806
805 807 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
806 808 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
807 809
808 810 # Build the return dicts. Have to re-encode tag names because
809 811 # the tags module always uses UTF-8 (in order not to lose info
810 812 # writing to the cache), but the rest of Mercurial wants them in
811 813 # local encoding.
812 814 tags = {}
813 815 for (name, (node, hist)) in alltags.iteritems():
814 816 if node != nullid:
815 817 tags[encoding.tolocal(name)] = node
816 818 tags['tip'] = self.changelog.tip()
817 819 tagtypes = dict([(encoding.tolocal(name), value)
818 820 for (name, value) in tagtypes.iteritems()])
819 821 return (tags, tagtypes)
820 822
821 823 def tagtype(self, tagname):
822 824 '''
823 825 return the type of the given tag. result can be:
824 826
825 827 'local' : a local tag
826 828 'global' : a global tag
827 829 None : tag does not exist
828 830 '''
829 831
830 832 return self._tagscache.tagtypes.get(tagname)
831 833
832 834 def tagslist(self):
833 835 '''return a list of tags ordered by revision'''
834 836 if not self._tagscache.tagslist:
835 837 l = []
836 838 for t, n in self.tags().iteritems():
837 839 l.append((self.changelog.rev(n), t, n))
838 840 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
839 841
840 842 return self._tagscache.tagslist
841 843
842 844 def nodetags(self, node):
843 845 '''return the tags associated with a node'''
844 846 if not self._tagscache.nodetagscache:
845 847 nodetagscache = {}
846 848 for t, n in self._tagscache.tags.iteritems():
847 849 nodetagscache.setdefault(n, []).append(t)
848 850 for tags in nodetagscache.itervalues():
849 851 tags.sort()
850 852 self._tagscache.nodetagscache = nodetagscache
851 853 return self._tagscache.nodetagscache.get(node, [])
852 854
853 855 def nodebookmarks(self, node):
854 856 """return the list of bookmarks pointing to the specified node"""
855 857 marks = []
856 858 for bookmark, n in self._bookmarks.iteritems():
857 859 if n == node:
858 860 marks.append(bookmark)
859 861 return sorted(marks)
860 862
861 863 def branchmap(self):
862 864 '''returns a dictionary {branch: [branchheads]} with branchheads
863 865 ordered by increasing revision number'''
864 866 branchmap.updatecache(self)
865 867 return self._branchcaches[self.filtername]
866 868
867 869 @unfilteredmethod
868 870 def revbranchcache(self):
869 871 if not self._revbranchcache:
870 872 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
871 873 return self._revbranchcache
872 874
873 875 def branchtip(self, branch, ignoremissing=False):
874 876 '''return the tip node for a given branch
875 877
876 878 If ignoremissing is True, then this method will not raise an error.
877 879 This is helpful for callers that only expect None for a missing branch
878 880 (e.g. namespace).
879 881
880 882 '''
881 883 try:
882 884 return self.branchmap().branchtip(branch)
883 885 except KeyError:
884 886 if not ignoremissing:
885 887 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
886 888 else:
887 889 pass
888 890
889 891 def lookup(self, key):
890 892 return self[key].node()
891 893
892 894 def lookupbranch(self, key, remote=None):
893 895 repo = remote or self
894 896 if key in repo.branchmap():
895 897 return key
896 898
897 899 repo = (remote and remote.local()) and remote or self
898 900 return repo[key].branch()
899 901
900 902 def known(self, nodes):
901 903 cl = self.changelog
902 904 nm = cl.nodemap
903 905 filtered = cl.filteredrevs
904 906 result = []
905 907 for n in nodes:
906 908 r = nm.get(n)
907 909 resp = not (r is None or r in filtered)
908 910 result.append(resp)
909 911 return result
910 912
911 913 def local(self):
912 914 return self
913 915
914 916 def publishing(self):
915 917 # it's safe (and desirable) to trust the publish flag unconditionally
916 918 # so that we don't finalize changes shared between users via ssh or nfs
917 919 return self.ui.configbool('phases', 'publish', True, untrusted=True)
918 920
919 921 def cancopy(self):
920 922 # so statichttprepo's override of local() works
921 923 if not self.local():
922 924 return False
923 925 if not self.publishing():
924 926 return True
925 927 # if publishing we can't copy if there is filtered content
926 928 return not self.filtered('visible').changelog.filteredrevs
927 929
928 930 def shared(self):
929 931 '''the type of shared repository (None if not shared)'''
930 932 if self.sharedpath != self.path:
931 933 return 'store'
932 934 return None
933 935
934 936 def join(self, f, *insidef):
935 937 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
936 938 return self.vfs.join(os.path.join(f, *insidef))
937 939
938 940 def wjoin(self, f, *insidef):
939 941 return self.vfs.reljoin(self.root, f, *insidef)
940 942
941 943 def file(self, f):
942 944 if f[0] == '/':
943 945 f = f[1:]
944 946 return filelog.filelog(self.svfs, f)
945 947
946 948 def changectx(self, changeid):
947 949 return self[changeid]
948 950
949 951 def setparents(self, p1, p2=nullid):
950 952 self.dirstate.beginparentchange()
951 953 copies = self.dirstate.setparents(p1, p2)
952 954 pctx = self[p1]
953 955 if copies:
954 956 # Adjust copy records, the dirstate cannot do it, it
955 957 # requires access to parents manifests. Preserve them
956 958 # only for entries added to first parent.
957 959 for f in copies:
958 960 if f not in pctx and copies[f] in pctx:
959 961 self.dirstate.copy(copies[f], f)
960 962 if p2 == nullid:
961 963 for f, s in sorted(self.dirstate.copies().items()):
962 964 if f not in pctx and s not in pctx:
963 965 self.dirstate.copy(None, f)
964 966 self.dirstate.endparentchange()
965 967
966 968 def filectx(self, path, changeid=None, fileid=None):
967 969 """changeid can be a changeset revision, node, or tag.
968 970 fileid can be a file revision or node."""
969 971 return context.filectx(self, path, changeid, fileid)
970 972
971 973 def getcwd(self):
972 974 return self.dirstate.getcwd()
973 975
974 976 def pathto(self, f, cwd=None):
975 977 return self.dirstate.pathto(f, cwd)
976 978
977 979 def wfile(self, f, mode='r'):
978 980 return self.wvfs(f, mode)
979 981
980 982 def _link(self, f):
981 983 return self.wvfs.islink(f)
982 984
983 985 def _loadfilter(self, filter):
984 986 if filter not in self.filterpats:
985 987 l = []
986 988 for pat, cmd in self.ui.configitems(filter):
987 989 if cmd == '!':
988 990 continue
989 991 mf = matchmod.match(self.root, '', [pat])
990 992 fn = None
991 993 params = cmd
992 994 for name, filterfn in self._datafilters.iteritems():
993 995 if cmd.startswith(name):
994 996 fn = filterfn
995 997 params = cmd[len(name):].lstrip()
996 998 break
997 999 if not fn:
998 1000 fn = lambda s, c, **kwargs: util.filter(s, c)
999 1001 # Wrap old filters not supporting keyword arguments
1000 1002 if not inspect.getargspec(fn)[2]:
1001 1003 oldfn = fn
1002 1004 fn = lambda s, c, **kwargs: oldfn(s, c)
1003 1005 l.append((mf, fn, params))
1004 1006 self.filterpats[filter] = l
1005 1007 return self.filterpats[filter]
1006 1008
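    # Example hgrc configuration consumed by _loadfilter above (the gzip
    # filter example from the hgrc documentation):
    #
    #     [encode]
    #     # uncompress gzipped files on checkin to improve delta compression
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     # recompress files when writing them to the working directory
    #     *.gz = pipe: gzip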
1007 1009 def _filter(self, filterpats, filename, data):
1008 1010 for mf, fn, cmd in filterpats:
1009 1011 if mf(filename):
1010 1012 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1011 1013 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1012 1014 break
1013 1015
1014 1016 return data
1015 1017
1016 1018 @unfilteredpropertycache
1017 1019 def _encodefilterpats(self):
1018 1020 return self._loadfilter('encode')
1019 1021
1020 1022 @unfilteredpropertycache
1021 1023 def _decodefilterpats(self):
1022 1024 return self._loadfilter('decode')
1023 1025
1024 1026 def adddatafilter(self, name, filter):
1025 1027 self._datafilters[name] = filter
1026 1028
1027 1029 def wread(self, filename):
1028 1030 if self._link(filename):
1029 1031 data = self.wvfs.readlink(filename)
1030 1032 else:
1031 1033 data = self.wvfs.read(filename)
1032 1034 return self._filter(self._encodefilterpats, filename, data)
1033 1035
1034 1036 def wwrite(self, filename, data, flags, backgroundclose=False):
1035 1037 """write ``data`` into ``filename`` in the working directory
1036 1038
1037 1039 This returns length of written (maybe decoded) data.
1038 1040 """
1039 1041 data = self._filter(self._decodefilterpats, filename, data)
1040 1042 if 'l' in flags:
1041 1043 self.wvfs.symlink(data, filename)
1042 1044 else:
1043 1045 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1044 1046 if 'x' in flags:
1045 1047 self.wvfs.setflags(filename, False, True)
1046 1048 return len(data)
1047 1049
1048 1050 def wwritedata(self, filename, data):
1049 1051 return self._filter(self._decodefilterpats, filename, data)
1050 1052
1051 1053 def currenttransaction(self):
1052 1054 """return the current transaction or None if non exists"""
1053 1055 if self._transref:
1054 1056 tr = self._transref()
1055 1057 else:
1056 1058 tr = None
1057 1059
1058 1060 if tr and tr.running():
1059 1061 return tr
1060 1062 return None
1061 1063
1062 1064 def transaction(self, desc, report=None):
1063 1065 if (self.ui.configbool('devel', 'all-warnings')
1064 1066 or self.ui.configbool('devel', 'check-locks')):
1065 1067 if self._currentlock(self._lockref) is None:
1066 1068 raise error.ProgrammingError('transaction requires locking')
1067 1069 tr = self.currenttransaction()
1068 1070 if tr is not None:
1069 1071 return tr.nest()
1070 1072
1071 1073 # abort here if the journal already exists
1072 1074 if self.svfs.exists("journal"):
1073 1075 raise error.RepoError(
1074 1076 _("abandoned transaction found"),
1075 1077 hint=_("run 'hg recover' to clean up transaction"))
1076 1078
1077 1079 idbase = "%.40f#%f" % (random.random(), time.time())
1078 1080 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1079 1081 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1080 1082
1081 1083 self._writejournal(desc)
1082 1084 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1083 1085 if report:
1084 1086 rp = report
1085 1087 else:
1086 1088 rp = self.ui.warn
1087 1089 vfsmap = {'plain': self.vfs} # root of .hg/
1088 1090 # we must avoid cyclic reference between repo and transaction.
1089 1091 reporef = weakref.ref(self)
1090 1092 def validate(tr):
1091 1093 """will run pre-closing hooks"""
1092 1094 reporef().hook('pretxnclose', throw=True,
1093 1095 txnname=desc, **tr.hookargs)
1094 1096 def releasefn(tr, success):
1095 1097 repo = reporef()
1096 1098 if success:
1097 1099 # this should be explicitly invoked here, because
1098 1100 # in-memory changes aren't written out at closing
1099 1101 # transaction, if tr.addfilegenerator (via
1100 1102 # dirstate.write or so) isn't invoked while
1101 1103 # transaction running
1102 1104 repo.dirstate.write(None)
1103 1105 else:
1104 1106 # discard all changes (including ones already written
1105 1107 # out) in this transaction
1106 1108 repo.dirstate.restorebackup(None, prefix='journal.')
1107 1109
1108 1110 repo.invalidate(clearfilecache=True)
1109 1111
1110 1112 tr = transaction.transaction(rp, self.svfs, vfsmap,
1111 1113 "journal",
1112 1114 "undo",
1113 1115 aftertrans(renames),
1114 1116 self.store.createmode,
1115 1117 validator=validate,
1116 1118 releasefn=releasefn)
1117 1119
1118 1120 tr.hookargs['txnid'] = txnid
1119 1121 # note: writing the fncache only during finalize means that the file is
1120 1122 # outdated when running hooks. As fncache is used for streaming clone,
1121 1123 # this is not expected to break anything that happens during the hooks.
1122 1124 tr.addfinalize('flush-fncache', self.store.write)
1123 1125 def txnclosehook(tr2):
1124 1126 """To be run if transaction is successful, will schedule a hook run
1125 1127 """
1126 1128 # Don't reference tr2 in hook() so we don't hold a reference.
1127 1129 # This reduces memory consumption when there are multiple
1128 1130 # transactions per lock. This can likely go away if issue5045
1129 1131 # fixes the function accumulation.
1130 1132 hookargs = tr2.hookargs
1131 1133
1132 1134 def hook():
1133 1135 reporef().hook('txnclose', throw=False, txnname=desc,
1134 1136 **hookargs)
1135 1137 reporef()._afterlock(hook)
1136 1138 tr.addfinalize('txnclose-hook', txnclosehook)
1137 1139 def txnaborthook(tr2):
1138 1140 """To be run if transaction is aborted
1139 1141 """
1140 1142 reporef().hook('txnabort', throw=False, txnname=desc,
1141 1143 **tr2.hookargs)
1142 1144 tr.addabort('txnabort-hook', txnaborthook)
1143 1145 # avoid eager cache invalidation. in-memory data should be identical
1144 1146 # to stored data if transaction has no error.
1145 1147 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1146 1148 self._transref = weakref.ref(tr)
1147 1149 return tr
1148 1150
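    # Illustrative transaction lifecycle (a sketch; 'example' is a
    # hypothetical description string, hook names are from above):
    #
    #     with repo.wlock(), repo.lock():
    #         tr = repo.transaction('example')
    #         try:
    #             pass          # stage store writes through tr
    #             tr.close()    # runs pretxnclose, then the txnclose hook
    #         finally:
    #             tr.release()  # runs the txnabort hook if close() never ran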
1149 1151 def _journalfiles(self):
1150 1152 return ((self.svfs, 'journal'),
1151 1153 (self.vfs, 'journal.dirstate'),
1152 1154 (self.vfs, 'journal.branch'),
1153 1155 (self.vfs, 'journal.desc'),
1154 1156 (self.vfs, 'journal.bookmarks'),
1155 1157 (self.svfs, 'journal.phaseroots'))
1156 1158
1157 1159 def undofiles(self):
1158 1160 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1159 1161
1160 1162 def _writejournal(self, desc):
1161 1163 self.dirstate.savebackup(None, prefix='journal.')
1162 1164 self.vfs.write("journal.branch",
1163 1165 encoding.fromlocal(self.dirstate.branch()))
1164 1166 self.vfs.write("journal.desc",
1165 1167 "%d\n%s\n" % (len(self), desc))
1166 1168 self.vfs.write("journal.bookmarks",
1167 1169 self.vfs.tryread("bookmarks"))
1168 1170 self.svfs.write("journal.phaseroots",
1169 1171 self.svfs.tryread("phaseroots"))
1170 1172
1171 1173 def recover(self):
1172 1174 with self.lock():
1173 1175 if self.svfs.exists("journal"):
1174 1176 self.ui.status(_("rolling back interrupted transaction\n"))
1175 1177 vfsmap = {'': self.svfs,
1176 1178 'plain': self.vfs,}
1177 1179 transaction.rollback(self.svfs, vfsmap, "journal",
1178 1180 self.ui.warn)
1179 1181 self.invalidate()
1180 1182 return True
1181 1183 else:
1182 1184 self.ui.warn(_("no interrupted transaction available\n"))
1183 1185 return False
1184 1186
1185 1187 def rollback(self, dryrun=False, force=False):
1186 1188 wlock = lock = dsguard = None
1187 1189 try:
1188 1190 wlock = self.wlock()
1189 1191 lock = self.lock()
1190 1192 if self.svfs.exists("undo"):
1191 1193 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1192 1194
1193 1195 return self._rollback(dryrun, force, dsguard)
1194 1196 else:
1195 1197 self.ui.warn(_("no rollback information available\n"))
1196 1198 return 1
1197 1199 finally:
1198 1200 release(dsguard, lock, wlock)
1199 1201
1200 1202 @unfilteredmethod # Until we get smarter cache management
1201 1203 def _rollback(self, dryrun, force, dsguard):
1202 1204 ui = self.ui
1203 1205 try:
1204 1206 args = self.vfs.read('undo.desc').splitlines()
1205 1207 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1206 1208 if len(args) >= 3:
1207 1209 detail = args[2]
1208 1210 oldtip = oldlen - 1
1209 1211
1210 1212 if detail and ui.verbose:
1211 1213 msg = (_('repository tip rolled back to revision %s'
1212 1214 ' (undo %s: %s)\n')
1213 1215 % (oldtip, desc, detail))
1214 1216 else:
1215 1217 msg = (_('repository tip rolled back to revision %s'
1216 1218 ' (undo %s)\n')
1217 1219 % (oldtip, desc))
1218 1220 except IOError:
1219 1221 msg = _('rolling back unknown transaction\n')
1220 1222 desc = None
1221 1223
1222 1224 if not force and self['.'] != self['tip'] and desc == 'commit':
1223 1225 raise error.Abort(
1224 1226 _('rollback of last commit while not checked out '
1225 1227 'may lose data'), hint=_('use -f to force'))
1226 1228
1227 1229 ui.status(msg)
1228 1230 if dryrun:
1229 1231 return 0
1230 1232
1231 1233 parents = self.dirstate.parents()
1232 1234 self.destroying()
1233 1235 vfsmap = {'plain': self.vfs, '': self.svfs}
1234 1236 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1235 1237 if self.vfs.exists('undo.bookmarks'):
1236 1238 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1237 1239 if self.svfs.exists('undo.phaseroots'):
1238 1240 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1239 1241 self.invalidate()
1240 1242
1241 1243 parentgone = (parents[0] not in self.changelog.nodemap or
1242 1244 parents[1] not in self.changelog.nodemap)
1243 1245 if parentgone:
1244 1246 # prevent dirstateguard from overwriting already restored one
1245 1247 dsguard.close()
1246 1248
1247 1249 self.dirstate.restorebackup(None, prefix='undo.')
1248 1250 try:
1249 1251 branch = self.vfs.read('undo.branch')
1250 1252 self.dirstate.setbranch(encoding.tolocal(branch))
1251 1253 except IOError:
1252 1254 ui.warn(_('named branch could not be reset: '
1253 1255 'current branch is still \'%s\'\n')
1254 1256 % self.dirstate.branch())
1255 1257
1256 1258 parents = tuple([p.rev() for p in self[None].parents()])
1257 1259 if len(parents) > 1:
1258 1260 ui.status(_('working directory now based on '
1259 1261 'revisions %d and %d\n') % parents)
1260 1262 else:
1261 1263 ui.status(_('working directory now based on '
1262 1264 'revision %d\n') % parents)
1263 1265 mergemod.mergestate.clean(self, self['.'].node())
1264 1266
1265 1267 # TODO: if we know which new heads may result from this rollback, pass
1266 1268 # them to destroy(), which will prevent the branchhead cache from being
1267 1269 # invalidated.
1268 1270 self.destroyed()
1269 1271 return 0
1270 1272
1271 1273 def invalidatecaches(self):
1272 1274
1273 1275 if '_tagscache' in vars(self):
1274 1276 # can't use delattr on proxy
1275 1277 del self.__dict__['_tagscache']
1276 1278
1277 1279 self.unfiltered()._branchcaches.clear()
1278 1280 self.invalidatevolatilesets()
1279 1281
1280 1282 def invalidatevolatilesets(self):
1281 1283 self.filteredrevcache.clear()
1282 1284 obsolete.clearobscaches(self)
1283 1285
1284 1286 def invalidatedirstate(self):
1285 1287 '''Invalidates the dirstate, causing the next call to dirstate
1286 1288 to check whether it was modified since the last time it was read,
1287 1289 rereading it if it has been.
1288 1290
1289 1291 This is different from dirstate.invalidate() in that it doesn't always
1290 1292 reread the dirstate. Use dirstate.invalidate() if you want to
1291 1293 explicitly read the dirstate again (i.e. restoring it to a previous
1292 1294 known good state).'''
1293 1295 if hasunfilteredcache(self, 'dirstate'):
1294 1296 for k in self.dirstate._filecache:
1295 1297 try:
1296 1298 delattr(self.dirstate, k)
1297 1299 except AttributeError:
1298 1300 pass
1299 1301 delattr(self.unfiltered(), 'dirstate')
1300 1302
1301 1303 def invalidate(self, clearfilecache=False):
1302 1304 '''Invalidates both store and non-store parts other than dirstate
1303 1305
1304 1306 If a transaction is running, invalidation of store is omitted,
1305 1307 because discarding in-memory changes might cause inconsistency
1306 1308 (e.g. an incomplete fncache causes unintentional failure, but
1307 1309 a redundant one doesn't).
1308 1310 '''
1309 1311 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1310 1312 for k in self._filecache.keys():
1311 1313 # dirstate is invalidated separately in invalidatedirstate()
1312 1314 if k == 'dirstate':
1313 1315 continue
1314 1316
1315 1317 if clearfilecache:
1316 1318 del self._filecache[k]
1317 1319 try:
1318 1320 delattr(unfiltered, k)
1319 1321 except AttributeError:
1320 1322 pass
1321 1323 self.invalidatecaches()
1322 1324 if not self.currenttransaction():
1323 1325 # TODO: Changing contents of store outside transaction
1324 1326 # causes inconsistency. We should make in-memory store
1325 1327 # changes detectable, and abort if changed.
1326 1328 self.store.invalidatecaches()
1327 1329
1328 1330 def invalidateall(self):
1329 1331 '''Fully invalidates both store and non-store parts, causing the
1330 1332 subsequent operation to reread any outside changes.'''
1331 1333 # extension should hook this to invalidate its caches
1332 1334 self.invalidate()
1333 1335 self.invalidatedirstate()
1334 1336
1335 1337 @unfilteredmethod
1336 1338 def _refreshfilecachestats(self, tr):
1337 1339 """Reload stats of cached files so that they are flagged as valid"""
1338 1340 for k, ce in self._filecache.items():
1339 1341 if k == 'dirstate' or k not in self.__dict__:
1340 1342 continue
1341 1343 ce.refresh()
1342 1344
1343 1345 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1344 1346 inheritchecker=None, parentenvvar=None):
1345 1347 parentlock = None
1346 1348 # the contents of parentenvvar are used by the underlying lock to
1347 1349 # determine whether it can be inherited
1348 1350 if parentenvvar is not None:
1349 1351 parentlock = encoding.environ.get(parentenvvar)
1350 1352 try:
1351 1353 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1352 1354 acquirefn=acquirefn, desc=desc,
1353 1355 inheritchecker=inheritchecker,
1354 1356 parentlock=parentlock)
1355 1357 except error.LockHeld as inst:
1356 1358 if not wait:
1357 1359 raise
1358 1360 # show more details for new-style locks
1359 1361 if ':' in inst.locker:
1360 1362 host, pid = inst.locker.split(":", 1)
1361 1363 self.ui.warn(
1362 1364 _("waiting for lock on %s held by process %r "
1363 1365 "on host %r\n") % (desc, pid, host))
1364 1366 else:
1365 1367 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1366 1368 (desc, inst.locker))
1367 1369 # default to 600 seconds timeout
1368 1370 l = lockmod.lock(vfs, lockname,
1369 1371 int(self.ui.config("ui", "timeout", "600")),
1370 1372 releasefn=releasefn, acquirefn=acquirefn,
1371 1373 desc=desc)
1372 1374 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1373 1375 return l
1374 1376
1375 1377 def _afterlock(self, callback):
1376 1378 """add a callback to be run when the repository is fully unlocked
1377 1379
1378 1380 The callback will be executed when the outermost lock is released
1379 1381 (with wlock being higher level than 'lock')."""
1380 1382 for ref in (self._wlockref, self._lockref):
1381 1383 l = ref and ref()
1382 1384 if l and l.held:
1383 1385 l.postrelease.append(callback)
1384 1386 break
1385 1387 else: # no lock has been found.
1386 1388 callback()
1387 1389
1388 1390 def lock(self, wait=True):
1389 1391 '''Lock the repository store (.hg/store) and return a weak reference
1390 1392 to the lock. Use this before modifying the store (e.g. committing or
1391 1393 stripping). If you are opening a transaction, get a lock as well.
1392 1394
1393 1395 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1394 1396 'wlock' first to avoid a dead-lock hazard.'''
1395 1397 l = self._currentlock(self._lockref)
1396 1398 if l is not None:
1397 1399 l.lock()
1398 1400 return l
1399 1401
1400 1402 l = self._lock(self.svfs, "lock", wait, None,
1401 1403 self.invalidate, _('repository %s') % self.origroot)
1402 1404 self._lockref = weakref.ref(l)
1403 1405 return l
1404 1406
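    # Caller-side ordering implied by the docstring above (a sketch; lock
    # objects are context managers, as recover() demonstrates):
    #
    #     with repo.wlock():     # always take wlock first ...
    #         with repo.lock():  # ... then the store lock
    #             pass           # safe to modify .hg and .hg/store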
1405 1407 def _wlockchecktransaction(self):
1406 1408 if self.currenttransaction() is not None:
1407 1409 raise error.LockInheritanceContractViolation(
1408 1410 'wlock cannot be inherited in the middle of a transaction')
1409 1411
1410 1412 def wlock(self, wait=True):
1411 1413 '''Lock the non-store parts of the repository (everything under
1412 1414 .hg except .hg/store) and return a weak reference to the lock.
1413 1415
1414 1416 Use this before modifying files in .hg.
1415 1417
1416 1418 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1417 1419 'wlock' first to avoid a dead-lock hazard.'''
1418 1420 l = self._wlockref and self._wlockref()
1419 1421 if l is not None and l.held:
1420 1422 l.lock()
1421 1423 return l
1422 1424
1423 1425 # We do not need to check for non-waiting lock acquisition. Such
1424 1426 # acquisition would not cause dead-lock as it would just fail.
1425 1427 if wait and (self.ui.configbool('devel', 'all-warnings')
1426 1428 or self.ui.configbool('devel', 'check-locks')):
1427 1429 if self._currentlock(self._lockref) is not None:
1428 1430 self.ui.develwarn('"wlock" acquired after "lock"')
1429 1431
1430 1432 def unlock():
1431 1433 if self.dirstate.pendingparentchange():
1432 1434 self.dirstate.invalidate()
1433 1435 else:
1434 1436 self.dirstate.write(None)
1435 1437
1436 1438 self._filecache['dirstate'].refresh()
1437 1439
1438 1440 l = self._lock(self.vfs, "wlock", wait, unlock,
1439 1441 self.invalidatedirstate, _('working directory of %s') %
1440 1442 self.origroot,
1441 1443 inheritchecker=self._wlockchecktransaction,
1442 1444 parentenvvar='HG_WLOCK_LOCKER')
1443 1445 self._wlockref = weakref.ref(l)
1444 1446 return l
1445 1447
1446 1448 def _currentlock(self, lockref):
1447 1449 """Returns the lock if it's held, or None if it's not."""
1448 1450 if lockref is None:
1449 1451 return None
1450 1452 l = lockref()
1451 1453 if l is None or not l.held:
1452 1454 return None
1453 1455 return l
1454 1456
1455 1457 def currentwlock(self):
1456 1458 """Returns the wlock if it's held, or None if it's not."""
1457 1459 return self._currentlock(self._wlockref)
1458 1460
1459 1461 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1460 1462 """
1461 1463 commit an individual file as part of a larger transaction
1462 1464 """
1463 1465
1464 1466 fname = fctx.path()
1465 1467 fparent1 = manifest1.get(fname, nullid)
1466 1468 fparent2 = manifest2.get(fname, nullid)
1467 1469 if isinstance(fctx, context.filectx):
1468 1470 node = fctx.filenode()
1469 1471 if node in [fparent1, fparent2]:
1470 1472 self.ui.debug('reusing %s filelog entry\n' % fname)
1471 1473 if manifest1.flags(fname) != fctx.flags():
1472 1474 changelist.append(fname)
1473 1475 return node
1474 1476
1475 1477 flog = self.file(fname)
1476 1478 meta = {}
1477 1479 copy = fctx.renamed()
1478 1480 if copy and copy[0] != fname:
1479 1481 # Mark the new revision of this file as a copy of another
1480 1482 # file. This copy data will effectively act as a parent
1481 1483 # of this new revision. If this is a merge, the first
1482 1484 # parent will be the nullid (meaning "look up the copy data")
1483 1485 # and the second one will be the other parent. For example:
1484 1486 #
1485 1487 # 0 --- 1 --- 3 rev1 changes file foo
1486 1488 # \ / rev2 renames foo to bar and changes it
1487 1489 # \- 2 -/ rev3 should have bar with all changes and
1488 1490 # should record that bar descends from
1489 1491 # bar in rev2 and foo in rev1
1490 1492 #
1491 1493 # this allows this merge to succeed:
1492 1494 #
1493 1495 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1494 1496 # \ / merging rev3 and rev4 should use bar@rev2
1495 1497 # \- 2 --- 4 as the merge base
1496 1498 #
1497 1499
1498 1500 cfname = copy[0]
1499 1501 crev = manifest1.get(cfname)
1500 1502 newfparent = fparent2
1501 1503
1502 1504 if manifest2: # branch merge
1503 1505 if fparent2 == nullid or crev is None: # copied on remote side
1504 1506 if cfname in manifest2:
1505 1507 crev = manifest2[cfname]
1506 1508 newfparent = fparent1
1507 1509
1508 1510 # Here, we used to search backwards through history to try to find
1509 1511 # where the file copy came from if the source of a copy was not in
1510 1512 # the parent directory. However, this doesn't actually make sense to
1511 1513 # do (what does a copy from something not in your working copy even
1512 1514 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1513 1515 # the user that copy information was dropped, so if they didn't
1514 1516 # expect this outcome it can be fixed, but this is the correct
1515 1517 # behavior in this circumstance.
1516 1518
1517 1519 if crev:
1518 1520 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1519 1521 meta["copy"] = cfname
1520 1522 meta["copyrev"] = hex(crev)
1521 1523 fparent1, fparent2 = nullid, newfparent
1522 1524 else:
1523 1525 self.ui.warn(_("warning: can't find ancestor for '%s' "
1524 1526 "copied from '%s'!\n") % (fname, cfname))
1525 1527
1526 1528 elif fparent1 == nullid:
1527 1529 fparent1, fparent2 = fparent2, nullid
1528 1530 elif fparent2 != nullid:
1529 1531 # is one parent an ancestor of the other?
1530 1532 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1531 1533 if fparent1 in fparentancestors:
1532 1534 fparent1, fparent2 = fparent2, nullid
1533 1535 elif fparent2 in fparentancestors:
1534 1536 fparent2 = nullid
1535 1537
1536 1538 # is the file changed?
1537 1539 text = fctx.data()
1538 1540 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1539 1541 changelist.append(fname)
1540 1542 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1541 1543 # are just the flags changed during merge?
1542 1544 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1543 1545 changelist.append(fname)
1544 1546
1545 1547 return fparent1
1546 1548
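    # Illustrative sketch (not part of the original module): for a rename,
    # _filecommit records copy metadata on the new filelog revision instead
    # of a literal parent link.  Assuming a hypothetical repo where 'bar'
    # was copied from 'foo', the recorded metadata is:
    #
    #   meta = {'copy': 'foo', 'copyrev': hex(crev)}  # crev: source filenode
    #
    # and the revision is stored with fparent1 = nullid, telling readers to
    # look up the copy data rather than follow a normal parent.
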
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

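    # Illustrative usage sketch (not part of the original module): callers
    # normally reach commit() through cmdutil, but a direct call would look
    # roughly like this (message, user and date are hypothetical):
    #
    #   node = repo.commit(text='fix frobnicator', user='alice <a@b.org>',
    #                      date='2017-02-14 12:00')
    #   # returns the new changeset node, or None when nothing changed and
    #   # ui.allowemptycommit is not set
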
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

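    # Illustrative sketch (not part of the original module): commitctx also
    # accepts in-memory contexts.  Assuming context.memctx/memfilectx with
    # their usual signatures, a caller might build one roughly like this:
    #
    #   def filectxfn(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #
    #   mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                         'automated commit', ['somefile'],
    #                         filectxfn, user='bot')
    #   node = repo.commitctx(mctx)
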
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

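    # Illustrative usage sketch (not part of the original module), assuming
    # the usual matcher constructor match.match(root, cwd, patterns):
    #
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m, node='tip'):
    #       ...   # every .py file tracked in tip
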
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

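    # Illustrative usage sketch (not part of the original module): passing
    # two changesets compares them instead of the working directory:
    #
    #   st = repo.status('.^', '.')   # what the working parent introduced
    #   changed = st.modified + st.added + st.removed
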
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

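    # Illustrative usage sketch (not part of the original module):
    #
    #   heads = repo.branchheads('default', closed=True)
    #   # newest-to-oldest list of head nodes on branch 'default',
    #   # including closed heads; [] if the branch does not exist
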
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

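    # Worked example (not part of the original module): between() samples
    # the first-parent chain from top towards bottom at exponentially
    # growing distances 1, 2, 4, 8, ...  For a linear chain 10 -> 9 -> ...
    # -> 0 with top=10 and bottom=0, it collects the nodes one, two, four
    # and eight steps below top, i.e. revisions 9, 8, 6 and 2.  The legacy
    # discovery protocol relies on this spacing to bisect for common nodes.
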
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
1964 1966 """Return util.hooks consists of a pushop with repo, remote, outgoing
1965 1967 methods, which are called before pushing changesets.
1966 1968 """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

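    # Illustrative usage sketch (not part of the original module): bookmarks
    # are one namespace exposed over pushkey, so moving one could look like
    # this (names and nodes are hypothetical):
    #
    #   old = repo.listkeys('bookmarks').get('feature', '')
    #   ok = repo.pushkey('bookmarks', 'feature', old, hex(newnode))
    #   # a true value on success; False if a prepushkey hook aborted
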
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                # if src and dest refer to the same file, vfs.rename is a
                # no-op, leaving both src and dest on disk; delete dest to
                # make sure the rename cannot be such a no-op
                vfs.unlink(dest)
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

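# Worked example (not part of the original module): undoname maps a journal
# file to its undo counterpart, replacing only the first 'journal' in the
# basename:
#
#   undoname('/repo/.hg/journal.dirstate') == '/repo/.hg/undo.dirstate'
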
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
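
# Illustrative example (not part of the original module): given a config
# like
#
#   [format]
#   usefncache = False
#   [experimental]
#   format.compression = zstd
#
# newreporequirements() would return something like
# set(['revlogv1', 'store', 'generaldelta', 'exp-compression-zstd']),
# assuming zstd is available in util.compengines and generaldelta is
# enabled via scmutil.gdinitconfig; 'fncache' and 'dotencode' are omitted
# because dotencode depends on fncache.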