localrepo: ensure transaction id is fully bytes on py3
Augie Fackler
r31508:590319c0 default
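On Python 3, hashlib.sha1(...).hexdigest() returns a unicode str, while Mercurial's source transformer turns string literals such as 'TXN:' into bytes, so the old one-line concatenation raised a TypeError. The changed hunk below therefore encodes the digest to latin1 before building the id. A minimal standalone sketch of the same pattern (the seed value and the isinstance guard are illustrative stand-ins, not the repository code itself):

    import hashlib

    idbase = b"0.1234#5678.9"              # stand-in for the random()/time() seed
    ha = hashlib.sha1(idbase).hexdigest()  # str on py3; already bytes (str) on py2
    if not isinstance(ha, bytes):          # roughly what the pycompat.ispy3 check decides
        ha = ha.encode('latin1')           # hex digits are ASCII, so latin1 is lossless
    txnid = b'TXN:' + ha                   # concatenation now stays fully bytes

The digest bytes are identical either way; only the Python 3 type of the result changes.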
@@ -1,2083 +1,2087 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 pycompat,
52 53 repoview,
53 54 revset,
54 55 revsetlang,
55 56 scmutil,
56 57 store,
57 58 subrepo,
58 59 tags as tagsmod,
59 60 transaction,
60 61 txnutil,
61 62 util,
62 63 vfs as vfsmod,
63 64 )
64 65
65 66 release = lockmod.release
66 67 urlerr = util.urlerr
67 68 urlreq = util.urlreq
68 69
69 70 class repofilecache(scmutil.filecache):
70 71 """All filecache usage on repo are done for logic that should be unfiltered
71 72 """
72 73
73 74 def join(self, obj, fname):
74 75 return obj.vfs.join(fname)
75 76 def __get__(self, repo, type=None):
76 77 if repo is None:
77 78 return self
78 79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
79 80 def __set__(self, repo, value):
80 81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
81 82 def __delete__(self, repo):
82 83 return super(repofilecache, self).__delete__(repo.unfiltered())
83 84
84 85 class storecache(repofilecache):
85 86 """filecache for files in the store"""
86 87 def join(self, obj, fname):
87 88 return obj.sjoin(fname)
88 89
89 90 class unfilteredpropertycache(util.propertycache):
90 91 """propertycache that apply to unfiltered repo only"""
91 92
92 93 def __get__(self, repo, type=None):
93 94 unfi = repo.unfiltered()
94 95 if unfi is repo:
95 96 return super(unfilteredpropertycache, self).__get__(unfi)
96 97 return getattr(unfi, self.name)
97 98
98 99 class filteredpropertycache(util.propertycache):
99 100 """propertycache that must take filtering in account"""
100 101
101 102 def cachevalue(self, obj, value):
102 103 object.__setattr__(obj, self.name, value)
103 104
104 105
105 106 def hasunfilteredcache(repo, name):
106 107 """check if a repo has an unfilteredpropertycache value for <name>"""
107 108 return name in vars(repo.unfiltered())
108 109
109 110 def unfilteredmethod(orig):
110 111 """decorate method that always need to be run on unfiltered version"""
111 112 def wrapper(repo, *args, **kwargs):
112 113 return orig(repo.unfiltered(), *args, **kwargs)
113 114 return wrapper
114 115
115 116 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
116 117 'unbundle'))
117 118 legacycaps = moderncaps.union(set(['changegroupsubset']))
118 119
119 120 class localpeer(peer.peerrepository):
120 121 '''peer for a local repo; reflects only the most recent API'''
121 122
122 123 def __init__(self, repo, caps=None):
123 124 if caps is None:
124 125 caps = moderncaps.copy()
125 126 peer.peerrepository.__init__(self)
126 127 self._repo = repo.filtered('served')
127 128 self.ui = repo.ui
128 129 self._caps = repo._restrictcapabilities(caps)
129 130 self.requirements = repo.requirements
130 131 self.supportedformats = repo.supportedformats
131 132
132 133 def close(self):
133 134 self._repo.close()
134 135
135 136 def _capabilities(self):
136 137 return self._caps
137 138
138 139 def local(self):
139 140 return self._repo
140 141
141 142 def canpush(self):
142 143 return True
143 144
144 145 def url(self):
145 146 return self._repo.url()
146 147
147 148 def lookup(self, key):
148 149 return self._repo.lookup(key)
149 150
150 151 def branchmap(self):
151 152 return self._repo.branchmap()
152 153
153 154 def heads(self):
154 155 return self._repo.heads()
155 156
156 157 def known(self, nodes):
157 158 return self._repo.known(nodes)
158 159
159 160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
160 161 **kwargs):
161 162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
162 163 common=common, bundlecaps=bundlecaps,
163 164 **kwargs)
164 165 cb = util.chunkbuffer(chunks)
165 166
166 167 if bundlecaps is not None and 'HG20' in bundlecaps:
167 168 # When requesting a bundle2, getbundle returns a stream to make the
168 169 # wire level function happier. We need to build a proper object
169 170 # from it in local peer.
170 171 return bundle2.getunbundler(self.ui, cb)
171 172 else:
172 173 return changegroup.getunbundler('01', cb, None)
173 174
174 175 # TODO We might want to move the next two calls into legacypeer and add
175 176 # unbundle instead.
176 177
177 178 def unbundle(self, cg, heads, url):
178 179 """apply a bundle on a repo
179 180
180 181 This function handles the repo locking itself."""
181 182 try:
182 183 try:
183 184 cg = exchange.readbundle(self.ui, cg, None)
184 185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
185 186 if util.safehasattr(ret, 'getchunks'):
186 187 # This is a bundle20 object, turn it into an unbundler.
187 188 # This little dance should be dropped eventually when the
188 189 # API is finally improved.
189 190 stream = util.chunkbuffer(ret.getchunks())
190 191 ret = bundle2.getunbundler(self.ui, stream)
191 192 return ret
192 193 except Exception as exc:
193 194 # If the exception contains output salvaged from a bundle2
194 195 # reply, we need to make sure it is printed before continuing
195 196 # to fail. So we build a bundle2 with such output and consume
196 197 # it directly.
197 198 #
198 199 # This is not very elegant but allows a "simple" solution for
199 200 # issue4594
200 201 output = getattr(exc, '_bundle2salvagedoutput', ())
201 202 if output:
202 203 bundler = bundle2.bundle20(self._repo.ui)
203 204 for out in output:
204 205 bundler.addpart(out)
205 206 stream = util.chunkbuffer(bundler.getchunks())
206 207 b = bundle2.getunbundler(self.ui, stream)
207 208 bundle2.processbundle(self._repo, b)
208 209 raise
209 210 except error.PushRaced as exc:
210 211 raise error.ResponseError(_('push failed:'), str(exc))
211 212
212 213 def lock(self):
213 214 return self._repo.lock()
214 215
215 216 def addchangegroup(self, cg, source, url):
216 217 return cg.apply(self._repo, source, url)
217 218
218 219 def pushkey(self, namespace, key, old, new):
219 220 return self._repo.pushkey(namespace, key, old, new)
220 221
221 222 def listkeys(self, namespace):
222 223 return self._repo.listkeys(namespace)
223 224
224 225 def debugwireargs(self, one, two, three=None, four=None, five=None):
225 226 '''used to test argument passing over the wire'''
226 227 return "%s %s %s %s %s" % (one, two, three, four, five)
227 228
228 229 class locallegacypeer(localpeer):
229 230 '''peer extension which implements legacy methods too; used for tests with
230 231 restricted capabilities'''
231 232
232 233 def __init__(self, repo):
233 234 localpeer.__init__(self, repo, caps=legacycaps)
234 235
235 236 def branches(self, nodes):
236 237 return self._repo.branches(nodes)
237 238
238 239 def between(self, pairs):
239 240 return self._repo.between(pairs)
240 241
241 242 def changegroup(self, basenodes, source):
242 243 return changegroup.changegroup(self._repo, basenodes, source)
243 244
244 245 def changegroupsubset(self, bases, heads, source):
245 246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
246 247
247 248 class localrepository(object):
248 249
249 250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
250 251 'manifestv2'))
251 252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
252 253 'relshared', 'dotencode'))
253 254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
254 255 filtername = None
255 256
256 257 # a list of (ui, featureset) functions.
257 258 # only functions defined in module of enabled extensions are invoked
258 259 featuresetupfuncs = set()
259 260
260 261 def __init__(self, baseui, path, create=False):
261 262 self.requirements = set()
262 263 # vfs to access the working copy
263 264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
264 265 # vfs to access the content of the repository
265 266 self.vfs = None
266 267 # vfs to access the store part of the repository
267 268 self.svfs = None
268 269 self.root = self.wvfs.base
269 270 self.path = self.wvfs.join(".hg")
270 271 self.origroot = path
271 272 self.auditor = pathutil.pathauditor(self.root, self._checknested)
272 273 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
273 274 realfs=False)
274 275 self.vfs = vfsmod.vfs(self.path)
275 276 self.baseui = baseui
276 277 self.ui = baseui.copy()
277 278 self.ui.copy = baseui.copy # prevent copying repo configuration
278 279 # A list of callbacks to shape the phase if no data were found.
279 280 # Callbacks are in the form: func(repo, roots) --> processed root.
280 281 # This list is to be filled by extensions during repo setup
281 282 self._phasedefaults = []
282 283 try:
283 284 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
284 285 self._loadextensions()
285 286 except IOError:
286 287 pass
287 288
288 289 if self.featuresetupfuncs:
289 290 self.supported = set(self._basesupported) # use private copy
290 291 extmods = set(m.__name__ for n, m
291 292 in extensions.extensions(self.ui))
292 293 for setupfunc in self.featuresetupfuncs:
293 294 if setupfunc.__module__ in extmods:
294 295 setupfunc(self.ui, self.supported)
295 296 else:
296 297 self.supported = self._basesupported
297 298 color.setup(self.ui)
298 299
299 300 # Add compression engines.
300 301 for name in util.compengines:
301 302 engine = util.compengines[name]
302 303 if engine.revlogheader():
303 304 self.supported.add('exp-compression-%s' % name)
304 305
305 306 if not self.vfs.isdir():
306 307 if create:
307 308 self.requirements = newreporequirements(self)
308 309
309 310 if not self.wvfs.exists():
310 311 self.wvfs.makedirs()
311 312 self.vfs.makedir(notindexed=True)
312 313
313 314 if 'store' in self.requirements:
314 315 self.vfs.mkdir("store")
315 316
316 317 # create an invalid changelog
317 318 self.vfs.append(
318 319 "00changelog.i",
319 320 '\0\0\0\2' # represents revlogv2
320 321 ' dummy changelog to prevent using the old repo layout'
321 322 )
322 323 else:
323 324 raise error.RepoError(_("repository %s not found") % path)
324 325 elif create:
325 326 raise error.RepoError(_("repository %s already exists") % path)
326 327 else:
327 328 try:
328 329 self.requirements = scmutil.readrequires(
329 330 self.vfs, self.supported)
330 331 except IOError as inst:
331 332 if inst.errno != errno.ENOENT:
332 333 raise
333 334
334 335 self.sharedpath = self.path
335 336 try:
336 337 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
337 338 if 'relshared' in self.requirements:
338 339 sharedpath = self.vfs.join(sharedpath)
339 340 vfs = vfsmod.vfs(sharedpath, realpath=True)
340 341 s = vfs.base
341 342 if not vfs.exists():
342 343 raise error.RepoError(
343 344 _('.hg/sharedpath points to nonexistent directory %s') % s)
344 345 self.sharedpath = s
345 346 except IOError as inst:
346 347 if inst.errno != errno.ENOENT:
347 348 raise
348 349
349 350 self.store = store.store(
350 351 self.requirements, self.sharedpath, vfsmod.vfs)
351 352 self.spath = self.store.path
352 353 self.svfs = self.store.vfs
353 354 self.sjoin = self.store.join
354 355 self.vfs.createmode = self.store.createmode
355 356 self._applyopenerreqs()
356 357 if create:
357 358 self._writerequirements()
358 359
359 360 self._dirstatevalidatewarned = False
360 361
361 362 self._branchcaches = {}
362 363 self._revbranchcache = None
363 364 self.filterpats = {}
364 365 self._datafilters = {}
365 366 self._transref = self._lockref = self._wlockref = None
366 367
367 368 # A cache for various files under .hg/ that tracks file changes,
368 369 # (used by the filecache decorator)
369 370 #
370 371 # Maps a property name to its util.filecacheentry
371 372 self._filecache = {}
372 373
373 374 # hold sets of revision to be filtered
374 375 # should be cleared when something might have changed the filter value:
375 376 # - new changesets,
376 377 # - phase change,
377 378 # - new obsolescence marker,
378 379 # - working directory parent change,
379 380 # - bookmark changes
380 381 self.filteredrevcache = {}
381 382
382 383 # generic mapping between names and nodes
383 384 self.names = namespaces.namespaces()
384 385
385 386 @property
386 387 def wopener(self):
387 388 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
388 389 return self.wvfs
389 390
390 391 @property
391 392 def opener(self):
392 393 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
393 394 return self.vfs
394 395
395 396 def close(self):
396 397 self._writecaches()
397 398
398 399 def _loadextensions(self):
399 400 extensions.loadall(self.ui)
400 401
401 402 def _writecaches(self):
402 403 if self._revbranchcache:
403 404 self._revbranchcache.write()
404 405
405 406 def _restrictcapabilities(self, caps):
406 407 if self.ui.configbool('experimental', 'bundle2-advertise', True):
407 408 caps = set(caps)
408 409 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
409 410 caps.add('bundle2=' + urlreq.quote(capsblob))
410 411 return caps
411 412
412 413 def _applyopenerreqs(self):
413 414 self.svfs.options = dict((r, 1) for r in self.requirements
414 415 if r in self.openerreqs)
415 416 # experimental config: format.chunkcachesize
416 417 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
417 418 if chunkcachesize is not None:
418 419 self.svfs.options['chunkcachesize'] = chunkcachesize
419 420 # experimental config: format.maxchainlen
420 421 maxchainlen = self.ui.configint('format', 'maxchainlen')
421 422 if maxchainlen is not None:
422 423 self.svfs.options['maxchainlen'] = maxchainlen
423 424 # experimental config: format.manifestcachesize
424 425 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
425 426 if manifestcachesize is not None:
426 427 self.svfs.options['manifestcachesize'] = manifestcachesize
427 428 # experimental config: format.aggressivemergedeltas
428 429 aggressivemergedeltas = self.ui.configbool('format',
429 430 'aggressivemergedeltas', False)
430 431 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
431 432 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
432 433
433 434 for r in self.requirements:
434 435 if r.startswith('exp-compression-'):
435 436 self.svfs.options['compengine'] = r[len('exp-compression-'):]
436 437
437 438 def _writerequirements(self):
438 439 scmutil.writerequires(self.vfs, self.requirements)
439 440
440 441 def _checknested(self, path):
441 442 """Determine if path is a legal nested repository."""
442 443 if not path.startswith(self.root):
443 444 return False
444 445 subpath = path[len(self.root) + 1:]
445 446 normsubpath = util.pconvert(subpath)
446 447
447 448 # XXX: Checking against the current working copy is wrong in
448 449 # the sense that it can reject things like
449 450 #
450 451 # $ hg cat -r 10 sub/x.txt
451 452 #
452 453 # if sub/ is no longer a subrepository in the working copy
453 454 # parent revision.
454 455 #
455 456 # However, it can of course also allow things that would have
456 457 # been rejected before, such as the above cat command if sub/
457 458 # is a subrepository now, but was a normal directory before.
458 459 # The old path auditor would have rejected by mistake since it
459 460 # panics when it sees sub/.hg/.
460 461 #
461 462 # All in all, checking against the working copy seems sensible
462 463 # since we want to prevent access to nested repositories on
463 464 # the filesystem *now*.
464 465 ctx = self[None]
465 466 parts = util.splitpath(subpath)
466 467 while parts:
467 468 prefix = '/'.join(parts)
468 469 if prefix in ctx.substate:
469 470 if prefix == normsubpath:
470 471 return True
471 472 else:
472 473 sub = ctx.sub(prefix)
473 474 return sub.checknested(subpath[len(prefix) + 1:])
474 475 else:
475 476 parts.pop()
476 477 return False
477 478
478 479 def peer(self):
479 480 return localpeer(self) # not cached to avoid reference cycle
480 481
481 482 def unfiltered(self):
482 483 """Return unfiltered version of the repository
483 484
484 485 Intended to be overwritten by filtered repo."""
485 486 return self
486 487
487 488 def filtered(self, name):
488 489 """Return a filtered version of a repository"""
489 490 # build a new class with the mixin and the current class
490 491 # (possibly subclass of the repo)
491 492 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
492 493 pass
493 494 return filteredrepo(self, name)
494 495
495 496 @repofilecache('bookmarks', 'bookmarks.current')
496 497 def _bookmarks(self):
497 498 return bookmarks.bmstore(self)
498 499
499 500 @property
500 501 def _activebookmark(self):
501 502 return self._bookmarks.active
502 503
503 504 def bookmarkheads(self, bookmark):
504 505 name = bookmark.split('@', 1)[0]
505 506 heads = []
506 507 for mark, n in self._bookmarks.iteritems():
507 508 if mark.split('@', 1)[0] == name:
508 509 heads.append(n)
509 510 return heads
510 511
511 512 # _phaserevs and _phasesets depend on changelog. What we need is to
512 513 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
513 514 # can't be easily expressed in the filecache mechanism.
514 515 @storecache('phaseroots', '00changelog.i')
515 516 def _phasecache(self):
516 517 return phases.phasecache(self, self._phasedefaults)
517 518
518 519 @storecache('obsstore')
519 520 def obsstore(self):
520 521 # read default format for new obsstore.
521 522 # developer config: format.obsstore-version
522 523 defaultformat = self.ui.configint('format', 'obsstore-version', None)
523 524 # rely on obsstore class default when possible.
524 525 kwargs = {}
525 526 if defaultformat is not None:
526 527 kwargs['defaultformat'] = defaultformat
527 528 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
528 529 store = obsolete.obsstore(self.svfs, readonly=readonly,
529 530 **kwargs)
530 531 if store and readonly:
531 532 self.ui.warn(
532 533 _('obsolete feature not enabled but %i markers found!\n')
533 534 % len(list(store)))
534 535 return store
535 536
536 537 @storecache('00changelog.i')
537 538 def changelog(self):
538 539 c = changelog.changelog(self.svfs)
539 540 if txnutil.mayhavepending(self.root):
540 541 c.readpending('00changelog.i.a')
541 542 return c
542 543
543 544 def _constructmanifest(self):
544 545 # This is a temporary function while we migrate from manifest to
545 546 # manifestlog. It allows bundlerepo and unionrepo to intercept the
546 547 # manifest creation.
547 548 return manifest.manifestrevlog(self.svfs)
548 549
549 550 @storecache('00manifest.i')
550 551 def manifestlog(self):
551 552 return manifest.manifestlog(self.svfs, self)
552 553
553 554 @repofilecache('dirstate')
554 555 def dirstate(self):
555 556 return dirstate.dirstate(self.vfs, self.ui, self.root,
556 557 self._dirstatevalidate)
557 558
558 559 def _dirstatevalidate(self, node):
559 560 try:
560 561 self.changelog.rev(node)
561 562 return node
562 563 except error.LookupError:
563 564 if not self._dirstatevalidatewarned:
564 565 self._dirstatevalidatewarned = True
565 566 self.ui.warn(_("warning: ignoring unknown"
566 567 " working parent %s!\n") % short(node))
567 568 return nullid
568 569
569 570 def __getitem__(self, changeid):
570 571 if changeid is None or changeid == wdirrev:
571 572 return context.workingctx(self)
572 573 if isinstance(changeid, slice):
573 574 return [context.changectx(self, i)
574 575 for i in xrange(*changeid.indices(len(self)))
575 576 if i not in self.changelog.filteredrevs]
576 577 return context.changectx(self, changeid)
577 578
578 579 def __contains__(self, changeid):
579 580 try:
580 581 self[changeid]
581 582 return True
582 583 except error.RepoLookupError:
583 584 return False
584 585
585 586 def __nonzero__(self):
586 587 return True
587 588
588 589 __bool__ = __nonzero__
589 590
590 591 def __len__(self):
591 592 return len(self.changelog)
592 593
593 594 def __iter__(self):
594 595 return iter(self.changelog)
595 596
596 597 def revs(self, expr, *args):
597 598 '''Find revisions matching a revset.
598 599
599 600 The revset is specified as a string ``expr`` that may contain
600 601 %-formatting to escape certain types. See ``revsetlang.formatspec``.
601 602
602 603 Revset aliases from the configuration are not expanded. To expand
603 604 user aliases, consider calling ``scmutil.revrange()`` or
604 605 ``repo.anyrevs([expr], user=True)``.
605 606
606 607 Returns a revset.abstractsmartset, which is a list-like interface
607 608 that contains integer revisions.
608 609 '''
609 610 expr = revsetlang.formatspec(expr, *args)
610 611 m = revset.match(None, expr)
611 612 return m(self)
612 613
613 614 def set(self, expr, *args):
614 615 '''Find revisions matching a revset and emit changectx instances.
615 616
616 617 This is a convenience wrapper around ``revs()`` that iterates the
617 618 result and is a generator of changectx instances.
618 619
619 620 Revset aliases from the configuration are not expanded. To expand
620 621 user aliases, consider calling ``scmutil.revrange()``.
621 622 '''
622 623 for r in self.revs(expr, *args):
623 624 yield self[r]
624 625
625 626 def anyrevs(self, specs, user=False):
626 627 '''Find revisions matching one of the given revsets.
627 628
628 629 Revset aliases from the configuration are not expanded by default. To
629 630 expand user aliases, specify ``user=True``.
630 631 '''
631 632 if user:
632 633 m = revset.matchany(self.ui, specs, repo=self)
633 634 else:
634 635 m = revset.matchany(None, specs)
635 636 return m(self)
636 637
637 638 def url(self):
638 639 return 'file:' + self.root
639 640
640 641 def hook(self, name, throw=False, **args):
641 642 """Call a hook, passing this repo instance.
642 643
643 644 This is a convenience method to aid invoking hooks. Extensions likely
644 645 won't call this unless they have registered a custom hook or are
645 646 replacing code that is expected to call a hook.
646 647 """
647 648 return hook.hook(self.ui, self, name, throw, **args)
648 649
649 650 @unfilteredmethod
650 651 def _tag(self, names, node, message, local, user, date, extra=None,
651 652 editor=False):
652 653 if isinstance(names, str):
653 654 names = (names,)
654 655
655 656 branches = self.branchmap()
656 657 for name in names:
657 658 self.hook('pretag', throw=True, node=hex(node), tag=name,
658 659 local=local)
659 660 if name in branches:
660 661 self.ui.warn(_("warning: tag %s conflicts with existing"
661 662 " branch name\n") % name)
662 663
663 664 def writetags(fp, names, munge, prevtags):
664 665 fp.seek(0, 2)
665 666 if prevtags and prevtags[-1] != '\n':
666 667 fp.write('\n')
667 668 for name in names:
668 669 if munge:
669 670 m = munge(name)
670 671 else:
671 672 m = name
672 673
673 674 if (self._tagscache.tagtypes and
674 675 name in self._tagscache.tagtypes):
675 676 old = self.tags().get(name, nullid)
676 677 fp.write('%s %s\n' % (hex(old), m))
677 678 fp.write('%s %s\n' % (hex(node), m))
678 679 fp.close()
679 680
680 681 prevtags = ''
681 682 if local:
682 683 try:
683 684 fp = self.vfs('localtags', 'r+')
684 685 except IOError:
685 686 fp = self.vfs('localtags', 'a')
686 687 else:
687 688 prevtags = fp.read()
688 689
689 690 # local tags are stored in the current charset
690 691 writetags(fp, names, None, prevtags)
691 692 for name in names:
692 693 self.hook('tag', node=hex(node), tag=name, local=local)
693 694 return
694 695
695 696 try:
696 697 fp = self.wvfs('.hgtags', 'rb+')
697 698 except IOError as e:
698 699 if e.errno != errno.ENOENT:
699 700 raise
700 701 fp = self.wvfs('.hgtags', 'ab')
701 702 else:
702 703 prevtags = fp.read()
703 704
704 705 # committed tags are stored in UTF-8
705 706 writetags(fp, names, encoding.fromlocal, prevtags)
706 707
707 708 fp.close()
708 709
709 710 self.invalidatecaches()
710 711
711 712 if '.hgtags' not in self.dirstate:
712 713 self[None].add(['.hgtags'])
713 714
714 715 m = matchmod.exact(self.root, '', ['.hgtags'])
715 716 tagnode = self.commit(message, user, date, extra=extra, match=m,
716 717 editor=editor)
717 718
718 719 for name in names:
719 720 self.hook('tag', node=hex(node), tag=name, local=local)
720 721
721 722 return tagnode
722 723
723 724 def tag(self, names, node, message, local, user, date, editor=False):
724 725 '''tag a revision with one or more symbolic names.
725 726
726 727 names is a list of strings or, when adding a single tag, names may be a
727 728 string.
728 729
729 730 if local is True, the tags are stored in a per-repository file.
730 731 otherwise, they are stored in the .hgtags file, and a new
731 732 changeset is committed with the change.
732 733
733 734 keyword arguments:
734 735
735 736 local: whether to store tags in non-version-controlled file
736 737 (default False)
737 738
738 739 message: commit message to use if committing
739 740
740 741 user: name of user to use if committing
741 742
742 743 date: date tuple to use if committing'''
743 744
744 745 if not local:
745 746 m = matchmod.exact(self.root, '', ['.hgtags'])
746 747 if any(self.status(match=m, unknown=True, ignored=True)):
747 748 raise error.Abort(_('working copy of .hgtags is changed'),
748 749 hint=_('please commit .hgtags manually'))
749 750
750 751 self.tags() # instantiate the cache
751 752 self._tag(names, node, message, local, user, date, editor=editor)
752 753
753 754 @filteredpropertycache
754 755 def _tagscache(self):
755 756 '''Returns a tagscache object that contains various tags related
756 757 caches.'''
757 758
758 759 # This simplifies its cache management by having one decorated
759 760 # function (this one) and the rest simply fetch things from it.
760 761 class tagscache(object):
761 762 def __init__(self):
762 763 # These two define the set of tags for this repository. tags
763 764 # maps tag name to node; tagtypes maps tag name to 'global' or
764 765 # 'local'. (Global tags are defined by .hgtags across all
765 766 # heads, and local tags are defined in .hg/localtags.)
766 767 # They constitute the in-memory cache of tags.
767 768 self.tags = self.tagtypes = None
768 769
769 770 self.nodetagscache = self.tagslist = None
770 771
771 772 cache = tagscache()
772 773 cache.tags, cache.tagtypes = self._findtags()
773 774
774 775 return cache
775 776
776 777 def tags(self):
777 778 '''return a mapping of tag to node'''
778 779 t = {}
779 780 if self.changelog.filteredrevs:
780 781 tags, tt = self._findtags()
781 782 else:
782 783 tags = self._tagscache.tags
783 784 for k, v in tags.iteritems():
784 785 try:
785 786 # ignore tags to unknown nodes
786 787 self.changelog.rev(v)
787 788 t[k] = v
788 789 except (error.LookupError, ValueError):
789 790 pass
790 791 return t
791 792
792 793 def _findtags(self):
793 794 '''Do the hard work of finding tags. Return a pair of dicts
794 795 (tags, tagtypes) where tags maps tag name to node, and tagtypes
795 796 maps tag name to a string like \'global\' or \'local\'.
796 797 Subclasses or extensions are free to add their own tags, but
797 798 should be aware that the returned dicts will be retained for the
798 799 duration of the localrepo object.'''
799 800
800 801 # XXX what tagtype should subclasses/extensions use? Currently
801 802 # mq and bookmarks add tags, but do not set the tagtype at all.
802 803 # Should each extension invent its own tag type? Should there
803 804 # be one tagtype for all such "virtual" tags? Or is the status
804 805 # quo fine?
805 806
806 807 alltags = {} # map tag name to (node, hist)
807 808 tagtypes = {}
808 809
809 810 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
810 811 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
811 812
812 813 # Build the return dicts. Have to re-encode tag names because
813 814 # the tags module always uses UTF-8 (in order not to lose info
814 815 # writing to the cache), but the rest of Mercurial wants them in
815 816 # local encoding.
816 817 tags = {}
817 818 for (name, (node, hist)) in alltags.iteritems():
818 819 if node != nullid:
819 820 tags[encoding.tolocal(name)] = node
820 821 tags['tip'] = self.changelog.tip()
821 822 tagtypes = dict([(encoding.tolocal(name), value)
822 823 for (name, value) in tagtypes.iteritems()])
823 824 return (tags, tagtypes)
824 825
825 826 def tagtype(self, tagname):
826 827 '''
827 828 return the type of the given tag. result can be:
828 829
829 830 'local' : a local tag
830 831 'global' : a global tag
831 832 None : tag does not exist
832 833 '''
833 834
834 835 return self._tagscache.tagtypes.get(tagname)
835 836
836 837 def tagslist(self):
837 838 '''return a list of tags ordered by revision'''
838 839 if not self._tagscache.tagslist:
839 840 l = []
840 841 for t, n in self.tags().iteritems():
841 842 l.append((self.changelog.rev(n), t, n))
842 843 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
843 844
844 845 return self._tagscache.tagslist
845 846
846 847 def nodetags(self, node):
847 848 '''return the tags associated with a node'''
848 849 if not self._tagscache.nodetagscache:
849 850 nodetagscache = {}
850 851 for t, n in self._tagscache.tags.iteritems():
851 852 nodetagscache.setdefault(n, []).append(t)
852 853 for tags in nodetagscache.itervalues():
853 854 tags.sort()
854 855 self._tagscache.nodetagscache = nodetagscache
855 856 return self._tagscache.nodetagscache.get(node, [])
856 857
857 858 def nodebookmarks(self, node):
858 859 """return the list of bookmarks pointing to the specified node"""
859 860 marks = []
860 861 for bookmark, n in self._bookmarks.iteritems():
861 862 if n == node:
862 863 marks.append(bookmark)
863 864 return sorted(marks)
864 865
865 866 def branchmap(self):
866 867 '''returns a dictionary {branch: [branchheads]} with branchheads
867 868 ordered by increasing revision number'''
868 869 branchmap.updatecache(self)
869 870 return self._branchcaches[self.filtername]
870 871
871 872 @unfilteredmethod
872 873 def revbranchcache(self):
873 874 if not self._revbranchcache:
874 875 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
875 876 return self._revbranchcache
876 877
877 878 def branchtip(self, branch, ignoremissing=False):
878 879 '''return the tip node for a given branch
879 880
880 881 If ignoremissing is True, then this method will not raise an error.
881 882 This is helpful for callers that only expect None for a missing branch
882 883 (e.g. namespace).
883 884
884 885 '''
885 886 try:
886 887 return self.branchmap().branchtip(branch)
887 888 except KeyError:
888 889 if not ignoremissing:
889 890 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
890 891 else:
891 892 pass
892 893
893 894 def lookup(self, key):
894 895 return self[key].node()
895 896
896 897 def lookupbranch(self, key, remote=None):
897 898 repo = remote or self
898 899 if key in repo.branchmap():
899 900 return key
900 901
901 902 repo = (remote and remote.local()) and remote or self
902 903 return repo[key].branch()
903 904
904 905 def known(self, nodes):
905 906 cl = self.changelog
906 907 nm = cl.nodemap
907 908 filtered = cl.filteredrevs
908 909 result = []
909 910 for n in nodes:
910 911 r = nm.get(n)
911 912 resp = not (r is None or r in filtered)
912 913 result.append(resp)
913 914 return result
914 915
915 916 def local(self):
916 917 return self
917 918
918 919 def publishing(self):
919 920 # it's safe (and desirable) to trust the publish flag unconditionally
920 921 # so that we don't finalize changes shared between users via ssh or nfs
921 922 return self.ui.configbool('phases', 'publish', True, untrusted=True)
922 923
923 924 def cancopy(self):
924 925 # so statichttprepo's override of local() works
925 926 if not self.local():
926 927 return False
927 928 if not self.publishing():
928 929 return True
929 930 # if publishing we can't copy if there is filtered content
930 931 return not self.filtered('visible').changelog.filteredrevs
931 932
932 933 def shared(self):
933 934 '''the type of shared repository (None if not shared)'''
934 935 if self.sharedpath != self.path:
935 936 return 'store'
936 937 return None
937 938
938 939 def join(self, f, *insidef):
939 940 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
940 941 return self.vfs.join(os.path.join(f, *insidef))
941 942
942 943 def wjoin(self, f, *insidef):
943 944 return self.vfs.reljoin(self.root, f, *insidef)
944 945
945 946 def file(self, f):
946 947 if f[0] == '/':
947 948 f = f[1:]
948 949 return filelog.filelog(self.svfs, f)
949 950
950 951 def changectx(self, changeid):
951 952 return self[changeid]
952 953
953 954 def setparents(self, p1, p2=nullid):
954 955 self.dirstate.beginparentchange()
955 956 copies = self.dirstate.setparents(p1, p2)
956 957 pctx = self[p1]
957 958 if copies:
958 959 # Adjust copy records, the dirstate cannot do it, it
959 960 # requires access to parents manifests. Preserve them
960 961 # only for entries added to first parent.
961 962 for f in copies:
962 963 if f not in pctx and copies[f] in pctx:
963 964 self.dirstate.copy(copies[f], f)
964 965 if p2 == nullid:
965 966 for f, s in sorted(self.dirstate.copies().items()):
966 967 if f not in pctx and s not in pctx:
967 968 self.dirstate.copy(None, f)
968 969 self.dirstate.endparentchange()
969 970
970 971 def filectx(self, path, changeid=None, fileid=None):
971 972 """changeid can be a changeset revision, node, or tag.
972 973 fileid can be a file revision or node."""
973 974 return context.filectx(self, path, changeid, fileid)
974 975
975 976 def getcwd(self):
976 977 return self.dirstate.getcwd()
977 978
978 979 def pathto(self, f, cwd=None):
979 980 return self.dirstate.pathto(f, cwd)
980 981
981 982 def wfile(self, f, mode='r'):
982 983 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
983 984 return self.wvfs(f, mode)
984 985
985 986 def _link(self, f):
986 987 self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
987 988 '4.0')
988 989 return self.wvfs.islink(f)
989 990
990 991 def _loadfilter(self, filter):
991 992 if filter not in self.filterpats:
992 993 l = []
993 994 for pat, cmd in self.ui.configitems(filter):
994 995 if cmd == '!':
995 996 continue
996 997 mf = matchmod.match(self.root, '', [pat])
997 998 fn = None
998 999 params = cmd
999 1000 for name, filterfn in self._datafilters.iteritems():
1000 1001 if cmd.startswith(name):
1001 1002 fn = filterfn
1002 1003 params = cmd[len(name):].lstrip()
1003 1004 break
1004 1005 if not fn:
1005 1006 fn = lambda s, c, **kwargs: util.filter(s, c)
1006 1007 # Wrap old filters not supporting keyword arguments
1007 1008 if not inspect.getargspec(fn)[2]:
1008 1009 oldfn = fn
1009 1010 fn = lambda s, c, **kwargs: oldfn(s, c)
1010 1011 l.append((mf, fn, params))
1011 1012 self.filterpats[filter] = l
1012 1013 return self.filterpats[filter]
1013 1014
1014 1015 def _filter(self, filterpats, filename, data):
1015 1016 for mf, fn, cmd in filterpats:
1016 1017 if mf(filename):
1017 1018 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1018 1019 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1019 1020 break
1020 1021
1021 1022 return data
1022 1023
1023 1024 @unfilteredpropertycache
1024 1025 def _encodefilterpats(self):
1025 1026 return self._loadfilter('encode')
1026 1027
1027 1028 @unfilteredpropertycache
1028 1029 def _decodefilterpats(self):
1029 1030 return self._loadfilter('decode')
1030 1031
1031 1032 def adddatafilter(self, name, filter):
1032 1033 self._datafilters[name] = filter
1033 1034
1034 1035 def wread(self, filename):
1035 1036 if self.wvfs.islink(filename):
1036 1037 data = self.wvfs.readlink(filename)
1037 1038 else:
1038 1039 data = self.wvfs.read(filename)
1039 1040 return self._filter(self._encodefilterpats, filename, data)
1040 1041
1041 1042 def wwrite(self, filename, data, flags, backgroundclose=False):
1042 1043 """write ``data`` into ``filename`` in the working directory
1043 1044
1044 1045 This returns the length of the written (possibly decoded) data.
1045 1046 """
1046 1047 data = self._filter(self._decodefilterpats, filename, data)
1047 1048 if 'l' in flags:
1048 1049 self.wvfs.symlink(data, filename)
1049 1050 else:
1050 1051 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1051 1052 if 'x' in flags:
1052 1053 self.wvfs.setflags(filename, False, True)
1053 1054 return len(data)
1054 1055
1055 1056 def wwritedata(self, filename, data):
1056 1057 return self._filter(self._decodefilterpats, filename, data)
1057 1058
1058 1059 def currenttransaction(self):
1059 1060 """return the current transaction or None if non exists"""
1060 1061 if self._transref:
1061 1062 tr = self._transref()
1062 1063 else:
1063 1064 tr = None
1064 1065
1065 1066 if tr and tr.running():
1066 1067 return tr
1067 1068 return None
1068 1069
1069 1070 def transaction(self, desc, report=None):
1070 1071 if (self.ui.configbool('devel', 'all-warnings')
1071 1072 or self.ui.configbool('devel', 'check-locks')):
1072 1073 if self._currentlock(self._lockref) is None:
1073 1074 raise error.ProgrammingError('transaction requires locking')
1074 1075 tr = self.currenttransaction()
1075 1076 if tr is not None:
1076 1077 return tr.nest()
1077 1078
1078 1079 # abort here if the journal already exists
1079 1080 if self.svfs.exists("journal"):
1080 1081 raise error.RepoError(
1081 1082 _("abandoned transaction found"),
1082 1083 hint=_("run 'hg recover' to clean up transaction"))
1083 1084
1084 1085 idbase = "%.40f#%f" % (random.random(), time.time())
1085 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1086 ha = hashlib.sha1(idbase).hexdigest()
1087 if pycompat.ispy3:
1088 ha = ha.encode('latin1')
1089 txnid = 'TXN:' + ha
1086 1090 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1087 1091
1088 1092 self._writejournal(desc)
1089 1093 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1090 1094 if report:
1091 1095 rp = report
1092 1096 else:
1093 1097 rp = self.ui.warn
1094 1098 vfsmap = {'plain': self.vfs} # root of .hg/
1095 1099 # we must avoid cyclic reference between repo and transaction.
1096 1100 reporef = weakref.ref(self)
1097 1101 def validate(tr):
1098 1102 """will run pre-closing hooks"""
1099 1103 reporef().hook('pretxnclose', throw=True,
1100 1104 txnname=desc, **tr.hookargs)
1101 1105 def releasefn(tr, success):
1102 1106 repo = reporef()
1103 1107 if success:
1104 1108 # this should be explicitly invoked here, because
1105 1109 # in-memory changes aren't written out when closing the
1106 1110 # transaction, if tr.addfilegenerator (via
1107 1111 # dirstate.write or so) isn't invoked while the
1108 1112 # transaction is running
1109 1113 repo.dirstate.write(None)
1110 1114 else:
1111 1115 # discard all changes (including ones already written
1112 1116 # out) in this transaction
1113 1117 repo.dirstate.restorebackup(None, prefix='journal.')
1114 1118
1115 1119 repo.invalidate(clearfilecache=True)
1116 1120
1117 1121 tr = transaction.transaction(rp, self.svfs, vfsmap,
1118 1122 "journal",
1119 1123 "undo",
1120 1124 aftertrans(renames),
1121 1125 self.store.createmode,
1122 1126 validator=validate,
1123 1127 releasefn=releasefn)
1124 1128
1125 1129 tr.hookargs['txnid'] = txnid
1126 1130 # note: writing the fncache only during finalize means that the file is
1127 1131 # outdated when running hooks. As fncache is used for streaming clone,
1128 1132 # this is not expected to break anything that happens during the hooks.
1129 1133 tr.addfinalize('flush-fncache', self.store.write)
1130 1134 def txnclosehook(tr2):
1131 1135 """To be run if transaction is successful, will schedule a hook run
1132 1136 """
1133 1137 # Don't reference tr2 in hook() so we don't hold a reference.
1134 1138 # This reduces memory consumption when there are multiple
1135 1139 # transactions per lock. This can likely go away if issue5045
1136 1140 # fixes the function accumulation.
1137 1141 hookargs = tr2.hookargs
1138 1142
1139 1143 def hook():
1140 1144 reporef().hook('txnclose', throw=False, txnname=desc,
1141 1145 **hookargs)
1142 1146 reporef()._afterlock(hook)
1143 1147 tr.addfinalize('txnclose-hook', txnclosehook)
1144 1148 def txnaborthook(tr2):
1145 1149 """To be run if transaction is aborted
1146 1150 """
1147 1151 reporef().hook('txnabort', throw=False, txnname=desc,
1148 1152 **tr2.hookargs)
1149 1153 tr.addabort('txnabort-hook', txnaborthook)
1150 1154 # avoid eager cache invalidation. in-memory data should be identical
1151 1155 # to stored data if transaction has no error.
1152 1156 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1153 1157 self._transref = weakref.ref(tr)
1154 1158 return tr
1155 1159
1156 1160 def _journalfiles(self):
1157 1161 return ((self.svfs, 'journal'),
1158 1162 (self.vfs, 'journal.dirstate'),
1159 1163 (self.vfs, 'journal.branch'),
1160 1164 (self.vfs, 'journal.desc'),
1161 1165 (self.vfs, 'journal.bookmarks'),
1162 1166 (self.svfs, 'journal.phaseroots'))
1163 1167
1164 1168 def undofiles(self):
1165 1169 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1166 1170
1167 1171 def _writejournal(self, desc):
1168 1172 self.dirstate.savebackup(None, prefix='journal.')
1169 1173 self.vfs.write("journal.branch",
1170 1174 encoding.fromlocal(self.dirstate.branch()))
1171 1175 self.vfs.write("journal.desc",
1172 1176 "%d\n%s\n" % (len(self), desc))
1173 1177 self.vfs.write("journal.bookmarks",
1174 1178 self.vfs.tryread("bookmarks"))
1175 1179 self.svfs.write("journal.phaseroots",
1176 1180 self.svfs.tryread("phaseroots"))
1177 1181
1178 1182 def recover(self):
1179 1183 with self.lock():
1180 1184 if self.svfs.exists("journal"):
1181 1185 self.ui.status(_("rolling back interrupted transaction\n"))
1182 1186 vfsmap = {'': self.svfs,
1183 1187 'plain': self.vfs,}
1184 1188 transaction.rollback(self.svfs, vfsmap, "journal",
1185 1189 self.ui.warn)
1186 1190 self.invalidate()
1187 1191 return True
1188 1192 else:
1189 1193 self.ui.warn(_("no interrupted transaction available\n"))
1190 1194 return False
1191 1195
1192 1196 def rollback(self, dryrun=False, force=False):
1193 1197 wlock = lock = dsguard = None
1194 1198 try:
1195 1199 wlock = self.wlock()
1196 1200 lock = self.lock()
1197 1201 if self.svfs.exists("undo"):
1198 1202 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1199 1203
1200 1204 return self._rollback(dryrun, force, dsguard)
1201 1205 else:
1202 1206 self.ui.warn(_("no rollback information available\n"))
1203 1207 return 1
1204 1208 finally:
1205 1209 release(dsguard, lock, wlock)
1206 1210
1207 1211 @unfilteredmethod # Until we get smarter cache management
1208 1212 def _rollback(self, dryrun, force, dsguard):
1209 1213 ui = self.ui
1210 1214 try:
1211 1215 args = self.vfs.read('undo.desc').splitlines()
1212 1216 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1213 1217 if len(args) >= 3:
1214 1218 detail = args[2]
1215 1219 oldtip = oldlen - 1
1216 1220
1217 1221 if detail and ui.verbose:
1218 1222 msg = (_('repository tip rolled back to revision %s'
1219 1223 ' (undo %s: %s)\n')
1220 1224 % (oldtip, desc, detail))
1221 1225 else:
1222 1226 msg = (_('repository tip rolled back to revision %s'
1223 1227 ' (undo %s)\n')
1224 1228 % (oldtip, desc))
1225 1229 except IOError:
1226 1230 msg = _('rolling back unknown transaction\n')
1227 1231 desc = None
1228 1232
1229 1233 if not force and self['.'] != self['tip'] and desc == 'commit':
1230 1234 raise error.Abort(
1231 1235 _('rollback of last commit while not checked out '
1232 1236 'may lose data'), hint=_('use -f to force'))
1233 1237
1234 1238 ui.status(msg)
1235 1239 if dryrun:
1236 1240 return 0
1237 1241
1238 1242 parents = self.dirstate.parents()
1239 1243 self.destroying()
1240 1244 vfsmap = {'plain': self.vfs, '': self.svfs}
1241 1245 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1242 1246 if self.vfs.exists('undo.bookmarks'):
1243 1247 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1244 1248 if self.svfs.exists('undo.phaseroots'):
1245 1249 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1246 1250 self.invalidate()
1247 1251
1248 1252 parentgone = (parents[0] not in self.changelog.nodemap or
1249 1253 parents[1] not in self.changelog.nodemap)
1250 1254 if parentgone:
1251 1255 # prevent dirstateguard from overwriting already restored one
1252 1256 dsguard.close()
1253 1257
1254 1258 self.dirstate.restorebackup(None, prefix='undo.')
1255 1259 try:
1256 1260 branch = self.vfs.read('undo.branch')
1257 1261 self.dirstate.setbranch(encoding.tolocal(branch))
1258 1262 except IOError:
1259 1263 ui.warn(_('named branch could not be reset: '
1260 1264 'current branch is still \'%s\'\n')
1261 1265 % self.dirstate.branch())
1262 1266
1263 1267 parents = tuple([p.rev() for p in self[None].parents()])
1264 1268 if len(parents) > 1:
1265 1269 ui.status(_('working directory now based on '
1266 1270 'revisions %d and %d\n') % parents)
1267 1271 else:
1268 1272 ui.status(_('working directory now based on '
1269 1273 'revision %d\n') % parents)
1270 1274 mergemod.mergestate.clean(self, self['.'].node())
1271 1275
1272 1276 # TODO: if we know which new heads may result from this rollback, pass
1273 1277 # them to destroy(), which will prevent the branchhead cache from being
1274 1278 # invalidated.
1275 1279 self.destroyed()
1276 1280 return 0
1277 1281
1278 1282 def invalidatecaches(self):
1279 1283
1280 1284 if '_tagscache' in vars(self):
1281 1285 # can't use delattr on proxy
1282 1286 del self.__dict__['_tagscache']
1283 1287
1284 1288 self.unfiltered()._branchcaches.clear()
1285 1289 self.invalidatevolatilesets()
1286 1290
1287 1291 def invalidatevolatilesets(self):
1288 1292 self.filteredrevcache.clear()
1289 1293 obsolete.clearobscaches(self)
1290 1294
1291 1295 def invalidatedirstate(self):
1292 1296 '''Invalidates the dirstate, causing the next call to dirstate
1293 1297 to check if it was modified since the last time it was read,
1294 1298 rereading it if it has.
1295 1299
1296 1300 This is different from dirstate.invalidate() in that it doesn't always
1297 1301 reread the dirstate. Use dirstate.invalidate() if you want to
1298 1302 explicitly read the dirstate again (i.e. restoring it to a previous
1299 1303 known good state).'''
1300 1304 if hasunfilteredcache(self, 'dirstate'):
1301 1305 for k in self.dirstate._filecache:
1302 1306 try:
1303 1307 delattr(self.dirstate, k)
1304 1308 except AttributeError:
1305 1309 pass
1306 1310 delattr(self.unfiltered(), 'dirstate')
1307 1311
1308 1312 def invalidate(self, clearfilecache=False):
1309 1313 '''Invalidates both store and non-store parts other than dirstate
1310 1314
1311 1315 If a transaction is running, invalidation of store is omitted,
1312 1316 because discarding in-memory changes might cause inconsistency
1313 1317 (e.g. incomplete fncache causes unintentional failure, but
1314 1318 redundant one doesn't).
1315 1319 '''
1316 1320 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1317 1321 for k in self._filecache.keys():
1318 1322 # dirstate is invalidated separately in invalidatedirstate()
1319 1323 if k == 'dirstate':
1320 1324 continue
1321 1325
1322 1326 if clearfilecache:
1323 1327 del self._filecache[k]
1324 1328 try:
1325 1329 delattr(unfiltered, k)
1326 1330 except AttributeError:
1327 1331 pass
1328 1332 self.invalidatecaches()
1329 1333 if not self.currenttransaction():
1330 1334 # TODO: Changing contents of store outside transaction
1331 1335 # causes inconsistency. We should make in-memory store
1332 1336 # changes detectable, and abort if changed.
1333 1337 self.store.invalidatecaches()
1334 1338
1335 1339 def invalidateall(self):
1336 1340 '''Fully invalidates both store and non-store parts, causing the
1337 1341 subsequent operation to reread any outside changes.'''
1338 1342 # extension should hook this to invalidate its caches
1339 1343 self.invalidate()
1340 1344 self.invalidatedirstate()
1341 1345
1342 1346 @unfilteredmethod
1343 1347 def _refreshfilecachestats(self, tr):
1344 1348 """Reload stats of cached files so that they are flagged as valid"""
1345 1349 for k, ce in self._filecache.items():
1346 1350 if k == 'dirstate' or k not in self.__dict__:
1347 1351 continue
1348 1352 ce.refresh()
1349 1353
1350 1354 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1351 1355 inheritchecker=None, parentenvvar=None):
1352 1356 parentlock = None
1353 1357 # the contents of parentenvvar are used by the underlying lock to
1354 1358 # determine whether it can be inherited
1355 1359 if parentenvvar is not None:
1356 1360 parentlock = encoding.environ.get(parentenvvar)
1357 1361 try:
1358 1362 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1359 1363 acquirefn=acquirefn, desc=desc,
1360 1364 inheritchecker=inheritchecker,
1361 1365 parentlock=parentlock)
1362 1366 except error.LockHeld as inst:
1363 1367 if not wait:
1364 1368 raise
1365 1369 # show more details for new-style locks
1366 1370 if ':' in inst.locker:
1367 1371 host, pid = inst.locker.split(":", 1)
1368 1372 self.ui.warn(
1369 1373 _("waiting for lock on %s held by process %r "
1370 1374 "on host %r\n") % (desc, pid, host))
1371 1375 else:
1372 1376 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1373 1377 (desc, inst.locker))
1374 1378 # default to 600 seconds timeout
1375 1379 l = lockmod.lock(vfs, lockname,
1376 1380 int(self.ui.config("ui", "timeout", "600")),
1377 1381 releasefn=releasefn, acquirefn=acquirefn,
1378 1382 desc=desc)
1379 1383 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1380 1384 return l
1381 1385
1382 1386 def _afterlock(self, callback):
1383 1387 """add a callback to be run when the repository is fully unlocked
1384 1388
1385 1389 The callback will be executed when the outermost lock is released
1386 1390 (with wlock being higher level than 'lock')."""
1387 1391 for ref in (self._wlockref, self._lockref):
1388 1392 l = ref and ref()
1389 1393 if l and l.held:
1390 1394 l.postrelease.append(callback)
1391 1395 break
1392 1396 else: # no lock has been found.
1393 1397 callback()
1394 1398
1395 1399 def lock(self, wait=True):
1396 1400 '''Lock the repository store (.hg/store) and return a weak reference
1397 1401 to the lock. Use this before modifying the store (e.g. committing or
1398 1402 stripping). If you are opening a transaction, get a lock as well.
1399 1403
1400 1404 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1401 1405 'wlock' first to avoid a deadlock hazard.'''
1402 1406 l = self._currentlock(self._lockref)
1403 1407 if l is not None:
1404 1408 l.lock()
1405 1409 return l
1406 1410
1407 1411 l = self._lock(self.svfs, "lock", wait, None,
1408 1412 self.invalidate, _('repository %s') % self.origroot)
1409 1413 self._lockref = weakref.ref(l)
1410 1414 return l
1411 1415
1412 1416 def _wlockchecktransaction(self):
1413 1417 if self.currenttransaction() is not None:
1414 1418 raise error.LockInheritanceContractViolation(
1415 1419 'wlock cannot be inherited in the middle of a transaction')
1416 1420
1417 1421 def wlock(self, wait=True):
1418 1422 '''Lock the non-store parts of the repository (everything under
1419 1423 .hg except .hg/store) and return a weak reference to the lock.
1420 1424
1421 1425 Use this before modifying files in .hg.
1422 1426
1423 1427 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1424 1428 'wlock' first to avoid a deadlock hazard.'''
1425 1429 l = self._wlockref and self._wlockref()
1426 1430 if l is not None and l.held:
1427 1431 l.lock()
1428 1432 return l
1429 1433
1430 1434 # We do not need to check for non-waiting lock acquisition. Such
1431 1435 # acquisition would not cause a deadlock as it would just fail.
1432 1436 if wait and (self.ui.configbool('devel', 'all-warnings')
1433 1437 or self.ui.configbool('devel', 'check-locks')):
1434 1438 if self._currentlock(self._lockref) is not None:
1435 1439 self.ui.develwarn('"wlock" acquired after "lock"')
1436 1440
1437 1441 def unlock():
1438 1442 if self.dirstate.pendingparentchange():
1439 1443 self.dirstate.invalidate()
1440 1444 else:
1441 1445 self.dirstate.write(None)
1442 1446
1443 1447 self._filecache['dirstate'].refresh()
1444 1448
1445 1449 l = self._lock(self.vfs, "wlock", wait, unlock,
1446 1450 self.invalidatedirstate, _('working directory of %s') %
1447 1451 self.origroot,
1448 1452 inheritchecker=self._wlockchecktransaction,
1449 1453 parentenvvar='HG_WLOCK_LOCKER')
1450 1454 self._wlockref = weakref.ref(l)
1451 1455 return l
1452 1456
1453 1457 def _currentlock(self, lockref):
1454 1458 """Returns the lock if it's held, or None if it's not."""
1455 1459 if lockref is None:
1456 1460 return None
1457 1461 l = lockref()
1458 1462 if l is None or not l.held:
1459 1463 return None
1460 1464 return l
1461 1465
1462 1466 def currentwlock(self):
1463 1467 """Returns the wlock if it's held, or None if it's not."""
1464 1468 return self._currentlock(self._wlockref)
1465 1469
1466 1470 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1467 1471 """
1468 1472 commit an individual file as part of a larger transaction
1469 1473 """
1470 1474
1471 1475 fname = fctx.path()
1472 1476 fparent1 = manifest1.get(fname, nullid)
1473 1477 fparent2 = manifest2.get(fname, nullid)
1474 1478 if isinstance(fctx, context.filectx):
1475 1479 node = fctx.filenode()
1476 1480 if node in [fparent1, fparent2]:
1477 1481 self.ui.debug('reusing %s filelog entry\n' % fname)
1478 1482 if manifest1.flags(fname) != fctx.flags():
1479 1483 changelist.append(fname)
1480 1484 return node
1481 1485
1482 1486 flog = self.file(fname)
1483 1487 meta = {}
1484 1488 copy = fctx.renamed()
1485 1489 if copy and copy[0] != fname:
1486 1490 # Mark the new revision of this file as a copy of another
1487 1491 # file. This copy data will effectively act as a parent
1488 1492 # of this new revision. If this is a merge, the first
1489 1493 # parent will be the nullid (meaning "look up the copy data")
1490 1494 # and the second one will be the other parent. For example:
1491 1495 #
1492 1496 # 0 --- 1 --- 3 rev1 changes file foo
1493 1497 # \ / rev2 renames foo to bar and changes it
1494 1498 # \- 2 -/ rev3 should have bar with all changes and
1495 1499 # should record that bar descends from
1496 1500 # bar in rev2 and foo in rev1
1497 1501 #
1498 1502 # this allows this merge to succeed:
1499 1503 #
1500 1504 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1501 1505 # \ / merging rev3 and rev4 should use bar@rev2
1502 1506 # \- 2 --- 4 as the merge base
1503 1507 #
1504 1508
1505 1509 cfname = copy[0]
1506 1510 crev = manifest1.get(cfname)
1507 1511 newfparent = fparent2
1508 1512
1509 1513 if manifest2: # branch merge
1510 1514 if fparent2 == nullid or crev is None: # copied on remote side
1511 1515 if cfname in manifest2:
1512 1516 crev = manifest2[cfname]
1513 1517 newfparent = fparent1
1514 1518
1515 1519 # Here, we used to search backwards through history to try to find
1516 1520 # where the file copy came from if the source of a copy was not in
1517 1521 # the parent directory. However, this doesn't actually make sense to
1518 1522 # do (what does a copy from something not in your working copy even
1519 1523 # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
1520 1524 # the user that copy information was dropped, so if they didn't
1521 1525 # expect this outcome it can be fixed, but this is the correct
1522 1526 # behavior in this circumstance.
1523 1527
1524 1528 if crev:
1525 1529 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1526 1530 meta["copy"] = cfname
1527 1531 meta["copyrev"] = hex(crev)
1528 1532 fparent1, fparent2 = nullid, newfparent
1529 1533 else:
1530 1534 self.ui.warn(_("warning: can't find ancestor for '%s' "
1531 1535 "copied from '%s'!\n") % (fname, cfname))
1532 1536
1533 1537 elif fparent1 == nullid:
1534 1538 fparent1, fparent2 = fparent2, nullid
1535 1539 elif fparent2 != nullid:
1536 1540 # is one parent an ancestor of the other?
1537 1541 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1538 1542 if fparent1 in fparentancestors:
1539 1543 fparent1, fparent2 = fparent2, nullid
1540 1544 elif fparent2 in fparentancestors:
1541 1545 fparent2 = nullid
1542 1546
1543 1547 # is the file changed?
1544 1548 text = fctx.data()
1545 1549 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1546 1550 changelist.append(fname)
1547 1551 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1548 1552 # are just the flags changed during merge?
1549 1553 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1550 1554 changelist.append(fname)
1551 1555
1552 1556 return fparent1
1553 1557
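
To make the copy bookkeeping above concrete, here is a minimal sketch of the metadata _filecommit records for a rename; the file names and node value are hypothetical, and the dict simply mirrors the meta["copy"] and meta["copyrev"] assignments above.

    # hypothetical metadata stored when 'bar' was committed as a copy of 'foo'
    meta = {
        'copy': 'foo',                 # source path, from meta["copy"] = cfname
        'copyrev': '1e2f' + '0' * 36,  # hex of source filelog node (placeholder)
    }
    # with copy metadata present, fparent1 is set to nullid so readers know
    # to look up the copy source rather than follow a linear parent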
1554 1558 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1555 1559 """check for commit arguments that aren't committable"""
1556 1560 if match.isexact() or match.prefix():
1557 1561 matched = set(status.modified + status.added + status.removed)
1558 1562
1559 1563 for f in match.files():
1560 1564 f = self.dirstate.normalize(f)
1561 1565 if f == '.' or f in matched or f in wctx.substate:
1562 1566 continue
1563 1567 if f in status.deleted:
1564 1568 fail(f, _('file not found!'))
1565 1569 if f in vdirs: # visited directory
1566 1570 d = f + '/'
1567 1571 for mf in matched:
1568 1572 if mf.startswith(d):
1569 1573 break
1570 1574 else:
1571 1575 fail(f, _("no match under directory!"))
1572 1576 elif f not in self.dirstate:
1573 1577 fail(f, _("file not tracked!"))
1574 1578
1575 1579 @unfilteredmethod
1576 1580 def commit(self, text="", user=None, date=None, match=None, force=False,
1577 1581 editor=False, extra=None):
1578 1582 """Add a new revision to current repository.
1579 1583
1580 1584 Revision information is gathered from the working directory,
1581 1585 match can be used to filter the committed files. If editor is
1582 1586 supplied, it is called to get a commit message.
1583 1587 """
1584 1588 if extra is None:
1585 1589 extra = {}
1586 1590
1587 1591 def fail(f, msg):
1588 1592 raise error.Abort('%s: %s' % (f, msg))
1589 1593
1590 1594 if not match:
1591 1595 match = matchmod.always(self.root, '')
1592 1596
1593 1597 if not force:
1594 1598 vdirs = []
1595 1599 match.explicitdir = vdirs.append
1596 1600 match.bad = fail
1597 1601
1598 1602 wlock = lock = tr = None
1599 1603 try:
1600 1604 wlock = self.wlock()
1601 1605 lock = self.lock() # for recent changelog (see issue4368)
1602 1606
1603 1607 wctx = self[None]
1604 1608 merge = len(wctx.parents()) > 1
1605 1609
1606 1610 if not force and merge and match.ispartial():
1607 1611 raise error.Abort(_('cannot partially commit a merge '
1608 1612 '(do not specify files or patterns)'))
1609 1613
1610 1614 status = self.status(match=match, clean=force)
1611 1615 if force:
1612 1616 status.modified.extend(status.clean) # mq may commit clean files
1613 1617
1614 1618 # check subrepos
1615 1619 subs = []
1616 1620 commitsubs = set()
1617 1621 newstate = wctx.substate.copy()
1618 1622 # only manage subrepos and .hgsubstate if .hgsub is present
1619 1623 if '.hgsub' in wctx:
1620 1624 # we'll decide whether to track this ourselves, thanks
1621 1625 for c in status.modified, status.added, status.removed:
1622 1626 if '.hgsubstate' in c:
1623 1627 c.remove('.hgsubstate')
1624 1628
1625 1629 # compare current state to last committed state
1626 1630 # build new substate based on last committed state
1627 1631 oldstate = wctx.p1().substate
1628 1632 for s in sorted(newstate.keys()):
1629 1633 if not match(s):
1630 1634 # ignore working copy, use old state if present
1631 1635 if s in oldstate:
1632 1636 newstate[s] = oldstate[s]
1633 1637 continue
1634 1638 if not force:
1635 1639 raise error.Abort(
1636 1640 _("commit with new subrepo %s excluded") % s)
1637 1641 dirtyreason = wctx.sub(s).dirtyreason(True)
1638 1642 if dirtyreason:
1639 1643 if not self.ui.configbool('ui', 'commitsubrepos'):
1640 1644 raise error.Abort(dirtyreason,
1641 1645 hint=_("use --subrepos for recursive commit"))
1642 1646 subs.append(s)
1643 1647 commitsubs.add(s)
1644 1648 else:
1645 1649 bs = wctx.sub(s).basestate()
1646 1650 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1647 1651 if oldstate.get(s, (None, None, None))[1] != bs:
1648 1652 subs.append(s)
1649 1653
1650 1654 # check for removed subrepos
1651 1655 for p in wctx.parents():
1652 1656 r = [s for s in p.substate if s not in newstate]
1653 1657 subs += [s for s in r if match(s)]
1654 1658 if subs:
1655 1659 if (not match('.hgsub') and
1656 1660 '.hgsub' in (wctx.modified() + wctx.added())):
1657 1661 raise error.Abort(
1658 1662 _("can't commit subrepos without .hgsub"))
1659 1663 status.modified.insert(0, '.hgsubstate')
1660 1664
1661 1665 elif '.hgsub' in status.removed:
1662 1666 # clean up .hgsubstate when .hgsub is removed
1663 1667 if ('.hgsubstate' in wctx and
1664 1668 '.hgsubstate' not in (status.modified + status.added +
1665 1669 status.removed)):
1666 1670 status.removed.insert(0, '.hgsubstate')
1667 1671
1668 1672 # make sure all explicit patterns are matched
1669 1673 if not force:
1670 1674 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1671 1675
1672 1676 cctx = context.workingcommitctx(self, status,
1673 1677 text, user, date, extra)
1674 1678
1675 1679 # internal config: ui.allowemptycommit
1676 1680 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1677 1681 or extra.get('close') or merge or cctx.files()
1678 1682 or self.ui.configbool('ui', 'allowemptycommit'))
1679 1683 if not allowemptycommit:
1680 1684 return None
1681 1685
1682 1686 if merge and cctx.deleted():
1683 1687 raise error.Abort(_("cannot commit merge with missing files"))
1684 1688
1685 1689 ms = mergemod.mergestate.read(self)
1686 1690 mergeutil.checkunresolved(ms)
1687 1691
1688 1692 if editor:
1689 1693 cctx._text = editor(self, cctx, subs)
1690 1694 edited = (text != cctx._text)
1691 1695
1692 1696 # Save commit message in case this transaction gets rolled back
1693 1697 # (e.g. by a pretxncommit hook). Leave the content alone on
1694 1698 # the assumption that the user will use the same editor again.
1695 1699 msgfn = self.savecommitmessage(cctx._text)
1696 1700
1697 1701 # commit subs and write new state
1698 1702 if subs:
1699 1703 for s in sorted(commitsubs):
1700 1704 sub = wctx.sub(s)
1701 1705 self.ui.status(_('committing subrepository %s\n') %
1702 1706 subrepo.subrelpath(sub))
1703 1707 sr = sub.commit(cctx._text, user, date)
1704 1708 newstate[s] = (newstate[s][0], sr)
1705 1709 subrepo.writestate(self, newstate)
1706 1710
1707 1711 p1, p2 = self.dirstate.parents()
1708 1712 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1709 1713 try:
1710 1714 self.hook("precommit", throw=True, parent1=hookp1,
1711 1715 parent2=hookp2)
1712 1716 tr = self.transaction('commit')
1713 1717 ret = self.commitctx(cctx, True)
1714 1718 except: # re-raises
1715 1719 if edited:
1716 1720 self.ui.write(
1717 1721 _('note: commit message saved in %s\n') % msgfn)
1718 1722 raise
1719 1723 # update bookmarks, dirstate and mergestate
1720 1724 bookmarks.update(self, [p1, p2], ret)
1721 1725 cctx.markcommitted(ret)
1722 1726 ms.reset()
1723 1727 tr.close()
1724 1728
1725 1729 finally:
1726 1730 lockmod.release(tr, lock, wlock)
1727 1731
1728 1732 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1729 1733 # hack for commands that use a temporary commit (e.g. histedit):
1730 1734 # the temporary commit may have been stripped before the hook runs
1731 1735 if self.changelog.hasnode(ret):
1732 1736 self.hook("commit", node=node, parent1=parent1,
1733 1737 parent2=parent2)
1734 1738 self._afterlock(commithook)
1735 1739 return ret
1736 1740
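
Driving the commit API above from Python looks roughly like the hedged sketch below; the repository path, user name and message are made up, and how the ui object is constructed varies by Mercurial version.

    from mercurial import hg, ui as uimod

    myui = uimod.ui.load()  # plain uimod.ui() on older releases
    repo = hg.repository(myui, '/path/to/repo')  # hypothetical path
    # commit all working-directory changes; returns the new node, or None
    # when nothing changed and ui.allowemptycommit is off
    node = repo.commit(text='example message', user='alice')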
1737 1741 @unfilteredmethod
1738 1742 def commitctx(self, ctx, error=False):
1739 1743 """Add a new revision to current repository.
1740 1744 Revision information is passed via the context argument.
1741 1745 """
1742 1746
1743 1747 tr = None
1744 1748 p1, p2 = ctx.p1(), ctx.p2()
1745 1749 user = ctx.user()
1746 1750
1747 1751 lock = self.lock()
1748 1752 try:
1749 1753 tr = self.transaction("commit")
1750 1754 trp = weakref.proxy(tr)
1751 1755
1752 1756 if ctx.manifestnode():
1753 1757 # reuse an existing manifest revision
1754 1758 mn = ctx.manifestnode()
1755 1759 files = ctx.files()
1756 1760 elif ctx.files():
1757 1761 m1ctx = p1.manifestctx()
1758 1762 m2ctx = p2.manifestctx()
1759 1763 mctx = m1ctx.copy()
1760 1764
1761 1765 m = mctx.read()
1762 1766 m1 = m1ctx.read()
1763 1767 m2 = m2ctx.read()
1764 1768
1765 1769 # check in files
1766 1770 added = []
1767 1771 changed = []
1768 1772 removed = list(ctx.removed())
1769 1773 linkrev = len(self)
1770 1774 self.ui.note(_("committing files:\n"))
1771 1775 for f in sorted(ctx.modified() + ctx.added()):
1772 1776 self.ui.note(f + "\n")
1773 1777 try:
1774 1778 fctx = ctx[f]
1775 1779 if fctx is None:
1776 1780 removed.append(f)
1777 1781 else:
1778 1782 added.append(f)
1779 1783 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1780 1784 trp, changed)
1781 1785 m.setflag(f, fctx.flags())
1782 1786 except OSError as inst:
1783 1787 self.ui.warn(_("trouble committing %s!\n") % f)
1784 1788 raise
1785 1789 except IOError as inst:
1786 1790 errcode = getattr(inst, 'errno', errno.ENOENT)
1787 1791 if error or errcode and errcode != errno.ENOENT:
1788 1792 self.ui.warn(_("trouble committing %s!\n") % f)
1789 1793 raise
1790 1794
1791 1795 # update manifest
1792 1796 self.ui.note(_("committing manifest\n"))
1793 1797 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1794 1798 drop = [f for f in removed if f in m]
1795 1799 for f in drop:
1796 1800 del m[f]
1797 1801 mn = mctx.write(trp, linkrev,
1798 1802 p1.manifestnode(), p2.manifestnode(),
1799 1803 added, drop)
1800 1804 files = changed + removed
1801 1805 else:
1802 1806 mn = p1.manifestnode()
1803 1807 files = []
1804 1808
1805 1809 # update changelog
1806 1810 self.ui.note(_("committing changelog\n"))
1807 1811 self.changelog.delayupdate(tr)
1808 1812 n = self.changelog.add(mn, files, ctx.description(),
1809 1813 trp, p1.node(), p2.node(),
1810 1814 user, ctx.date(), ctx.extra().copy())
1811 1815 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1812 1816 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1813 1817 parent2=xp2)
1814 1818 # set the new commit in its proper phase
1815 1819 targetphase = subrepo.newcommitphase(self.ui, ctx)
1816 1820 if targetphase:
1817 1821 # retracting the boundary does not alter parent changesets.
1818 1822 # if a parent has a higher phase, the resulting phase will
1819 1823 # be compliant anyway
1820 1824 #
1821 1825 # if minimal phase was 0 we don't need to retract anything
1822 1826 phases.retractboundary(self, tr, targetphase, [n])
1823 1827 tr.close()
1824 1828 branchmap.updatecache(self.filtered('served'))
1825 1829 return n
1826 1830 finally:
1827 1831 if tr:
1828 1832 tr.release()
1829 1833 lock.release()
1830 1834
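
commitctx is also the entry point for building commits without touching the working directory; below is a minimal sketch using context.memctx, assuming the memctx/memfilectx signatures of this era and a repo object obtained as in the earlier example.

    from mercurial import context

    def filectxfn(repo, memctx, path):
        # supply content for each file being committed
        return context.memfilectx(repo, path, 'file content\n')

    mctx = context.memctx(repo, (repo['.'].node(), None),
                          'commit message', ['example.txt'],
                          filectxfn, user='alice')
    node = repo.commitctx(mctx)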
1831 1835 @unfilteredmethod
1832 1836 def destroying(self):
1833 1837 '''Inform the repository that nodes are about to be destroyed.
1834 1838 Intended for use by strip and rollback, so there's a common
1835 1839 place for anything that has to be done before destroying history.
1836 1840
1837 1841 This is mostly useful for saving state that is in memory and waiting
1838 1842 to be flushed when the current lock is released. Because a call to
1839 1843 destroyed is imminent, the repo will be invalidated, causing those
1840 1844 changes to stay in memory (waiting for the next unlock), or vanish
1841 1845 completely.
1842 1846 '''
1843 1847 # When using the same lock to commit and strip, the phasecache is left
1844 1848 # dirty after committing. Then when we strip, the repo is invalidated,
1845 1849 # causing those changes to disappear.
1846 1850 if '_phasecache' in vars(self):
1847 1851 self._phasecache.write()
1848 1852
1849 1853 @unfilteredmethod
1850 1854 def destroyed(self):
1851 1855 '''Inform the repository that nodes have been destroyed.
1852 1856 Intended for use by strip and rollback, so there's a common
1853 1857 place for anything that has to be done after destroying history.
1854 1858 '''
1855 1859 # When one tries to:
1856 1860 # 1) destroy nodes thus calling this method (e.g. strip)
1857 1861 # 2) use phasecache somewhere (e.g. commit)
1858 1862 #
1859 1863 # then 2) will fail because the phasecache contains nodes that were
1860 1864 # removed. We can either remove phasecache from the filecache,
1861 1865 # causing it to reload next time it is accessed, or simply filter
1862 1866 # the removed nodes now and write the updated cache.
1863 1867 self._phasecache.filterunknown(self)
1864 1868 self._phasecache.write()
1865 1869
1866 1870 # update the 'served' branch cache to help read-only server processes
1867 1871 # Thanks to branchcache collaboration this is done from the nearest
1868 1872 # filtered subset and it is expected to be fast.
1869 1873 branchmap.updatecache(self.filtered('served'))
1870 1874
1871 1875 # Ensure the persistent tag cache is updated. Doing it now
1872 1876 # means that the tag cache only has to worry about destroyed
1873 1877 # heads immediately after a strip/rollback. That in turn
1874 1878 # guarantees that "cachetip == currenttip" (comparing both rev
1875 1879 # and node) always means no nodes have been added or destroyed.
1876 1880
1877 1881 # XXX this is suboptimal when qrefresh'ing: we strip the current
1878 1882 # head, refresh the tag cache, then immediately add a new head.
1879 1883 # But I think doing it this way is necessary for the "instant
1880 1884 # tag cache retrieval" case to work.
1881 1885 self.invalidate()
1882 1886
1883 1887 def walk(self, match, node=None):
1884 1888 '''
1885 1889 walk recursively through the directory tree or a given
1886 1890 changeset, finding all files matched by the match
1887 1891 function
1888 1892 '''
1889 1893 return self[node].walk(match)
1890 1894
1891 1895 def status(self, node1='.', node2=None, match=None,
1892 1896 ignored=False, clean=False, unknown=False,
1893 1897 listsubrepos=False):
1894 1898 '''a convenience method that calls node1.status(node2)'''
1895 1899 return self[node1].status(node2, match, ignored, clean, unknown,
1896 1900 listsubrepos)
1897 1901
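
A short, hedged illustration of the two convenience methods above; the glob pattern is illustrative.

    from mercurial import match as matchmod

    # walk working-directory files matching a pattern
    m = matchmod.match(repo.root, '', ['glob:**.py'])
    for f in repo.walk(m):
        print(f)

    # status of the working directory against its first parent
    st = repo.status()
    print(st.modified, st.added, st.removed)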
1898 1902 def heads(self, start=None):
1899 1903 if start is None:
1900 1904 cl = self.changelog
1901 1905 headrevs = reversed(cl.headrevs())
1902 1906 return [cl.node(rev) for rev in headrevs]
1903 1907
1904 1908 heads = self.changelog.heads(start)
1905 1909 # sort the output in rev descending order
1906 1910 return sorted(heads, key=self.changelog.rev, reverse=True)
1907 1911
1908 1912 def branchheads(self, branch=None, start=None, closed=False):
1909 1913 '''return a (possibly filtered) list of heads for the given branch
1910 1914
1911 1915 Heads are returned in topological order, from newest to oldest.
1912 1916 If branch is None, use the dirstate branch.
1913 1917 If start is not None, return only heads reachable from start.
1914 1918 If closed is True, return heads that are marked as closed as well.
1915 1919 '''
1916 1920 if branch is None:
1917 1921 branch = self[None].branch()
1918 1922 branches = self.branchmap()
1919 1923 if branch not in branches:
1920 1924 return []
1921 1925 # the cache returns heads ordered lowest to highest
1922 1926 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1923 1927 if start is not None:
1924 1928 # filter out the heads that cannot be reached from startrev
1925 1929 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1926 1930 bheads = [h for h in bheads if h in fbheads]
1927 1931 return bheads
1928 1932
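
For example, per the docstring above, heads come back newest first (a sketch; the branch name is whatever exists in the repository):

    # newest-first heads of the 'default' branch, including closed ones
    for node in repo.branchheads('default', closed=True):
        print(repo[node].rev(), repo[node].description())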
1929 1933 def branches(self, nodes):
1930 1934 if not nodes:
1931 1935 nodes = [self.changelog.tip()]
1932 1936 b = []
1933 1937 for n in nodes:
1934 1938 t = n
1935 1939 while True:
1936 1940 p = self.changelog.parents(n)
1937 1941 if p[1] != nullid or p[0] == nullid:
1938 1942 b.append((t, n, p[0], p[1]))
1939 1943 break
1940 1944 n = p[0]
1941 1945 return b
1942 1946
1943 1947 def between(self, pairs):
1944 1948 r = []
1945 1949
1946 1950 for top, bottom in pairs:
1947 1951 n, l, i = top, [], 0
1948 1952 f = 1
1949 1953
1950 1954 while n != bottom and n != nullid:
1951 1955 p = self.changelog.parents(n)[0]
1952 1956 if i == f:
1953 1957 l.append(n)
1954 1958 f = f * 2
1955 1959 n = p
1956 1960 i += 1
1957 1961
1958 1962 r.append(l)
1959 1963
1960 1964 return r
1961 1965
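
The loop above walks the first-parent chain from top toward bottom and records nodes at exponentially growing distances (1, 2, 4, 8, ...), which keeps the answer to the legacy discovery protocol small. A standalone sketch of the distances it samples, mirroring the i/f bookkeeping:

    def sampled_distances(chainlength):
        # record the distance whenever the step counter hits the
        # next power of two, exactly as in between() above
        distances, f, i = [], 1, 0
        while i < chainlength:
            i += 1
            if i == f:
                distances.append(i)
                f *= 2
        return distances

    assert sampled_distances(20) == [1, 2, 4, 8, 16]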
1962 1966 def checkpush(self, pushop):
1963 1967 """Extensions can override this function if additional checks have
1964 1968 to be performed before pushing, or call it if they override push
1965 1969 command.
1966 1970 """
1967 1971 pass
1968 1972
1969 1973 @unfilteredpropertycache
1970 1974 def prepushoutgoinghooks(self):
1971 1975 """Return util.hooks consists of a pushop with repo, remote, outgoing
1972 1976 methods, which are called before pushing changesets.
1973 1977 """
1974 1978 return util.hooks()
1975 1979
1976 1980 def pushkey(self, namespace, key, old, new):
1977 1981 try:
1978 1982 tr = self.currenttransaction()
1979 1983 hookargs = {}
1980 1984 if tr is not None:
1981 1985 hookargs.update(tr.hookargs)
1982 1986 hookargs['namespace'] = namespace
1983 1987 hookargs['key'] = key
1984 1988 hookargs['old'] = old
1985 1989 hookargs['new'] = new
1986 1990 self.hook('prepushkey', throw=True, **hookargs)
1987 1991 except error.HookAbort as exc:
1988 1992 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1989 1993 if exc.hint:
1990 1994 self.ui.write_err(_("(%s)\n") % exc.hint)
1991 1995 return False
1992 1996 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1993 1997 ret = pushkey.push(self, namespace, key, old, new)
1994 1998 def runhook():
1995 1999 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1996 2000 ret=ret)
1997 2001 self._afterlock(runhook)
1998 2002 return ret
1999 2003
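
pushkey is the generic mechanism underneath bookmark and phase exchange; a hedged example of creating a bookmark through it (the bookmark name is made up; an empty old value means the key did not exist yet):

    # returns True on success, False if the precondition on 'old' failed
    ok = repo.pushkey('bookmarks', 'feature', '', repo['tip'].hex())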
2000 2004 def listkeys(self, namespace):
2001 2005 self.hook('prelistkeys', throw=True, namespace=namespace)
2002 2006 self.ui.debug('listing keys for "%s"\n' % namespace)
2003 2007 values = pushkey.list(self, namespace)
2004 2008 self.hook('listkeys', namespace=namespace, values=values)
2005 2009 return values
2006 2010
2007 2011 def debugwireargs(self, one, two, three=None, four=None, five=None):
2008 2012 '''used to test argument passing over the wire'''
2009 2013 return "%s %s %s %s %s" % (one, two, three, four, five)
2010 2014
2011 2015 def savecommitmessage(self, text):
2012 2016 fp = self.vfs('last-message.txt', 'wb')
2013 2017 try:
2014 2018 fp.write(text)
2015 2019 finally:
2016 2020 fp.close()
2017 2021 return self.pathto(fp.name[len(self.root) + 1:])
2018 2022
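
A small illustration of the helper above; note the returned path is computed relative to the current directory:

    # writes .hg/last-message.txt and returns a displayable path,
    # typically '.hg/last-message.txt' when run from the repo root
    relpath = repo.savecommitmessage('WIP: draft message\n')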
2019 2023 # used to avoid circular references so destructors work
2020 2024 def aftertrans(files):
2021 2025 renamefiles = [tuple(t) for t in files]
2022 2026 def a():
2023 2027 for vfs, src, dest in renamefiles:
2024 2028 try:
2025 2029 # if src and dest refer to the same file, vfs.rename is a no-op,
2026 2030 # leaving both src and dest on disk. delete dest to make sure
2027 2031 # the rename couldn't be such a no-op.
2028 2032 vfs.unlink(dest)
2029 2033 except OSError as ex:
2030 2034 if ex.errno != errno.ENOENT:
2031 2035 raise
2032 2036 try:
2033 2037 vfs.rename(src, dest)
2034 2038 except OSError: # journal file does not yet exist
2035 2039 pass
2036 2040 return a
2037 2041
2038 2042 def undoname(fn):
2039 2043 base, name = os.path.split(fn)
2040 2044 assert name.startswith('journal')
2041 2045 return os.path.join(base, name.replace('journal', 'undo', 1))
2042 2046
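For instance, the journal-to-undo mapping above behaves like this (assuming POSIX path separators):

    assert undoname('.hg/journal.dirstate') == '.hg/undo.dirstate'
    assert undoname('journal') == 'undo'
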
2043 2047 def instance(ui, path, create):
2044 2048 return localrepository(ui, util.urllocalpath(path), create)
2045 2049
2046 2050 def islocal(path):
2047 2051 return True
2048 2052
2049 2053 def newreporequirements(repo):
2050 2054 """Determine the set of requirements for a new local repository.
2051 2055
2052 2056 Extensions can wrap this function to specify custom requirements for
2053 2057 new repositories.
2054 2058 """
2055 2059 ui = repo.ui
2056 2060 requirements = set(['revlogv1'])
2057 2061 if ui.configbool('format', 'usestore', True):
2058 2062 requirements.add('store')
2059 2063 if ui.configbool('format', 'usefncache', True):
2060 2064 requirements.add('fncache')
2061 2065 if ui.configbool('format', 'dotencode', True):
2062 2066 requirements.add('dotencode')
2063 2067
2064 2068 compengine = ui.config('experimental', 'format.compression', 'zlib')
2065 2069 if compengine not in util.compengines:
2066 2070 raise error.Abort(_('compression engine %s defined by '
2067 2071 'experimental.format.compression not available') %
2068 2072 compengine,
2069 2073 hint=_('run "hg debuginstall" to list available '
2070 2074 'compression engines'))
2071 2075
2072 2076 # zlib is the historical default and doesn't need an explicit requirement.
2073 2077 if compengine != 'zlib':
2074 2078 requirements.add('exp-compression-%s' % compengine)
2075 2079
2076 2080 if scmutil.gdinitconfig(ui):
2077 2081 requirements.add('generaldelta')
2078 2082 if ui.configbool('experimental', 'treemanifest', False):
2079 2083 requirements.add('treemanifest')
2080 2084 if ui.configbool('experimental', 'manifestv2', False):
2081 2085 requirements.add('manifestv2')
2082 2086
2083 2087 return requirements
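
As the docstring says, extensions can wrap this function to add their own requirements; a minimal sketch of such an extension, with hypothetical module and requirement names:

    # myrequirement.py - hypothetical extension adding a repo requirement
    from mercurial import extensions, localrepo

    def _newreporequirements(orig, repo):
        requirements = orig(repo)
        requirements.add('exp-myrequirement')  # made-up requirement name
        return requirements

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)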