localrepo: use peer interfaces...
Gregory Szorc
r33802:707750e5 default
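For orientation: this change moves localpeer onto the formal peer interfaces defined in the new `repository` module. A minimal sketch of how a caller exercises the resulting API (standard Mercurial entry points; treat the exact calls as illustrative rather than as part of this change):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '/path/to/repo')
    peer = repo.peer()                 # a localpeer instance

    # _basepeer interface: local plumbing
    assert peer.local() is not None    # local peers expose a repo object
    assert peer.canpush()

    # _basewirecommands interface: wire-protocol equivalents
    caps = peer.capabilities()
    tip = peer.lookup('tip')
    heads = peer.heads()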
@@ -1,2265 +1,2294 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 repository,
52 53 repoview,
53 54 revset,
54 55 revsetlang,
55 56 scmutil,
56 57 sparse,
57 58 store,
58 59 subrepo,
59 60 tags as tagsmod,
60 61 transaction,
61 62 txnutil,
62 63 util,
63 64 vfs as vfsmod,
64 65 )
65 66
66 67 release = lockmod.release
67 68 urlerr = util.urlerr
68 69 urlreq = util.urlreq
69 70
70 71 # set of (path, vfs-location) tuples. vfs-location is:
71 72 # - 'plain' for vfs relative paths
72 73 # - '' for svfs relative paths
73 74 _cachedfiles = set()
74 75
75 76 class _basefilecache(scmutil.filecache):
76 77 """All filecache usage on repo are done for logic that should be unfiltered
77 78 """
78 79 def __get__(self, repo, type=None):
79 80 if repo is None:
80 81 return self
81 82 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
82 83 def __set__(self, repo, value):
83 84 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
84 85 def __delete__(self, repo):
85 86 return super(_basefilecache, self).__delete__(repo.unfiltered())
86 87
87 88 class repofilecache(_basefilecache):
88 89 """filecache for files in .hg but outside of .hg/store"""
89 90 def __init__(self, *paths):
90 91 super(repofilecache, self).__init__(*paths)
91 92 for path in paths:
92 93 _cachedfiles.add((path, 'plain'))
93 94
94 95 def join(self, obj, fname):
95 96 return obj.vfs.join(fname)
96 97
97 98 class storecache(_basefilecache):
98 99 """filecache for files in the store"""
99 100 def __init__(self, *paths):
100 101 super(storecache, self).__init__(*paths)
101 102 for path in paths:
102 103 _cachedfiles.add((path, ''))
103 104
104 105 def join(self, obj, fname):
105 106 return obj.sjoin(fname)
106 107
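
Both decorators are exercised further down in this file; the usage pattern looks like this (a sketch mirroring the real declarations below):

    class somerepo(object):
        @repofilecache('bookmarks', 'bookmarks.current')
        def _bookmarks(self):
            # recomputed only when .hg/bookmarks{,.current} change on disk
            return bookmarks.bmstore(self)

        @storecache('00changelog.i')
        def changelog(self):
            # recomputed only when .hg/store/00changelog.i changes
            return changelog.changelog(self.svfs)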
107 108 def isfilecached(repo, name):
108 109 """check if a repo has already cached "name" filecache-ed property
109 110
110 111 This returns (cachedobj-or-None, iscached) tuple.
111 112 """
112 113 cacheentry = repo.unfiltered()._filecache.get(name, None)
113 114 if not cacheentry:
114 115 return None, False
115 116 return cacheentry.obj, True
116 117
117 118 class unfilteredpropertycache(util.propertycache):
118 119 """propertycache that apply to unfiltered repo only"""
119 120
120 121 def __get__(self, repo, type=None):
121 122 unfi = repo.unfiltered()
122 123 if unfi is repo:
123 124 return super(unfilteredpropertycache, self).__get__(unfi)
124 125 return getattr(unfi, self.name)
125 126
126 127 class filteredpropertycache(util.propertycache):
127 128 """propertycache that must take filtering in account"""
128 129
129 130 def cachevalue(self, obj, value):
130 131 object.__setattr__(obj, self.name, value)
131 132
132 133
133 134 def hasunfilteredcache(repo, name):
134 135 """check if a repo has an unfilteredpropertycache value for <name>"""
135 136 return name in vars(repo.unfiltered())
136 137
137 138 def unfilteredmethod(orig):
138 139 """decorate method that always need to be run on unfiltered version"""
139 140 def wrapper(repo, *args, **kwargs):
140 141 return orig(repo.unfiltered(), *args, **kwargs)
141 142 return wrapper
142 143
143 144 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
144 145 'unbundle'}
145 146 legacycaps = moderncaps.union({'changegroupsubset'})
146 147
147 class localpeer(peer.peerrepository):
148 class localpeer(repository.peer):
148 149 '''peer for a local repo; reflects only the most recent API'''
149 150
150 151 def __init__(self, repo, caps=None):
152 super(localpeer, self).__init__()
153
151 154 if caps is None:
152 155 caps = moderncaps.copy()
153 peer.peerrepository.__init__(self)
154 156 self._repo = repo.filtered('served')
155 self.ui = repo.ui
157 self._ui = repo.ui
156 158 self._caps = repo._restrictcapabilities(caps)
157 159
160 # Begin of _basepeer interface.
161
162 @util.propertycache
163 def ui(self):
164 return self._ui
165
166 def url(self):
167 return self._repo.url()
168
169 def local(self):
170 return self._repo
171
172 def peer(self):
173 return self
174
175 def canpush(self):
176 return True
177
158 178 def close(self):
159 179 self._repo.close()
160 180
161 def _capabilities(self):
162 return self._caps
163
164 def local(self):
165 return self._repo
181 # End of _basepeer interface.
166 182
167 def canpush(self):
168 return True
169
170 def url(self):
171 return self._repo.url()
172
173 def lookup(self, key):
174 return self._repo.lookup(key)
183 # Begin of _basewirecommands interface.
175 184
176 185 def branchmap(self):
177 186 return self._repo.branchmap()
178 187
179 def heads(self):
180 return self._repo.heads()
188 def capabilities(self):
189 return self._caps
181 190
182 def known(self, nodes):
183 return self._repo.known(nodes)
191 def debugwireargs(self, one, two, three=None, four=None, five=None):
192 """Used to test argument passing over the wire"""
193 return "%s %s %s %s %s" % (one, two, three, four, five)
184 194
185 195 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
186 196 **kwargs):
187 197 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
188 198 common=common, bundlecaps=bundlecaps,
189 199 **kwargs)
190 200 cb = util.chunkbuffer(chunks)
191 201
192 202 if exchange.bundle2requested(bundlecaps):
193 203 # When requesting a bundle2, getbundle returns a stream to make the
194 204 # wire level function happier. We need to build a proper object
195 205 # from it in local peer.
196 206 return bundle2.getunbundler(self.ui, cb)
197 207 else:
198 208 return changegroup.getunbundler('01', cb, None)
199 209
200 # TODO We might want to move the next two calls into legacypeer and add
201 # unbundle instead.
210 def heads(self):
211 return self._repo.heads()
212
213 def known(self, nodes):
214 return self._repo.known(nodes)
215
216 def listkeys(self, namespace):
217 return self._repo.listkeys(namespace)
218
219 def lookup(self, key):
220 return self._repo.lookup(key)
221
222 def pushkey(self, namespace, key, old, new):
223 return self._repo.pushkey(namespace, key, old, new)
224
225 def stream_out(self):
226 raise error.Abort(_('cannot perform stream clone against local '
227 'peer'))
202 228
203 229 def unbundle(self, cg, heads, url):
204 230 """apply a bundle on a repo
205 231
206 232 This function handles the repo locking itself."""
207 233 try:
208 234 try:
209 235 cg = exchange.readbundle(self.ui, cg, None)
210 236 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
211 237 if util.safehasattr(ret, 'getchunks'):
212 238 # This is a bundle20 object, turn it into an unbundler.
213 239 # This little dance should be dropped eventually when the
214 240 # API is finally improved.
215 241 stream = util.chunkbuffer(ret.getchunks())
216 242 ret = bundle2.getunbundler(self.ui, stream)
217 243 return ret
218 244 except Exception as exc:
219 245 # If the exception contains output salvaged from a bundle2
220 246 # reply, we need to make sure it is printed before continuing
221 247 # to fail. So we build a bundle2 with such output and consume
222 248 # it directly.
223 249 #
224 250 # This is not very elegant but allows a "simple" solution for
225 251 # issue4594
226 252 output = getattr(exc, '_bundle2salvagedoutput', ())
227 253 if output:
228 254 bundler = bundle2.bundle20(self._repo.ui)
229 255 for out in output:
230 256 bundler.addpart(out)
231 257 stream = util.chunkbuffer(bundler.getchunks())
232 258 b = bundle2.getunbundler(self.ui, stream)
233 259 bundle2.processbundle(self._repo, b)
234 260 raise
235 261 except error.PushRaced as exc:
236 262 raise error.ResponseError(_('push failed:'), str(exc))
237 263
238 def pushkey(self, namespace, key, old, new):
239 return self._repo.pushkey(namespace, key, old, new)
264 # End of _basewirecommands interface.
240 265
241 def listkeys(self, namespace):
242 return self._repo.listkeys(namespace)
266 # Begin of peer interface.
243 267
244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 '''used to test argument passing over the wire'''
246 return "%s %s %s %s %s" % (one, two, three, four, five)
268 def iterbatch(self):
269 return peer.localiterbatcher(self)
247 270
248 class locallegacypeer(localpeer):
271 # End of peer interface.
272
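
The new iterbatch() hook above rounds out the peer interface. A hedged sketch of the batching pattern it supports (the batcher API follows mercurial/peer.py; the queued calls are just examples):

    batch = peer.iterbatch()
    batch.lookup('tip')        # queued, not executed yet
    batch.heads()
    batch.submit()             # run the queued calls
    tipnode, heads = list(batch.results())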
273 class locallegacypeer(repository.legacypeer, localpeer):
249 274 '''peer extension which implements legacy methods too; used for tests with
250 275 restricted capabilities'''
251 276
252 277 def __init__(self, repo):
253 localpeer.__init__(self, repo, caps=legacycaps)
278 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
279
280 # Begin of baselegacywirecommands interface.
281
282 def between(self, pairs):
283 return self._repo.between(pairs)
254 284
255 285 def branches(self, nodes):
256 286 return self._repo.branches(nodes)
257 287
258 def between(self, pairs):
259 return self._repo.between(pairs)
260
261 288 def changegroup(self, basenodes, source):
262 289 return changegroup.changegroup(self._repo, basenodes, source)
263 290
264 291 def changegroupsubset(self, bases, heads, source):
265 292 return changegroup.changegroupsubset(self._repo, bases, heads, source)
266 293
294 # End of baselegacywirecommands interface.
295
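To make the modern/legacy split concrete, a small sketch of the capability difference (assuming the moderncaps/legacycaps sets defined earlier in this file):

    peer = localpeer(repo)
    legacy = locallegacypeer(repo)

    assert 'changegroupsubset' not in peer.capabilities()
    assert 'changegroupsubset' in legacy.capabilities()  # from legacycaps
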
267 296 # Increment the sub-version when the revlog v2 format changes to lock out old
268 297 # clients.
269 298 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
270 299
271 300 class localrepository(object):
272 301
273 302 supportedformats = {
274 303 'revlogv1',
275 304 'generaldelta',
276 305 'treemanifest',
277 306 'manifestv2',
278 307 REVLOGV2_REQUIREMENT,
279 308 }
280 309 _basesupported = supportedformats | {
281 310 'store',
282 311 'fncache',
283 312 'shared',
284 313 'relshared',
285 314 'dotencode',
286 315 'exp-sparse',
287 316 }
288 317 openerreqs = {
289 318 'revlogv1',
290 319 'generaldelta',
291 320 'treemanifest',
292 321 'manifestv2',
293 322 }
294 323
295 324 # a list of (ui, featureset) functions.
296 325 # only functions defined in module of enabled extensions are invoked
297 326 featuresetupfuncs = set()
298 327
299 328 # list of prefix for file which can be written without 'wlock'
300 329 # Extensions should extend this list when needed
301 330 _wlockfreeprefix = {
302 331 # We might consider requiring 'wlock' for the next
303 332 # two, but pretty much all the existing code assumes
304 333 # wlock is not needed so we keep them excluded for
305 334 # now.
306 335 'hgrc',
307 336 'requires',
308 337 # XXX cache is a complicated business; someone
309 338 # should investigate this in depth at some point
310 339 'cache/',
311 340 # XXX shouldn't dirstate be covered by the wlock?
312 341 'dirstate',
313 342 # XXX bisect was still a bit too messy at the time
314 343 # this changeset was introduced. Someone should fix
315 344 # the remaining bit and drop this line
316 345 'bisect.state',
317 346 }
318 347
319 348 def __init__(self, baseui, path, create=False):
320 349 self.requirements = set()
321 350 self.filtername = None
322 351 # wvfs: rooted at the repository root, used to access the working copy
323 352 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
324 353 # vfs: rooted at .hg, used to access repo files outside of .hg/store
325 354 self.vfs = None
326 355 # svfs: usually rooted at .hg/store, used to access repository history
327 356 # If this is a shared repository, this vfs may point to another
328 357 # repository's .hg/store directory.
329 358 self.svfs = None
330 359 self.root = self.wvfs.base
331 360 self.path = self.wvfs.join(".hg")
332 361 self.origroot = path
333 362 # These auditors are not used by the vfs,
334 363 # only used when writing this comment: basectx.match
335 364 self.auditor = pathutil.pathauditor(self.root, self._checknested)
336 365 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
337 366 realfs=False, cached=True)
338 367 self.baseui = baseui
339 368 self.ui = baseui.copy()
340 369 self.ui.copy = baseui.copy # prevent copying repo configuration
341 370 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
342 371 if (self.ui.configbool('devel', 'all-warnings') or
343 372 self.ui.configbool('devel', 'check-locks')):
344 373 self.vfs.audit = self._getvfsward(self.vfs.audit)
345 374 # A list of callbacks to shape the phase if no data were found.
346 375 # Callbacks are in the form: func(repo, roots) --> processed root.
347 376 # This list is to be filled by extensions during repo setup.
348 377 self._phasedefaults = []
349 378 try:
350 379 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
351 380 self._loadextensions()
352 381 except IOError:
353 382 pass
354 383
355 384 if self.featuresetupfuncs:
356 385 self.supported = set(self._basesupported) # use private copy
357 386 extmods = set(m.__name__ for n, m
358 387 in extensions.extensions(self.ui))
359 388 for setupfunc in self.featuresetupfuncs:
360 389 if setupfunc.__module__ in extmods:
361 390 setupfunc(self.ui, self.supported)
362 391 else:
363 392 self.supported = self._basesupported
364 393 color.setup(self.ui)
365 394
366 395 # Add compression engines.
367 396 for name in util.compengines:
368 397 engine = util.compengines[name]
369 398 if engine.revlogheader():
370 399 self.supported.add('exp-compression-%s' % name)
371 400
372 401 if not self.vfs.isdir():
373 402 if create:
374 403 self.requirements = newreporequirements(self)
375 404
376 405 if not self.wvfs.exists():
377 406 self.wvfs.makedirs()
378 407 self.vfs.makedir(notindexed=True)
379 408
380 409 if 'store' in self.requirements:
381 410 self.vfs.mkdir("store")
382 411
383 412 # create an invalid changelog
384 413 self.vfs.append(
385 414 "00changelog.i",
386 415 '\0\0\0\2' # represents revlogv2
387 416 ' dummy changelog to prevent using the old repo layout'
388 417 )
389 418 else:
390 419 raise error.RepoError(_("repository %s not found") % path)
391 420 elif create:
392 421 raise error.RepoError(_("repository %s already exists") % path)
393 422 else:
394 423 try:
395 424 self.requirements = scmutil.readrequires(
396 425 self.vfs, self.supported)
397 426 except IOError as inst:
398 427 if inst.errno != errno.ENOENT:
399 428 raise
400 429
401 430 cachepath = self.vfs.join('cache')
402 431 self.sharedpath = self.path
403 432 try:
404 433 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
405 434 if 'relshared' in self.requirements:
406 435 sharedpath = self.vfs.join(sharedpath)
407 436 vfs = vfsmod.vfs(sharedpath, realpath=True)
408 437 cachepath = vfs.join('cache')
409 438 s = vfs.base
410 439 if not vfs.exists():
411 440 raise error.RepoError(
412 441 _('.hg/sharedpath points to nonexistent directory %s') % s)
413 442 self.sharedpath = s
414 443 except IOError as inst:
415 444 if inst.errno != errno.ENOENT:
416 445 raise
417 446
418 447 if 'exp-sparse' in self.requirements and not sparse.enabled:
419 448 raise error.RepoError(_('repository is using sparse feature but '
420 449 'sparse is not enabled; enable the '
421 450 '"sparse" extensions to access'))
422 451
423 452 self.store = store.store(
424 453 self.requirements, self.sharedpath,
425 454 lambda base: vfsmod.vfs(base, cacheaudited=True))
426 455 self.spath = self.store.path
427 456 self.svfs = self.store.vfs
428 457 self.sjoin = self.store.join
429 458 self.vfs.createmode = self.store.createmode
430 459 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
431 460 self.cachevfs.createmode = self.store.createmode
432 461 if (self.ui.configbool('devel', 'all-warnings') or
433 462 self.ui.configbool('devel', 'check-locks')):
434 463 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
435 464 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
436 465 else: # standard vfs
437 466 self.svfs.audit = self._getsvfsward(self.svfs.audit)
438 467 self._applyopenerreqs()
439 468 if create:
440 469 self._writerequirements()
441 470
442 471 self._dirstatevalidatewarned = False
443 472
444 473 self._branchcaches = {}
445 474 self._revbranchcache = None
446 475 self.filterpats = {}
447 476 self._datafilters = {}
448 477 self._transref = self._lockref = self._wlockref = None
449 478
450 479 # A cache for various files under .hg/ that tracks file changes,
451 480 # (used by the filecache decorator)
452 481 #
453 482 # Maps a property name to its util.filecacheentry
454 483 self._filecache = {}
455 484
456 485 # hold sets of revision to be filtered
457 486 # should be cleared when something might have changed the filter value:
458 487 # - new changesets,
459 488 # - phase change,
460 489 # - new obsolescence marker,
461 490 # - working directory parent change,
462 491 # - bookmark changes
463 492 self.filteredrevcache = {}
464 493
465 494 # post-dirstate-status hooks
466 495 self._postdsstatus = []
467 496
468 497 # Cache of types representing filtered repos.
469 498 self._filteredrepotypes = weakref.WeakKeyDictionary()
470 499
471 500 # generic mapping between names and nodes
472 501 self.names = namespaces.namespaces()
473 502
474 503 # Key to signature value.
475 504 self._sparsesignaturecache = {}
476 505 # Signature to cached matcher instance.
477 506 self._sparsematchercache = {}
478 507
479 508 def _getvfsward(self, origfunc):
480 509 """build a ward for self.vfs"""
481 510 rref = weakref.ref(self)
482 511 def checkvfs(path, mode=None):
483 512 ret = origfunc(path, mode=mode)
484 513 repo = rref()
485 514 if (repo is None
486 515 or not util.safehasattr(repo, '_wlockref')
487 516 or not util.safehasattr(repo, '_lockref')):
488 517 return
489 518 if mode in (None, 'r', 'rb'):
490 519 return
491 520 if path.startswith(repo.path):
492 521 # truncate name relative to the repository (.hg)
493 522 path = path[len(repo.path) + 1:]
494 523 if path.startswith('cache/'):
495 524 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
496 525 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
497 526 if path.startswith('journal.'):
498 527 # journal is covered by 'lock'
499 528 if repo._currentlock(repo._lockref) is None:
500 529 repo.ui.develwarn('write with no lock: "%s"' % path,
501 530 stacklevel=2, config='check-locks')
502 531 elif repo._currentlock(repo._wlockref) is None:
503 532 # rest of vfs files are covered by 'wlock'
504 533 #
505 534 # exclude special files
506 535 for prefix in self._wlockfreeprefix:
507 536 if path.startswith(prefix):
508 537 return
509 538 repo.ui.develwarn('write with no wlock: "%s"' % path,
510 539 stacklevel=2, config='check-locks')
511 540 return ret
512 541 return checkvfs
513 542
514 543 def _getsvfsward(self, origfunc):
515 544 """build a ward for self.svfs"""
516 545 rref = weakref.ref(self)
517 546 def checksvfs(path, mode=None):
518 547 ret = origfunc(path, mode=mode)
519 548 repo = rref()
520 549 if repo is None or not util.safehasattr(repo, '_lockref'):
521 550 return
522 551 if mode in (None, 'r', 'rb'):
523 552 return
524 553 if path.startswith(repo.sharedpath):
525 554 # truncate name relative to the repository (.hg)
526 555 path = path[len(repo.sharedpath) + 1:]
527 556 if repo._currentlock(repo._lockref) is None:
528 557 repo.ui.develwarn('write with no lock: "%s"' % path,
529 558 stacklevel=3)
530 559 return ret
531 560 return checksvfs
532 561
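Both _getvfsward and _getsvfsward follow the same wrapper pattern: keep a weak reference to the repo, intercept every audited write, and develwarn when the matching lock is not held. Distilled into a standalone sketch (names here are placeholders, not part of this module):

    import weakref

    def makeward(repo, origaudit, lockattr):
        rref = weakref.ref(repo)          # avoid a repo <-> vfs cycle
        def audit(path, mode=None):
            ret = origaudit(path, mode=mode)
            r = rref()
            if r is not None and mode not in (None, 'r', 'rb'):
                if r._currentlock(getattr(r, lockattr)) is None:
                    r.ui.develwarn('write with no lock: "%s"' % path)
            return ret
        return audit
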
533 562 def close(self):
534 563 self._writecaches()
535 564
536 565 def _loadextensions(self):
537 566 extensions.loadall(self.ui)
538 567
539 568 def _writecaches(self):
540 569 if self._revbranchcache:
541 570 self._revbranchcache.write()
542 571
543 572 def _restrictcapabilities(self, caps):
544 573 if self.ui.configbool('experimental', 'bundle2-advertise'):
545 574 caps = set(caps)
546 575 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
547 576 caps.add('bundle2=' + urlreq.quote(capsblob))
548 577 return caps
549 578
550 579 def _applyopenerreqs(self):
551 580 self.svfs.options = dict((r, 1) for r in self.requirements
552 581 if r in self.openerreqs)
553 582 # experimental config: format.chunkcachesize
554 583 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
555 584 if chunkcachesize is not None:
556 585 self.svfs.options['chunkcachesize'] = chunkcachesize
557 586 # experimental config: format.maxchainlen
558 587 maxchainlen = self.ui.configint('format', 'maxchainlen')
559 588 if maxchainlen is not None:
560 589 self.svfs.options['maxchainlen'] = maxchainlen
561 590 # experimental config: format.manifestcachesize
562 591 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
563 592 if manifestcachesize is not None:
564 593 self.svfs.options['manifestcachesize'] = manifestcachesize
565 594 # experimental config: format.aggressivemergedeltas
566 595 aggressivemergedeltas = self.ui.configbool('format',
567 596 'aggressivemergedeltas')
568 597 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
569 598 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
570 599 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
571 600 if 0 <= chainspan:
572 601 self.svfs.options['maxdeltachainspan'] = chainspan
573 602
574 603 for r in self.requirements:
575 604 if r.startswith('exp-compression-'):
576 605 self.svfs.options['compengine'] = r[len('exp-compression-'):]
577 606
578 607 # TODO move "revlogv2" to openerreqs once finalized.
579 608 if REVLOGV2_REQUIREMENT in self.requirements:
580 609 self.svfs.options['revlogv2'] = True
581 610
582 611 def _writerequirements(self):
583 612 scmutil.writerequires(self.vfs, self.requirements)
584 613
585 614 def _checknested(self, path):
586 615 """Determine if path is a legal nested repository."""
587 616 if not path.startswith(self.root):
588 617 return False
589 618 subpath = path[len(self.root) + 1:]
590 619 normsubpath = util.pconvert(subpath)
591 620
592 621 # XXX: Checking against the current working copy is wrong in
593 622 # the sense that it can reject things like
594 623 #
595 624 # $ hg cat -r 10 sub/x.txt
596 625 #
597 626 # if sub/ is no longer a subrepository in the working copy
598 627 # parent revision.
599 628 #
600 629 # However, it can of course also allow things that would have
601 630 # been rejected before, such as the above cat command if sub/
602 631 # is a subrepository now, but was a normal directory before.
603 632 # The old path auditor would have rejected by mistake since it
604 633 # panics when it sees sub/.hg/.
605 634 #
606 635 # All in all, checking against the working copy seems sensible
607 636 # since we want to prevent access to nested repositories on
608 637 # the filesystem *now*.
609 638 ctx = self[None]
610 639 parts = util.splitpath(subpath)
611 640 while parts:
612 641 prefix = '/'.join(parts)
613 642 if prefix in ctx.substate:
614 643 if prefix == normsubpath:
615 644 return True
616 645 else:
617 646 sub = ctx.sub(prefix)
618 647 return sub.checknested(subpath[len(prefix) + 1:])
619 648 else:
620 649 parts.pop()
621 650 return False
622 651
623 652 def peer(self):
624 653 return localpeer(self) # not cached to avoid reference cycle
625 654
626 655 def unfiltered(self):
627 656 """Return unfiltered version of the repository
628 657
629 658 Intended to be overwritten by filtered repo."""
630 659 return self
631 660
632 661 def filtered(self, name):
633 662 """Return a filtered version of a repository"""
634 663 # Python <3.4 easily leaks types via __mro__. See
635 664 # https://bugs.python.org/issue17950. We cache dynamically
636 665 # created types so this method doesn't leak on every
637 666 # invocation.
638 667
639 668 key = self.unfiltered().__class__
640 669 if key not in self._filteredrepotypes:
641 670 # Build a new type with the repoview mixin and the base
642 671 # class of this repo. Give it a name containing the
643 672 # filter name to aid debugging.
644 673 bases = (repoview.repoview, key)
645 674 cls = type(r'%sfilteredrepo' % name, bases, {})
646 675 self._filteredrepotypes[key] = cls
647 676
648 677 return self._filteredrepotypes[key](self, name)
649 678
650 679 @repofilecache('bookmarks', 'bookmarks.current')
651 680 def _bookmarks(self):
652 681 return bookmarks.bmstore(self)
653 682
654 683 @property
655 684 def _activebookmark(self):
656 685 return self._bookmarks.active
657 686
658 687 # _phaserevs and _phasesets depend on changelog. what we need is to
659 688 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
660 689 # can't be easily expressed in filecache mechanism.
661 690 @storecache('phaseroots', '00changelog.i')
662 691 def _phasecache(self):
663 692 return phases.phasecache(self, self._phasedefaults)
664 693
665 694 @storecache('obsstore')
666 695 def obsstore(self):
667 696 return obsolete.makestore(self.ui, self)
668 697
669 698 @storecache('00changelog.i')
670 699 def changelog(self):
671 700 return changelog.changelog(self.svfs,
672 701 trypending=txnutil.mayhavepending(self.root))
673 702
674 703 def _constructmanifest(self):
675 704 # This is a temporary function while we migrate from manifest to
676 705 # manifestlog. It allows bundlerepo and unionrepo to intercept the
677 706 # manifest creation.
678 707 return manifest.manifestrevlog(self.svfs)
679 708
680 709 @storecache('00manifest.i')
681 710 def manifestlog(self):
682 711 return manifest.manifestlog(self.svfs, self)
683 712
684 713 @repofilecache('dirstate')
685 714 def dirstate(self):
686 715 sparsematchfn = lambda: sparse.matcher(self)
687 716
688 717 return dirstate.dirstate(self.vfs, self.ui, self.root,
689 718 self._dirstatevalidate, sparsematchfn)
690 719
691 720 def _dirstatevalidate(self, node):
692 721 try:
693 722 self.changelog.rev(node)
694 723 return node
695 724 except error.LookupError:
696 725 if not self._dirstatevalidatewarned:
697 726 self._dirstatevalidatewarned = True
698 727 self.ui.warn(_("warning: ignoring unknown"
699 728 " working parent %s!\n") % short(node))
700 729 return nullid
701 730
702 731 def __getitem__(self, changeid):
703 732 if changeid is None:
704 733 return context.workingctx(self)
705 734 if isinstance(changeid, slice):
706 735 # wdirrev isn't contiguous so the slice shouldn't include it
707 736 return [context.changectx(self, i)
708 737 for i in xrange(*changeid.indices(len(self)))
709 738 if i not in self.changelog.filteredrevs]
710 739 try:
711 740 return context.changectx(self, changeid)
712 741 except error.WdirUnsupported:
713 742 return context.workingctx(self)
714 743
715 744 def __contains__(self, changeid):
716 745 """True if the given changeid exists
717 746
718 747 error.LookupError is raised if an ambiguous node is specified.
719 748 """
720 749 try:
721 750 self[changeid]
722 751 return True
723 752 except error.RepoLookupError:
724 753 return False
725 754
726 755 def __nonzero__(self):
727 756 return True
728 757
729 758 __bool__ = __nonzero__
730 759
731 760 def __len__(self):
732 761 return len(self.changelog)
733 762
734 763 def __iter__(self):
735 764 return iter(self.changelog)
736 765
737 766 def revs(self, expr, *args):
738 767 '''Find revisions matching a revset.
739 768
740 769 The revset is specified as a string ``expr`` that may contain
741 770 %-formatting to escape certain types. See ``revsetlang.formatspec``.
742 771
743 772 Revset aliases from the configuration are not expanded. To expand
744 773 user aliases, consider calling ``scmutil.revrange()`` or
745 774 ``repo.anyrevs([expr], user=True)``.
746 775
747 776 Returns a revset.abstractsmartset, which is a list-like interface
748 777 that contains integer revisions.
749 778 '''
750 779 expr = revsetlang.formatspec(expr, *args)
751 780 m = revset.match(None, expr)
752 781 return m(self)
753 782
754 783 def set(self, expr, *args):
755 784 '''Find revisions matching a revset and emit changectx instances.
756 785
757 786 This is a convenience wrapper around ``revs()`` that iterates the
758 787 result and is a generator of changectx instances.
759 788
760 789 Revset aliases from the configuration are not expanded. To expand
761 790 user aliases, consider calling ``scmutil.revrange()``.
762 791 '''
763 792 for r in self.revs(expr, *args):
764 793 yield self[r]
765 794
766 795 def anyrevs(self, specs, user=False, localalias=None):
767 796 '''Find revisions matching one of the given revsets.
768 797
769 798 Revset aliases from the configuration are not expanded by default. To
770 799 expand user aliases, specify ``user=True``. To provide some local
771 800 definitions overriding user aliases, set ``localalias`` to
772 801 ``{name: definitionstring}``.
773 802 '''
774 803 if user:
775 804 m = revset.matchany(self.ui, specs, repo=self,
776 805 localalias=localalias)
777 806 else:
778 807 m = revset.matchany(None, specs, localalias=localalias)
779 808 return m(self)
780 809
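Typical use of the three revset helpers above (the revset expressions are examples only):

    # revs(): integer revisions; %-formatting escapes user input
    for rev in repo.revs('ancestors(%d) and not public()', 42):
        pass

    # set(): changectx instances instead of integers
    nodes = [ctx.hex() for ctx in repo.set('heads(%s::)', 'default')]

    # anyrevs(): several specs at once, optionally expanding user aliases
    revs = repo.anyrevs(['tip', '.^'], user=True)
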
781 810 def url(self):
782 811 return 'file:' + self.root
783 812
784 813 def hook(self, name, throw=False, **args):
785 814 """Call a hook, passing this repo instance.
786 815
787 816 This a convenience method to aid invoking hooks. Extensions likely
788 817 won't call this unless they have registered a custom hook or are
789 818 replacing code that is expected to call a hook.
790 819 """
791 820 return hook.hook(self.ui, self, name, throw, **args)
792 821
793 822 @filteredpropertycache
794 823 def _tagscache(self):
795 824 '''Returns a tagscache object that contains various tag-related
796 825 caches.'''
797 826
798 827 # This simplifies its cache management by having one decorated
799 828 # function (this one) and the rest simply fetch things from it.
800 829 class tagscache(object):
801 830 def __init__(self):
802 831 # These two define the set of tags for this repository. tags
803 832 # maps tag name to node; tagtypes maps tag name to 'global' or
804 833 # 'local'. (Global tags are defined by .hgtags across all
805 834 # heads, and local tags are defined in .hg/localtags.)
806 835 # They constitute the in-memory cache of tags.
807 836 self.tags = self.tagtypes = None
808 837
809 838 self.nodetagscache = self.tagslist = None
810 839
811 840 cache = tagscache()
812 841 cache.tags, cache.tagtypes = self._findtags()
813 842
814 843 return cache
815 844
816 845 def tags(self):
817 846 '''return a mapping of tag to node'''
818 847 t = {}
819 848 if self.changelog.filteredrevs:
820 849 tags, tt = self._findtags()
821 850 else:
822 851 tags = self._tagscache.tags
823 852 for k, v in tags.iteritems():
824 853 try:
825 854 # ignore tags to unknown nodes
826 855 self.changelog.rev(v)
827 856 t[k] = v
828 857 except (error.LookupError, ValueError):
829 858 pass
830 859 return t
831 860
832 861 def _findtags(self):
833 862 '''Do the hard work of finding tags. Return a pair of dicts
834 863 (tags, tagtypes) where tags maps tag name to node, and tagtypes
835 864 maps tag name to a string like \'global\' or \'local\'.
836 865 Subclasses or extensions are free to add their own tags, but
837 866 should be aware that the returned dicts will be retained for the
838 867 duration of the localrepo object.'''
839 868
840 869 # XXX what tagtype should subclasses/extensions use? Currently
841 870 # mq and bookmarks add tags, but do not set the tagtype at all.
842 871 # Should each extension invent its own tag type? Should there
843 872 # be one tagtype for all such "virtual" tags? Or is the status
844 873 # quo fine?
845 874
846 875
847 876 # map tag name to (node, hist)
848 877 alltags = tagsmod.findglobaltags(self.ui, self)
849 878 # map tag name to tag type
850 879 tagtypes = dict((tag, 'global') for tag in alltags)
851 880
852 881 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
853 882
854 883 # Build the return dicts. Have to re-encode tag names because
855 884 # the tags module always uses UTF-8 (in order not to lose info
856 885 # writing to the cache), but the rest of Mercurial wants them in
857 886 # local encoding.
858 887 tags = {}
859 888 for (name, (node, hist)) in alltags.iteritems():
860 889 if node != nullid:
861 890 tags[encoding.tolocal(name)] = node
862 891 tags['tip'] = self.changelog.tip()
863 892 tagtypes = dict([(encoding.tolocal(name), value)
864 893 for (name, value) in tagtypes.iteritems()])
865 894 return (tags, tagtypes)
866 895
867 896 def tagtype(self, tagname):
868 897 '''
869 898 return the type of the given tag. result can be:
870 899
871 900 'local' : a local tag
872 901 'global' : a global tag
873 902 None : tag does not exist
874 903 '''
875 904
876 905 return self._tagscache.tagtypes.get(tagname)
877 906
878 907 def tagslist(self):
879 908 '''return a list of tags ordered by revision'''
880 909 if not self._tagscache.tagslist:
881 910 l = []
882 911 for t, n in self.tags().iteritems():
883 912 l.append((self.changelog.rev(n), t, n))
884 913 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
885 914
886 915 return self._tagscache.tagslist
887 916
888 917 def nodetags(self, node):
889 918 '''return the tags associated with a node'''
890 919 if not self._tagscache.nodetagscache:
891 920 nodetagscache = {}
892 921 for t, n in self._tagscache.tags.iteritems():
893 922 nodetagscache.setdefault(n, []).append(t)
894 923 for tags in nodetagscache.itervalues():
895 924 tags.sort()
896 925 self._tagscache.nodetagscache = nodetagscache
897 926 return self._tagscache.nodetagscache.get(node, [])
898 927
899 928 def nodebookmarks(self, node):
900 929 """return the list of bookmarks pointing to the specified node"""
901 930 marks = []
902 931 for bookmark, n in self._bookmarks.iteritems():
903 932 if n == node:
904 933 marks.append(bookmark)
905 934 return sorted(marks)
906 935
907 936 def branchmap(self):
908 937 '''returns a dictionary {branch: [branchheads]} with branchheads
909 938 ordered by increasing revision number'''
910 939 branchmap.updatecache(self)
911 940 return self._branchcaches[self.filtername]
912 941
913 942 @unfilteredmethod
914 943 def revbranchcache(self):
915 944 if not self._revbranchcache:
916 945 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
917 946 return self._revbranchcache
918 947
919 948 def branchtip(self, branch, ignoremissing=False):
920 949 '''return the tip node for a given branch
921 950
922 951 If ignoremissing is True, then this method will not raise an error.
923 952 This is helpful for callers that only expect None for a missing branch
924 953 (e.g. namespace).
925 954
926 955 '''
927 956 try:
928 957 return self.branchmap().branchtip(branch)
929 958 except KeyError:
930 959 if not ignoremissing:
931 960 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
932 961 else:
933 962 pass
934 963
935 964 def lookup(self, key):
936 965 return self[key].node()
937 966
938 967 def lookupbranch(self, key, remote=None):
939 968 repo = remote or self
940 969 if key in repo.branchmap():
941 970 return key
942 971
943 972 repo = (remote and remote.local()) and remote or self
944 973 return repo[key].branch()
945 974
946 975 def known(self, nodes):
947 976 cl = self.changelog
948 977 nm = cl.nodemap
949 978 filtered = cl.filteredrevs
950 979 result = []
951 980 for n in nodes:
952 981 r = nm.get(n)
953 982 resp = not (r is None or r in filtered)
954 983 result.append(resp)
955 984 return result
956 985
957 986 def local(self):
958 987 return self
959 988
960 989 def publishing(self):
961 990 # it's safe (and desirable) to trust the publish flag unconditionally
962 991 # so that we don't finalize changes shared between users via ssh or nfs
963 992 return self.ui.configbool('phases', 'publish', untrusted=True)
964 993
965 994 def cancopy(self):
966 995 # so statichttprepo's override of local() works
967 996 if not self.local():
968 997 return False
969 998 if not self.publishing():
970 999 return True
971 1000 # if publishing we can't copy if there is filtered content
972 1001 return not self.filtered('visible').changelog.filteredrevs
973 1002
974 1003 def shared(self):
975 1004 '''the type of shared repository (None if not shared)'''
976 1005 if self.sharedpath != self.path:
977 1006 return 'store'
978 1007 return None
979 1008
980 1009 def wjoin(self, f, *insidef):
981 1010 return self.vfs.reljoin(self.root, f, *insidef)
982 1011
983 1012 def file(self, f):
984 1013 if f[0] == '/':
985 1014 f = f[1:]
986 1015 return filelog.filelog(self.svfs, f)
987 1016
988 1017 def changectx(self, changeid):
989 1018 return self[changeid]
990 1019
991 1020 def setparents(self, p1, p2=nullid):
992 1021 with self.dirstate.parentchange():
993 1022 copies = self.dirstate.setparents(p1, p2)
994 1023 pctx = self[p1]
995 1024 if copies:
996 1025 # Adjust copy records; the dirstate cannot do it, as it
997 1026 # requires access to the parents' manifests. Preserve them
998 1027 # only for entries added to the first parent.
999 1028 for f in copies:
1000 1029 if f not in pctx and copies[f] in pctx:
1001 1030 self.dirstate.copy(copies[f], f)
1002 1031 if p2 == nullid:
1003 1032 for f, s in sorted(self.dirstate.copies().items()):
1004 1033 if f not in pctx and s not in pctx:
1005 1034 self.dirstate.copy(None, f)
1006 1035
1007 1036 def filectx(self, path, changeid=None, fileid=None):
1008 1037 """changeid can be a changeset revision, node, or tag.
1009 1038 fileid can be a file revision or node."""
1010 1039 return context.filectx(self, path, changeid, fileid)
1011 1040
1012 1041 def getcwd(self):
1013 1042 return self.dirstate.getcwd()
1014 1043
1015 1044 def pathto(self, f, cwd=None):
1016 1045 return self.dirstate.pathto(f, cwd)
1017 1046
1018 1047 def _loadfilter(self, filter):
1019 1048 if filter not in self.filterpats:
1020 1049 l = []
1021 1050 for pat, cmd in self.ui.configitems(filter):
1022 1051 if cmd == '!':
1023 1052 continue
1024 1053 mf = matchmod.match(self.root, '', [pat])
1025 1054 fn = None
1026 1055 params = cmd
1027 1056 for name, filterfn in self._datafilters.iteritems():
1028 1057 if cmd.startswith(name):
1029 1058 fn = filterfn
1030 1059 params = cmd[len(name):].lstrip()
1031 1060 break
1032 1061 if not fn:
1033 1062 fn = lambda s, c, **kwargs: util.filter(s, c)
1034 1063 # Wrap old filters not supporting keyword arguments
1035 1064 if not inspect.getargspec(fn)[2]:
1036 1065 oldfn = fn
1037 1066 fn = lambda s, c, **kwargs: oldfn(s, c)
1038 1067 l.append((mf, fn, params))
1039 1068 self.filterpats[filter] = l
1040 1069 return self.filterpats[filter]
1041 1070
1042 1071 def _filter(self, filterpats, filename, data):
1043 1072 for mf, fn, cmd in filterpats:
1044 1073 if mf(filename):
1045 1074 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1046 1075 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1047 1076 break
1048 1077
1049 1078 return data
1050 1079
1051 1080 @unfilteredpropertycache
1052 1081 def _encodefilterpats(self):
1053 1082 return self._loadfilter('encode')
1054 1083
1055 1084 @unfilteredpropertycache
1056 1085 def _decodefilterpats(self):
1057 1086 return self._loadfilter('decode')
1058 1087
1059 1088 def adddatafilter(self, name, filter):
1060 1089 self._datafilters[name] = filter
1061 1090
1062 1091 def wread(self, filename):
1063 1092 if self.wvfs.islink(filename):
1064 1093 data = self.wvfs.readlink(filename)
1065 1094 else:
1066 1095 data = self.wvfs.read(filename)
1067 1096 return self._filter(self._encodefilterpats, filename, data)
1068 1097
1069 1098 def wwrite(self, filename, data, flags, backgroundclose=False):
1070 1099 """write ``data`` into ``filename`` in the working directory
1071 1100
1072 1101 This returns the length of the written (possibly decoded) data.
1073 1102 """
1074 1103 data = self._filter(self._decodefilterpats, filename, data)
1075 1104 if 'l' in flags:
1076 1105 self.wvfs.symlink(data, filename)
1077 1106 else:
1078 1107 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1079 1108 if 'x' in flags:
1080 1109 self.wvfs.setflags(filename, False, True)
1081 1110 return len(data)
1082 1111
1083 1112 def wwritedata(self, filename, data):
1084 1113 return self._filter(self._decodefilterpats, filename, data)
1085 1114
1086 1115 def currenttransaction(self):
1087 1116 """return the current transaction or None if non exists"""
1088 1117 if self._transref:
1089 1118 tr = self._transref()
1090 1119 else:
1091 1120 tr = None
1092 1121
1093 1122 if tr and tr.running():
1094 1123 return tr
1095 1124 return None
1096 1125
1097 1126 def transaction(self, desc, report=None):
1098 1127 if (self.ui.configbool('devel', 'all-warnings')
1099 1128 or self.ui.configbool('devel', 'check-locks')):
1100 1129 if self._currentlock(self._lockref) is None:
1101 1130 raise error.ProgrammingError('transaction requires locking')
1102 1131 tr = self.currenttransaction()
1103 1132 if tr is not None:
1104 1133 scmutil.registersummarycallback(self, tr, desc)
1105 1134 return tr.nest()
1106 1135
1107 1136 # abort here if the journal already exists
1108 1137 if self.svfs.exists("journal"):
1109 1138 raise error.RepoError(
1110 1139 _("abandoned transaction found"),
1111 1140 hint=_("run 'hg recover' to clean up transaction"))
1112 1141
1113 1142 idbase = "%.40f#%f" % (random.random(), time.time())
1114 1143 ha = hex(hashlib.sha1(idbase).digest())
1115 1144 txnid = 'TXN:' + ha
1116 1145 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1117 1146
1118 1147 self._writejournal(desc)
1119 1148 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1120 1149 if report:
1121 1150 rp = report
1122 1151 else:
1123 1152 rp = self.ui.warn
1124 1153 vfsmap = {'plain': self.vfs} # root of .hg/
1125 1154 # we must avoid cyclic reference between repo and transaction.
1126 1155 reporef = weakref.ref(self)
1127 1156 # Code to track tag movement
1128 1157 #
1129 1158 # Since tags are all handled as file content, it is actually quite hard
1130 1159 # to track these movements from a code perspective. So we fall back to
1131 1160 # tracking at the repository level. One could envision tracking changes
1132 1161 # to the '.hgtags' file through changegroup application, but that fails to
1133 1162 # cope with cases where a transaction exposes new heads without a changegroup
1134 1163 # being involved (e.g. phase movement).
1135 1164 #
1136 1165 # For now, we gate the feature behind a flag since this likely comes
1137 1166 # with performance impacts. The current code runs more often than needed
1138 1167 # and does not use caches as much as it could. The current focus is on
1139 1168 # the behavior of the feature so we disable it by default. The flag
1140 1169 # will be removed when we are happy with the performance impact.
1141 1170 #
1142 1171 # Once this feature is no longer experimental move the following
1143 1172 # documentation to the appropriate help section:
1144 1173 #
1145 1174 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1146 1175 # tags (new or changed or deleted tags). In addition the details of
1147 1176 # these changes are made available in a file at:
1148 1177 # ``REPOROOT/.hg/changes/tags.changes``.
1149 1178 # Make sure you check for HG_TAG_MOVED before reading that file as it
1150 1179 # might exist from a previous transaction even if no tags were touched
1151 1180 # in this one. Changes are recorded in a line-based format::
1152 1181 #
1153 1182 # <action> <hex-node> <tag-name>\n
1154 1183 #
1155 1184 # Actions are defined as follows:
1156 1185 # "-R": tag is removed,
1157 1186 # "+A": tag is added,
1158 1187 # "-M": tag is moved (old value),
1159 1188 # "+M": tag is moved (new value),
1160 1189 tracktags = lambda x: None
1161 1190 # experimental config: experimental.hook-track-tags
1162 1191 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1163 1192 if desc != 'strip' and shouldtracktags:
1164 1193 oldheads = self.changelog.headrevs()
1165 1194 def tracktags(tr2):
1166 1195 repo = reporef()
1167 1196 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1168 1197 newheads = repo.changelog.headrevs()
1169 1198 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1170 1199 # note: we compare lists here.
1171 1200 # As we do it only once, building a set would not be cheaper.
1172 1201 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1173 1202 if changes:
1174 1203 tr2.hookargs['tag_moved'] = '1'
1175 1204 with repo.vfs('changes/tags.changes', 'w',
1176 1205 atomictemp=True) as changesfile:
1177 1206 # note: we do not register the file with the transaction
1178 1207 # because we need it to still exist when the transaction
1179 1208 # is closed (for txnclose hooks)
1180 1209 tagsmod.writediff(changesfile, changes)
1181 1210 def validate(tr2):
1182 1211 """will run pre-closing hooks"""
1183 1212 # XXX the transaction API is a bit lacking here so we take a hacky
1184 1213 # path for now
1185 1214 #
1186 1215 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1187 1216 # dict is copied before these run. In addition we need the data
1188 1217 # available to in-memory hooks too.
1189 1218 #
1190 1219 # Moreover, we also need to make sure this runs before txnclose
1191 1220 # hooks and there is no "pending" mechanism that would execute
1192 1221 # logic only if hooks are about to run.
1193 1222 #
1194 1223 # Fixing this limitation of the transaction is also needed to track
1195 1224 # other families of changes (bookmarks, phases, obsolescence).
1196 1225 #
1197 1226 # This will have to be fixed before we remove the experimental
1198 1227 # gating.
1199 1228 tracktags(tr2)
1200 1229 reporef().hook('pretxnclose', throw=True,
1201 1230 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1202 1231 def releasefn(tr, success):
1203 1232 repo = reporef()
1204 1233 if success:
1205 1234 # this should be explicitly invoked here, because
1206 1235 # in-memory changes aren't written out at closing
1207 1236 # transaction, if tr.addfilegenerator (via
1208 1237 # dirstate.write or so) isn't invoked while
1209 1238 # transaction running
1210 1239 repo.dirstate.write(None)
1211 1240 else:
1212 1241 # discard all changes (including ones already written
1213 1242 # out) in this transaction
1214 1243 repo.dirstate.restorebackup(None, 'journal.dirstate')
1215 1244
1216 1245 repo.invalidate(clearfilecache=True)
1217 1246
1218 1247 tr = transaction.transaction(rp, self.svfs, vfsmap,
1219 1248 "journal",
1220 1249 "undo",
1221 1250 aftertrans(renames),
1222 1251 self.store.createmode,
1223 1252 validator=validate,
1224 1253 releasefn=releasefn,
1225 1254 checkambigfiles=_cachedfiles)
1226 1255 tr.changes['revs'] = set()
1227 1256 tr.changes['obsmarkers'] = set()
1228 1257 tr.changes['phases'] = {}
1229 1258 tr.changes['bookmarks'] = {}
1230 1259
1231 1260 tr.hookargs['txnid'] = txnid
1232 1261 # note: writing the fncache only during finalize means that the file is
1233 1262 # outdated when running hooks. As fncache is used for streaming clone,
1234 1263 # this is not expected to break anything that happens during the hooks.
1235 1264 tr.addfinalize('flush-fncache', self.store.write)
1236 1265 def txnclosehook(tr2):
1237 1266 """To be run if transaction is successful, will schedule a hook run
1238 1267 """
1239 1268 # Don't reference tr2 in hook() so we don't hold a reference.
1240 1269 # This reduces memory consumption when there are multiple
1241 1270 # transactions per lock. This can likely go away if issue5045
1242 1271 # fixes the function accumulation.
1243 1272 hookargs = tr2.hookargs
1244 1273
1245 1274 def hook():
1246 1275 reporef().hook('txnclose', throw=False, txnname=desc,
1247 1276 **pycompat.strkwargs(hookargs))
1248 1277 reporef()._afterlock(hook)
1249 1278 tr.addfinalize('txnclose-hook', txnclosehook)
1250 1279 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1251 1280 def txnaborthook(tr2):
1252 1281 """To be run if transaction is aborted
1253 1282 """
1254 1283 reporef().hook('txnabort', throw=False, txnname=desc,
1255 1284 **tr2.hookargs)
1256 1285 tr.addabort('txnabort-hook', txnaborthook)
1257 1286 # avoid eager cache invalidation. in-memory data should be identical
1258 1287 # to stored data if transaction has no error.
1259 1288 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1260 1289 self._transref = weakref.ref(tr)
1261 1290 scmutil.registersummarycallback(self, tr, desc)
1262 1291 return tr
1263 1292
1264 1293 def _journalfiles(self):
1265 1294 return ((self.svfs, 'journal'),
1266 1295 (self.vfs, 'journal.dirstate'),
1267 1296 (self.vfs, 'journal.branch'),
1268 1297 (self.vfs, 'journal.desc'),
1269 1298 (self.vfs, 'journal.bookmarks'),
1270 1299 (self.svfs, 'journal.phaseroots'))
1271 1300
1272 1301 def undofiles(self):
1273 1302 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1274 1303
1275 1304 @unfilteredmethod
1276 1305 def _writejournal(self, desc):
1277 1306 self.dirstate.savebackup(None, 'journal.dirstate')
1278 1307 self.vfs.write("journal.branch",
1279 1308 encoding.fromlocal(self.dirstate.branch()))
1280 1309 self.vfs.write("journal.desc",
1281 1310 "%d\n%s\n" % (len(self), desc))
1282 1311 self.vfs.write("journal.bookmarks",
1283 1312 self.vfs.tryread("bookmarks"))
1284 1313 self.svfs.write("journal.phaseroots",
1285 1314 self.svfs.tryread("phaseroots"))
1286 1315
1287 1316 def recover(self):
1288 1317 with self.lock():
1289 1318 if self.svfs.exists("journal"):
1290 1319 self.ui.status(_("rolling back interrupted transaction\n"))
1291 1320 vfsmap = {'': self.svfs,
1292 1321 'plain': self.vfs,}
1293 1322 transaction.rollback(self.svfs, vfsmap, "journal",
1294 1323 self.ui.warn,
1295 1324 checkambigfiles=_cachedfiles)
1296 1325 self.invalidate()
1297 1326 return True
1298 1327 else:
1299 1328 self.ui.warn(_("no interrupted transaction available\n"))
1300 1329 return False
1301 1330
1302 1331 def rollback(self, dryrun=False, force=False):
1303 1332 wlock = lock = dsguard = None
1304 1333 try:
1305 1334 wlock = self.wlock()
1306 1335 lock = self.lock()
1307 1336 if self.svfs.exists("undo"):
1308 1337 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1309 1338
1310 1339 return self._rollback(dryrun, force, dsguard)
1311 1340 else:
1312 1341 self.ui.warn(_("no rollback information available\n"))
1313 1342 return 1
1314 1343 finally:
1315 1344 release(dsguard, lock, wlock)
1316 1345
1317 1346 @unfilteredmethod # Until we get smarter cache management
1318 1347 def _rollback(self, dryrun, force, dsguard):
1319 1348 ui = self.ui
1320 1349 try:
1321 1350 args = self.vfs.read('undo.desc').splitlines()
1322 1351 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1323 1352 if len(args) >= 3:
1324 1353 detail = args[2]
1325 1354 oldtip = oldlen - 1
1326 1355
1327 1356 if detail and ui.verbose:
1328 1357 msg = (_('repository tip rolled back to revision %d'
1329 1358 ' (undo %s: %s)\n')
1330 1359 % (oldtip, desc, detail))
1331 1360 else:
1332 1361 msg = (_('repository tip rolled back to revision %d'
1333 1362 ' (undo %s)\n')
1334 1363 % (oldtip, desc))
1335 1364 except IOError:
1336 1365 msg = _('rolling back unknown transaction\n')
1337 1366 desc = None
1338 1367
1339 1368 if not force and self['.'] != self['tip'] and desc == 'commit':
1340 1369 raise error.Abort(
1341 1370 _('rollback of last commit while not checked out '
1342 1371 'may lose data'), hint=_('use -f to force'))
1343 1372
1344 1373 ui.status(msg)
1345 1374 if dryrun:
1346 1375 return 0
1347 1376
1348 1377 parents = self.dirstate.parents()
1349 1378 self.destroying()
1350 1379 vfsmap = {'plain': self.vfs, '': self.svfs}
1351 1380 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1352 1381 checkambigfiles=_cachedfiles)
1353 1382 if self.vfs.exists('undo.bookmarks'):
1354 1383 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1355 1384 if self.svfs.exists('undo.phaseroots'):
1356 1385 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1357 1386 self.invalidate()
1358 1387
1359 1388 parentgone = (parents[0] not in self.changelog.nodemap or
1360 1389 parents[1] not in self.changelog.nodemap)
1361 1390 if parentgone:
1362 1391 # prevent dirstateguard from overwriting already restored one
1363 1392 dsguard.close()
1364 1393
1365 1394 self.dirstate.restorebackup(None, 'undo.dirstate')
1366 1395 try:
1367 1396 branch = self.vfs.read('undo.branch')
1368 1397 self.dirstate.setbranch(encoding.tolocal(branch))
1369 1398 except IOError:
1370 1399 ui.warn(_('named branch could not be reset: '
1371 1400 'current branch is still \'%s\'\n')
1372 1401 % self.dirstate.branch())
1373 1402
1374 1403 parents = tuple([p.rev() for p in self[None].parents()])
1375 1404 if len(parents) > 1:
1376 1405 ui.status(_('working directory now based on '
1377 1406 'revisions %d and %d\n') % parents)
1378 1407 else:
1379 1408 ui.status(_('working directory now based on '
1380 1409 'revision %d\n') % parents)
1381 1410 mergemod.mergestate.clean(self, self['.'].node())
1382 1411
1383 1412 # TODO: if we know which new heads may result from this rollback, pass
1384 1413 # them to destroy(), which will prevent the branchhead cache from being
1385 1414 # invalidated.
1386 1415 self.destroyed()
1387 1416 return 0
1388 1417
1389 1418 def _buildcacheupdater(self, newtransaction):
1390 1419 """called during transaction to build the callback updating cache
1391 1420
1392 1421 Lives on the repository to help extensions that might want to augment
1393 1422 this logic. For this purpose, the created transaction is passed to the
1394 1423 method.
1395 1424 """
1396 1425 # we must avoid cyclic reference between repo and transaction.
1397 1426 reporef = weakref.ref(self)
1398 1427 def updater(tr):
1399 1428 repo = reporef()
1400 1429 repo.updatecaches(tr)
1401 1430 return updater
1402 1431
1403 1432 @unfilteredmethod
1404 1433 def updatecaches(self, tr=None):
1405 1434 """warm appropriate caches
1406 1435
1407 1436 If this function is called after a transaction closed, the transaction
1408 1437 will be available in the 'tr' argument. This can be used to selectively
1409 1438 update caches relevant to the changes in that transaction.
1410 1439 """
1411 1440 if tr is not None and tr.hookargs.get('source') == 'strip':
1412 1441 # During strip, many caches are invalid but
1413 1442 # later call to `destroyed` will refresh them.
1414 1443 return
1415 1444
1416 1445 if tr is None or tr.changes['revs']:
1417 1446 # updating the unfiltered branchmap should refresh all the others,
1418 1447 self.ui.debug('updating the branch cache\n')
1419 1448 branchmap.updatecache(self.filtered('served'))
1420 1449
1421 1450 def invalidatecaches(self):
1422 1451
1423 1452 if '_tagscache' in vars(self):
1424 1453 # can't use delattr on proxy
1425 1454 del self.__dict__['_tagscache']
1426 1455
1427 1456 self.unfiltered()._branchcaches.clear()
1428 1457 self.invalidatevolatilesets()
1429 1458 self._sparsesignaturecache.clear()
1430 1459
1431 1460 def invalidatevolatilesets(self):
1432 1461 self.filteredrevcache.clear()
1433 1462 obsolete.clearobscaches(self)
1434 1463
1435 1464 def invalidatedirstate(self):
1436 1465 '''Invalidates the dirstate, causing the next call to dirstate
1437 1466 to check if it was modified since the last time it was read,
1438 1467 rereading it if it has.
1439 1468
1440 1469 This is different from dirstate.invalidate() in that it doesn't always
1441 1470 reread the dirstate. Use dirstate.invalidate() if you want to
1442 1471 explicitly read the dirstate again (i.e. restoring it to a previous
1443 1472 known good state).'''
1444 1473 if hasunfilteredcache(self, 'dirstate'):
1445 1474 for k in self.dirstate._filecache:
1446 1475 try:
1447 1476 delattr(self.dirstate, k)
1448 1477 except AttributeError:
1449 1478 pass
1450 1479 delattr(self.unfiltered(), 'dirstate')
1451 1480
1452 1481 def invalidate(self, clearfilecache=False):
1453 1482 '''Invalidates both store and non-store parts other than dirstate
1454 1483
1455 1484 If a transaction is running, invalidation of store is omitted,
1456 1485 because discarding in-memory changes might cause inconsistency
1457 1486 (e.g. incomplete fncache causes unintentional failure, but
1458 1487 redundant one doesn't).
1459 1488 '''
1460 1489 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1461 1490 for k in list(self._filecache.keys()):
1462 1491 # dirstate is invalidated separately in invalidatedirstate()
1463 1492 if k == 'dirstate':
1464 1493 continue
1465 1494 if (k == 'changelog' and
1466 1495 self.currenttransaction() and
1467 1496 self.changelog._delayed):
1468 1497 # The changelog object may store unwritten revisions. We don't
1469 1498 # want to lose them.
1470 1499 # TODO: Solve the problem instead of working around it.
1471 1500 continue
1472 1501
1473 1502 if clearfilecache:
1474 1503 del self._filecache[k]
1475 1504 try:
1476 1505 delattr(unfiltered, k)
1477 1506 except AttributeError:
1478 1507 pass
1479 1508 self.invalidatecaches()
1480 1509 if not self.currenttransaction():
1481 1510 # TODO: Changing contents of store outside transaction
1482 1511 # causes inconsistency. We should make in-memory store
1483 1512 # changes detectable, and abort if changed.
1484 1513 self.store.invalidatecaches()
1485 1514
1486 1515 def invalidateall(self):
1487 1516 '''Fully invalidates both store and non-store parts, causing the
1488 1517 subsequent operation to reread any outside changes.'''
1489 1518 # extensions should hook this to invalidate their caches
1490 1519 self.invalidate()
1491 1520 self.invalidatedirstate()
1492 1521
1493 1522 @unfilteredmethod
1494 1523 def _refreshfilecachestats(self, tr):
1495 1524 """Reload stats of cached files so that they are flagged as valid"""
1496 1525 for k, ce in self._filecache.items():
1497 1526 if k == 'dirstate' or k not in self.__dict__:
1498 1527 continue
1499 1528 ce.refresh()
1500 1529
1501 1530 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1502 1531 inheritchecker=None, parentenvvar=None):
1503 1532 parentlock = None
1504 1533 # the contents of parentenvvar are used by the underlying lock to
1505 1534 # determine whether it can be inherited
1506 1535 if parentenvvar is not None:
1507 1536 parentlock = encoding.environ.get(parentenvvar)
1508 1537 try:
1509 1538 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1510 1539 acquirefn=acquirefn, desc=desc,
1511 1540 inheritchecker=inheritchecker,
1512 1541 parentlock=parentlock)
1513 1542 except error.LockHeld as inst:
1514 1543 if not wait:
1515 1544 raise
1516 1545 # show more details for new-style locks
1517 1546 if ':' in inst.locker:
1518 1547 host, pid = inst.locker.split(":", 1)
1519 1548 self.ui.warn(
1520 1549 _("waiting for lock on %s held by process %r "
1521 1550 "on host %r\n") % (desc, pid, host))
1522 1551 else:
1523 1552 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1524 1553 (desc, inst.locker))
1525 1554 # default to 600 seconds timeout
1526 1555 l = lockmod.lock(vfs, lockname,
1527 1556 int(self.ui.config("ui", "timeout")),
1528 1557 releasefn=releasefn, acquirefn=acquirefn,
1529 1558 desc=desc)
1530 1559 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1531 1560 return l
1532 1561
1533 1562 def _afterlock(self, callback):
1534 1563 """add a callback to be run when the repository is fully unlocked
1535 1564
1536 1565 The callback will be executed when the outermost lock is released
1537 1566 (with wlock being higher level than 'lock')."""
1538 1567 for ref in (self._wlockref, self._lockref):
1539 1568 l = ref and ref()
1540 1569 if l and l.held:
1541 1570 l.postrelease.append(callback)
1542 1571 break
1543 1572 else: # no lock has been found.
1544 1573 callback()
1545 1574
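# A sketch of deferring work until the repository is fully unlocked, the same
# mechanism the commit hook below relies on ('repo' is assumed to be a
# localrepository that may or may not currently hold a lock):
#
#     def _deferred():
#         repo.ui.status('all locks released\n')
#
#     repo._afterlock(_deferred)  # runs immediately if no lock is held
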
1546 1575 def lock(self, wait=True):
1547 1576 '''Lock the repository store (.hg/store) and return a weak reference
1548 1577 to the lock. Use this before modifying the store (e.g. committing or
1549 1578 stripping). If you are opening a transaction, get a lock as well.
1550 1579
1551 1580 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1552 1581 'wlock' first to avoid a dead-lock hazard.'''
1553 1582 l = self._currentlock(self._lockref)
1554 1583 if l is not None:
1555 1584 l.lock()
1556 1585 return l
1557 1586
1558 1587 l = self._lock(self.svfs, "lock", wait, None,
1559 1588 self.invalidate, _('repository %s') % self.origroot)
1560 1589 self._lockref = weakref.ref(l)
1561 1590 return l
1562 1591
1563 1592 def _wlockchecktransaction(self):
1564 1593 if self.currenttransaction() is not None:
1565 1594 raise error.LockInheritanceContractViolation(
1566 1595 'wlock cannot be inherited in the middle of a transaction')
1567 1596
1568 1597 def wlock(self, wait=True):
1569 1598 '''Lock the non-store parts of the repository (everything under
1570 1599 .hg except .hg/store) and return a weak reference to the lock.
1571 1600
1572 1601 Use this before modifying files in .hg.
1573 1602
1574 1603 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1575 1604 'wlock' first to avoid a dead-lock hazard.'''
1576 1605 l = self._wlockref and self._wlockref()
1577 1606 if l is not None and l.held:
1578 1607 l.lock()
1579 1608 return l
1580 1609
1581 1610 # We do not need to check for non-waiting lock acquisition. Such
1582 1611 # acquisition would not cause a dead-lock as it would just fail.
1583 1612 if wait and (self.ui.configbool('devel', 'all-warnings')
1584 1613 or self.ui.configbool('devel', 'check-locks')):
1585 1614 if self._currentlock(self._lockref) is not None:
1586 1615 self.ui.develwarn('"wlock" acquired after "lock"')
1587 1616
1588 1617 def unlock():
1589 1618 if self.dirstate.pendingparentchange():
1590 1619 self.dirstate.invalidate()
1591 1620 else:
1592 1621 self.dirstate.write(None)
1593 1622
1594 1623 self._filecache['dirstate'].refresh()
1595 1624
1596 1625 l = self._lock(self.vfs, "wlock", wait, unlock,
1597 1626 self.invalidatedirstate, _('working directory of %s') %
1598 1627 self.origroot,
1599 1628 inheritchecker=self._wlockchecktransaction,
1600 1629 parentenvvar='HG_WLOCK_LOCKER')
1601 1630 self._wlockref = weakref.ref(l)
1602 1631 return l
1603 1632
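# A sketch of the documented lock ordering: always take 'wlock' before 'lock'
# to avoid the dead-lock hazard mentioned in both docstrings ('repo' assumed):
#
#     from mercurial import lock as lockmod
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()  # non-store parts first
#         lock = repo.lock()    # then the store
#         # ... modify the store and the working directory ...
#     finally:
#         lockmod.release(lock, wlock)  # release in reverse order
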
1604 1633 def _currentlock(self, lockref):
1605 1634 """Returns the lock if it's held, or None if it's not."""
1606 1635 if lockref is None:
1607 1636 return None
1608 1637 l = lockref()
1609 1638 if l is None or not l.held:
1610 1639 return None
1611 1640 return l
1612 1641
1613 1642 def currentwlock(self):
1614 1643 """Returns the wlock if it's held, or None if it's not."""
1615 1644 return self._currentlock(self._wlockref)
1616 1645
1617 1646 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1618 1647 """
1619 1648 commit an individual file as part of a larger transaction
1620 1649 """
1621 1650
1622 1651 fname = fctx.path()
1623 1652 fparent1 = manifest1.get(fname, nullid)
1624 1653 fparent2 = manifest2.get(fname, nullid)
1625 1654 if isinstance(fctx, context.filectx):
1626 1655 node = fctx.filenode()
1627 1656 if node in [fparent1, fparent2]:
1628 1657 self.ui.debug('reusing %s filelog entry\n' % fname)
1629 1658 if manifest1.flags(fname) != fctx.flags():
1630 1659 changelist.append(fname)
1631 1660 return node
1632 1661
1633 1662 flog = self.file(fname)
1634 1663 meta = {}
1635 1664 copy = fctx.renamed()
1636 1665 if copy and copy[0] != fname:
1637 1666 # Mark the new revision of this file as a copy of another
1638 1667 # file. This copy data will effectively act as a parent
1639 1668 # of this new revision. If this is a merge, the first
1640 1669 # parent will be the nullid (meaning "look up the copy data")
1641 1670 # and the second one will be the other parent. For example:
1642 1671 #
1643 1672 # 0 --- 1 --- 3 rev1 changes file foo
1644 1673 # \ / rev2 renames foo to bar and changes it
1645 1674 # \- 2 -/ rev3 should have bar with all changes and
1646 1675 # should record that bar descends from
1647 1676 # bar in rev2 and foo in rev1
1648 1677 #
1649 1678 # this allows this merge to succeed:
1650 1679 #
1651 1680 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1652 1681 # \ / merging rev3 and rev4 should use bar@rev2
1653 1682 # \- 2 --- 4 as the merge base
1654 1683 #
1655 1684
1656 1685 cfname = copy[0]
1657 1686 crev = manifest1.get(cfname)
1658 1687 newfparent = fparent2
1659 1688
1660 1689 if manifest2: # branch merge
1661 1690 if fparent2 == nullid or crev is None: # copied on remote side
1662 1691 if cfname in manifest2:
1663 1692 crev = manifest2[cfname]
1664 1693 newfparent = fparent1
1665 1694
1666 1695 # Here, we used to search backwards through history to try to find
1667 1696 # where the file copy came from if the source of a copy was not in
1668 1697 # the parent directory. However, this doesn't actually make sense to
1669 1698 # do (what does a copy from something not in your working copy even
1670 1699 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1671 1700 # the user that copy information was dropped, so if they didn't
1672 1701 # expect this outcome it can be fixed, but this is the correct
1673 1702 # behavior in this circumstance.
1674 1703
1675 1704 if crev:
1676 1705 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1677 1706 meta["copy"] = cfname
1678 1707 meta["copyrev"] = hex(crev)
1679 1708 fparent1, fparent2 = nullid, newfparent
1680 1709 else:
1681 1710 self.ui.warn(_("warning: can't find ancestor for '%s' "
1682 1711 "copied from '%s'!\n") % (fname, cfname))
1683 1712
1684 1713 elif fparent1 == nullid:
1685 1714 fparent1, fparent2 = fparent2, nullid
1686 1715 elif fparent2 != nullid:
1687 1716 # is one parent an ancestor of the other?
1688 1717 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1689 1718 if fparent1 in fparentancestors:
1690 1719 fparent1, fparent2 = fparent2, nullid
1691 1720 elif fparent2 in fparentancestors:
1692 1721 fparent2 = nullid
1693 1722
1694 1723 # is the file changed?
1695 1724 text = fctx.data()
1696 1725 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1697 1726 changelist.append(fname)
1698 1727 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1699 1728 # are just the flags changed during merge?
1700 1729 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1701 1730 changelist.append(fname)
1702 1731
1703 1732 return fparent1
1704 1733
1705 1734 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1706 1735 """check for commit arguments that aren't committable"""
1707 1736 if match.isexact() or match.prefix():
1708 1737 matched = set(status.modified + status.added + status.removed)
1709 1738
1710 1739 for f in match.files():
1711 1740 f = self.dirstate.normalize(f)
1712 1741 if f == '.' or f in matched or f in wctx.substate:
1713 1742 continue
1714 1743 if f in status.deleted:
1715 1744 fail(f, _('file not found!'))
1716 1745 if f in vdirs: # visited directory
1717 1746 d = f + '/'
1718 1747 for mf in matched:
1719 1748 if mf.startswith(d):
1720 1749 break
1721 1750 else:
1722 1751 fail(f, _("no match under directory!"))
1723 1752 elif f not in self.dirstate:
1724 1753 fail(f, _("file not tracked!"))
1725 1754
1726 1755 @unfilteredmethod
1727 1756 def commit(self, text="", user=None, date=None, match=None, force=False,
1728 1757 editor=False, extra=None):
1729 1758 """Add a new revision to current repository.
1730 1759
1731 1760 Revision information is gathered from the working directory,
1732 1761 match can be used to filter the committed files. If editor is
1733 1762 supplied, it is called to get a commit message.
1734 1763 """
1735 1764 if extra is None:
1736 1765 extra = {}
1737 1766
1738 1767 def fail(f, msg):
1739 1768 raise error.Abort('%s: %s' % (f, msg))
1740 1769
1741 1770 if not match:
1742 1771 match = matchmod.always(self.root, '')
1743 1772
1744 1773 if not force:
1745 1774 vdirs = []
1746 1775 match.explicitdir = vdirs.append
1747 1776 match.bad = fail
1748 1777
1749 1778 wlock = lock = tr = None
1750 1779 try:
1751 1780 wlock = self.wlock()
1752 1781 lock = self.lock() # for recent changelog (see issue4368)
1753 1782
1754 1783 wctx = self[None]
1755 1784 merge = len(wctx.parents()) > 1
1756 1785
1757 1786 if not force and merge and not match.always():
1758 1787 raise error.Abort(_('cannot partially commit a merge '
1759 1788 '(do not specify files or patterns)'))
1760 1789
1761 1790 status = self.status(match=match, clean=force)
1762 1791 if force:
1763 1792 status.modified.extend(status.clean) # mq may commit clean files
1764 1793
1765 1794 # check subrepos
1766 1795 subs = []
1767 1796 commitsubs = set()
1768 1797 newstate = wctx.substate.copy()
1769 1798 # only manage subrepos and .hgsubstate if .hgsub is present
1770 1799 if '.hgsub' in wctx:
1771 1800 # we'll decide whether to track this ourselves, thanks
1772 1801 for c in status.modified, status.added, status.removed:
1773 1802 if '.hgsubstate' in c:
1774 1803 c.remove('.hgsubstate')
1775 1804
1776 1805 # compare current state to last committed state
1777 1806 # build new substate based on last committed state
1778 1807 oldstate = wctx.p1().substate
1779 1808 for s in sorted(newstate.keys()):
1780 1809 if not match(s):
1781 1810 # ignore working copy, use old state if present
1782 1811 if s in oldstate:
1783 1812 newstate[s] = oldstate[s]
1784 1813 continue
1785 1814 if not force:
1786 1815 raise error.Abort(
1787 1816 _("commit with new subrepo %s excluded") % s)
1788 1817 dirtyreason = wctx.sub(s).dirtyreason(True)
1789 1818 if dirtyreason:
1790 1819 if not self.ui.configbool('ui', 'commitsubrepos'):
1791 1820 raise error.Abort(dirtyreason,
1792 1821 hint=_("use --subrepos for recursive commit"))
1793 1822 subs.append(s)
1794 1823 commitsubs.add(s)
1795 1824 else:
1796 1825 bs = wctx.sub(s).basestate()
1797 1826 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1798 1827 if oldstate.get(s, (None, None, None))[1] != bs:
1799 1828 subs.append(s)
1800 1829
1801 1830 # check for removed subrepos
1802 1831 for p in wctx.parents():
1803 1832 r = [s for s in p.substate if s not in newstate]
1804 1833 subs += [s for s in r if match(s)]
1805 1834 if subs:
1806 1835 if (not match('.hgsub') and
1807 1836 '.hgsub' in (wctx.modified() + wctx.added())):
1808 1837 raise error.Abort(
1809 1838 _("can't commit subrepos without .hgsub"))
1810 1839 status.modified.insert(0, '.hgsubstate')
1811 1840
1812 1841 elif '.hgsub' in status.removed:
1813 1842 # clean up .hgsubstate when .hgsub is removed
1814 1843 if ('.hgsubstate' in wctx and
1815 1844 '.hgsubstate' not in (status.modified + status.added +
1816 1845 status.removed)):
1817 1846 status.removed.insert(0, '.hgsubstate')
1818 1847
1819 1848 # make sure all explicit patterns are matched
1820 1849 if not force:
1821 1850 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1822 1851
1823 1852 cctx = context.workingcommitctx(self, status,
1824 1853 text, user, date, extra)
1825 1854
1826 1855 # internal config: ui.allowemptycommit
1827 1856 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1828 1857 or extra.get('close') or merge or cctx.files()
1829 1858 or self.ui.configbool('ui', 'allowemptycommit'))
1830 1859 if not allowemptycommit:
1831 1860 return None
1832 1861
1833 1862 if merge and cctx.deleted():
1834 1863 raise error.Abort(_("cannot commit merge with missing files"))
1835 1864
1836 1865 ms = mergemod.mergestate.read(self)
1837 1866 mergeutil.checkunresolved(ms)
1838 1867
1839 1868 if editor:
1840 1869 cctx._text = editor(self, cctx, subs)
1841 1870 edited = (text != cctx._text)
1842 1871
1843 1872 # Save commit message in case this transaction gets rolled back
1844 1873 # (e.g. by a pretxncommit hook). Leave the content alone on
1845 1874 # the assumption that the user will use the same editor again.
1846 1875 msgfn = self.savecommitmessage(cctx._text)
1847 1876
1848 1877 # commit subs and write new state
1849 1878 if subs:
1850 1879 for s in sorted(commitsubs):
1851 1880 sub = wctx.sub(s)
1852 1881 self.ui.status(_('committing subrepository %s\n') %
1853 1882 subrepo.subrelpath(sub))
1854 1883 sr = sub.commit(cctx._text, user, date)
1855 1884 newstate[s] = (newstate[s][0], sr)
1856 1885 subrepo.writestate(self, newstate)
1857 1886
1858 1887 p1, p2 = self.dirstate.parents()
1859 1888 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1860 1889 try:
1861 1890 self.hook("precommit", throw=True, parent1=hookp1,
1862 1891 parent2=hookp2)
1863 1892 tr = self.transaction('commit')
1864 1893 ret = self.commitctx(cctx, True)
1865 1894 except: # re-raises
1866 1895 if edited:
1867 1896 self.ui.write(
1868 1897 _('note: commit message saved in %s\n') % msgfn)
1869 1898 raise
1870 1899 # update bookmarks, dirstate and mergestate
1871 1900 bookmarks.update(self, [p1, p2], ret)
1872 1901 cctx.markcommitted(ret)
1873 1902 ms.reset()
1874 1903 tr.close()
1875 1904
1876 1905 finally:
1877 1906 lockmod.release(tr, lock, wlock)
1878 1907
1879 1908 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1880 1909 # hack for commands that use a temporary commit (e.g. histedit):
1881 1910 # the temporary commit may have been stripped before the hook runs
1882 1911 if self.changelog.hasnode(ret):
1883 1912 self.hook("commit", node=node, parent1=parent1,
1884 1913 parent2=parent2)
1885 1914 self._afterlock(commithook)
1886 1915 return ret
1887 1916
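# A sketch of a programmatic commit using this method; the match argument
# narrows the committed files, and None is returned when there is nothing
# to commit (the path and message here are illustrative):
#
#     from mercurial import match as matchmod
#
#     m = matchmod.match(repo.root, '', ['path/to/file'])
#     node = repo.commit(text='example message',
#                        user='Example <user@example.com>', match=m)
#     if node is None:
#         repo.ui.status('nothing changed\n')
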
1888 1917 @unfilteredmethod
1889 1918 def commitctx(self, ctx, error=False):
1890 1919 """Add a new revision to current repository.
1891 1920 Revision information is passed via the context argument.
1892 1921 """
1893 1922
1894 1923 tr = None
1895 1924 p1, p2 = ctx.p1(), ctx.p2()
1896 1925 user = ctx.user()
1897 1926
1898 1927 lock = self.lock()
1899 1928 try:
1900 1929 tr = self.transaction("commit")
1901 1930 trp = weakref.proxy(tr)
1902 1931
1903 1932 if ctx.manifestnode():
1904 1933 # reuse an existing manifest revision
1905 1934 mn = ctx.manifestnode()
1906 1935 files = ctx.files()
1907 1936 elif ctx.files():
1908 1937 m1ctx = p1.manifestctx()
1909 1938 m2ctx = p2.manifestctx()
1910 1939 mctx = m1ctx.copy()
1911 1940
1912 1941 m = mctx.read()
1913 1942 m1 = m1ctx.read()
1914 1943 m2 = m2ctx.read()
1915 1944
1916 1945 # check in files
1917 1946 added = []
1918 1947 changed = []
1919 1948 removed = list(ctx.removed())
1920 1949 linkrev = len(self)
1921 1950 self.ui.note(_("committing files:\n"))
1922 1951 for f in sorted(ctx.modified() + ctx.added()):
1923 1952 self.ui.note(f + "\n")
1924 1953 try:
1925 1954 fctx = ctx[f]
1926 1955 if fctx is None:
1927 1956 removed.append(f)
1928 1957 else:
1929 1958 added.append(f)
1930 1959 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1931 1960 trp, changed)
1932 1961 m.setflag(f, fctx.flags())
1933 1962 except OSError as inst:
1934 1963 self.ui.warn(_("trouble committing %s!\n") % f)
1935 1964 raise
1936 1965 except IOError as inst:
1937 1966 errcode = getattr(inst, 'errno', errno.ENOENT)
1938 1967 if error or errcode and errcode != errno.ENOENT:
1939 1968 self.ui.warn(_("trouble committing %s!\n") % f)
1940 1969 raise
1941 1970
1942 1971 # update manifest
1943 1972 self.ui.note(_("committing manifest\n"))
1944 1973 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1945 1974 drop = [f for f in removed if f in m]
1946 1975 for f in drop:
1947 1976 del m[f]
1948 1977 mn = mctx.write(trp, linkrev,
1949 1978 p1.manifestnode(), p2.manifestnode(),
1950 1979 added, drop)
1951 1980 files = changed + removed
1952 1981 else:
1953 1982 mn = p1.manifestnode()
1954 1983 files = []
1955 1984
1956 1985 # update changelog
1957 1986 self.ui.note(_("committing changelog\n"))
1958 1987 self.changelog.delayupdate(tr)
1959 1988 n = self.changelog.add(mn, files, ctx.description(),
1960 1989 trp, p1.node(), p2.node(),
1961 1990 user, ctx.date(), ctx.extra().copy())
1962 1991 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1963 1992 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1964 1993 parent2=xp2)
1965 1994 # put the new commit in the proper phase
1966 1995 targetphase = subrepo.newcommitphase(self.ui, ctx)
1967 1996 if targetphase:
1968 1997 # retracting the boundary does not alter parent changesets;
1969 1998 # if a parent has a higher phase, the resulting phase will
1970 1999 # be compliant anyway
1971 2000 #
1972 2001 # if the minimal phase was 0 we don't need to retract anything
1973 2002 phases.registernew(self, tr, targetphase, [n])
1974 2003 tr.close()
1975 2004 return n
1976 2005 finally:
1977 2006 if tr:
1978 2007 tr.release()
1979 2008 lock.release()
1980 2009
1981 2010 @unfilteredmethod
1982 2011 def destroying(self):
1983 2012 '''Inform the repository that nodes are about to be destroyed.
1984 2013 Intended for use by strip and rollback, so there's a common
1985 2014 place for anything that has to be done before destroying history.
1986 2015
1987 2016 This is mostly useful for saving state that is in memory and waiting
1988 2017 to be flushed when the current lock is released. Because a call to
1989 2018 destroyed is imminent, the repo will be invalidated, causing those
1990 2019 changes to stay in memory (waiting for the next unlock), or vanish
1991 2020 completely.
1992 2021 '''
1993 2022 # When using the same lock to commit and strip, the phasecache is left
1994 2023 # dirty after committing. Then when we strip, the repo is invalidated,
1995 2024 # causing those changes to disappear.
1996 2025 if '_phasecache' in vars(self):
1997 2026 self._phasecache.write()
1998 2027
1999 2028 @unfilteredmethod
2000 2029 def destroyed(self):
2001 2030 '''Inform the repository that nodes have been destroyed.
2002 2031 Intended for use by strip and rollback, so there's a common
2003 2032 place for anything that has to be done after destroying history.
2004 2033 '''
2005 2034 # When one tries to:
2006 2035 # 1) destroy nodes thus calling this method (e.g. strip)
2007 2036 # 2) use phasecache somewhere (e.g. commit)
2008 2037 #
2009 2038 # then 2) will fail because the phasecache contains nodes that were
2010 2039 # removed. We can either remove phasecache from the filecache,
2011 2040 # causing it to reload next time it is accessed, or simply filter
2012 2041 # the removed nodes now and write the updated cache.
2013 2042 self._phasecache.filterunknown(self)
2014 2043 self._phasecache.write()
2015 2044
2016 2045 # refresh all repository caches
2017 2046 self.updatecaches()
2018 2047
2019 2048 # Ensure the persistent tag cache is updated. Doing it now
2020 2049 # means that the tag cache only has to worry about destroyed
2021 2050 # heads immediately after a strip/rollback. That in turn
2022 2051 # guarantees that "cachetip == currenttip" (comparing both rev
2023 2052 # and node) always means no nodes have been added or destroyed.
2024 2053
2025 2054 # XXX this is suboptimal when qrefresh'ing: we strip the current
2026 2055 # head, refresh the tag cache, then immediately add a new head.
2027 2056 # But I think doing it this way is necessary for the "instant
2028 2057 # tag cache retrieval" case to work.
2029 2058 self.invalidate()
2030 2059
2031 2060 def walk(self, match, node=None):
2032 2061 '''
2033 2062 walk recursively through the directory tree or a given
2034 2063 changeset, finding all files matched by the match
2035 2064 function
2036 2065 '''
2037 2066 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2038 2067 return self[node].walk(match)
2039 2068
2040 2069 def status(self, node1='.', node2=None, match=None,
2041 2070 ignored=False, clean=False, unknown=False,
2042 2071 listsubrepos=False):
2043 2072 '''a convenience method that calls node1.status(node2)'''
2044 2073 return self[node1].status(node2, match, ignored, clean, unknown,
2045 2074 listsubrepos)
2046 2075
2047 2076 def addpostdsstatus(self, ps):
2048 2077 """Add a callback to run within the wlock, at the point at which status
2049 2078 fixups happen.
2050 2079
2051 2080 On status completion, callback(wctx, status) will be called with the
2052 2081 wlock held, unless the dirstate has changed from underneath or the wlock
2053 2082 couldn't be grabbed.
2054 2083
2055 2084 Callbacks should not capture and use a cached copy of the dirstate --
2056 2085 it might change in the meanwhile. Instead, they should access the
2057 2086 dirstate via wctx.repo().dirstate.
2058 2087
2059 2088 This list is emptied out after each status run -- extensions should
2060 2089 make sure they add to this list each time dirstate.status is called.
2061 2090 Extensions should also make sure they don't call this for statuses
2062 2091 that don't involve the dirstate.
2063 2092 """
2064 2093
2065 2094 # The list is located here for uniqueness reasons -- it is actually
2066 2095 # managed by the workingctx, but that isn't unique per-repo.
2067 2096 self._postdsstatus.append(ps)
2068 2097
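# A sketch of registering a post-dirstate-status callback as described above;
# note it accesses the dirstate through wctx.repo() rather than a cached copy:
#
#     def _fixup(wctx, status):
#         # runs under wlock when status fixups happen
#         wctx.repo().ui.debug('%d modified files\n' % len(status.modified))
#
#     repo.addpostdsstatus(_fixup)
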
2069 2098 def postdsstatus(self):
2070 2099 """Used by workingctx to get the list of post-dirstate-status hooks."""
2071 2100 return self._postdsstatus
2072 2101
2073 2102 def clearpostdsstatus(self):
2074 2103 """Used by workingctx to clear post-dirstate-status hooks."""
2075 2104 del self._postdsstatus[:]
2076 2105
2077 2106 def heads(self, start=None):
2078 2107 if start is None:
2079 2108 cl = self.changelog
2080 2109 headrevs = reversed(cl.headrevs())
2081 2110 return [cl.node(rev) for rev in headrevs]
2082 2111
2083 2112 heads = self.changelog.heads(start)
2084 2113 # sort the output in rev descending order
2085 2114 return sorted(heads, key=self.changelog.rev, reverse=True)
2086 2115
2087 2116 def branchheads(self, branch=None, start=None, closed=False):
2088 2117 '''return a (possibly filtered) list of heads for the given branch
2089 2118
2090 2119 Heads are returned in topological order, from newest to oldest.
2091 2120 If branch is None, use the dirstate branch.
2092 2121 If start is not None, return only heads reachable from start.
2093 2122 If closed is True, return heads that are marked as closed as well.
2094 2123 '''
2095 2124 if branch is None:
2096 2125 branch = self[None].branch()
2097 2126 branches = self.branchmap()
2098 2127 if branch not in branches:
2099 2128 return []
2100 2129 # the cache returns heads ordered lowest to highest
2101 2130 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2102 2131 if start is not None:
2103 2132 # filter out the heads that cannot be reached from startrev
2104 2133 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2105 2134 bheads = [h for h in bheads if h in fbheads]
2106 2135 return bheads
2107 2136
2108 2137 def branches(self, nodes):
2109 2138 if not nodes:
2110 2139 nodes = [self.changelog.tip()]
2111 2140 b = []
2112 2141 for n in nodes:
2113 2142 t = n
2114 2143 while True:
2115 2144 p = self.changelog.parents(n)
2116 2145 if p[1] != nullid or p[0] == nullid:
2117 2146 b.append((t, n, p[0], p[1]))
2118 2147 break
2119 2148 n = p[0]
2120 2149 return b
2121 2150
2122 2151 def between(self, pairs):
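        # For each (top, bottom) pair, walk first parents from top towards
        # bottom and sample the nodes whose distance from top is 1, 2, 4,
        # 8, ...; this exponential spacing lets the legacy wire protocol
        # narrow down a common ancestor in a logarithmic number of steps.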
2123 2152 r = []
2124 2153
2125 2154 for top, bottom in pairs:
2126 2155 n, l, i = top, [], 0
2127 2156 f = 1
2128 2157
2129 2158 while n != bottom and n != nullid:
2130 2159 p = self.changelog.parents(n)[0]
2131 2160 if i == f:
2132 2161 l.append(n)
2133 2162 f = f * 2
2134 2163 n = p
2135 2164 i += 1
2136 2165
2137 2166 r.append(l)
2138 2167
2139 2168 return r
2140 2169
2141 2170 def checkpush(self, pushop):
2142 2171 """Extensions can override this function if additional checks have
2143 2172 to be performed before pushing, or call it if they override push
2144 2173 command.
2145 2174 """
2146 2175 pass
2147 2176
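# A sketch of the override this hook invites, using the usual reposetup
# class-swap pattern from an extension (names are illustrative):
#
#     def reposetup(ui, repo):
#         class checkingrepo(repo.__class__):
#             def checkpush(self, pushop):
#                 super(checkingrepo, self).checkpush(pushop)
#                 if pushop.force:
#                     self.ui.warn('pushing with --force\n')
#         repo.__class__ = checkingrepo
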
2148 2177 @unfilteredpropertycache
2149 2178 def prepushoutgoinghooks(self):
2150 2179 """Return util.hooks consists of a pushop with repo, remote, outgoing
2151 2180 methods, which are called before pushing changesets.
2152 2181 """
2153 2182 return util.hooks()
2154 2183
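# A sketch of registering such a hook; each hook is invoked with the pushop,
# whose repo, remote and outgoing attributes describe the pending push
# ('myextension' and the threshold are illustrative):
#
#     def _checkoutgoing(pushop):
#         if len(pushop.outgoing.missing) > 100:
#             pushop.repo.ui.warn('pushing %d changesets\n'
#                                 % len(pushop.outgoing.missing))
#
#     repo.prepushoutgoinghooks.add('myextension', _checkoutgoing)
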
2155 2184 def pushkey(self, namespace, key, old, new):
2156 2185 try:
2157 2186 tr = self.currenttransaction()
2158 2187 hookargs = {}
2159 2188 if tr is not None:
2160 2189 hookargs.update(tr.hookargs)
2161 2190 hookargs['namespace'] = namespace
2162 2191 hookargs['key'] = key
2163 2192 hookargs['old'] = old
2164 2193 hookargs['new'] = new
2165 2194 self.hook('prepushkey', throw=True, **hookargs)
2166 2195 except error.HookAbort as exc:
2167 2196 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2168 2197 if exc.hint:
2169 2198 self.ui.write_err(_("(%s)\n") % exc.hint)
2170 2199 return False
2171 2200 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2172 2201 ret = pushkey.push(self, namespace, key, old, new)
2173 2202 def runhook():
2174 2203 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2175 2204 ret=ret)
2176 2205 self._afterlock(runhook)
2177 2206 return ret
2178 2207
2179 2208 def listkeys(self, namespace):
2180 2209 self.hook('prelistkeys', throw=True, namespace=namespace)
2181 2210 self.ui.debug('listing keys for "%s"\n' % namespace)
2182 2211 values = pushkey.list(self, namespace)
2183 2212 self.hook('listkeys', namespace=namespace, values=values)
2184 2213 return values
2185 2214
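# A sketch of driving the pushkey protocol locally; 'bookmarks' is a standard
# namespace and the bookmark name is illustrative:
#
#     from mercurial.node import hex
#
#     keys = repo.listkeys('bookmarks')  # {name: hex node}
#     ok = repo.pushkey('bookmarks', 'my-bookmark',
#                       keys.get('my-bookmark', ''),
#                       hex(repo['tip'].node()))
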
2186 2215 def debugwireargs(self, one, two, three=None, four=None, five=None):
2187 2216 '''used to test argument passing over the wire'''
2188 2217 return "%s %s %s %s %s" % (one, two, three, four, five)
2189 2218
2190 2219 def savecommitmessage(self, text):
2191 2220 fp = self.vfs('last-message.txt', 'wb')
2192 2221 try:
2193 2222 fp.write(text)
2194 2223 finally:
2195 2224 fp.close()
2196 2225 return self.pathto(fp.name[len(self.root) + 1:])
2197 2226
2198 2227 # used to avoid circular references so destructors work
2199 2228 def aftertrans(files):
2200 2229 renamefiles = [tuple(t) for t in files]
2201 2230 def a():
2202 2231 for vfs, src, dest in renamefiles:
2203 2232 # if src and dest refer to a same file, vfs.rename is a no-op,
2204 2233 # leaving both src and dest on disk. delete dest to make sure
2205 2234 # the rename couldn't be such a no-op.
2206 2235 vfs.tryunlink(dest)
2207 2236 try:
2208 2237 vfs.rename(src, dest)
2209 2238 except OSError: # journal file does not yet exist
2210 2239 pass
2211 2240 return a
2212 2241
2213 2242 def undoname(fn):
2214 2243 base, name = os.path.split(fn)
2215 2244 assert name.startswith('journal')
2216 2245 return os.path.join(base, name.replace('journal', 'undo', 1))
2217 2246
2218 2247 def instance(ui, path, create):
2219 2248 return localrepository(ui, util.urllocalpath(path), create)
2220 2249
2221 2250 def islocal(path):
2222 2251 return True
2223 2252
2224 2253 def newreporequirements(repo):
2225 2254 """Determine the set of requirements for a new local repository.
2226 2255
2227 2256 Extensions can wrap this function to specify custom requirements for
2228 2257 new repositories.
2229 2258 """
2230 2259 ui = repo.ui
2231 2260 requirements = {'revlogv1'}
2232 2261 if ui.configbool('format', 'usestore'):
2233 2262 requirements.add('store')
2234 2263 if ui.configbool('format', 'usefncache'):
2235 2264 requirements.add('fncache')
2236 2265 if ui.configbool('format', 'dotencode'):
2237 2266 requirements.add('dotencode')
2238 2267
2239 2268 compengine = ui.config('experimental', 'format.compression')
2240 2269 if compengine not in util.compengines:
2241 2270 raise error.Abort(_('compression engine %s defined by '
2242 2271 'experimental.format.compression not available') %
2243 2272 compengine,
2244 2273 hint=_('run "hg debuginstall" to list available '
2245 2274 'compression engines'))
2246 2275
2247 2276 # zlib is the historical default and doesn't need an explicit requirement.
2248 2277 if compengine != 'zlib':
2249 2278 requirements.add('exp-compression-%s' % compengine)
2250 2279
2251 2280 if scmutil.gdinitconfig(ui):
2252 2281 requirements.add('generaldelta')
2253 2282 if ui.configbool('experimental', 'treemanifest'):
2254 2283 requirements.add('treemanifest')
2255 2284 if ui.configbool('experimental', 'manifestv2'):
2256 2285 requirements.add('manifestv2')
2257 2286
2258 2287 revlogv2 = ui.config('experimental', 'revlogv2')
2259 2288 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2260 2289 requirements.remove('revlogv1')
2261 2290 # generaldelta is implied by revlogv2.
2262 2291 requirements.discard('generaldelta')
2263 2292 requirements.add(REVLOGV2_REQUIREMENT)
2264 2293
2265 2294 return requirements
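
# A sketch of the wrapping this function's docstring invites; the config knob
# and requirement name are hypothetical, and a repo carrying an extra
# requirement needs matching support at open time:
#
#     from mercurial import extensions, localrepo
#
#     def _extrareqs(orig, repo):
#         requirements = orig(repo)
#         if repo.ui.configbool('format', 'exp-myfeature'):
#             requirements.add('exp-myfeature')
#         return requirements
#
#     extensions.wrapfunction(localrepo, 'newreporequirements', _extrareqs)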