transaction: add a name and a __repr__ implementation (API)...
Martin von Zweigbergk
r36837:aff5996f default
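For context, the transaction.py side of this change (not shown in the diff
below) is what actually stores the name and implements __repr__. Here is a
minimal sketch of that API, under the assumption that the real class threads
nested transaction names through a list; the actual constructor takes many
more arguments:

    # Illustrative sketch only, not mercurial.transaction.transaction.
    class transaction(object):
        def __init__(self, name='<unnamed>'):
            # keep a stack of names so nested transactions show in repr()
            self.names = [name]
            self.count = 1
            self.usages = 1

        def nest(self, name='<unnamed>'):
            self.count += 1
            self.usages += 1
            self.names.append(name)
            return self

        def __repr__(self):
            name = '/'.join(self.names)
            return ('<transaction name=%s, count=%d, usages=%d>'
                    % (name, self.count, self.usages))

The localrepo.py diff below then simply passes the transaction description
along as the name, both when opening a new transaction and when nesting.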
@@ -1,2313 +1,2314
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 hex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from . import (
24 24 bookmarks,
25 25 branchmap,
26 26 bundle2,
27 27 changegroup,
28 28 changelog,
29 29 color,
30 30 context,
31 31 dirstate,
32 32 dirstateguard,
33 33 discovery,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 narrowspec,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67
68 68 release = lockmod.release
69 69 urlerr = util.urlerr
70 70 urlreq = util.urlreq
71 71
72 72 # set of (path, vfs-location) tuples. vfs-location is:
73 73 # - 'plain' for vfs relative paths
74 74 # - '' for svfs relative paths
75 75 _cachedfiles = set()
76 76
77 77 class _basefilecache(scmutil.filecache):
78 78 """All filecache usage on repo are done for logic that should be unfiltered
79 79 """
80 80 def __get__(self, repo, type=None):
81 81 if repo is None:
82 82 return self
83 83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 84 def __set__(self, repo, value):
85 85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 86 def __delete__(self, repo):
87 87 return super(_basefilecache, self).__delete__(repo.unfiltered())
88 88
89 89 class repofilecache(_basefilecache):
90 90 """filecache for files in .hg but outside of .hg/store"""
91 91 def __init__(self, *paths):
92 92 super(repofilecache, self).__init__(*paths)
93 93 for path in paths:
94 94 _cachedfiles.add((path, 'plain'))
95 95
96 96 def join(self, obj, fname):
97 97 return obj.vfs.join(fname)
98 98
99 99 class storecache(_basefilecache):
100 100 """filecache for files in the store"""
101 101 def __init__(self, *paths):
102 102 super(storecache, self).__init__(*paths)
103 103 for path in paths:
104 104 _cachedfiles.add((path, ''))
105 105
106 106 def join(self, obj, fname):
107 107 return obj.sjoin(fname)
108 108
109 109 def isfilecached(repo, name):
110 110 """check if a repo has already cached the "name" filecache-ed property
111 111
112 112 This returns (cachedobj-or-None, iscached) tuple.
113 113 """
114 114 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 115 if not cacheentry:
116 116 return None, False
117 117 return cacheentry.obj, True
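# Example (illustrative): isfilecached(repo, 'dirstate') returns
# (<dirstate object>, True) if the 'dirstate' filecache property has
# already been accessed on this repo, and (None, False) otherwise.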
118 118
119 119 class unfilteredpropertycache(util.propertycache):
120 120 """propertycache that applies to the unfiltered repo only"""
121 121
122 122 def __get__(self, repo, type=None):
123 123 unfi = repo.unfiltered()
124 124 if unfi is repo:
125 125 return super(unfilteredpropertycache, self).__get__(unfi)
126 126 return getattr(unfi, self.name)
127 127
128 128 class filteredpropertycache(util.propertycache):
129 129 """propertycache that must take filtering into account"""
130 130
131 131 def cachevalue(self, obj, value):
132 132 object.__setattr__(obj, self.name, value)
133 133
134 134
135 135 def hasunfilteredcache(repo, name):
136 136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 137 return name in vars(repo.unfiltered())
138 138
139 139 def unfilteredmethod(orig):
140 140 """decorate a method that always needs to be run on the unfiltered version"""
141 141 def wrapper(repo, *args, **kwargs):
142 142 return orig(repo.unfiltered(), *args, **kwargs)
143 143 return wrapper
144 144
145 145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 146 'unbundle'}
147 147 legacycaps = moderncaps.union({'changegroupsubset'})
148 148
149 149 class localpeer(repository.peer):
150 150 '''peer for a local repo; reflects only the most recent API'''
151 151
152 152 def __init__(self, repo, caps=None):
153 153 super(localpeer, self).__init__()
154 154
155 155 if caps is None:
156 156 caps = moderncaps.copy()
157 157 self._repo = repo.filtered('served')
158 158 self._ui = repo.ui
159 159 self._caps = repo._restrictcapabilities(caps)
160 160
161 161 # Begin of _basepeer interface.
162 162
163 163 @util.propertycache
164 164 def ui(self):
165 165 return self._ui
166 166
167 167 def url(self):
168 168 return self._repo.url()
169 169
170 170 def local(self):
171 171 return self._repo
172 172
173 173 def peer(self):
174 174 return self
175 175
176 176 def canpush(self):
177 177 return True
178 178
179 179 def close(self):
180 180 self._repo.close()
181 181
182 182 # End of _basepeer interface.
183 183
184 184 # Begin of _basewirecommands interface.
185 185
186 186 def branchmap(self):
187 187 return self._repo.branchmap()
188 188
189 189 def capabilities(self):
190 190 return self._caps
191 191
192 192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 193 """Used to test argument passing over the wire"""
194 194 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
195 195 pycompat.bytestr(four),
196 196 pycompat.bytestr(five))
197 197
198 198 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
199 199 **kwargs):
200 200 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
201 201 common=common, bundlecaps=bundlecaps,
202 202 **kwargs)[1]
203 203 cb = util.chunkbuffer(chunks)
204 204
205 205 if exchange.bundle2requested(bundlecaps):
206 206 # When requesting a bundle2, getbundle returns a stream to make the
207 207 # wire level function happier. We need to build a proper object
208 208 # from it in local peer.
209 209 return bundle2.getunbundler(self.ui, cb)
210 210 else:
211 211 return changegroup.getunbundler('01', cb, None)
212 212
213 213 def heads(self):
214 214 return self._repo.heads()
215 215
216 216 def known(self, nodes):
217 217 return self._repo.known(nodes)
218 218
219 219 def listkeys(self, namespace):
220 220 return self._repo.listkeys(namespace)
221 221
222 222 def lookup(self, key):
223 223 return self._repo.lookup(key)
224 224
225 225 def pushkey(self, namespace, key, old, new):
226 226 return self._repo.pushkey(namespace, key, old, new)
227 227
228 228 def stream_out(self):
229 229 raise error.Abort(_('cannot perform stream clone against local '
230 230 'peer'))
231 231
232 232 def unbundle(self, cg, heads, url):
233 233 """apply a bundle on a repo
234 234
235 235 This function handles the repo locking itself."""
236 236 try:
237 237 try:
238 238 cg = exchange.readbundle(self.ui, cg, None)
239 239 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
240 240 if util.safehasattr(ret, 'getchunks'):
241 241 # This is a bundle20 object, turn it into an unbundler.
242 242 # This little dance should be dropped eventually when the
243 243 # API is finally improved.
244 244 stream = util.chunkbuffer(ret.getchunks())
245 245 ret = bundle2.getunbundler(self.ui, stream)
246 246 return ret
247 247 except Exception as exc:
248 248 # If the exception contains output salvaged from a bundle2
249 249 # reply, we need to make sure it is printed before continuing
250 250 # to fail. So we build a bundle2 with such output and consume
251 251 # it directly.
252 252 #
253 253 # This is not very elegant but allows a "simple" solution for
254 254 # issue4594
255 255 output = getattr(exc, '_bundle2salvagedoutput', ())
256 256 if output:
257 257 bundler = bundle2.bundle20(self._repo.ui)
258 258 for out in output:
259 259 bundler.addpart(out)
260 260 stream = util.chunkbuffer(bundler.getchunks())
261 261 b = bundle2.getunbundler(self.ui, stream)
262 262 bundle2.processbundle(self._repo, b)
263 263 raise
264 264 except error.PushRaced as exc:
265 265 raise error.ResponseError(_('push failed:'),
266 266 util.forcebytestr(exc))
267 267
268 268 # End of _basewirecommands interface.
269 269
270 270 # Begin of peer interface.
271 271
272 272 def iterbatch(self):
273 273 return peer.localiterbatcher(self)
274 274
275 275 # End of peer interface.
276 276
277 277 class locallegacypeer(repository.legacypeer, localpeer):
278 278 '''peer extension which implements legacy methods too; used for tests with
279 279 restricted capabilities'''
280 280
281 281 def __init__(self, repo):
282 282 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
283 283
284 284 # Begin of baselegacywirecommands interface.
285 285
286 286 def between(self, pairs):
287 287 return self._repo.between(pairs)
288 288
289 289 def branches(self, nodes):
290 290 return self._repo.branches(nodes)
291 291
292 292 def changegroup(self, basenodes, source):
293 293 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
294 294 missingheads=self._repo.heads())
295 295 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
296 296
297 297 def changegroupsubset(self, bases, heads, source):
298 298 outgoing = discovery.outgoing(self._repo, missingroots=bases,
299 299 missingheads=heads)
300 300 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
301 301
302 302 # End of baselegacywirecommands interface.
303 303
304 304 # Increment the sub-version when the revlog v2 format changes to lock out old
305 305 # clients.
306 306 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
307 307
308 308 class localrepository(object):
309 309
310 310 # obsolete experimental requirements:
311 311 # - manifestv2: An experimental new manifest format that allowed
312 312 # for stem compression of long paths. Experiment ended up not
313 313 # being successful (repository sizes went up due to worse delta
314 314 # chains), and the code was deleted in 4.6.
315 315 supportedformats = {
316 316 'revlogv1',
317 317 'generaldelta',
318 318 'treemanifest',
319 319 REVLOGV2_REQUIREMENT,
320 320 }
321 321 _basesupported = supportedformats | {
322 322 'store',
323 323 'fncache',
324 324 'shared',
325 325 'relshared',
326 326 'dotencode',
327 327 'exp-sparse',
328 328 }
329 329 openerreqs = {
330 330 'revlogv1',
331 331 'generaldelta',
332 332 'treemanifest',
333 333 }
334 334
335 335 # a list of (ui, featureset) functions.
336 336 # only functions defined in modules of enabled extensions are invoked
337 337 featuresetupfuncs = set()
338 338
339 339 # list of prefixes for files which can be written without 'wlock'
340 340 # Extensions should extend this list when needed
341 341 _wlockfreeprefix = {
342 342 # We might consider requiring 'wlock' for the next
343 343 # two, but pretty much all the existing code assumes
344 344 # wlock is not needed so we keep them excluded for
345 345 # now.
346 346 'hgrc',
347 347 'requires',
348 348 # XXX cache is a complicated business; someone
349 349 # should investigate this in depth at some point
350 350 'cache/',
351 351 # XXX shouldn't dirstate be covered by the wlock?
352 352 'dirstate',
353 353 # XXX bisect was still a bit too messy at the time
354 354 # this changeset was introduced. Someone should fix
355 355 # the remaining bit and drop this line
356 356 'bisect.state',
357 357 }
358 358
359 359 def __init__(self, baseui, path, create=False):
360 360 self.requirements = set()
361 361 self.filtername = None
362 362 # wvfs: rooted at the repository root, used to access the working copy
363 363 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
364 364 # vfs: rooted at .hg, used to access repo files outside of .hg/store
365 365 self.vfs = None
366 366 # svfs: usually rooted at .hg/store, used to access repository history
367 367 # If this is a shared repository, this vfs may point to another
368 368 # repository's .hg/store directory.
369 369 self.svfs = None
370 370 self.root = self.wvfs.base
371 371 self.path = self.wvfs.join(".hg")
372 372 self.origroot = path
373 373 # This is only used by context.workingctx.match in order to
374 374 # detect files in subrepos.
375 375 self.auditor = pathutil.pathauditor(
376 376 self.root, callback=self._checknested)
377 377 # This is only used by context.basectx.match in order to detect
378 378 # files in subrepos.
379 379 self.nofsauditor = pathutil.pathauditor(
380 380 self.root, callback=self._checknested, realfs=False, cached=True)
381 381 self.baseui = baseui
382 382 self.ui = baseui.copy()
383 383 self.ui.copy = baseui.copy # prevent copying repo configuration
384 384 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
385 385 if (self.ui.configbool('devel', 'all-warnings') or
386 386 self.ui.configbool('devel', 'check-locks')):
387 387 self.vfs.audit = self._getvfsward(self.vfs.audit)
388 388 # A list of callbacks to shape the phases if no data were found.
389 389 # Callbacks are in the form: func(repo, roots) --> processed root.
390 390 # This list is to be filled by extensions during repo setup
391 391 self._phasedefaults = []
392 392 try:
393 393 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
394 394 self._loadextensions()
395 395 except IOError:
396 396 pass
397 397
398 398 if self.featuresetupfuncs:
399 399 self.supported = set(self._basesupported) # use private copy
400 400 extmods = set(m.__name__ for n, m
401 401 in extensions.extensions(self.ui))
402 402 for setupfunc in self.featuresetupfuncs:
403 403 if setupfunc.__module__ in extmods:
404 404 setupfunc(self.ui, self.supported)
405 405 else:
406 406 self.supported = self._basesupported
407 407 color.setup(self.ui)
408 408
409 409 # Add compression engines.
410 410 for name in util.compengines:
411 411 engine = util.compengines[name]
412 412 if engine.revlogheader():
413 413 self.supported.add('exp-compression-%s' % name)
414 414
415 415 if not self.vfs.isdir():
416 416 if create:
417 417 self.requirements = newreporequirements(self)
418 418
419 419 if not self.wvfs.exists():
420 420 self.wvfs.makedirs()
421 421 self.vfs.makedir(notindexed=True)
422 422
423 423 if 'store' in self.requirements:
424 424 self.vfs.mkdir("store")
425 425
426 426 # create an invalid changelog
427 427 self.vfs.append(
428 428 "00changelog.i",
429 429 '\0\0\0\2' # represents revlogv2
430 430 ' dummy changelog to prevent using the old repo layout'
431 431 )
432 432 else:
433 433 raise error.RepoError(_("repository %s not found") % path)
434 434 elif create:
435 435 raise error.RepoError(_("repository %s already exists") % path)
436 436 else:
437 437 try:
438 438 self.requirements = scmutil.readrequires(
439 439 self.vfs, self.supported)
440 440 except IOError as inst:
441 441 if inst.errno != errno.ENOENT:
442 442 raise
443 443
444 444 cachepath = self.vfs.join('cache')
445 445 self.sharedpath = self.path
446 446 try:
447 447 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
448 448 if 'relshared' in self.requirements:
449 449 sharedpath = self.vfs.join(sharedpath)
450 450 vfs = vfsmod.vfs(sharedpath, realpath=True)
451 451 cachepath = vfs.join('cache')
452 452 s = vfs.base
453 453 if not vfs.exists():
454 454 raise error.RepoError(
455 455 _('.hg/sharedpath points to nonexistent directory %s') % s)
456 456 self.sharedpath = s
457 457 except IOError as inst:
458 458 if inst.errno != errno.ENOENT:
459 459 raise
460 460
461 461 if 'exp-sparse' in self.requirements and not sparse.enabled:
462 462 raise error.RepoError(_('repository is using sparse feature but '
463 463 'sparse is not enabled; enable the '
464 464 '"sparse" extension to access'))
465 465
466 466 self.store = store.store(
467 467 self.requirements, self.sharedpath,
468 468 lambda base: vfsmod.vfs(base, cacheaudited=True))
469 469 self.spath = self.store.path
470 470 self.svfs = self.store.vfs
471 471 self.sjoin = self.store.join
472 472 self.vfs.createmode = self.store.createmode
473 473 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
474 474 self.cachevfs.createmode = self.store.createmode
475 475 if (self.ui.configbool('devel', 'all-warnings') or
476 476 self.ui.configbool('devel', 'check-locks')):
477 477 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
478 478 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
479 479 else: # standard vfs
480 480 self.svfs.audit = self._getsvfsward(self.svfs.audit)
481 481 self._applyopenerreqs()
482 482 if create:
483 483 self._writerequirements()
484 484
485 485 self._dirstatevalidatewarned = False
486 486
487 487 self._branchcaches = {}
488 488 self._revbranchcache = None
489 489 self.filterpats = {}
490 490 self._datafilters = {}
491 491 self._transref = self._lockref = self._wlockref = None
492 492
493 493 # A cache for various files under .hg/ that tracks file changes,
494 494 # (used by the filecache decorator)
495 495 #
496 496 # Maps a property name to its util.filecacheentry
497 497 self._filecache = {}
498 498
499 499 # hold sets of revisions to be filtered
500 500 # should be cleared when something might have changed the filter value:
501 501 # - new changesets,
502 502 # - phase change,
503 503 # - new obsolescence marker,
504 504 # - working directory parent change,
505 505 # - bookmark changes
506 506 self.filteredrevcache = {}
507 507
508 508 # post-dirstate-status hooks
509 509 self._postdsstatus = []
510 510
511 511 # generic mapping between names and nodes
512 512 self.names = namespaces.namespaces()
513 513
514 514 # Key to signature value.
515 515 self._sparsesignaturecache = {}
516 516 # Signature to cached matcher instance.
517 517 self._sparsematchercache = {}
518 518
519 519 def _getvfsward(self, origfunc):
520 520 """build a ward for self.vfs"""
521 521 rref = weakref.ref(self)
522 522 def checkvfs(path, mode=None):
523 523 ret = origfunc(path, mode=mode)
524 524 repo = rref()
525 525 if (repo is None
526 526 or not util.safehasattr(repo, '_wlockref')
527 527 or not util.safehasattr(repo, '_lockref')):
528 528 return
529 529 if mode in (None, 'r', 'rb'):
530 530 return
531 531 if path.startswith(repo.path):
532 532 # truncate name relative to the repository (.hg)
533 533 path = path[len(repo.path) + 1:]
534 534 if path.startswith('cache/'):
535 535 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
536 536 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
537 537 if path.startswith('journal.'):
538 538 # journal is covered by 'lock'
539 539 if repo._currentlock(repo._lockref) is None:
540 540 repo.ui.develwarn('write with no lock: "%s"' % path,
541 541 stacklevel=2, config='check-locks')
542 542 elif repo._currentlock(repo._wlockref) is None:
543 543 # rest of vfs files are covered by 'wlock'
544 544 #
545 545 # exclude special files
546 546 for prefix in self._wlockfreeprefix:
547 547 if path.startswith(prefix):
548 548 return
549 549 repo.ui.develwarn('write with no wlock: "%s"' % path,
550 550 stacklevel=2, config='check-locks')
551 551 return ret
552 552 return checkvfs
553 553
554 554 def _getsvfsward(self, origfunc):
555 555 """build a ward for self.svfs"""
556 556 rref = weakref.ref(self)
557 557 def checksvfs(path, mode=None):
558 558 ret = origfunc(path, mode=mode)
559 559 repo = rref()
560 560 if repo is None or not util.safehasattr(repo, '_lockref'):
561 561 return
562 562 if mode in (None, 'r', 'rb'):
563 563 return
564 564 if path.startswith(repo.sharedpath):
565 565 # truncate name relative to the repository (.hg)
566 566 path = path[len(repo.sharedpath) + 1:]
567 567 if repo._currentlock(repo._lockref) is None:
568 568 repo.ui.develwarn('write with no lock: "%s"' % path,
569 569 stacklevel=3)
570 570 return ret
571 571 return checksvfs
572 572
573 573 def close(self):
574 574 self._writecaches()
575 575
576 576 def _loadextensions(self):
577 577 extensions.loadall(self.ui)
578 578
579 579 def _writecaches(self):
580 580 if self._revbranchcache:
581 581 self._revbranchcache.write()
582 582
583 583 def _restrictcapabilities(self, caps):
584 584 if self.ui.configbool('experimental', 'bundle2-advertise'):
585 585 caps = set(caps)
586 586 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
587 587 role='client'))
588 588 caps.add('bundle2=' + urlreq.quote(capsblob))
589 589 return caps
590 590
591 591 def _applyopenerreqs(self):
592 592 self.svfs.options = dict((r, 1) for r in self.requirements
593 593 if r in self.openerreqs)
594 594 # experimental config: format.chunkcachesize
595 595 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
596 596 if chunkcachesize is not None:
597 597 self.svfs.options['chunkcachesize'] = chunkcachesize
598 598 # experimental config: format.maxchainlen
599 599 maxchainlen = self.ui.configint('format', 'maxchainlen')
600 600 if maxchainlen is not None:
601 601 self.svfs.options['maxchainlen'] = maxchainlen
602 602 # experimental config: format.manifestcachesize
603 603 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
604 604 if manifestcachesize is not None:
605 605 self.svfs.options['manifestcachesize'] = manifestcachesize
606 606 # experimental config: format.aggressivemergedeltas
607 607 aggressivemergedeltas = self.ui.configbool('format',
608 608 'aggressivemergedeltas')
609 609 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
610 610 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
611 611 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
612 612 if 0 <= chainspan:
613 613 self.svfs.options['maxdeltachainspan'] = chainspan
614 614 mmapindexthreshold = self.ui.configbytes('experimental',
615 615 'mmapindexthreshold')
616 616 if mmapindexthreshold is not None:
617 617 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
618 618 withsparseread = self.ui.configbool('experimental', 'sparse-read')
619 619 srdensitythres = float(self.ui.config('experimental',
620 620 'sparse-read.density-threshold'))
621 621 srmingapsize = self.ui.configbytes('experimental',
622 622 'sparse-read.min-gap-size')
623 623 self.svfs.options['with-sparse-read'] = withsparseread
624 624 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
625 625 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
626 626
627 627 for r in self.requirements:
628 628 if r.startswith('exp-compression-'):
629 629 self.svfs.options['compengine'] = r[len('exp-compression-'):]
630 630
631 631 # TODO move "revlogv2" to openerreqs once finalized.
632 632 if REVLOGV2_REQUIREMENT in self.requirements:
633 633 self.svfs.options['revlogv2'] = True
634 634
635 635 def _writerequirements(self):
636 636 scmutil.writerequires(self.vfs, self.requirements)
637 637
638 638 def _checknested(self, path):
639 639 """Determine if path is a legal nested repository."""
640 640 if not path.startswith(self.root):
641 641 return False
642 642 subpath = path[len(self.root) + 1:]
643 643 normsubpath = util.pconvert(subpath)
644 644
645 645 # XXX: Checking against the current working copy is wrong in
646 646 # the sense that it can reject things like
647 647 #
648 648 # $ hg cat -r 10 sub/x.txt
649 649 #
650 650 # if sub/ is no longer a subrepository in the working copy
651 651 # parent revision.
652 652 #
653 653 # However, it can of course also allow things that would have
654 654 # been rejected before, such as the above cat command if sub/
655 655 # is a subrepository now, but was a normal directory before.
656 656 # The old path auditor would have rejected by mistake since it
657 657 # panics when it sees sub/.hg/.
658 658 #
659 659 # All in all, checking against the working copy seems sensible
660 660 # since we want to prevent access to nested repositories on
661 661 # the filesystem *now*.
662 662 ctx = self[None]
663 663 parts = util.splitpath(subpath)
664 664 while parts:
665 665 prefix = '/'.join(parts)
666 666 if prefix in ctx.substate:
667 667 if prefix == normsubpath:
668 668 return True
669 669 else:
670 670 sub = ctx.sub(prefix)
671 671 return sub.checknested(subpath[len(prefix) + 1:])
672 672 else:
673 673 parts.pop()
674 674 return False
675 675
676 676 def peer(self):
677 677 return localpeer(self) # not cached to avoid reference cycle
678 678
679 679 def unfiltered(self):
680 680 """Return unfiltered version of the repository
681 681
682 682 Intended to be overwritten by filtered repo."""
683 683 return self
684 684
685 685 def filtered(self, name, visibilityexceptions=None):
686 686 """Return a filtered version of a repository"""
687 687 cls = repoview.newtype(self.unfiltered().__class__)
688 688 return cls(self, name, visibilityexceptions)
689 689
690 690 @repofilecache('bookmarks', 'bookmarks.current')
691 691 def _bookmarks(self):
692 692 return bookmarks.bmstore(self)
693 693
694 694 @property
695 695 def _activebookmark(self):
696 696 return self._bookmarks.active
697 697
698 698 # _phasesets depend on changelog. what we need is to call
699 699 # _phasecache.invalidate() if '00changelog.i' was changed, but it
700 700 # can't be easily expressed in the filecache mechanism.
701 701 @storecache('phaseroots', '00changelog.i')
702 702 def _phasecache(self):
703 703 return phases.phasecache(self, self._phasedefaults)
704 704
705 705 @storecache('obsstore')
706 706 def obsstore(self):
707 707 return obsolete.makestore(self.ui, self)
708 708
709 709 @storecache('00changelog.i')
710 710 def changelog(self):
711 711 return changelog.changelog(self.svfs,
712 712 trypending=txnutil.mayhavepending(self.root))
713 713
714 714 def _constructmanifest(self):
715 715 # This is a temporary function while we migrate from manifest to
716 716 # manifestlog. It allows bundlerepo and unionrepo to intercept the
717 717 # manifest creation.
718 718 return manifest.manifestrevlog(self.svfs)
719 719
720 720 @storecache('00manifest.i')
721 721 def manifestlog(self):
722 722 return manifest.manifestlog(self.svfs, self)
723 723
724 724 @repofilecache('dirstate')
725 725 def dirstate(self):
726 726 sparsematchfn = lambda: sparse.matcher(self)
727 727
728 728 return dirstate.dirstate(self.vfs, self.ui, self.root,
729 729 self._dirstatevalidate, sparsematchfn)
730 730
731 731 def _dirstatevalidate(self, node):
732 732 try:
733 733 self.changelog.rev(node)
734 734 return node
735 735 except error.LookupError:
736 736 if not self._dirstatevalidatewarned:
737 737 self._dirstatevalidatewarned = True
738 738 self.ui.warn(_("warning: ignoring unknown"
739 739 " working parent %s!\n") % short(node))
740 740 return nullid
741 741
742 742 @repofilecache(narrowspec.FILENAME)
743 743 def narrowpats(self):
744 744 """matcher patterns for this repository's narrowspec
745 745
746 746 A tuple of (includes, excludes).
747 747 """
748 748 source = self
749 749 if self.shared():
750 750 from . import hg
751 751 source = hg.sharedreposource(self)
752 752 return narrowspec.load(source)
753 753
754 754 @repofilecache(narrowspec.FILENAME)
755 755 def _narrowmatch(self):
756 756 if changegroup.NARROW_REQUIREMENT not in self.requirements:
757 757 return matchmod.always(self.root, '')
758 758 include, exclude = self.narrowpats
759 759 return narrowspec.match(self.root, include=include, exclude=exclude)
760 760
761 761 # TODO(martinvonz): make this property-like instead?
762 762 def narrowmatch(self):
763 763 return self._narrowmatch
764 764
765 765 def setnarrowpats(self, newincludes, newexcludes):
766 766 target = self
767 767 if self.shared():
768 768 from . import hg
769 769 target = hg.sharedreposource(self)
770 770 narrowspec.save(target, newincludes, newexcludes)
771 771 self.invalidate(clearfilecache=True)
772 772
773 773 def __getitem__(self, changeid):
774 774 if changeid is None:
775 775 return context.workingctx(self)
776 776 if isinstance(changeid, slice):
777 777 # wdirrev isn't contiguous so the slice shouldn't include it
778 778 return [context.changectx(self, i)
779 779 for i in xrange(*changeid.indices(len(self)))
780 780 if i not in self.changelog.filteredrevs]
781 781 try:
782 782 return context.changectx(self, changeid)
783 783 except error.WdirUnsupported:
784 784 return context.workingctx(self)
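# Usage examples (illustrative): repo[None] yields a workingctx for the
# working directory, repo['tip'] / repo[0] / repo[binarynode] yield a
# changectx, and repo[0:3] yields a list of changectx for revisions
# 0 through 2, skipping filtered revisions.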
785 785
786 786 def __contains__(self, changeid):
787 787 """True if the given changeid exists
788 788
789 789 error.LookupError is raised if an ambiguous node is specified.
790 790 """
791 791 try:
792 792 self[changeid]
793 793 return True
794 794 except error.RepoLookupError:
795 795 return False
796 796
797 797 def __nonzero__(self):
798 798 return True
799 799
800 800 __bool__ = __nonzero__
801 801
802 802 def __len__(self):
803 803 # no need to pay the cost of repoview.changelog
804 804 unfi = self.unfiltered()
805 805 return len(unfi.changelog)
806 806
807 807 def __iter__(self):
808 808 return iter(self.changelog)
809 809
810 810 def revs(self, expr, *args):
811 811 '''Find revisions matching a revset.
812 812
813 813 The revset is specified as a string ``expr`` that may contain
814 814 %-formatting to escape certain types. See ``revsetlang.formatspec``.
815 815
816 816 Revset aliases from the configuration are not expanded. To expand
817 817 user aliases, consider calling ``scmutil.revrange()`` or
818 818 ``repo.anyrevs([expr], user=True)``.
819 819
820 820 Returns a revset.abstractsmartset, which is a list-like interface
821 821 that contains integer revisions.
822 822 '''
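# Examples of the %-formatting (illustrative):
#   repo.revs('ancestors(%d)', 42) # %d embeds an integer revision
#   repo.revs('%ld and public()', [1, 2, 3]) # %ld embeds a list of ints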
823 823 expr = revsetlang.formatspec(expr, *args)
824 824 m = revset.match(None, expr)
825 825 return m(self)
826 826
827 827 def set(self, expr, *args):
828 828 '''Find revisions matching a revset and emit changectx instances.
829 829
830 830 This is a convenience wrapper around ``revs()`` that iterates the
831 831 result and is a generator of changectx instances.
832 832
833 833 Revset aliases from the configuration are not expanded. To expand
834 834 user aliases, consider calling ``scmutil.revrange()``.
835 835 '''
836 836 for r in self.revs(expr, *args):
837 837 yield self[r]
838 838
839 839 def anyrevs(self, specs, user=False, localalias=None):
840 840 '''Find revisions matching one of the given revsets.
841 841
842 842 Revset aliases from the configuration are not expanded by default. To
843 843 expand user aliases, specify ``user=True``. To provide some local
844 844 definitions overriding user aliases, set ``localalias`` to
845 845 ``{name: definitionstring}``.
846 846 '''
847 847 if user:
848 848 m = revset.matchany(self.ui, specs, repo=self,
849 849 localalias=localalias)
850 850 else:
851 851 m = revset.matchany(None, specs, localalias=localalias)
852 852 return m(self)
853 853
854 854 def url(self):
855 855 return 'file:' + self.root
856 856
857 857 def hook(self, name, throw=False, **args):
858 858 """Call a hook, passing this repo instance.
859 859
860 860 This is a convenience method to aid invoking hooks. Extensions likely
861 861 won't call this unless they have registered a custom hook or are
862 862 replacing code that is expected to call a hook.
863 863 """
864 864 return hook.hook(self.ui, self, name, throw, **args)
865 865
866 866 @filteredpropertycache
867 867 def _tagscache(self):
868 868 '''Returns a tagscache object that contains various tags-related
869 869 caches.'''
870 870
871 871 # This simplifies its cache management by having one decorated
872 872 # function (this one) and the rest simply fetch things from it.
873 873 class tagscache(object):
874 874 def __init__(self):
875 875 # These two define the set of tags for this repository. tags
876 876 # maps tag name to node; tagtypes maps tag name to 'global' or
877 877 # 'local'. (Global tags are defined by .hgtags across all
878 878 # heads, and local tags are defined in .hg/localtags.)
879 879 # They constitute the in-memory cache of tags.
880 880 self.tags = self.tagtypes = None
881 881
882 882 self.nodetagscache = self.tagslist = None
883 883
884 884 cache = tagscache()
885 885 cache.tags, cache.tagtypes = self._findtags()
886 886
887 887 return cache
888 888
889 889 def tags(self):
890 890 '''return a mapping of tag to node'''
891 891 t = {}
892 892 if self.changelog.filteredrevs:
893 893 tags, tt = self._findtags()
894 894 else:
895 895 tags = self._tagscache.tags
896 896 for k, v in tags.iteritems():
897 897 try:
898 898 # ignore tags to unknown nodes
899 899 self.changelog.rev(v)
900 900 t[k] = v
901 901 except (error.LookupError, ValueError):
902 902 pass
903 903 return t
904 904
905 905 def _findtags(self):
906 906 '''Do the hard work of finding tags. Return a pair of dicts
907 907 (tags, tagtypes) where tags maps tag name to node, and tagtypes
908 908 maps tag name to a string like \'global\' or \'local\'.
909 909 Subclasses or extensions are free to add their own tags, but
910 910 should be aware that the returned dicts will be retained for the
911 911 duration of the localrepo object.'''
912 912
913 913 # XXX what tagtype should subclasses/extensions use? Currently
914 914 # mq and bookmarks add tags, but do not set the tagtype at all.
915 915 # Should each extension invent its own tag type? Should there
916 916 # be one tagtype for all such "virtual" tags? Or is the status
917 917 # quo fine?
918 918
919 919
920 920 # map tag name to (node, hist)
921 921 alltags = tagsmod.findglobaltags(self.ui, self)
922 922 # map tag name to tag type
923 923 tagtypes = dict((tag, 'global') for tag in alltags)
924 924
925 925 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
926 926
927 927 # Build the return dicts. Have to re-encode tag names because
928 928 # the tags module always uses UTF-8 (in order not to lose info
929 929 # writing to the cache), but the rest of Mercurial wants them in
930 930 # local encoding.
931 931 tags = {}
932 932 for (name, (node, hist)) in alltags.iteritems():
933 933 if node != nullid:
934 934 tags[encoding.tolocal(name)] = node
935 935 tags['tip'] = self.changelog.tip()
936 936 tagtypes = dict([(encoding.tolocal(name), value)
937 937 for (name, value) in tagtypes.iteritems()])
938 938 return (tags, tagtypes)
939 939
940 940 def tagtype(self, tagname):
941 941 '''
942 942 return the type of the given tag. result can be:
943 943
944 944 'local' : a local tag
945 945 'global' : a global tag
946 946 None : tag does not exist
947 947 '''
948 948
949 949 return self._tagscache.tagtypes.get(tagname)
950 950
951 951 def tagslist(self):
952 952 '''return a list of tags ordered by revision'''
953 953 if not self._tagscache.tagslist:
954 954 l = []
955 955 for t, n in self.tags().iteritems():
956 956 l.append((self.changelog.rev(n), t, n))
957 957 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
958 958
959 959 return self._tagscache.tagslist
960 960
961 961 def nodetags(self, node):
962 962 '''return the tags associated with a node'''
963 963 if not self._tagscache.nodetagscache:
964 964 nodetagscache = {}
965 965 for t, n in self._tagscache.tags.iteritems():
966 966 nodetagscache.setdefault(n, []).append(t)
967 967 for tags in nodetagscache.itervalues():
968 968 tags.sort()
969 969 self._tagscache.nodetagscache = nodetagscache
970 970 return self._tagscache.nodetagscache.get(node, [])
971 971
972 972 def nodebookmarks(self, node):
973 973 """return the list of bookmarks pointing to the specified node"""
974 974 marks = []
975 975 for bookmark, n in self._bookmarks.iteritems():
976 976 if n == node:
977 977 marks.append(bookmark)
978 978 return sorted(marks)
979 979
980 980 def branchmap(self):
981 981 '''returns a dictionary {branch: [branchheads]} with branchheads
982 982 ordered by increasing revision number'''
983 983 branchmap.updatecache(self)
984 984 return self._branchcaches[self.filtername]
985 985
986 986 @unfilteredmethod
987 987 def revbranchcache(self):
988 988 if not self._revbranchcache:
989 989 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
990 990 return self._revbranchcache
991 991
992 992 def branchtip(self, branch, ignoremissing=False):
993 993 '''return the tip node for a given branch
994 994
995 995 If ignoremissing is True, then this method will not raise an error.
996 996 This is helpful for callers that only expect None for a missing branch
997 997 (e.g. namespace).
998 998
999 999 '''
1000 1000 try:
1001 1001 return self.branchmap().branchtip(branch)
1002 1002 except KeyError:
1003 1003 if not ignoremissing:
1004 1004 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1005 1005 else:
1006 1006 pass
1007 1007
1008 1008 def lookup(self, key):
1009 1009 return self[key].node()
1010 1010
1011 1011 def lookupbranch(self, key, remote=None):
1012 1012 repo = remote or self
1013 1013 if key in repo.branchmap():
1014 1014 return key
1015 1015
1016 1016 repo = (remote and remote.local()) and remote or self
1017 1017 return repo[key].branch()
1018 1018
1019 1019 def known(self, nodes):
1020 1020 cl = self.changelog
1021 1021 nm = cl.nodemap
1022 1022 filtered = cl.filteredrevs
1023 1023 result = []
1024 1024 for n in nodes:
1025 1025 r = nm.get(n)
1026 1026 resp = not (r is None or r in filtered)
1027 1027 result.append(resp)
1028 1028 return result
1029 1029
1030 1030 def local(self):
1031 1031 return self
1032 1032
1033 1033 def publishing(self):
1034 1034 # it's safe (and desirable) to trust the publish flag unconditionally
1035 1035 # so that we don't finalize changes shared between users via ssh or nfs
1036 1036 return self.ui.configbool('phases', 'publish', untrusted=True)
1037 1037
1038 1038 def cancopy(self):
1039 1039 # so statichttprepo's override of local() works
1040 1040 if not self.local():
1041 1041 return False
1042 1042 if not self.publishing():
1043 1043 return True
1044 1044 # if publishing we can't copy if there is filtered content
1045 1045 return not self.filtered('visible').changelog.filteredrevs
1046 1046
1047 1047 def shared(self):
1048 1048 '''the type of shared repository (None if not shared)'''
1049 1049 if self.sharedpath != self.path:
1050 1050 return 'store'
1051 1051 return None
1052 1052
1053 1053 def wjoin(self, f, *insidef):
1054 1054 return self.vfs.reljoin(self.root, f, *insidef)
1055 1055
1056 1056 def file(self, f):
1057 1057 if f[0] == '/':
1058 1058 f = f[1:]
1059 1059 return filelog.filelog(self.svfs, f)
1060 1060
1061 1061 def changectx(self, changeid):
1062 1062 return self[changeid]
1063 1063
1064 1064 def setparents(self, p1, p2=nullid):
1065 1065 with self.dirstate.parentchange():
1066 1066 copies = self.dirstate.setparents(p1, p2)
1067 1067 pctx = self[p1]
1068 1068 if copies:
1069 1069 # Adjust copy records, the dirstate cannot do it, it
1070 1070 # requires access to parents manifests. Preserve them
1071 1071 # only for entries added to first parent.
1072 1072 for f in copies:
1073 1073 if f not in pctx and copies[f] in pctx:
1074 1074 self.dirstate.copy(copies[f], f)
1075 1075 if p2 == nullid:
1076 1076 for f, s in sorted(self.dirstate.copies().items()):
1077 1077 if f not in pctx and s not in pctx:
1078 1078 self.dirstate.copy(None, f)
1079 1079
1080 1080 def filectx(self, path, changeid=None, fileid=None):
1081 1081 """changeid can be a changeset revision, node, or tag.
1082 1082 fileid can be a file revision or node."""
1083 1083 return context.filectx(self, path, changeid, fileid)
1084 1084
1085 1085 def getcwd(self):
1086 1086 return self.dirstate.getcwd()
1087 1087
1088 1088 def pathto(self, f, cwd=None):
1089 1089 return self.dirstate.pathto(f, cwd)
1090 1090
1091 1091 def _loadfilter(self, filter):
1092 1092 if filter not in self.filterpats:
1093 1093 l = []
1094 1094 for pat, cmd in self.ui.configitems(filter):
1095 1095 if cmd == '!':
1096 1096 continue
1097 1097 mf = matchmod.match(self.root, '', [pat])
1098 1098 fn = None
1099 1099 params = cmd
1100 1100 for name, filterfn in self._datafilters.iteritems():
1101 1101 if cmd.startswith(name):
1102 1102 fn = filterfn
1103 1103 params = cmd[len(name):].lstrip()
1104 1104 break
1105 1105 if not fn:
1106 1106 fn = lambda s, c, **kwargs: util.filter(s, c)
1107 1107 # Wrap old filters not supporting keyword arguments
1108 1108 if not pycompat.getargspec(fn)[2]:
1109 1109 oldfn = fn
1110 1110 fn = lambda s, c, **kwargs: oldfn(s, c)
1111 1111 l.append((mf, fn, params))
1112 1112 self.filterpats[filter] = l
1113 1113 return self.filterpats[filter]
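# The config this reads maps file patterns to filter commands, e.g. an
# hgrc section like (illustrative):
#   [encode]
#   *.gz = pipe: gunzip
# where an entry whose command is '!' disables an inherited pattern.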
1114 1114
1115 1115 def _filter(self, filterpats, filename, data):
1116 1116 for mf, fn, cmd in filterpats:
1117 1117 if mf(filename):
1118 1118 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1119 1119 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1120 1120 break
1121 1121
1122 1122 return data
1123 1123
1124 1124 @unfilteredpropertycache
1125 1125 def _encodefilterpats(self):
1126 1126 return self._loadfilter('encode')
1127 1127
1128 1128 @unfilteredpropertycache
1129 1129 def _decodefilterpats(self):
1130 1130 return self._loadfilter('decode')
1131 1131
1132 1132 def adddatafilter(self, name, filter):
1133 1133 self._datafilters[name] = filter
1134 1134
1135 1135 def wread(self, filename):
1136 1136 if self.wvfs.islink(filename):
1137 1137 data = self.wvfs.readlink(filename)
1138 1138 else:
1139 1139 data = self.wvfs.read(filename)
1140 1140 return self._filter(self._encodefilterpats, filename, data)
1141 1141
1142 1142 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1143 1143 """write ``data`` into ``filename`` in the working directory
1144 1144
1145 1145 This returns the length of the written (maybe decoded) data.
1146 1146 """
1147 1147 data = self._filter(self._decodefilterpats, filename, data)
1148 1148 if 'l' in flags:
1149 1149 self.wvfs.symlink(data, filename)
1150 1150 else:
1151 1151 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1152 1152 **kwargs)
1153 1153 if 'x' in flags:
1154 1154 self.wvfs.setflags(filename, False, True)
1155 1155 else:
1156 1156 self.wvfs.setflags(filename, False, False)
1157 1157 return len(data)
1158 1158
1159 1159 def wwritedata(self, filename, data):
1160 1160 return self._filter(self._decodefilterpats, filename, data)
1161 1161
1162 1162 def currenttransaction(self):
1163 1163 """return the current transaction or None if none exists"""
1164 1164 if self._transref:
1165 1165 tr = self._transref()
1166 1166 else:
1167 1167 tr = None
1168 1168
1169 1169 if tr and tr.running():
1170 1170 return tr
1171 1171 return None
1172 1172
1173 1173 def transaction(self, desc, report=None):
1174 1174 if (self.ui.configbool('devel', 'all-warnings')
1175 1175 or self.ui.configbool('devel', 'check-locks')):
1176 1176 if self._currentlock(self._lockref) is None:
1177 1177 raise error.ProgrammingError('transaction requires locking')
1178 1178 tr = self.currenttransaction()
1179 1179 if tr is not None:
1180 return tr.nest()
1180 return tr.nest(name=desc)
1181 1181
1182 1182 # abort here if the journal already exists
1183 1183 if self.svfs.exists("journal"):
1184 1184 raise error.RepoError(
1185 1185 _("abandoned transaction found"),
1186 1186 hint=_("run 'hg recover' to clean up transaction"))
1187 1187
1188 1188 idbase = "%.40f#%f" % (random.random(), time.time())
1189 1189 ha = hex(hashlib.sha1(idbase).digest())
1190 1190 txnid = 'TXN:' + ha
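# The resulting id is 'TXN:' plus the 40 hex digits of a sha1, so hooks
# receive something like 'TXN:0123...badf00d' (illustrative value).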
1191 1191 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1192 1192
1193 1193 self._writejournal(desc)
1194 1194 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1195 1195 if report:
1196 1196 rp = report
1197 1197 else:
1198 1198 rp = self.ui.warn
1199 1199 vfsmap = {'plain': self.vfs} # root of .hg/
1200 1200 # we must avoid cyclic reference between repo and transaction.
1201 1201 reporef = weakref.ref(self)
1202 1202 # Code to track tag movement
1203 1203 #
1204 1204 # Since tags are all handled as file content, it is actually quite hard
1205 1205 # to track their movement from a code perspective. So we fall back to
1206 1206 # tracking at the repository level. One could envision tracking changes
1207 1207 # to the '.hgtags' file through changegroup apply, but that fails to
1208 1208 # cope with cases where a transaction exposes new heads without a changegroup
1209 1209 # being involved (eg: phase movement).
1210 1210 #
1211 1211 # For now, we gate the feature behind a flag since it likely comes
1212 1212 # with performance impacts. The current code runs more often than needed
1213 1213 # and does not use caches as much as it could. The current focus is on
1214 1214 # the behavior of the feature so we disable it by default. The flag
1215 1215 # will be removed when we are happy with the performance impact.
1216 1216 #
1217 1217 # Once this feature is no longer experimental move the following
1218 1218 # documentation to the appropriate help section:
1219 1219 #
1220 1220 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1221 1221 # tags (new or changed or deleted tags). In addition the details of
1222 1222 # these changes are made available in a file at:
1223 1223 # ``REPOROOT/.hg/changes/tags.changes``.
1224 1224 # Make sure you check for HG_TAG_MOVED before reading that file as it
1225 1225 # might exist from a previous transaction even if no tags were touched
1226 1226 # in this one. Changes are recorded in a line-based format::
1227 1227 #
1228 1228 # <action> <hex-node> <tag-name>\n
1229 1229 #
1230 1230 # Actions are defined as follows:
1231 1231 # "-R": tag is removed,
1232 1232 # "+A": tag is added,
1233 1233 # "-M": tag is moved (old value),
1234 1234 # "+M": tag is moved (new value),
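# For example (hypothetical nodes), a moved tag "foo" and a new tag
# "bar" would appear in tags.changes as:
#
# -M 1ab2... foo
# +M 3cd4... foo
# +A 5ef6... bar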
1235 1235 tracktags = lambda x: None
1236 1236 # experimental config: experimental.hook-track-tags
1237 1237 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1238 1238 if desc != 'strip' and shouldtracktags:
1239 1239 oldheads = self.changelog.headrevs()
1240 1240 def tracktags(tr2):
1241 1241 repo = reporef()
1242 1242 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1243 1243 newheads = repo.changelog.headrevs()
1244 1244 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1245 1245 # note: we compare lists here.
1246 1246 # As we do it only once, building a set would not be cheaper
1247 1247 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1248 1248 if changes:
1249 1249 tr2.hookargs['tag_moved'] = '1'
1250 1250 with repo.vfs('changes/tags.changes', 'w',
1251 1251 atomictemp=True) as changesfile:
1252 1252 # note: we do not register the file with the transaction
1253 1253 # because we need it to still exist when the transaction
1254 1254 # is closed (for txnclose hooks)
1255 1255 tagsmod.writediff(changesfile, changes)
1256 1256 def validate(tr2):
1257 1257 """will run pre-closing hooks"""
1258 1258 # XXX the transaction API is a bit lacking here so we take a hacky
1259 1259 # path for now
1260 1260 #
1261 1261 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1262 1262 # dict is copied before these run. In addition we need the data
1263 1263 # available to in-memory hooks too.
1264 1264 #
1265 1265 # Moreover, we also need to make sure this runs before txnclose
1266 1266 # hooks and there is no "pending" mechanism that would execute
1267 1267 # logic only if hooks are about to run.
1268 1268 #
1269 1269 # Fixing this limitation of the transaction is also needed to track
1270 1270 # other families of changes (bookmarks, phases, obsolescence).
1271 1271 #
1272 1272 # This will have to be fixed before we remove the experimental
1273 1273 # gating.
1274 1274 tracktags(tr2)
1275 1275 repo = reporef()
1276 1276 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1277 1277 scmutil.enforcesinglehead(repo, tr2, desc)
1278 1278 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1279 1279 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1280 1280 args = tr.hookargs.copy()
1281 1281 args.update(bookmarks.preparehookargs(name, old, new))
1282 1282 repo.hook('pretxnclose-bookmark', throw=True,
1283 1283 txnname=desc,
1284 1284 **pycompat.strkwargs(args))
1285 1285 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1286 1286 cl = repo.unfiltered().changelog
1287 1287 for rev, (old, new) in tr.changes['phases'].items():
1288 1288 args = tr.hookargs.copy()
1289 1289 node = hex(cl.node(rev))
1290 1290 args.update(phases.preparehookargs(node, old, new))
1291 1291 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1292 1292 **pycompat.strkwargs(args))
1293 1293
1294 1294 repo.hook('pretxnclose', throw=True,
1295 1295 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1296 1296 def releasefn(tr, success):
1297 1297 repo = reporef()
1298 1298 if success:
1299 1299 # this should be explicitly invoked here, because
1300 1300 # in-memory changes aren't written out at closing
1301 1301 # transaction, if tr.addfilegenerator (via
1302 1302 # dirstate.write or so) isn't invoked while
1303 1303 # transaction running
1304 1304 repo.dirstate.write(None)
1305 1305 else:
1306 1306 # discard all changes (including ones already written
1307 1307 # out) in this transaction
1308 1308 repo.dirstate.restorebackup(None, 'journal.dirstate')
1309 1309
1310 1310 repo.invalidate(clearfilecache=True)
1311 1311
1312 1312 tr = transaction.transaction(rp, self.svfs, vfsmap,
1313 1313 "journal",
1314 1314 "undo",
1315 1315 aftertrans(renames),
1316 1316 self.store.createmode,
1317 1317 validator=validate,
1318 1318 releasefn=releasefn,
1319 checkambigfiles=_cachedfiles)
1319 checkambigfiles=_cachedfiles,
1320 name=desc)
1320 1321 tr.changes['revs'] = xrange(0, 0)
1321 1322 tr.changes['obsmarkers'] = set()
1322 1323 tr.changes['phases'] = {}
1323 1324 tr.changes['bookmarks'] = {}
1324 1325
1325 1326 tr.hookargs['txnid'] = txnid
1326 1327 # note: writing the fncache only during finalize means that the file is
1327 1328 # outdated when running hooks. As fncache is used for streaming clone,
1328 1329 # this is not expected to break anything that happens during the hooks.
1329 1330 tr.addfinalize('flush-fncache', self.store.write)
1330 1331 def txnclosehook(tr2):
1331 1332 """To be run if transaction is successful, will schedule a hook run
1332 1333 """
1333 1334 # Don't reference tr2 in hook() so we don't hold a reference.
1334 1335 # This reduces memory consumption when there are multiple
1335 1336 # transactions per lock. This can likely go away if issue5045
1336 1337 # fixes the function accumulation.
1337 1338 hookargs = tr2.hookargs
1338 1339
1339 1340 def hookfunc():
1340 1341 repo = reporef()
1341 1342 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1342 1343 bmchanges = sorted(tr.changes['bookmarks'].items())
1343 1344 for name, (old, new) in bmchanges:
1344 1345 args = tr.hookargs.copy()
1345 1346 args.update(bookmarks.preparehookargs(name, old, new))
1346 1347 repo.hook('txnclose-bookmark', throw=False,
1347 1348 txnname=desc, **pycompat.strkwargs(args))
1348 1349
1349 1350 if hook.hashook(repo.ui, 'txnclose-phase'):
1350 1351 cl = repo.unfiltered().changelog
1351 1352 phasemv = sorted(tr.changes['phases'].items())
1352 1353 for rev, (old, new) in phasemv:
1353 1354 args = tr.hookargs.copy()
1354 1355 node = hex(cl.node(rev))
1355 1356 args.update(phases.preparehookargs(node, old, new))
1356 1357 repo.hook('txnclose-phase', throw=False, txnname=desc,
1357 1358 **pycompat.strkwargs(args))
1358 1359
1359 1360 repo.hook('txnclose', throw=False, txnname=desc,
1360 1361 **pycompat.strkwargs(hookargs))
1361 1362 reporef()._afterlock(hookfunc)
1362 1363 tr.addfinalize('txnclose-hook', txnclosehook)
1363 1364 # Include a leading "-" to make it happen before the transaction summary
1364 1365 # reports registered via scmutil.registersummarycallback() whose names
1365 1366 # are 00-txnreport etc. That way, the caches will be warm when the
1366 1367 # callbacks run.
1367 1368 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1368 1369 def txnaborthook(tr2):
1369 1370 """To be run if transaction is aborted
1370 1371 """
1371 1372 reporef().hook('txnabort', throw=False, txnname=desc,
1372 1373 **pycompat.strkwargs(tr2.hookargs))
1373 1374 tr.addabort('txnabort-hook', txnaborthook)
1374 1375 # avoid eager cache invalidation. in-memory data should be identical
1375 1376 # to stored data if transaction has no error.
1376 1377 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1377 1378 self._transref = weakref.ref(tr)
1378 1379 scmutil.registersummarycallback(self, tr, desc)
1379 1380 return tr
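# With the name threaded through, repr() of a transaction now identifies
# it; e.g. the transaction opened by 'hg commit' would print roughly as
# <transaction name=commit, count=1, usages=1> (illustrative, per the
# sketch at the top of this page).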
1380 1381
1381 1382 def _journalfiles(self):
1382 1383 return ((self.svfs, 'journal'),
1383 1384 (self.vfs, 'journal.dirstate'),
1384 1385 (self.vfs, 'journal.branch'),
1385 1386 (self.vfs, 'journal.desc'),
1386 1387 (self.vfs, 'journal.bookmarks'),
1387 1388 (self.svfs, 'journal.phaseroots'))
1388 1389
1389 1390 def undofiles(self):
1390 1391 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1391 1392
1392 1393 @unfilteredmethod
1393 1394 def _writejournal(self, desc):
1394 1395 self.dirstate.savebackup(None, 'journal.dirstate')
1395 1396 self.vfs.write("journal.branch",
1396 1397 encoding.fromlocal(self.dirstate.branch()))
1397 1398 self.vfs.write("journal.desc",
1398 1399 "%d\n%s\n" % (len(self), desc))
1399 1400 self.vfs.write("journal.bookmarks",
1400 1401 self.vfs.tryread("bookmarks"))
1401 1402 self.svfs.write("journal.phaseroots",
1402 1403 self.svfs.tryread("phaseroots"))
1403 1404
1404 1405 def recover(self):
1405 1406 with self.lock():
1406 1407 if self.svfs.exists("journal"):
1407 1408 self.ui.status(_("rolling back interrupted transaction\n"))
1408 1409 vfsmap = {'': self.svfs,
1409 1410 'plain': self.vfs,}
1410 1411 transaction.rollback(self.svfs, vfsmap, "journal",
1411 1412 self.ui.warn,
1412 1413 checkambigfiles=_cachedfiles)
1413 1414 self.invalidate()
1414 1415 return True
1415 1416 else:
1416 1417 self.ui.warn(_("no interrupted transaction available\n"))
1417 1418 return False
1418 1419
1419 1420 def rollback(self, dryrun=False, force=False):
1420 1421 wlock = lock = dsguard = None
1421 1422 try:
1422 1423 wlock = self.wlock()
1423 1424 lock = self.lock()
1424 1425 if self.svfs.exists("undo"):
1425 1426 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1426 1427
1427 1428 return self._rollback(dryrun, force, dsguard)
1428 1429 else:
1429 1430 self.ui.warn(_("no rollback information available\n"))
1430 1431 return 1
1431 1432 finally:
1432 1433 release(dsguard, lock, wlock)
1433 1434
1434 1435 @unfilteredmethod # Until we get smarter cache management
1435 1436 def _rollback(self, dryrun, force, dsguard):
1436 1437 ui = self.ui
1437 1438 try:
1438 1439 args = self.vfs.read('undo.desc').splitlines()
1439 1440 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1440 1441 if len(args) >= 3:
1441 1442 detail = args[2]
1442 1443 oldtip = oldlen - 1
1443 1444
1444 1445 if detail and ui.verbose:
1445 1446 msg = (_('repository tip rolled back to revision %d'
1446 1447 ' (undo %s: %s)\n')
1447 1448 % (oldtip, desc, detail))
1448 1449 else:
1449 1450 msg = (_('repository tip rolled back to revision %d'
1450 1451 ' (undo %s)\n')
1451 1452 % (oldtip, desc))
1452 1453 except IOError:
1453 1454 msg = _('rolling back unknown transaction\n')
1454 1455 desc = None
1455 1456
1456 1457 if not force and self['.'] != self['tip'] and desc == 'commit':
1457 1458 raise error.Abort(
1458 1459 _('rollback of last commit while not checked out '
1459 1460 'may lose data'), hint=_('use -f to force'))
1460 1461
1461 1462 ui.status(msg)
1462 1463 if dryrun:
1463 1464 return 0
1464 1465
1465 1466 parents = self.dirstate.parents()
1466 1467 self.destroying()
1467 1468 vfsmap = {'plain': self.vfs, '': self.svfs}
1468 1469 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1469 1470 checkambigfiles=_cachedfiles)
1470 1471 if self.vfs.exists('undo.bookmarks'):
1471 1472 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1472 1473 if self.svfs.exists('undo.phaseroots'):
1473 1474 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1474 1475 self.invalidate()
1475 1476
1476 1477 parentgone = (parents[0] not in self.changelog.nodemap or
1477 1478 parents[1] not in self.changelog.nodemap)
1478 1479 if parentgone:
1479 1480 # prevent dirstateguard from overwriting already restored one
1480 1481 dsguard.close()
1481 1482
1482 1483 self.dirstate.restorebackup(None, 'undo.dirstate')
1483 1484 try:
1484 1485 branch = self.vfs.read('undo.branch')
1485 1486 self.dirstate.setbranch(encoding.tolocal(branch))
1486 1487 except IOError:
1487 1488 ui.warn(_('named branch could not be reset: '
1488 1489 'current branch is still \'%s\'\n')
1489 1490 % self.dirstate.branch())
1490 1491
1491 1492 parents = tuple([p.rev() for p in self[None].parents()])
1492 1493 if len(parents) > 1:
1493 1494 ui.status(_('working directory now based on '
1494 1495 'revisions %d and %d\n') % parents)
1495 1496 else:
1496 1497 ui.status(_('working directory now based on '
1497 1498 'revision %d\n') % parents)
1498 1499 mergemod.mergestate.clean(self, self['.'].node())
1499 1500
1500 1501 # TODO: if we know which new heads may result from this rollback, pass
1501 1502 # them to destroy(), which will prevent the branchhead cache from being
1502 1503 # invalidated.
1503 1504 self.destroyed()
1504 1505 return 0
1505 1506
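The parsing at the top of _rollback implies a small fixed layout for .hg/undo.desc; a hedged sketch with illustrative contents:

    # three newline-separated fields; the third (detail) is optional
    args = '3\ncommit\nsome detail\n'.splitlines()
    oldlen, desc = int(args[0]), args[1]  # old changelog length, description
    detail = args[2] if len(args) >= 3 else None
    oldtip = oldlen - 1                   # revision the tip rolls back to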
1506 1507 def _buildcacheupdater(self, newtransaction):
1507 1508 """called during transaction to build the callback updating cache
1508 1509
1509 1510 Lives on the repository to help extensions that might want to augment
1510 1511 this logic. For this purpose, the created transaction is passed to the
1511 1512 method.
1512 1513 """
1513 1514 # we must avoid cyclic reference between repo and transaction.
1514 1515 reporef = weakref.ref(self)
1515 1516 def updater(tr):
1516 1517 repo = reporef()
1517 1518 repo.updatecaches(tr)
1518 1519 return updater
1519 1520
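_buildcacheupdater relies on a weak reference to break the repo/transaction cycle; a standalone sketch of that pattern (class and method names hypothetical):

    import weakref

    class cacheholder(object):
        def buildupdater(self):
            ref = weakref.ref(self)   # does not keep self alive
            def updater(tr):
                obj = ref()           # None once self is collected
                if obj is not None:
                    obj.update(tr)
            return updater

        def update(self, tr):
            pass                      # stand-in for the real cache update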
1520 1521 @unfilteredmethod
1521 1522 def updatecaches(self, tr=None):
1522 1523 """warm appropriate caches
1523 1524
1524 1525 If this function is called after a transaction has closed, the
1525 1526 transaction will be available in the 'tr' argument. This can be used to
1526 1527 selectively update caches relevant to the changes in that transaction.
1527 1528 """
1528 1529 if tr is not None and tr.hookargs.get('source') == 'strip':
1529 1530 # During strip, many caches are invalid but
1530 1531 # later call to `destroyed` will refresh them.
1531 1532 return
1532 1533
1533 1534 if tr is None or tr.changes['revs']:
1534 1535 # updating the unfiltered branchmap should refresh all the others
1535 1536 self.ui.debug('updating the branch cache\n')
1536 1537 branchmap.updatecache(self.filtered('served'))
1537 1538
1538 1539 def invalidatecaches(self):
1539 1540
1540 1541 if '_tagscache' in vars(self):
1541 1542 # can't use delattr on proxy
1542 1543 del self.__dict__['_tagscache']
1543 1544
1544 1545 self.unfiltered()._branchcaches.clear()
1545 1546 self.invalidatevolatilesets()
1546 1547 self._sparsesignaturecache.clear()
1547 1548
1548 1549 def invalidatevolatilesets(self):
1549 1550 self.filteredrevcache.clear()
1550 1551 obsolete.clearobscaches(self)
1551 1552
1552 1553 def invalidatedirstate(self):
1553 1554 '''Invalidates the dirstate, causing the next call to dirstate
1554 1555 to check if it was modified since the last time it was read,
1555 1556 rereading it if it has.
1556 1557
1557 1558 This is different from dirstate.invalidate() in that it doesn't always
1558 1559 reread the dirstate. Use dirstate.invalidate() if you want to
1559 1560 explicitly read the dirstate again (i.e. restoring it to a previous
1560 1561 known good state).'''
1561 1562 if hasunfilteredcache(self, 'dirstate'):
1562 1563 for k in self.dirstate._filecache:
1563 1564 try:
1564 1565 delattr(self.dirstate, k)
1565 1566 except AttributeError:
1566 1567 pass
1567 1568 delattr(self.unfiltered(), 'dirstate')
1568 1569
1569 1570 def invalidate(self, clearfilecache=False):
1570 1571 '''Invalidates both store and non-store parts other than dirstate
1571 1572
1572 1573 If a transaction is running, invalidation of store is omitted,
1573 1574 because discarding in-memory changes might cause inconsistency
1574 1575 (e.g. incomplete fncache causes unintentional failure, but
1575 1576 a redundant one doesn't).
1576 1577 '''
1577 1578 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1578 1579 for k in list(self._filecache.keys()):
1579 1580 # dirstate is invalidated separately in invalidatedirstate()
1580 1581 if k == 'dirstate':
1581 1582 continue
1582 1583 if (k == 'changelog' and
1583 1584 self.currenttransaction() and
1584 1585 self.changelog._delayed):
1585 1586 # The changelog object may store unwritten revisions. We don't
1586 1587 # want to lose them.
1587 1588 # TODO: Solve the problem instead of working around it.
1588 1589 continue
1589 1590
1590 1591 if clearfilecache:
1591 1592 del self._filecache[k]
1592 1593 try:
1593 1594 delattr(unfiltered, k)
1594 1595 except AttributeError:
1595 1596 pass
1596 1597 self.invalidatecaches()
1597 1598 if not self.currenttransaction():
1598 1599 # TODO: Changing contents of store outside transaction
1599 1600 # causes inconsistency. We should make in-memory store
1600 1601 # changes detectable, and abort if changed.
1601 1602 self.store.invalidatecaches()
1602 1603
1603 1604 def invalidateall(self):
1604 1605 '''Fully invalidates both store and non-store parts, causing the
1605 1606 subsequent operation to reread any outside changes.'''
1606 1607 # extension should hook this to invalidate its caches
1607 1608 self.invalidate()
1608 1609 self.invalidatedirstate()
1609 1610
1610 1611 @unfilteredmethod
1611 1612 def _refreshfilecachestats(self, tr):
1612 1613 """Reload stats of cached files so that they are flagged as valid"""
1613 1614 for k, ce in self._filecache.items():
1614 1615 k = pycompat.sysstr(k)
1615 1616 if k == r'dirstate' or k not in self.__dict__:
1616 1617 continue
1617 1618 ce.refresh()
1618 1619
1619 1620 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1620 1621 inheritchecker=None, parentenvvar=None):
1621 1622 parentlock = None
1622 1623 # the contents of parentenvvar are used by the underlying lock to
1623 1624 # determine whether it can be inherited
1624 1625 if parentenvvar is not None:
1625 1626 parentlock = encoding.environ.get(parentenvvar)
1626 1627
1627 1628 timeout = 0
1628 1629 warntimeout = 0
1629 1630 if wait:
1630 1631 timeout = self.ui.configint("ui", "timeout")
1631 1632 warntimeout = self.ui.configint("ui", "timeout.warn")
1632 1633
1633 1634 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1634 1635 releasefn=releasefn,
1635 1636 acquirefn=acquirefn, desc=desc,
1636 1637 inheritchecker=inheritchecker,
1637 1638 parentlock=parentlock)
1638 1639 return l
1639 1640
1640 1641 def _afterlock(self, callback):
1641 1642 """add a callback to be run when the repository is fully unlocked
1642 1643
1643 1644 The callback will be executed when the outermost lock is released
1644 1645 (with wlock being higher level than 'lock')."""
1645 1646 for ref in (self._wlockref, self._lockref):
1646 1647 l = ref and ref()
1647 1648 if l and l.held:
1648 1649 l.postrelease.append(callback)
1649 1650 break
1650 1651 else: # no lock has been found.
1651 1652 callback()
1652 1653
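A usage sketch for _afterlock, mirroring how commit() registers its 'commit' hook further down; `repo` is assumed:

    def callback():
        # runs when the outermost lock is released, or immediately
        # if no lock is currently held
        repo.ui.status('all locks released\n')
    repo._afterlock(callback)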
1653 1654 def lock(self, wait=True):
1654 1655 '''Lock the repository store (.hg/store) and return a weak reference
1655 1656 to the lock. Use this before modifying the store (e.g. committing or
1656 1657 stripping). If you are opening a transaction, get a lock as well.
1657 1658
1658 1659 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1659 1660 'wlock' first to avoid a dead-lock hazard.'''
1660 1661 l = self._currentlock(self._lockref)
1661 1662 if l is not None:
1662 1663 l.lock()
1663 1664 return l
1664 1665
1665 1666 l = self._lock(self.svfs, "lock", wait, None,
1666 1667 self.invalidate, _('repository %s') % self.origroot)
1667 1668 self._lockref = weakref.ref(l)
1668 1669 return l
1669 1670
1670 1671 def _wlockchecktransaction(self):
1671 1672 if self.currenttransaction() is not None:
1672 1673 raise error.LockInheritanceContractViolation(
1673 1674 'wlock cannot be inherited in the middle of a transaction')
1674 1675
1675 1676 def wlock(self, wait=True):
1676 1677 '''Lock the non-store parts of the repository (everything under
1677 1678 .hg except .hg/store) and return a weak reference to the lock.
1678 1679
1679 1680 Use this before modifying files in .hg.
1680 1681
1681 1682 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1682 1683 'wlock' first to avoid a dead-lock hazard.'''
1683 1684 l = self._wlockref and self._wlockref()
1684 1685 if l is not None and l.held:
1685 1686 l.lock()
1686 1687 return l
1687 1688
1688 1689 # We do not need to check for non-waiting lock acquisition. Such
1689 1690 # acquisition would not cause a dead-lock, as it would just fail.
1690 1691 if wait and (self.ui.configbool('devel', 'all-warnings')
1691 1692 or self.ui.configbool('devel', 'check-locks')):
1692 1693 if self._currentlock(self._lockref) is not None:
1693 1694 self.ui.develwarn('"wlock" acquired after "lock"')
1694 1695
1695 1696 def unlock():
1696 1697 if self.dirstate.pendingparentchange():
1697 1698 self.dirstate.invalidate()
1698 1699 else:
1699 1700 self.dirstate.write(None)
1700 1701
1701 1702 self._filecache['dirstate'].refresh()
1702 1703
1703 1704 l = self._lock(self.vfs, "wlock", wait, unlock,
1704 1705 self.invalidatedirstate, _('working directory of %s') %
1705 1706 self.origroot,
1706 1707 inheritchecker=self._wlockchecktransaction,
1707 1708 parentenvvar='HG_WLOCK_LOCKER')
1708 1709 self._wlockref = weakref.ref(l)
1709 1710 return l
1710 1711
1711 1712 def _currentlock(self, lockref):
1712 1713 """Returns the lock if it's held, or None if it's not."""
1713 1714 if lockref is None:
1714 1715 return None
1715 1716 l = lockref()
1716 1717 if l is None or not l.held:
1717 1718 return None
1718 1719 return l
1719 1720
1720 1721 def currentwlock(self):
1721 1722 """Returns the wlock if it's held, or None if it's not."""
1722 1723 return self._currentlock(self._wlockref)
1723 1724
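Per the docstrings above, 'wlock' must always be taken before 'lock'; a minimal sketch of the acquisition pattern using the module-level release helper:

    wlock = lock = None
    try:
        wlock = repo.wlock()   # working-directory lock first
        lock = repo.lock()     # then the store lock
        # ... modify the store and/or working directory ...
    finally:
        release(lock, wlock)   # reverse order; None entries are skipped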
1724 1725 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1725 1726 """
1726 1727 commit an individual file as part of a larger transaction
1727 1728 """
1728 1729
1729 1730 fname = fctx.path()
1730 1731 fparent1 = manifest1.get(fname, nullid)
1731 1732 fparent2 = manifest2.get(fname, nullid)
1732 1733 if isinstance(fctx, context.filectx):
1733 1734 node = fctx.filenode()
1734 1735 if node in [fparent1, fparent2]:
1735 1736 self.ui.debug('reusing %s filelog entry\n' % fname)
1736 1737 if manifest1.flags(fname) != fctx.flags():
1737 1738 changelist.append(fname)
1738 1739 return node
1739 1740
1740 1741 flog = self.file(fname)
1741 1742 meta = {}
1742 1743 copy = fctx.renamed()
1743 1744 if copy and copy[0] != fname:
1744 1745 # Mark the new revision of this file as a copy of another
1745 1746 # file. This copy data will effectively act as a parent
1746 1747 # of this new revision. If this is a merge, the first
1747 1748 # parent will be the nullid (meaning "look up the copy data")
1748 1749 # and the second one will be the other parent. For example:
1749 1750 #
1750 1751 # 0 --- 1 --- 3 rev1 changes file foo
1751 1752 # \ / rev2 renames foo to bar and changes it
1752 1753 # \- 2 -/ rev3 should have bar with all changes and
1753 1754 # should record that bar descends from
1754 1755 # bar in rev2 and foo in rev1
1755 1756 #
1756 1757 # this allows this merge to succeed:
1757 1758 #
1758 1759 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1759 1760 # \ / merging rev3 and rev4 should use bar@rev2
1760 1761 # \- 2 --- 4 as the merge base
1761 1762 #
1762 1763
1763 1764 cfname = copy[0]
1764 1765 crev = manifest1.get(cfname)
1765 1766 newfparent = fparent2
1766 1767
1767 1768 if manifest2: # branch merge
1768 1769 if fparent2 == nullid or crev is None: # copied on remote side
1769 1770 if cfname in manifest2:
1770 1771 crev = manifest2[cfname]
1771 1772 newfparent = fparent1
1772 1773
1773 1774 # Here, we used to search backwards through history to try to find
1774 1775 # where the file copy came from if the source of a copy was not in
1775 1776 # the parent directory. However, this doesn't actually make sense to
1776 1777 # do (what does a copy from something not in your working copy even
1777 1778 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1778 1779 # the user that copy information was dropped, so if they didn't
1779 1780 # expect this outcome it can be fixed, but this is the correct
1780 1781 # behavior in this circumstance.
1781 1782
1782 1783 if crev:
1783 1784 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1784 1785 meta["copy"] = cfname
1785 1786 meta["copyrev"] = hex(crev)
1786 1787 fparent1, fparent2 = nullid, newfparent
1787 1788 else:
1788 1789 self.ui.warn(_("warning: can't find ancestor for '%s' "
1789 1790 "copied from '%s'!\n") % (fname, cfname))
1790 1791
1791 1792 elif fparent1 == nullid:
1792 1793 fparent1, fparent2 = fparent2, nullid
1793 1794 elif fparent2 != nullid:
1794 1795 # is one parent an ancestor of the other?
1795 1796 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1796 1797 if fparent1 in fparentancestors:
1797 1798 fparent1, fparent2 = fparent2, nullid
1798 1799 elif fparent2 in fparentancestors:
1799 1800 fparent2 = nullid
1800 1801
1801 1802 # is the file changed?
1802 1803 text = fctx.data()
1803 1804 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1804 1805 changelist.append(fname)
1805 1806 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1806 1807 # are just the flags changed during merge?
1807 1808 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1808 1809 changelist.append(fname)
1809 1810
1810 1811 return fparent1
1811 1812
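For reference, the rename handling above records two metadata entries in the filelog; a hedged illustration with hypothetical values:

    meta = {
        'copy': 'foo',          # cfname: the path copied from
        'copyrev': 'a1b2...',   # hex filelog node of the copy source
    }
    # and fparent1 becomes nullid, meaning 'look up the copy data'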
1812 1813 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1813 1814 """check for commit arguments that aren't committable"""
1814 1815 if match.isexact() or match.prefix():
1815 1816 matched = set(status.modified + status.added + status.removed)
1816 1817
1817 1818 for f in match.files():
1818 1819 f = self.dirstate.normalize(f)
1819 1820 if f == '.' or f in matched or f in wctx.substate:
1820 1821 continue
1821 1822 if f in status.deleted:
1822 1823 fail(f, _('file not found!'))
1823 1824 if f in vdirs: # visited directory
1824 1825 d = f + '/'
1825 1826 for mf in matched:
1826 1827 if mf.startswith(d):
1827 1828 break
1828 1829 else:
1829 1830 fail(f, _("no match under directory!"))
1830 1831 elif f not in self.dirstate:
1831 1832 fail(f, _("file not tracked!"))
1832 1833
1833 1834 @unfilteredmethod
1834 1835 def commit(self, text="", user=None, date=None, match=None, force=False,
1835 1836 editor=False, extra=None):
1836 1837 """Add a new revision to current repository.
1837 1838
1838 1839 Revision information is gathered from the working directory,
1839 1840 match can be used to filter the committed files. If editor is
1840 1841 supplied, it is called to get a commit message.
1841 1842 """
1842 1843 if extra is None:
1843 1844 extra = {}
1844 1845
1845 1846 def fail(f, msg):
1846 1847 raise error.Abort('%s: %s' % (f, msg))
1847 1848
1848 1849 if not match:
1849 1850 match = matchmod.always(self.root, '')
1850 1851
1851 1852 if not force:
1852 1853 vdirs = []
1853 1854 match.explicitdir = vdirs.append
1854 1855 match.bad = fail
1855 1856
1856 1857 wlock = lock = tr = None
1857 1858 try:
1858 1859 wlock = self.wlock()
1859 1860 lock = self.lock() # for recent changelog (see issue4368)
1860 1861
1861 1862 wctx = self[None]
1862 1863 merge = len(wctx.parents()) > 1
1863 1864
1864 1865 if not force and merge and not match.always():
1865 1866 raise error.Abort(_('cannot partially commit a merge '
1866 1867 '(do not specify files or patterns)'))
1867 1868
1868 1869 status = self.status(match=match, clean=force)
1869 1870 if force:
1870 1871 status.modified.extend(status.clean) # mq may commit clean files
1871 1872
1872 1873 # check subrepos
1873 1874 subs, commitsubs, newstate = subrepoutil.precommit(
1874 1875 self.ui, wctx, status, match, force=force)
1875 1876
1876 1877 # make sure all explicit patterns are matched
1877 1878 if not force:
1878 1879 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1879 1880
1880 1881 cctx = context.workingcommitctx(self, status,
1881 1882 text, user, date, extra)
1882 1883
1883 1884 # internal config: ui.allowemptycommit
1884 1885 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1885 1886 or extra.get('close') or merge or cctx.files()
1886 1887 or self.ui.configbool('ui', 'allowemptycommit'))
1887 1888 if not allowemptycommit:
1888 1889 return None
1889 1890
1890 1891 if merge and cctx.deleted():
1891 1892 raise error.Abort(_("cannot commit merge with missing files"))
1892 1893
1893 1894 ms = mergemod.mergestate.read(self)
1894 1895 mergeutil.checkunresolved(ms)
1895 1896
1896 1897 if editor:
1897 1898 cctx._text = editor(self, cctx, subs)
1898 1899 edited = (text != cctx._text)
1899 1900
1900 1901 # Save commit message in case this transaction gets rolled back
1901 1902 # (e.g. by a pretxncommit hook). Leave the content alone on
1902 1903 # the assumption that the user will use the same editor again.
1903 1904 msgfn = self.savecommitmessage(cctx._text)
1904 1905
1905 1906 # commit subs and write new state
1906 1907 if subs:
1907 1908 for s in sorted(commitsubs):
1908 1909 sub = wctx.sub(s)
1909 1910 self.ui.status(_('committing subrepository %s\n') %
1910 1911 subrepoutil.subrelpath(sub))
1911 1912 sr = sub.commit(cctx._text, user, date)
1912 1913 newstate[s] = (newstate[s][0], sr)
1913 1914 subrepoutil.writestate(self, newstate)
1914 1915
1915 1916 p1, p2 = self.dirstate.parents()
1916 1917 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1917 1918 try:
1918 1919 self.hook("precommit", throw=True, parent1=hookp1,
1919 1920 parent2=hookp2)
1920 1921 tr = self.transaction('commit')
1921 1922 ret = self.commitctx(cctx, True)
1922 1923 except: # re-raises
1923 1924 if edited:
1924 1925 self.ui.write(
1925 1926 _('note: commit message saved in %s\n') % msgfn)
1926 1927 raise
1927 1928 # update bookmarks, dirstate and mergestate
1928 1929 bookmarks.update(self, [p1, p2], ret)
1929 1930 cctx.markcommitted(ret)
1930 1931 ms.reset()
1931 1932 tr.close()
1932 1933
1933 1934 finally:
1934 1935 lockmod.release(tr, lock, wlock)
1935 1936
1936 1937 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1937 1938 # hack for commands that use a temporary commit (e.g. histedit):
1938 1939 # the temporary commit may have been stripped before the hook runs
1939 1940 if self.changelog.hasnode(ret):
1940 1941 self.hook("commit", node=node, parent1=parent1,
1941 1942 parent2=parent2)
1942 1943 self._afterlock(commithook)
1943 1944 return ret
1944 1945
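A hedged usage sketch for commit(); the values are illustrative, and the method takes wlock/lock itself as shown above:

    node = repo.commit(text='fix frobnicator',
                       user='alice <alice@example.com>')
    if node is None:
        repo.ui.status('nothing changed\n')  # empty commit was refused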
1945 1946 @unfilteredmethod
1946 1947 def commitctx(self, ctx, error=False):
1947 1948 """Add a new revision to current repository.
1948 1949 Revision information is passed via the context argument.
1949 1950 """
1950 1951
1951 1952 tr = None
1952 1953 p1, p2 = ctx.p1(), ctx.p2()
1953 1954 user = ctx.user()
1954 1955
1955 1956 lock = self.lock()
1956 1957 try:
1957 1958 tr = self.transaction("commit")
1958 1959 trp = weakref.proxy(tr)
1959 1960
1960 1961 if ctx.manifestnode():
1961 1962 # reuse an existing manifest revision
1962 1963 mn = ctx.manifestnode()
1963 1964 files = ctx.files()
1964 1965 elif ctx.files():
1965 1966 m1ctx = p1.manifestctx()
1966 1967 m2ctx = p2.manifestctx()
1967 1968 mctx = m1ctx.copy()
1968 1969
1969 1970 m = mctx.read()
1970 1971 m1 = m1ctx.read()
1971 1972 m2 = m2ctx.read()
1972 1973
1973 1974 # check in files
1974 1975 added = []
1975 1976 changed = []
1976 1977 removed = list(ctx.removed())
1977 1978 linkrev = len(self)
1978 1979 self.ui.note(_("committing files:\n"))
1979 1980 for f in sorted(ctx.modified() + ctx.added()):
1980 1981 self.ui.note(f + "\n")
1981 1982 try:
1982 1983 fctx = ctx[f]
1983 1984 if fctx is None:
1984 1985 removed.append(f)
1985 1986 else:
1986 1987 added.append(f)
1987 1988 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1988 1989 trp, changed)
1989 1990 m.setflag(f, fctx.flags())
1990 1991 except OSError as inst:
1991 1992 self.ui.warn(_("trouble committing %s!\n") % f)
1992 1993 raise
1993 1994 except IOError as inst:
1994 1995 errcode = getattr(inst, 'errno', errno.ENOENT)
1995 1996 if error or errcode and errcode != errno.ENOENT:
1996 1997 self.ui.warn(_("trouble committing %s!\n") % f)
1997 1998 raise
1998 1999
1999 2000 # update manifest
2000 2001 self.ui.note(_("committing manifest\n"))
2001 2002 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2002 2003 drop = [f for f in removed if f in m]
2003 2004 for f in drop:
2004 2005 del m[f]
2005 2006 mn = mctx.write(trp, linkrev,
2006 2007 p1.manifestnode(), p2.manifestnode(),
2007 2008 added, drop)
2008 2009 files = changed + removed
2009 2010 else:
2010 2011 mn = p1.manifestnode()
2011 2012 files = []
2012 2013
2013 2014 # update changelog
2014 2015 self.ui.note(_("committing changelog\n"))
2015 2016 self.changelog.delayupdate(tr)
2016 2017 n = self.changelog.add(mn, files, ctx.description(),
2017 2018 trp, p1.node(), p2.node(),
2018 2019 user, ctx.date(), ctx.extra().copy())
2019 2020 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2020 2021 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2021 2022 parent2=xp2)
2022 2023 # set the new commit in its proper phase
2023 2024 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2024 2025 if targetphase:
2025 2026 # retracting the boundary does not alter parent changesets.
2026 2027 # if a parent has a higher phase, the resulting phase will
2027 2028 # be compliant anyway
2028 2029 #
2029 2030 # if the minimal phase was 0 we don't need to retract anything
2030 2031 phases.registernew(self, tr, targetphase, [n])
2031 2032 tr.close()
2032 2033 return n
2033 2034 finally:
2034 2035 if tr:
2035 2036 tr.release()
2036 2037 lock.release()
2037 2038
2038 2039 @unfilteredmethod
2039 2040 def destroying(self):
2040 2041 '''Inform the repository that nodes are about to be destroyed.
2041 2042 Intended for use by strip and rollback, so there's a common
2042 2043 place for anything that has to be done before destroying history.
2043 2044
2044 2045 This is mostly useful for saving state that is in memory and waiting
2045 2046 to be flushed when the current lock is released. Because a call to
2046 2047 destroyed is imminent, the repo will be invalidated causing those
2047 2048 changes to stay in memory (waiting for the next unlock), or vanish
2048 2049 completely.
2049 2050 '''
2050 2051 # When using the same lock to commit and strip, the phasecache is left
2051 2052 # dirty after committing. Then when we strip, the repo is invalidated,
2052 2053 # causing those changes to disappear.
2053 2054 if '_phasecache' in vars(self):
2054 2055 self._phasecache.write()
2055 2056
2056 2057 @unfilteredmethod
2057 2058 def destroyed(self):
2058 2059 '''Inform the repository that nodes have been destroyed.
2059 2060 Intended for use by strip and rollback, so there's a common
2060 2061 place for anything that has to be done after destroying history.
2061 2062 '''
2062 2063 # When one tries to:
2063 2064 # 1) destroy nodes thus calling this method (e.g. strip)
2064 2065 # 2) use phasecache somewhere (e.g. commit)
2065 2066 #
2066 2067 # then 2) will fail because the phasecache contains nodes that were
2067 2068 # removed. We can either remove phasecache from the filecache,
2068 2069 # causing it to reload next time it is accessed, or simply filter
2069 2070 # the removed nodes now and write the updated cache.
2070 2071 self._phasecache.filterunknown(self)
2071 2072 self._phasecache.write()
2072 2073
2073 2074 # refresh all repository caches
2074 2075 self.updatecaches()
2075 2076
2076 2077 # Ensure the persistent tag cache is updated. Doing it now
2077 2078 # means that the tag cache only has to worry about destroyed
2078 2079 # heads immediately after a strip/rollback. That in turn
2079 2080 # guarantees that "cachetip == currenttip" (comparing both rev
2080 2081 # and node) always means no nodes have been added or destroyed.
2081 2082
2082 2083 # XXX this is suboptimal when qrefresh'ing: we strip the current
2083 2084 # head, refresh the tag cache, then immediately add a new head.
2084 2085 # But I think doing it this way is necessary for the "instant
2085 2086 # tag cache retrieval" case to work.
2086 2087 self.invalidate()
2087 2088
2088 2089 def status(self, node1='.', node2=None, match=None,
2089 2090 ignored=False, clean=False, unknown=False,
2090 2091 listsubrepos=False):
2091 2092 '''a convenience method that calls node1.status(node2)'''
2092 2093 return self[node1].status(node2, match, ignored, clean, unknown,
2093 2094 listsubrepos)
2094 2095
2095 2096 def addpostdsstatus(self, ps):
2096 2097 """Add a callback to run within the wlock, at the point at which status
2097 2098 fixups happen.
2098 2099
2099 2100 On status completion, callback(wctx, status) will be called with the
2100 2101 wlock held, unless the dirstate has changed from underneath or the wlock
2101 2102 couldn't be grabbed.
2102 2103
2103 2104 Callbacks should not capture and use a cached copy of the dirstate --
2104 2105 it might change in the meanwhile. Instead, they should access the
2105 2106 dirstate via wctx.repo().dirstate.
2106 2107
2107 2108 This list is emptied out after each status run -- extensions should
2108 2109 make sure they add to this list each time dirstate.status is called.
2109 2110 Extensions should also make sure they don't call this for statuses
2110 2111 that don't involve the dirstate.
2111 2112 """
2112 2113
2113 2114 # The list is located here for uniqueness reasons -- it is actually
2114 2115 # managed by the workingctx, but that isn't unique per-repo.
2115 2116 self._postdsstatus.append(ps)
2116 2117
2117 2118 def postdsstatus(self):
2118 2119 """Used by workingctx to get the list of post-dirstate-status hooks."""
2119 2120 return self._postdsstatus
2120 2121
2121 2122 def clearpostdsstatus(self):
2122 2123 """Used by workingctx to clear post-dirstate-status hooks."""
2123 2124 del self._postdsstatus[:]
2124 2125
2125 2126 def heads(self, start=None):
2126 2127 if start is None:
2127 2128 cl = self.changelog
2128 2129 headrevs = reversed(cl.headrevs())
2129 2130 return [cl.node(rev) for rev in headrevs]
2130 2131
2131 2132 heads = self.changelog.heads(start)
2132 2133 # sort the output in rev descending order
2133 2134 return sorted(heads, key=self.changelog.rev, reverse=True)
2134 2135
2135 2136 def branchheads(self, branch=None, start=None, closed=False):
2136 2137 '''return a (possibly filtered) list of heads for the given branch
2137 2138
2138 2139 Heads are returned in topological order, from newest to oldest.
2139 2140 If branch is None, use the dirstate branch.
2140 2141 If start is not None, return only heads reachable from start.
2141 2142 If closed is True, return heads that are marked as closed as well.
2142 2143 '''
2143 2144 if branch is None:
2144 2145 branch = self[None].branch()
2145 2146 branches = self.branchmap()
2146 2147 if branch not in branches:
2147 2148 return []
2148 2149 # the cache returns heads ordered lowest to highest
2149 2150 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2150 2151 if start is not None:
2151 2152 # filter out the heads that cannot be reached from startrev
2152 2153 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2153 2154 bheads = [h for h in bheads if h in fbheads]
2154 2155 return bheads
2155 2156
2156 2157 def branches(self, nodes):
2157 2158 if not nodes:
2158 2159 nodes = [self.changelog.tip()]
2159 2160 b = []
2160 2161 for n in nodes:
2161 2162 t = n
2162 2163 while True:
2163 2164 p = self.changelog.parents(n)
2164 2165 if p[1] != nullid or p[0] == nullid:
2165 2166 b.append((t, n, p[0], p[1]))
2166 2167 break
2167 2168 n = p[0]
2168 2169 return b
2169 2170
2170 2171 def between(self, pairs):
2171 2172 r = []
2172 2173
2173 2174 for top, bottom in pairs:
2174 2175 n, l, i = top, [], 0
2175 2176 f = 1
2176 2177
2177 2178 while n != bottom and n != nullid:
2178 2179 p = self.changelog.parents(n)[0]
2179 2180 if i == f:
2180 2181 l.append(n)
2181 2182 f = f * 2
2182 2183 n = p
2183 2184 i += 1
2184 2185
2185 2186 r.append(l)
2186 2187
2187 2188 return r
2188 2189
2189 2190 def checkpush(self, pushop):
2190 2191 """Extensions can override this function if additional checks have
2191 2192 to be performed before pushing, or call it if they override push
2192 2193 command.
2193 2194 """
2194 2195
2195 2196 @unfilteredpropertycache
2196 2197 def prepushoutgoinghooks(self):
2197 2198 """Return a util.hooks object consisting of a pushop with repo, remote,
2198 2199 and outgoing methods, which are called before pushing changesets.
2199 2200 """
2200 2201 return util.hooks()
2201 2202
2202 2203 def pushkey(self, namespace, key, old, new):
2203 2204 try:
2204 2205 tr = self.currenttransaction()
2205 2206 hookargs = {}
2206 2207 if tr is not None:
2207 2208 hookargs.update(tr.hookargs)
2208 2209 hookargs = pycompat.strkwargs(hookargs)
2209 2210 hookargs[r'namespace'] = namespace
2210 2211 hookargs[r'key'] = key
2211 2212 hookargs[r'old'] = old
2212 2213 hookargs[r'new'] = new
2213 2214 self.hook('prepushkey', throw=True, **hookargs)
2214 2215 except error.HookAbort as exc:
2215 2216 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2216 2217 if exc.hint:
2217 2218 self.ui.write_err(_("(%s)\n") % exc.hint)
2218 2219 return False
2219 2220 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2220 2221 ret = pushkey.push(self, namespace, key, old, new)
2221 2222 def runhook():
2222 2223 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2223 2224 ret=ret)
2224 2225 self._afterlock(runhook)
2225 2226 return ret
2226 2227
2227 2228 def listkeys(self, namespace):
2228 2229 self.hook('prelistkeys', throw=True, namespace=namespace)
2229 2230 self.ui.debug('listing keys for "%s"\n' % namespace)
2230 2231 values = pushkey.list(self, namespace)
2231 2232 self.hook('listkeys', namespace=namespace, values=values)
2232 2233 return values
2233 2234
2234 2235 def debugwireargs(self, one, two, three=None, four=None, five=None):
2235 2236 '''used to test argument passing over the wire'''
2236 2237 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2237 2238 pycompat.bytestr(four),
2238 2239 pycompat.bytestr(five))
2239 2240
2240 2241 def savecommitmessage(self, text):
2241 2242 fp = self.vfs('last-message.txt', 'wb')
2242 2243 try:
2243 2244 fp.write(text)
2244 2245 finally:
2245 2246 fp.close()
2246 2247 return self.pathto(fp.name[len(self.root) + 1:])
2247 2248
2248 2249 # used to avoid circular references so destructors work
2249 2250 def aftertrans(files):
2250 2251 renamefiles = [tuple(t) for t in files]
2251 2252 def a():
2252 2253 for vfs, src, dest in renamefiles:
2253 2254 # if src and dest refer to the same file, vfs.rename is a no-op,
2254 2255 # leaving both src and dest on disk. delete dest to make sure
2255 2256 # the rename cannot be such a no-op.
2256 2257 vfs.tryunlink(dest)
2257 2258 try:
2258 2259 vfs.rename(src, dest)
2259 2260 except OSError: # journal file does not yet exist
2260 2261 pass
2261 2262 return a
2262 2263
2263 2264 def undoname(fn):
2264 2265 base, name = os.path.split(fn)
2265 2266 assert name.startswith('journal')
2266 2267 return os.path.join(base, name.replace('journal', 'undo', 1))
2267 2268
2268 2269 def instance(ui, path, create):
2269 2270 return localrepository(ui, util.urllocalpath(path), create)
2270 2271
2271 2272 def islocal(path):
2272 2273 return True
2273 2274
2274 2275 def newreporequirements(repo):
2275 2276 """Determine the set of requirements for a new local repository.
2276 2277
2277 2278 Extensions can wrap this function to specify custom requirements for
2278 2279 new repositories.
2279 2280 """
2280 2281 ui = repo.ui
2281 2282 requirements = {'revlogv1'}
2282 2283 if ui.configbool('format', 'usestore'):
2283 2284 requirements.add('store')
2284 2285 if ui.configbool('format', 'usefncache'):
2285 2286 requirements.add('fncache')
2286 2287 if ui.configbool('format', 'dotencode'):
2287 2288 requirements.add('dotencode')
2288 2289
2289 2290 compengine = ui.config('experimental', 'format.compression')
2290 2291 if compengine not in util.compengines:
2291 2292 raise error.Abort(_('compression engine %s defined by '
2292 2293 'experimental.format.compression not available') %
2293 2294 compengine,
2294 2295 hint=_('run "hg debuginstall" to list available '
2295 2296 'compression engines'))
2296 2297
2297 2298 # zlib is the historical default and doesn't need an explicit requirement.
2298 2299 if compengine != 'zlib':
2299 2300 requirements.add('exp-compression-%s' % compengine)
2300 2301
2301 2302 if scmutil.gdinitconfig(ui):
2302 2303 requirements.add('generaldelta')
2303 2304 if ui.configbool('experimental', 'treemanifest'):
2304 2305 requirements.add('treemanifest')
2305 2306
2306 2307 revlogv2 = ui.config('experimental', 'revlogv2')
2307 2308 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2308 2309 requirements.remove('revlogv1')
2309 2310 # generaldelta is implied by revlogv2.
2310 2311 requirements.discard('generaldelta')
2311 2312 requirements.add(REVLOGV2_REQUIREMENT)
2312 2313
2313 2314 return requirements
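With stock settings (store, fncache, dotencode and generaldelta enabled, zlib compression), the returned set would plausibly look like the following sketch:

    # illustrative only; actual contents depend on configuration
    {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}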
@@ -1,629 +1,639
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24
25 25 version = 2
26 26
27 27 # These are the file generators that should only be executed after the
28 28 # finalizers are done, since they rely on the output of the finalizers (like
29 29 # the changelog having been written).
30 30 postfinalizegenerators = {
31 31 'bookmarks',
32 32 'dirstate'
33 33 }
34 34
35 35 gengroupall='all'
36 36 gengroupprefinalize='prefinalize'
37 37 gengrouppostfinalize='postfinalize'
38 38
39 39 def active(func):
40 40 def _active(self, *args, **kwds):
41 41 if self.count == 0:
42 42 raise error.Abort(_(
43 43 'cannot use transaction when it is already committed/aborted'))
44 44 return func(self, *args, **kwds)
45 45 return _active
46 46
47 47 def _playback(journal, report, opener, vfsmap, entries, backupentries,
48 48 unlink=True, checkambigfiles=None):
49 49 for f, o, _ignore in entries:
50 50 if o or not unlink:
51 51 checkambig = checkambigfiles and (f, '') in checkambigfiles
52 52 try:
53 53 fp = opener(f, 'a', checkambig=checkambig)
54 54 fp.truncate(o)
55 55 fp.close()
56 56 except IOError:
57 57 report(_("failed to truncate %s\n") % f)
58 58 raise
59 59 else:
60 60 try:
61 61 opener.unlink(f)
62 62 except (IOError, OSError) as inst:
63 63 if inst.errno != errno.ENOENT:
64 64 raise
65 65
66 66 backupfiles = []
67 67 for l, f, b, c in backupentries:
68 68 if l not in vfsmap and c:
69 69 report("couldn't handle %s: unknown cache location %s\n"
70 70 % (b, l))
71 71 vfs = vfsmap[l]
72 72 try:
73 73 if f and b:
74 74 filepath = vfs.join(f)
75 75 backuppath = vfs.join(b)
76 76 checkambig = checkambigfiles and (f, l) in checkambigfiles
77 77 try:
78 78 util.copyfile(backuppath, filepath, checkambig=checkambig)
79 79 backupfiles.append(b)
80 80 except IOError:
81 81 report(_("failed to recover %s\n") % f)
82 82 else:
83 83 target = f or b
84 84 try:
85 85 vfs.unlink(target)
86 86 except (IOError, OSError) as inst:
87 87 if inst.errno != errno.ENOENT:
88 88 raise
89 89 except (IOError, OSError, error.Abort) as inst:
90 90 if not c:
91 91 raise
92 92
93 93 backuppath = "%s.backupfiles" % journal
94 94 if opener.exists(backuppath):
95 95 opener.unlink(backuppath)
96 96 opener.unlink(journal)
97 97 try:
98 98 for f in backupfiles:
99 99 if opener.exists(f):
100 100 opener.unlink(f)
101 101 except (IOError, OSError, error.Abort) as inst:
102 102 # only pure backup files remain; it is safe to ignore any error
103 103 pass
104 104
105 105 class transaction(util.transactional):
106 106 def __init__(self, report, opener, vfsmap, journalname, undoname=None,
107 107 after=None, createmode=None, validator=None, releasefn=None,
108 checkambigfiles=None):
108 checkambigfiles=None, name=r'<unnamed>'):
109 109 """Begin a new transaction
110 110
111 111 Begins a new transaction that allows rolling back writes in the event of
112 112 an exception.
113 113
114 114 * `after`: called after the transaction has been committed
115 115 * `createmode`: the mode of the journal file that will be created
116 116 * `releasefn`: called after releasing (with transaction and result)
117 117
118 118 `checkambigfiles` is a set of (path, vfs-location) tuples,
119 119 which determine whether file stat ambiguity should be avoided
120 120 for the corresponding files.
121 121 """
122 122 self.count = 1
123 123 self.usages = 1
124 124 self.report = report
125 125 # a vfs to the store content
126 126 self.opener = opener
127 127 # a map to access files in various locations {location -> vfs}
128 128 vfsmap = vfsmap.copy()
129 129 vfsmap[''] = opener # set default value
130 130 self._vfsmap = vfsmap
131 131 self.after = after
132 132 self.entries = []
133 133 self.map = {}
134 134 self.journal = journalname
135 135 self.undoname = undoname
136 136 self._queue = []
137 137 # A callback to validate transaction content before closing it.
138 138 # should raise an exception if anything is wrong.
139 139 # target user is repository hooks.
140 140 if validator is None:
141 141 validator = lambda tr: None
142 142 self.validator = validator
143 143 # A callback to do something just after releasing transaction.
144 144 if releasefn is None:
145 145 releasefn = lambda tr, success: None
146 146 self.releasefn = releasefn
147 147
148 148 self.checkambigfiles = set()
149 149 if checkambigfiles:
150 150 self.checkambigfiles.update(checkambigfiles)
151 151
152 self.names = [name]
153
152 154 # A dict dedicated to precisely tracking the changes introduced in the
153 155 # transaction.
154 156 self.changes = {}
155 157
156 158 # a dict of arguments to be passed to hooks
157 159 self.hookargs = {}
158 160 self.file = opener.open(self.journal, "w")
159 161
160 162 # a list of ('location', 'path', 'backuppath', cache) entries.
161 163 # - if 'backuppath' is empty, no file existed at backup time
162 164 # - if 'path' is empty, this is a temporary transaction file
163 165 # - if 'location' is not empty, the path is outside main opener reach.
164 166 # use 'location' value as a key in a vfsmap to find the right 'vfs'
165 167 # (cache is currently unused)
166 168 self._backupentries = []
167 169 self._backupmap = {}
168 170 self._backupjournal = "%s.backupfiles" % self.journal
169 171 self._backupsfile = opener.open(self._backupjournal, 'w')
170 172 self._backupsfile.write('%d\n' % version)
171 173
172 174 if createmode is not None:
173 175 opener.chmod(self.journal, createmode & 0o666)
174 176 opener.chmod(self._backupjournal, createmode & 0o666)
175 177
176 178 # hold file generations to be performed on commit
177 179 self._filegenerators = {}
178 180 # hold callback to write pending data for hooks
179 181 self._pendingcallback = {}
180 182 # True if any pending data has ever been written
181 183 self._anypending = False
182 184 # holds callback to call when writing the transaction
183 185 self._finalizecallback = {}
184 186 # hold callback for post transaction close
185 187 self._postclosecallback = {}
186 188 # holds callbacks to call during abort
187 189 self._abortcallback = {}
188 190
191 def __repr__(self):
192 name = r'/'.join(self.names)
193 return (r'<transaction name=%s, count=%d, usages=%d>' %
194 (name, self.count, self.usages))
195
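A hedged sketch of the new repr output, assuming a transaction opened with name='commit' and one nested scope named 'inner':

    # repr(tr) right after creation:
    #   <transaction name=commit, count=1, usages=1>
    # after tr.nest(name='inner'):
    #   <transaction name=commit/inner, count=2, usages=2>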
189 196 def __del__(self):
190 197 if self.journal:
191 198 self._abort()
192 199
193 200 @active
194 201 def startgroup(self):
195 202 """delay registration of file entry
196 203
197 204 This is used by strip to delay visibility of the strip offset. The
198 205 transaction sees either none or all of the strip actions to be done."""
199 206 self._queue.append([])
200 207
201 208 @active
202 209 def endgroup(self):
203 210 """apply delayed registration of file entry.
204 211
205 212 This is used by strip to delay visibility of the strip offset. The
206 213 transaction sees either none or all of the strip actions to be done."""
207 214 q = self._queue.pop()
208 215 for f, o, data in q:
209 216 self._addentry(f, o, data)
210 217
211 218 @active
212 219 def add(self, file, offset, data=None):
213 220 """record the state of an append-only file before update"""
214 221 if file in self.map or file in self._backupmap:
215 222 return
216 223 if self._queue:
217 224 self._queue[-1].append((file, offset, data))
218 225 return
219 226
220 227 self._addentry(file, offset, data)
221 228
222 229 def _addentry(self, file, offset, data):
223 230 """add an append-only entry to memory and on-disk state"""
224 231 if file in self.map or file in self._backupmap:
225 232 return
226 233 self.entries.append((file, offset, data))
227 234 self.map[file] = len(self.entries) - 1
228 235 # add enough data to the journal to do the truncate
229 236 self.file.write("%s\0%d\n" % (file, offset))
230 237 self.file.flush()
231 238
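Each journal record written above is a 'file\0offset' pair on its own line; a small illustration (paths and offsets hypothetical, NUL shown escaped):

    record = '%s\0%d\n' % ('00changelog.i', 12345)
    # the journal then contains lines such as:
    #   00changelog.i\x0012345
    #   data/foo.i\x00678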
232 239 @active
233 240 def addbackup(self, file, hardlink=True, location=''):
234 241 """Adds a backup of the file to the transaction
235 242
236 243 Calling addbackup() creates a hardlink backup of the specified file
237 244 that is used to recover the file in the event of the transaction
238 245 aborting.
239 246
240 247 * `file`: the file path, relative to .hg/store
241 248 * `hardlink`: use a hardlink to quickly create the backup
242 249 """
243 250 if self._queue:
244 251 msg = 'cannot use transaction.addbackup inside "group"'
245 252 raise error.ProgrammingError(msg)
246 253
247 254 if file in self.map or file in self._backupmap:
248 255 return
249 256 vfs = self._vfsmap[location]
250 257 dirname, filename = vfs.split(file)
251 258 backupfilename = "%s.backup.%s" % (self.journal, filename)
252 259 backupfile = vfs.reljoin(dirname, backupfilename)
253 260 if vfs.exists(file):
254 261 filepath = vfs.join(file)
255 262 backuppath = vfs.join(backupfile)
256 263 util.copyfile(filepath, backuppath, hardlink=hardlink)
257 264 else:
258 265 backupfile = ''
259 266
260 267 self._addbackupentry((location, file, backupfile, False))
261 268
262 269 def _addbackupentry(self, entry):
263 270 """register a new backup entry and write it to disk"""
264 271 self._backupentries.append(entry)
265 272 self._backupmap[entry[1]] = len(self._backupentries) - 1
266 273 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
267 274 self._backupsfile.flush()
268 275
269 276 @active
270 277 def registertmp(self, tmpfile, location=''):
271 278 """register a temporary transaction file
272 279
273 280 Such files will be deleted when the transaction exits (on both
274 281 failure and success).
275 282 """
276 283 self._addbackupentry((location, '', tmpfile, False))
277 284
278 285 @active
279 286 def addfilegenerator(self, genid, filenames, genfunc, order=0,
280 287 location=''):
281 288 """add a function to generate some files at transaction commit
282 289 
283 290 The `genfunc` argument is a function capable of generating proper
284 291 content for each entry in the `filenames` tuple.
285 292 
286 293 At transaction close time, `genfunc` will be called with one file
287 294 object argument per entry in `filenames`.
288 295 
289 296 The transaction itself is responsible for the backup, creation and
290 297 final write of such files.
291 298 
292 299 The `genid` argument is used to ensure the same set of files is only
293 300 generated once. A call to `addfilegenerator` for a `genid` already
294 301 present will overwrite the old entry.
295 302 
296 303 The `order` argument may be used to control the order in which
297 304 multiple generators will be executed.
298 305 
299 306 The `location` argument may be used to indicate that the files are
300 307 located outside of the standard directory for the transaction. It
301 308 should match one of the keys of the `transaction.vfsmap` dictionary.
302 309 """
303 310 # For now, we are unable to do a proper backup and restore of custom
304 311 # vfs, except for bookmarks, which are handled outside this mechanism.
305 312 self._filegenerators[genid] = (order, filenames, genfunc, location)
306 313
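A usage sketch for addfilegenerator; 'bookmarks' is one of the post-finalize genids listed at the top of the module, and the generator body is illustrative:

    def writebookmarks(fp):
        fp.write('0123abcd mybookmark\n')  # illustrative content
    tr.addfilegenerator('bookmarks', ('bookmarks',), writebookmarks,
                        location='plain')  # lives in .hg, not the store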
307 314 @active
308 315 def removefilegenerator(self, genid):
309 316 """reverse of addfilegenerator, remove a file generator function"""
310 317 if genid in self._filegenerators:
311 318 del self._filegenerators[genid]
312 319
313 320 def _generatefiles(self, suffix='', group=gengroupall):
314 321 # write files registered for generation
315 322 any = False
316 323 for id, entry in sorted(self._filegenerators.iteritems()):
317 324 any = True
318 325 order, filenames, genfunc, location = entry
319 326
320 327 # for generation at closing, check if it's before or after finalize
321 328 postfinalize = group == gengrouppostfinalize
322 329 if (group != gengroupall and
323 330 (id in postfinalizegenerators) != (postfinalize)):
324 331 continue
325 332
326 333 vfs = self._vfsmap[location]
327 334 files = []
328 335 try:
329 336 for name in filenames:
330 337 name += suffix
331 338 if suffix:
332 339 self.registertmp(name, location=location)
333 340 checkambig = False
334 341 else:
335 342 self.addbackup(name, location=location)
336 343 checkambig = (name, location) in self.checkambigfiles
337 344 files.append(vfs(name, 'w', atomictemp=True,
338 345 checkambig=checkambig))
339 346 genfunc(*files)
340 347 finally:
341 348 for f in files:
342 349 f.close()
343 350 return any
344 351
345 352 @active
346 353 def find(self, file):
347 354 if file in self.map:
348 355 return self.entries[self.map[file]]
349 356 if file in self._backupmap:
350 357 return self._backupentries[self._backupmap[file]]
351 358 return None
352 359
353 360 @active
354 361 def replace(self, file, offset, data=None):
355 362 '''
356 363 replace can only replace already committed entries
357 364 that are not pending in the queue
358 365 '''
359 366
360 367 if file not in self.map:
361 368 raise KeyError(file)
362 369 index = self.map[file]
363 370 self.entries[index] = (file, offset, data)
364 371 self.file.write("%s\0%d\n" % (file, offset))
365 372 self.file.flush()
366 373
367 374 @active
368 def nest(self):
375 def nest(self, name=r'<unnamed>'):
369 376 self.count += 1
370 377 self.usages += 1
378 self.names.append(name)
371 379 return self
372 380
373 381 def release(self):
374 382 if self.count > 0:
375 383 self.usages -= 1
384 if self.names:
385 self.names.pop()
376 386 # if the transaction scopes are left without being closed, fail
377 387 if self.count > 0 and self.usages == 0:
378 388 self._abort()
379 389
380 390 def running(self):
381 391 return self.count > 0
382 392
383 393 def addpending(self, category, callback):
384 394 """add a callback to be called when the transaction is pending
385 395
386 396 The transaction will be given as callback's first argument.
387 397
388 398 Category is a unique identifier to allow overwriting an old callback
389 399 with a newer callback.
390 400 """
391 401 self._pendingcallback[category] = callback
392 402
393 403 @active
394 404 def writepending(self):
395 405 '''write pending file to temporary version
396 406
397 407 This is used to allow hooks to view a transaction before commit'''
398 408 categories = sorted(self._pendingcallback)
399 409 for cat in categories:
400 410 # remove callback since the data will have been flushed
401 411 any = self._pendingcallback.pop(cat)(self)
402 412 self._anypending = self._anypending or any
403 413 self._anypending |= self._generatefiles(suffix='.pending')
404 414 return self._anypending
405 415
406 416 @active
407 417 def addfinalize(self, category, callback):
408 418 """add a callback to be called when the transaction is closed
409 419
410 420 The transaction will be given as callback's first argument.
411 421
412 422 Category is a unique identifier to allow overwriting old callbacks with
413 423 newer callbacks.
414 424 """
415 425 self._finalizecallback[category] = callback
416 426
417 427 @active
418 428 def addpostclose(self, category, callback):
419 429 """add or replace a callback to be called after the transaction closed
420 430
421 431 The transaction will be given as callback's first argument.
422 432
423 433 Category is a unique identifier to allow overwriting an old callback
424 434 with a newer callback.
425 435 """
426 436 self._postclosecallback[category] = callback
427 437
428 438 @active
429 439 def getpostclose(self, category):
430 440 """return a postclose callback added before, or None"""
431 441 return self._postclosecallback.get(category, None)
432 442
433 443 @active
434 444 def addabort(self, category, callback):
435 445 """add a callback to be called when the transaction is aborted.
436 446
437 447 The transaction will be given as the first argument to the callback.
438 448
439 449 Category is a unique identifier to allow overwriting an old callback
440 450 with a newer callback.
441 451 """
442 452 self._abortcallback[category] = callback
443 453
444 454 @active
445 455 def close(self):
446 456 '''commit the transaction'''
447 457 if self.count == 1:
448 458 self.validator(self) # will raise exception if needed
449 459 self.validator = None # Help prevent cycles.
450 460 self._generatefiles(group=gengroupprefinalize)
451 461 categories = sorted(self._finalizecallback)
452 462 for cat in categories:
453 463 self._finalizecallback[cat](self)
454 464 # Prevent double usage and help clear cycles.
455 465 self._finalizecallback = None
456 466 self._generatefiles(group=gengrouppostfinalize)
457 467
458 468 self.count -= 1
459 469 if self.count != 0:
460 470 return
461 471 self.file.close()
462 472 self._backupsfile.close()
463 473 # cleanup temporary files
464 474 for l, f, b, c in self._backupentries:
465 475 if l not in self._vfsmap and c:
466 476 self.report("couldn't remove %s: unknown cache location %s\n"
467 477 % (b, l))
468 478 continue
469 479 vfs = self._vfsmap[l]
470 480 if not f and b and vfs.exists(b):
471 481 try:
472 482 vfs.unlink(b)
473 483 except (IOError, OSError, error.Abort) as inst:
474 484 if not c:
475 485 raise
476 486 # Abort may be raised by a read-only opener
477 487 self.report("couldn't remove %s: %s\n"
478 488 % (vfs.join(b), inst))
479 489 self.entries = []
480 490 self._writeundo()
481 491 if self.after:
482 492 self.after()
483 493 self.after = None # Help prevent cycles.
484 494 if self.opener.isfile(self._backupjournal):
485 495 self.opener.unlink(self._backupjournal)
486 496 if self.opener.isfile(self.journal):
487 497 self.opener.unlink(self.journal)
488 498 for l, _f, b, c in self._backupentries:
489 499 if l not in self._vfsmap and c:
490 500 self.report("couldn't remove %s: unknown cache location "
491 501 "%s\n" % (b, l))
492 502 continue
493 503 vfs = self._vfsmap[l]
494 504 if b and vfs.exists(b):
495 505 try:
496 506 vfs.unlink(b)
497 507 except (IOError, OSError, error.Abort) as inst:
498 508 if not c:
499 509 raise
500 510 # Abort may be raised by a read-only opener
501 511 self.report("couldn't remove %s: %s\n"
502 512 % (vfs.join(b), inst))
503 513 self._backupentries = []
504 514 self.journal = None
505 515
506 516 self.releasefn(self, True) # notify success of closing transaction
507 517 self.releasefn = None # Help prevent cycles.
508 518
509 519 # run post close action
510 520 categories = sorted(self._postclosecallback)
511 521 for cat in categories:
512 522 self._postclosecallback[cat](self)
513 523 # Prevent double usage and help clear cycles.
514 524 self._postclosecallback = None
515 525
516 526 @active
517 527 def abort(self):
518 528 '''abort the transaction (generally called on error, or when the
519 529 transaction is not explicitly committed before going out of
520 530 scope)'''
521 531 self._abort()
522 532
523 533 def _writeundo(self):
524 534 """write transaction data for possible future undo call"""
525 535 if self.undoname is None:
526 536 return
527 537 undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
528 538 undobackupfile.write('%d\n' % version)
529 539 for l, f, b, c in self._backupentries:
530 540 if not f: # temporary file
531 541 continue
532 542 if not b:
533 543 u = ''
534 544 else:
535 545 if l not in self._vfsmap and c:
536 546 self.report("couldn't remove %s: unknown cache location "
537 547 "%s\n" % (b, l))
538 548 continue
539 549 vfs = self._vfsmap[l]
540 550 base, name = vfs.split(b)
541 551 assert name.startswith(self.journal), name
542 552 uname = name.replace(self.journal, self.undoname, 1)
543 553 u = vfs.reljoin(base, uname)
544 554 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
545 555 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
546 556 undobackupfile.close()
547 557
548 558
549 559 def _abort(self):
550 560 self.count = 0
551 561 self.usages = 0
552 562 self.file.close()
553 563 self._backupsfile.close()
554 564
555 565 try:
556 566 if not self.entries and not self._backupentries:
557 567 if self._backupjournal:
558 568 self.opener.unlink(self._backupjournal)
559 569 if self.journal:
560 570 self.opener.unlink(self.journal)
561 571 return
562 572
563 573 self.report(_("transaction abort!\n"))
564 574
565 575 try:
566 576 for cat in sorted(self._abortcallback):
567 577 self._abortcallback[cat](self)
568 578 # Prevent double usage and help clear cycles.
569 579 self._abortcallback = None
570 580 _playback(self.journal, self.report, self.opener, self._vfsmap,
571 581 self.entries, self._backupentries, False,
572 582 checkambigfiles=self.checkambigfiles)
573 583 self.report(_("rollback completed\n"))
574 584 except BaseException:
575 585 self.report(_("rollback failed - please run hg recover\n"))
576 586 finally:
577 587 self.journal = None
578 588 self.releasefn(self, False) # notify failure of transaction
579 589 self.releasefn = None # Help prevent cycles.
580 590
581 591 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
582 592 """Rolls back the transaction contained in the given file
583 593
584 594 Reads the entries in the specified file, and the corresponding
585 595 '*.backupfiles' file, to recover from an incomplete transaction.
586 596
587 597 * `file`: a file containing a list of entries, specifying where
588 598 to truncate each file. The file should contain a list of
589 599 file\0offset pairs, delimited by newlines. The corresponding
590 600 '*.backupfiles' file should contain a list of file\0backupfile
591 601 pairs, delimited by \0.
592 602
593 603 `checkambigfiles` is a set of (path, vfs-location) tuples,
594 604 which determine whether file stat ambiguity should be avoided when
595 605 restoring the corresponding files.
596 606 """
597 607 entries = []
598 608 backupentries = []
599 609
600 610 fp = opener.open(file)
601 611 lines = fp.readlines()
602 612 fp.close()
603 613 for l in lines:
604 614 try:
605 615 f, o = l.split('\0')
606 616 entries.append((f, int(o), None))
607 617 except ValueError:
608 618 report(
609 619 _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))
610 620
611 621 backupjournal = "%s.backupfiles" % file
612 622 if opener.exists(backupjournal):
613 623 fp = opener.open(backupjournal)
614 624 lines = fp.readlines()
615 625 if lines:
616 626 ver = lines[0][:-1]
617 627 if ver == (b'%d' % version):
618 628 for line in lines[1:]:
619 629 if line:
620 630 # Shave off the trailing newline
621 631 line = line[:-1]
622 632 l, f, b, c = line.split('\0')
623 633 backupentries.append((l, f, b, bool(c)))
624 634 else:
625 635 report(_("journal was created by a different version of "
626 636 "Mercurial\n"))
627 637
628 638 _playback(file, report, opener, vfsmap, entries, backupentries,
629 639 checkambigfiles=checkambigfiles)
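The '*.backupfiles' sidecar parsed above holds a version line followed by one 'location\0file\0backupfile\0cache' record per line; a hedged illustration (NUL shown escaped, names hypothetical):

    # journal.backupfiles:
    #   2
    #   \x00phaseroots\x00journal.backup.phaseroots\x000
    #   plain\x00bookmarks\x00journal.backup.bookmarks\x000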