commit: add debug message regarding manifest reuse
Yuya Nishihara
r39145:a915db9a default
@@ -1,2410 +1,2412 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 release = lockmod.release
74 74 urlerr = util.urlerr
75 75 urlreq = util.urlreq
76 76
77 77 # set of (path, vfs-location) tuples. vfs-location is:
78 78 # - 'plain' for vfs relative paths
79 79 # - '' for svfs relative paths
80 80 _cachedfiles = set()
81 81
82 82 class _basefilecache(scmutil.filecache):
83 83 """All filecache usage on a repo is done for logic that should be unfiltered
84 84 """
85 85 def __get__(self, repo, type=None):
86 86 if repo is None:
87 87 return self
88 88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
89 89 def __set__(self, repo, value):
90 90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
91 91 def __delete__(self, repo):
92 92 return super(_basefilecache, self).__delete__(repo.unfiltered())
93 93
94 94 class repofilecache(_basefilecache):
95 95 """filecache for files in .hg but outside of .hg/store"""
96 96 def __init__(self, *paths):
97 97 super(repofilecache, self).__init__(*paths)
98 98 for path in paths:
99 99 _cachedfiles.add((path, 'plain'))
100 100
101 101 def join(self, obj, fname):
102 102 return obj.vfs.join(fname)
103 103
104 104 class storecache(_basefilecache):
105 105 """filecache for files in the store"""
106 106 def __init__(self, *paths):
107 107 super(storecache, self).__init__(*paths)
108 108 for path in paths:
109 109 _cachedfiles.add((path, ''))
110 110
111 111 def join(self, obj, fname):
112 112 return obj.sjoin(fname)
113 113
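# Illustrative sketch (editorial; mirrors actual uses further down in this
# module): a decorated property is recomputed only when its backing file
# changes on disk.
#
#     @repofilecache('bookmarks', 'bookmarks.current')   # files in .hg/
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
#     @storecache('00changelog.i')                       # files in .hg/store
#     def changelog(self):
#         ...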
114 114 def isfilecached(repo, name):
115 115 """check if a repo has already cached the "name" filecache-ed property
116 116
117 117 This returns a (cachedobj-or-None, iscached) tuple.
118 118 """
119 119 cacheentry = repo.unfiltered()._filecache.get(name, None)
120 120 if not cacheentry:
121 121 return None, False
122 122 return cacheentry.obj, True
123 123
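# Usage sketch (hedged; 'changelog' names a filecache-ed property declared
# on localrepository below):
#
#     cl, iscached = isfilecached(repo, 'changelog')
#     if iscached:
#         tiprev = len(cl) - 1   # act on it without forcing a fresh load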
124 124 class unfilteredpropertycache(util.propertycache):
125 125 """propertycache that applies to the unfiltered repo only"""
126 126
127 127 def __get__(self, repo, type=None):
128 128 unfi = repo.unfiltered()
129 129 if unfi is repo:
130 130 return super(unfilteredpropertycache, self).__get__(unfi)
131 131 return getattr(unfi, self.name)
132 132
133 133 class filteredpropertycache(util.propertycache):
134 134 """propertycache that must take filtering into account"""
135 135
136 136 def cachevalue(self, obj, value):
137 137 object.__setattr__(obj, self.name, value)
138 138
139 139
140 140 def hasunfilteredcache(repo, name):
141 141 """check if a repo has an unfilteredpropertycache value for <name>"""
142 142 return name in vars(repo.unfiltered())
143 143
144 144 def unfilteredmethod(orig):
145 145 """decorate a method that always needs to be run on the unfiltered version"""
146 146 def wrapper(repo, *args, **kwargs):
147 147 return orig(repo.unfiltered(), *args, **kwargs)
148 148 return wrapper
149 149
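# Usage sketch: a method defined with this decorator always operates on
# the unfiltered repository, whatever filtered view it was invoked on
# (see revbranchcache() below for a real use):
#
#     @unfilteredmethod
#     def revbranchcache(self):
#         ...   # here self is repo.unfiltered()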
150 150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
151 151 'unbundle'}
152 152 legacycaps = moderncaps.union({'changegroupsubset'})
153 153
154 154 @interfaceutil.implementer(repository.ipeercommandexecutor)
155 155 class localcommandexecutor(object):
156 156 def __init__(self, peer):
157 157 self._peer = peer
158 158 self._sent = False
159 159 self._closed = False
160 160
161 161 def __enter__(self):
162 162 return self
163 163
164 164 def __exit__(self, exctype, excvalue, exctb):
165 165 self.close()
166 166
167 167 def callcommand(self, command, args):
168 168 if self._sent:
169 169 raise error.ProgrammingError('callcommand() cannot be used after '
170 170 'sendcommands()')
171 171
172 172 if self._closed:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'close()')
175 175
176 176 # We don't need to support anything fancy. Just call the named
177 177 # method on the peer and return a resolved future.
178 178 fn = getattr(self._peer, pycompat.sysstr(command))
179 179
180 180 f = pycompat.futures.Future()
181 181
182 182 try:
183 183 result = fn(**pycompat.strkwargs(args))
184 184 except Exception:
185 185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
186 186 else:
187 187 f.set_result(result)
188 188
189 189 return f
190 190
191 191 def sendcommands(self):
192 192 self._sent = True
193 193
194 194 def close(self):
195 195 self._closed = True
196 196
197 197 @interfaceutil.implementer(repository.ipeercommands)
198 198 class localpeer(repository.peer):
199 199 '''peer for a local repo; reflects only the most recent API'''
200 200
201 201 def __init__(self, repo, caps=None):
202 202 super(localpeer, self).__init__()
203 203
204 204 if caps is None:
205 205 caps = moderncaps.copy()
206 206 self._repo = repo.filtered('served')
207 207 self.ui = repo.ui
208 208 self._caps = repo._restrictcapabilities(caps)
209 209
210 210 # Begin of _basepeer interface.
211 211
212 212 def url(self):
213 213 return self._repo.url()
214 214
215 215 def local(self):
216 216 return self._repo
217 217
218 218 def peer(self):
219 219 return self
220 220
221 221 def canpush(self):
222 222 return True
223 223
224 224 def close(self):
225 225 self._repo.close()
226 226
227 227 # End of _basepeer interface.
228 228
229 229 # Begin of _basewirecommands interface.
230 230
231 231 def branchmap(self):
232 232 return self._repo.branchmap()
233 233
234 234 def capabilities(self):
235 235 return self._caps
236 236
237 237 def clonebundles(self):
238 238 return self._repo.tryread('clonebundles.manifest')
239 239
240 240 def debugwireargs(self, one, two, three=None, four=None, five=None):
241 241 """Used to test argument passing over the wire"""
242 242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
243 243 pycompat.bytestr(four),
244 244 pycompat.bytestr(five))
245 245
246 246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
247 247 **kwargs):
248 248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
249 249 common=common, bundlecaps=bundlecaps,
250 250 **kwargs)[1]
251 251 cb = util.chunkbuffer(chunks)
252 252
253 253 if exchange.bundle2requested(bundlecaps):
254 254 # When requesting a bundle2, getbundle returns a stream to make the
255 255 # wire level function happier. We need to build a proper object
256 256 # from it in the local peer.
257 257 return bundle2.getunbundler(self.ui, cb)
258 258 else:
259 259 return changegroup.getunbundler('01', cb, None)
260 260
261 261 def heads(self):
262 262 return self._repo.heads()
263 263
264 264 def known(self, nodes):
265 265 return self._repo.known(nodes)
266 266
267 267 def listkeys(self, namespace):
268 268 return self._repo.listkeys(namespace)
269 269
270 270 def lookup(self, key):
271 271 return self._repo.lookup(key)
272 272
273 273 def pushkey(self, namespace, key, old, new):
274 274 return self._repo.pushkey(namespace, key, old, new)
275 275
276 276 def stream_out(self):
277 277 raise error.Abort(_('cannot perform stream clone against local '
278 278 'peer'))
279 279
280 280 def unbundle(self, bundle, heads, url):
281 281 """apply a bundle on a repo
282 282
283 283 This function handles the repo locking itself."""
284 284 try:
285 285 try:
286 286 bundle = exchange.readbundle(self.ui, bundle, None)
287 287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
288 288 if util.safehasattr(ret, 'getchunks'):
289 289 # This is a bundle20 object, turn it into an unbundler.
290 290 # This little dance should be dropped eventually when the
291 291 # API is finally improved.
292 292 stream = util.chunkbuffer(ret.getchunks())
293 293 ret = bundle2.getunbundler(self.ui, stream)
294 294 return ret
295 295 except Exception as exc:
296 296 # If the exception contains output salvaged from a bundle2
297 297 # reply, we need to make sure it is printed before continuing
298 298 # to fail. So we build a bundle2 with such output and consume
299 299 # it directly.
300 300 #
301 301 # This is not very elegant but allows a "simple" solution for
302 302 # issue4594
303 303 output = getattr(exc, '_bundle2salvagedoutput', ())
304 304 if output:
305 305 bundler = bundle2.bundle20(self._repo.ui)
306 306 for out in output:
307 307 bundler.addpart(out)
308 308 stream = util.chunkbuffer(bundler.getchunks())
309 309 b = bundle2.getunbundler(self.ui, stream)
310 310 bundle2.processbundle(self._repo, b)
311 311 raise
312 312 except error.PushRaced as exc:
313 313 raise error.ResponseError(_('push failed:'),
314 314 stringutil.forcebytestr(exc))
315 315
316 316 # End of _basewirecommands interface.
317 317
318 318 # Begin of peer interface.
319 319
320 320 def commandexecutor(self):
321 321 return localcommandexecutor(self)
322 322
323 323 # End of peer interface.
324 324
325 325 @interfaceutil.implementer(repository.ipeerlegacycommands)
326 326 class locallegacypeer(localpeer):
327 327 '''peer extension which implements legacy methods too; used for tests with
328 328 restricted capabilities'''
329 329
330 330 def __init__(self, repo):
331 331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
332 332
333 333 # Begin of baselegacywirecommands interface.
334 334
335 335 def between(self, pairs):
336 336 return self._repo.between(pairs)
337 337
338 338 def branches(self, nodes):
339 339 return self._repo.branches(nodes)
340 340
341 341 def changegroup(self, nodes, source):
342 342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
343 343 missingheads=self._repo.heads())
344 344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
345 345
346 346 def changegroupsubset(self, bases, heads, source):
347 347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
348 348 missingheads=heads)
349 349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
350 350
351 351 # End of baselegacywirecommands interface.
352 352
353 353 # Increment the sub-version when the revlog v2 format changes to lock out old
354 354 # clients.
355 355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
356 356
357 357 # A repository with the sparserevlog feature will have delta chains that
358 358 # can spread over a larger span. Sparse reading cuts these large spans into
359 359 # pieces, so that each piece isn't too big.
360 360 # Without the sparserevlog capability, reading from the repository could use
361 361 # huge amounts of memory, because the whole span would be read at once,
362 362 # including all the intermediate revisions that aren't pertinent for the chain.
363 363 # This is why once a repository has enabled sparse-read, it becomes required.
364 364 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
365 365
366 366 # Functions receiving (ui, features) that extensions can register to impact
367 367 # the ability to load repositories with custom requirements. Only
368 368 # functions defined in loaded extensions are called.
369 369 #
370 370 # The function receives a set of requirement strings that the repository
371 371 # is capable of opening. Functions will typically add elements to the
372 372 # set to reflect that the extension knows how to handle those requirements.
373 373 featuresetupfuncs = set()
374 374
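# Hedged sketch of how an extension registers one of these functions (the
# 'exp-...' requirement name is hypothetical):
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, features):
#         features.add('exp-my-extension-requirement')
#
#     localrepo.featuresetupfuncs.add(featuresetup)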
375 375 @interfaceutil.implementer(repository.completelocalrepository)
376 376 class localrepository(object):
377 377
378 378 # obsolete experimental requirements:
379 379 # - manifestv2: An experimental new manifest format that allowed
380 380 # for stem compression of long paths. The experiment ended up not
381 381 # being successful (repository sizes went up due to worse delta
382 382 # chains), and the code was deleted in 4.6.
383 383 supportedformats = {
384 384 'revlogv1',
385 385 'generaldelta',
386 386 'treemanifest',
387 387 REVLOGV2_REQUIREMENT,
388 388 SPARSEREVLOG_REQUIREMENT,
389 389 }
390 390 _basesupported = supportedformats | {
391 391 'store',
392 392 'fncache',
393 393 'shared',
394 394 'relshared',
395 395 'dotencode',
396 396 'exp-sparse',
397 397 }
398 398 openerreqs = {
399 399 'revlogv1',
400 400 'generaldelta',
401 401 'treemanifest',
402 402 }
403 403
404 404 # list of prefixes for files which can be written without 'wlock'
405 405 # Extensions should extend this list when needed
406 406 _wlockfreeprefix = {
407 407 # We might consider requiring 'wlock' for the next
408 408 # two, but pretty much all the existing code assumes
409 409 # wlock is not needed, so we keep them excluded for
410 410 # now.
411 411 'hgrc',
412 412 'requires',
413 413 # XXX cache is a complicated business; someone
414 414 # should investigate this in depth at some point
415 415 'cache/',
416 416 # XXX shouldn't dirstate be covered by the wlock?
417 417 'dirstate',
418 418 # XXX bisect was still a bit too messy at the time
419 419 # this changeset was introduced. Someone should fix
420 420 # the remaining bit and drop this line
421 421 'bisect.state',
422 422 }
423 423
424 424 def __init__(self, baseui, path, create=False, intents=None):
425 425 self.requirements = set()
426 426 self.filtername = None
427 427 # wvfs: rooted at the repository root, used to access the working copy
428 428 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
429 429 # vfs: rooted at .hg, used to access repo files outside of .hg/store
430 430 self.vfs = None
431 431 # svfs: usually rooted at .hg/store, used to access repository history
432 432 # If this is a shared repository, this vfs may point to another
433 433 # repository's .hg/store directory.
434 434 self.svfs = None
435 435 self.root = self.wvfs.base
436 436 self.path = self.wvfs.join(".hg")
437 437 self.origroot = path
438 438 # This is only used by context.workingctx.match in order to
439 439 # detect files in subrepos.
440 440 self.auditor = pathutil.pathauditor(
441 441 self.root, callback=self._checknested)
442 442 # This is only used by context.basectx.match in order to detect
443 443 # files in subrepos.
444 444 self.nofsauditor = pathutil.pathauditor(
445 445 self.root, callback=self._checknested, realfs=False, cached=True)
446 446 self.baseui = baseui
447 447 self.ui = baseui.copy()
448 448 self.ui.copy = baseui.copy # prevent copying repo configuration
449 449 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
450 450 if (self.ui.configbool('devel', 'all-warnings') or
451 451 self.ui.configbool('devel', 'check-locks')):
452 452 self.vfs.audit = self._getvfsward(self.vfs.audit)
453 453 # A list of callbacks to shape the phase if no data were found.
454 454 # Callbacks are in the form: func(repo, roots) --> processed root.
455 455 # This list is to be filled by extensions during repo setup
456 456 self._phasedefaults = []
457 457 try:
458 458 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
459 459 self._loadextensions()
460 460 except IOError:
461 461 pass
462 462
463 463 if featuresetupfuncs:
464 464 self.supported = set(self._basesupported) # use private copy
465 465 extmods = set(m.__name__ for n, m
466 466 in extensions.extensions(self.ui))
467 467 for setupfunc in featuresetupfuncs:
468 468 if setupfunc.__module__ in extmods:
469 469 setupfunc(self.ui, self.supported)
470 470 else:
471 471 self.supported = self._basesupported
472 472 color.setup(self.ui)
473 473
474 474 # Add compression engines.
475 475 for name in util.compengines:
476 476 engine = util.compengines[name]
477 477 if engine.revlogheader():
478 478 self.supported.add('exp-compression-%s' % name)
479 479
480 480 if not self.vfs.isdir():
481 481 if create:
482 482 self.requirements = newreporequirements(self)
483 483
484 484 if not self.wvfs.exists():
485 485 self.wvfs.makedirs()
486 486 self.vfs.makedir(notindexed=True)
487 487
488 488 if 'store' in self.requirements:
489 489 self.vfs.mkdir("store")
490 490
491 491 # create an invalid changelog
492 492 self.vfs.append(
493 493 "00changelog.i",
494 494 '\0\0\0\2' # represents revlogv2
495 495 ' dummy changelog to prevent using the old repo layout'
496 496 )
497 497 else:
498 498 try:
499 499 self.vfs.stat()
500 500 except OSError as inst:
501 501 if inst.errno != errno.ENOENT:
502 502 raise
503 503 raise error.RepoError(_("repository %s not found") % path)
504 504 elif create:
505 505 raise error.RepoError(_("repository %s already exists") % path)
506 506 else:
507 507 try:
508 508 self.requirements = scmutil.readrequires(
509 509 self.vfs, self.supported)
510 510 except IOError as inst:
511 511 if inst.errno != errno.ENOENT:
512 512 raise
513 513
514 514 cachepath = self.vfs.join('cache')
515 515 self.sharedpath = self.path
516 516 try:
517 517 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
518 518 if 'relshared' in self.requirements:
519 519 sharedpath = self.vfs.join(sharedpath)
520 520 vfs = vfsmod.vfs(sharedpath, realpath=True)
521 521 cachepath = vfs.join('cache')
522 522 s = vfs.base
523 523 if not vfs.exists():
524 524 raise error.RepoError(
525 525 _('.hg/sharedpath points to nonexistent directory %s') % s)
526 526 self.sharedpath = s
527 527 except IOError as inst:
528 528 if inst.errno != errno.ENOENT:
529 529 raise
530 530
531 531 if 'exp-sparse' in self.requirements and not sparse.enabled:
532 532 raise error.RepoError(_('repository is using sparse feature but '
533 533 'sparse is not enabled; enable the '
534 534 '"sparse" extensions to access'))
535 535
536 536 self.store = store.store(
537 537 self.requirements, self.sharedpath,
538 538 lambda base: vfsmod.vfs(base, cacheaudited=True))
539 539 self.spath = self.store.path
540 540 self.svfs = self.store.vfs
541 541 self.sjoin = self.store.join
542 542 self.vfs.createmode = self.store.createmode
543 543 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
544 544 self.cachevfs.createmode = self.store.createmode
545 545 if (self.ui.configbool('devel', 'all-warnings') or
546 546 self.ui.configbool('devel', 'check-locks')):
547 547 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
548 548 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
549 549 else: # standard vfs
550 550 self.svfs.audit = self._getsvfsward(self.svfs.audit)
551 551 self._applyopenerreqs()
552 552 if create:
553 553 self._writerequirements()
554 554
555 555 self._dirstatevalidatewarned = False
556 556
557 557 self._branchcaches = {}
558 558 self._revbranchcache = None
559 559 self._filterpats = {}
560 560 self._datafilters = {}
561 561 self._transref = self._lockref = self._wlockref = None
562 562
563 563 # A cache for various files under .hg/ that tracks file changes
564 564 # (used by the filecache decorator)
565 565 #
566 566 # Maps a property name to its util.filecacheentry
567 567 self._filecache = {}
568 568
569 569 # hold sets of revisions to be filtered
570 570 # should be cleared when something might have changed the filter value:
571 571 # - new changesets,
572 572 # - phase change,
573 573 # - new obsolescence marker,
574 574 # - working directory parent change,
575 575 # - bookmark changes
576 576 self.filteredrevcache = {}
577 577
578 578 # post-dirstate-status hooks
579 579 self._postdsstatus = []
580 580
581 581 # generic mapping between names and nodes
582 582 self.names = namespaces.namespaces()
583 583
584 584 # Key to signature value.
585 585 self._sparsesignaturecache = {}
586 586 # Signature to cached matcher instance.
587 587 self._sparsematchercache = {}
588 588
589 589 def _getvfsward(self, origfunc):
590 590 """build a ward for self.vfs"""
591 591 rref = weakref.ref(self)
592 592 def checkvfs(path, mode=None):
593 593 ret = origfunc(path, mode=mode)
594 594 repo = rref()
595 595 if (repo is None
596 596 or not util.safehasattr(repo, '_wlockref')
597 597 or not util.safehasattr(repo, '_lockref')):
598 598 return
599 599 if mode in (None, 'r', 'rb'):
600 600 return
601 601 if path.startswith(repo.path):
602 602 # truncate name relative to the repository (.hg)
603 603 path = path[len(repo.path) + 1:]
604 604 if path.startswith('cache/'):
605 605 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
606 606 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
607 607 if path.startswith('journal.'):
608 608 # journal is covered by 'lock'
609 609 if repo._currentlock(repo._lockref) is None:
610 610 repo.ui.develwarn('write with no lock: "%s"' % path,
611 611 stacklevel=2, config='check-locks')
612 612 elif repo._currentlock(repo._wlockref) is None:
613 613 # rest of vfs files are covered by 'wlock'
614 614 #
615 615 # exclude special files
616 616 for prefix in self._wlockfreeprefix:
617 617 if path.startswith(prefix):
618 618 return
619 619 repo.ui.develwarn('write with no wlock: "%s"' % path,
620 620 stacklevel=2, config='check-locks')
621 621 return ret
622 622 return checkvfs
623 623
624 624 def _getsvfsward(self, origfunc):
625 625 """build a ward for self.svfs"""
626 626 rref = weakref.ref(self)
627 627 def checksvfs(path, mode=None):
628 628 ret = origfunc(path, mode=mode)
629 629 repo = rref()
630 630 if repo is None or not util.safehasattr(repo, '_lockref'):
631 631 return
632 632 if mode in (None, 'r', 'rb'):
633 633 return
634 634 if path.startswith(repo.sharedpath):
635 635 # truncate name relative to the repository (.hg)
636 636 path = path[len(repo.sharedpath) + 1:]
637 637 if repo._currentlock(repo._lockref) is None:
638 638 repo.ui.develwarn('write with no lock: "%s"' % path,
639 639 stacklevel=3)
640 640 return ret
641 641 return checksvfs
642 642
643 643 def close(self):
644 644 self._writecaches()
645 645
646 646 def _loadextensions(self):
647 647 extensions.loadall(self.ui)
648 648
649 649 def _writecaches(self):
650 650 if self._revbranchcache:
651 651 self._revbranchcache.write()
652 652
653 653 def _restrictcapabilities(self, caps):
654 654 if self.ui.configbool('experimental', 'bundle2-advertise'):
655 655 caps = set(caps)
656 656 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
657 657 role='client'))
658 658 caps.add('bundle2=' + urlreq.quote(capsblob))
659 659 return caps
660 660
661 661 def _applyopenerreqs(self):
662 662 self.svfs.options = dict((r, 1) for r in self.requirements
663 663 if r in self.openerreqs)
664 664 # experimental config: format.chunkcachesize
665 665 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
666 666 if chunkcachesize is not None:
667 667 self.svfs.options['chunkcachesize'] = chunkcachesize
668 668 # experimental config: format.maxchainlen
669 669 maxchainlen = self.ui.configint('format', 'maxchainlen')
670 670 if maxchainlen is not None:
671 671 self.svfs.options['maxchainlen'] = maxchainlen
672 672 # experimental config: format.manifestcachesize
673 673 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
674 674 if manifestcachesize is not None:
675 675 self.svfs.options['manifestcachesize'] = manifestcachesize
676 676 deltabothparents = self.ui.configbool('storage',
677 677 'revlog.optimize-delta-parent-choice')
678 678 self.svfs.options['deltabothparents'] = deltabothparents
679 679 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
680 680 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
681 681 if 0 <= chainspan:
682 682 self.svfs.options['maxdeltachainspan'] = chainspan
683 683 mmapindexthreshold = self.ui.configbytes('experimental',
684 684 'mmapindexthreshold')
685 685 if mmapindexthreshold is not None:
686 686 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
687 687 withsparseread = self.ui.configbool('experimental', 'sparse-read')
688 688 srdensitythres = float(self.ui.config('experimental',
689 689 'sparse-read.density-threshold'))
690 690 srmingapsize = self.ui.configbytes('experimental',
691 691 'sparse-read.min-gap-size')
692 692 self.svfs.options['with-sparse-read'] = withsparseread
693 693 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
694 694 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
695 695 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
696 696 self.svfs.options['sparse-revlog'] = sparserevlog
697 697 if sparserevlog:
698 698 self.svfs.options['generaldelta'] = True
699 699
700 700 for r in self.requirements:
701 701 if r.startswith('exp-compression-'):
702 702 self.svfs.options['compengine'] = r[len('exp-compression-'):]
703 703
704 704 # TODO move "revlogv2" to openerreqs once finalized.
705 705 if REVLOGV2_REQUIREMENT in self.requirements:
706 706 self.svfs.options['revlogv2'] = True
707 707
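    # Hedged hgrc sketch for the knobs read above (section and option names
    # come from the config calls in _applyopenerreqs; the values are
    # illustrative only):
    #
    #     [format]
    #     chunkcachesize = 65536
    #     maxchainlen = 1000
    #     [experimental]
    #     maxdeltachainspan = 1048576
    #     sparse-read = yes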
708 708 def _writerequirements(self):
709 709 scmutil.writerequires(self.vfs, self.requirements)
710 710
711 711 def _checknested(self, path):
712 712 """Determine if path is a legal nested repository."""
713 713 if not path.startswith(self.root):
714 714 return False
715 715 subpath = path[len(self.root) + 1:]
716 716 normsubpath = util.pconvert(subpath)
717 717
718 718 # XXX: Checking against the current working copy is wrong in
719 719 # the sense that it can reject things like
720 720 #
721 721 # $ hg cat -r 10 sub/x.txt
722 722 #
723 723 # if sub/ is no longer a subrepository in the working copy
724 724 # parent revision.
725 725 #
726 726 # However, it can of course also allow things that would have
727 727 # been rejected before, such as the above cat command if sub/
728 728 # is a subrepository now, but was a normal directory before.
729 729 # The old path auditor would have rejected by mistake since it
730 730 # panics when it sees sub/.hg/.
731 731 #
732 732 # All in all, checking against the working copy seems sensible
733 733 # since we want to prevent access to nested repositories on
734 734 # the filesystem *now*.
735 735 ctx = self[None]
736 736 parts = util.splitpath(subpath)
737 737 while parts:
738 738 prefix = '/'.join(parts)
739 739 if prefix in ctx.substate:
740 740 if prefix == normsubpath:
741 741 return True
742 742 else:
743 743 sub = ctx.sub(prefix)
744 744 return sub.checknested(subpath[len(prefix) + 1:])
745 745 else:
746 746 parts.pop()
747 747 return False
748 748
749 749 def peer(self):
750 750 return localpeer(self) # not cached to avoid reference cycle
751 751
752 752 def unfiltered(self):
753 753 """Return unfiltered version of the repository
754 754
755 755 Intended to be overwritten by filtered repo."""
756 756 return self
757 757
758 758 def filtered(self, name, visibilityexceptions=None):
759 759 """Return a filtered version of a repository"""
760 760 cls = repoview.newtype(self.unfiltered().__class__)
761 761 return cls(self, name, visibilityexceptions)
762 762
763 763 @repofilecache('bookmarks', 'bookmarks.current')
764 764 def _bookmarks(self):
765 765 return bookmarks.bmstore(self)
766 766
767 767 @property
768 768 def _activebookmark(self):
769 769 return self._bookmarks.active
770 770
771 771 # _phasesets depend on changelog. What we need is to call
772 772 # _phasecache.invalidate() if '00changelog.i' was changed, but it
773 773 # can't be easily expressed in the filecache mechanism.
774 774 @storecache('phaseroots', '00changelog.i')
775 775 def _phasecache(self):
776 776 return phases.phasecache(self, self._phasedefaults)
777 777
778 778 @storecache('obsstore')
779 779 def obsstore(self):
780 780 return obsolete.makestore(self.ui, self)
781 781
782 782 @storecache('00changelog.i')
783 783 def changelog(self):
784 784 return changelog.changelog(self.svfs,
785 785 trypending=txnutil.mayhavepending(self.root))
786 786
787 787 def _constructmanifest(self):
788 788 # This is a temporary function while we migrate from manifest to
789 789 # manifestlog. It allows bundlerepo and unionrepo to intercept the
790 790 # manifest creation.
791 791 return manifest.manifestrevlog(self.svfs)
792 792
793 793 @storecache('00manifest.i')
794 794 def manifestlog(self):
795 795 return manifest.manifestlog(self.svfs, self)
796 796
797 797 @repofilecache('dirstate')
798 798 def dirstate(self):
799 799 return self._makedirstate()
800 800
801 801 def _makedirstate(self):
802 802 """Extension point for wrapping the dirstate per-repo."""
803 803 sparsematchfn = lambda: sparse.matcher(self)
804 804
805 805 return dirstate.dirstate(self.vfs, self.ui, self.root,
806 806 self._dirstatevalidate, sparsematchfn)
807 807
808 808 def _dirstatevalidate(self, node):
809 809 try:
810 810 self.changelog.rev(node)
811 811 return node
812 812 except error.LookupError:
813 813 if not self._dirstatevalidatewarned:
814 814 self._dirstatevalidatewarned = True
815 815 self.ui.warn(_("warning: ignoring unknown"
816 816 " working parent %s!\n") % short(node))
817 817 return nullid
818 818
819 819 @storecache(narrowspec.FILENAME)
820 820 def narrowpats(self):
821 821 """matcher patterns for this repository's narrowspec
822 822
823 823 A tuple of (includes, excludes).
824 824 """
825 825 source = self
826 826 if self.shared():
827 827 from . import hg
828 828 source = hg.sharedreposource(self)
829 829 return narrowspec.load(source)
830 830
831 831 @storecache(narrowspec.FILENAME)
832 832 def _narrowmatch(self):
833 833 if repository.NARROW_REQUIREMENT not in self.requirements:
834 834 return matchmod.always(self.root, '')
835 835 include, exclude = self.narrowpats
836 836 return narrowspec.match(self.root, include=include, exclude=exclude)
837 837
838 838 # TODO(martinvonz): make this property-like instead?
839 839 def narrowmatch(self):
840 840 return self._narrowmatch
841 841
842 842 def setnarrowpats(self, newincludes, newexcludes):
843 843 target = self
844 844 if self.shared():
845 845 from . import hg
846 846 target = hg.sharedreposource(self)
847 847 narrowspec.save(target, newincludes, newexcludes)
848 848 self.invalidate(clearfilecache=True)
849 849
850 850 def __getitem__(self, changeid):
851 851 if changeid is None:
852 852 return context.workingctx(self)
853 853 if isinstance(changeid, context.basectx):
854 854 return changeid
855 855 if isinstance(changeid, slice):
856 856 # wdirrev isn't contiguous so the slice shouldn't include it
857 857 return [context.changectx(self, i)
858 858 for i in pycompat.xrange(*changeid.indices(len(self)))
859 859 if i not in self.changelog.filteredrevs]
860 860 try:
861 861 return context.changectx(self, changeid)
862 862 except error.WdirUnsupported:
863 863 return context.workingctx(self)
864 864
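    # Usage sketch for the lookup forms handled above (a hedged
    # illustration, not part of the original file):
    #
    #     repo[None]    # workingctx for the working directory
    #     repo[node]    # changectx for a changeset
    #     repo[0:5]     # [changectx, ...], filtered revisions skipped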
865 865 def __contains__(self, changeid):
866 866 """True if the given changeid exists
867 867
868 868 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
869 869 specified.
870 870 """
871 871 try:
872 872 self[changeid]
873 873 return True
874 874 except error.RepoLookupError:
875 875 return False
876 876
877 877 def __nonzero__(self):
878 878 return True
879 879
880 880 __bool__ = __nonzero__
881 881
882 882 def __len__(self):
883 883 # no need to pay the cost of repoview.changelog
884 884 unfi = self.unfiltered()
885 885 return len(unfi.changelog)
886 886
887 887 def __iter__(self):
888 888 return iter(self.changelog)
889 889
890 890 def revs(self, expr, *args):
891 891 '''Find revisions matching a revset.
892 892
893 893 The revset is specified as a string ``expr`` that may contain
894 894 %-formatting to escape certain types. See ``revsetlang.formatspec``.
895 895
896 896 Revset aliases from the configuration are not expanded. To expand
897 897 user aliases, consider calling ``scmutil.revrange()`` or
898 898 ``repo.anyrevs([expr], user=True)``.
899 899
900 900 Returns a revset.abstractsmartset, which is a list-like interface
901 901 that contains integer revisions.
902 902 '''
903 903 expr = revsetlang.formatspec(expr, *args)
904 904 m = revset.match(None, expr)
905 905 return m(self)
906 906
907 907 def set(self, expr, *args):
908 908 '''Find revisions matching a revset and emit changectx instances.
909 909
910 910 This is a convenience wrapper around ``revs()`` that iterates the
911 911 result and is a generator of changectx instances.
912 912
913 913 Revset aliases from the configuration are not expanded. To expand
914 914 user aliases, consider calling ``scmutil.revrange()``.
915 915 '''
916 916 for r in self.revs(expr, *args):
917 917 yield self[r]
918 918
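    # Usage sketch for revs() and set() above (hedged; formatspec escapes
    # such as %d and %ln are handled by revsetlang.formatspec):
    #
    #     revs = repo.revs('%d:: and not public()', startrev)
    #     for ctx in repo.set('%ln', nodes):   # %ln escapes a list of nodes
    #         ...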
919 919 def anyrevs(self, specs, user=False, localalias=None):
920 920 '''Find revisions matching one of the given revsets.
921 921
922 922 Revset aliases from the configuration are not expanded by default. To
923 923 expand user aliases, specify ``user=True``. To provide some local
924 924 definitions overriding user aliases, set ``localalias`` to
925 925 ``{name: definitionstring}``.
926 926 '''
927 927 if user:
928 928 m = revset.matchany(self.ui, specs,
929 929 lookup=revset.lookupfn(self),
930 930 localalias=localalias)
931 931 else:
932 932 m = revset.matchany(None, specs, localalias=localalias)
933 933 return m(self)
934 934
935 935 def url(self):
936 936 return 'file:' + self.root
937 937
938 938 def hook(self, name, throw=False, **args):
939 939 """Call a hook, passing this repo instance.
940 940
941 941 This is a convenience method to aid invoking hooks. Extensions likely
942 942 won't call this unless they have registered a custom hook or are
943 943 replacing code that is expected to call a hook.
944 944 """
945 945 return hook.hook(self.ui, self, name, throw, **args)
946 946
947 947 @filteredpropertycache
948 948 def _tagscache(self):
949 949 '''Returns a tagscache object that contains various tag-related
950 950 caches.'''
951 951
952 952 # This simplifies its cache management by having one decorated
953 953 # function (this one) and the rest simply fetch things from it.
954 954 class tagscache(object):
955 955 def __init__(self):
956 956 # These two define the set of tags for this repository. tags
957 957 # maps tag name to node; tagtypes maps tag name to 'global' or
958 958 # 'local'. (Global tags are defined by .hgtags across all
959 959 # heads, and local tags are defined in .hg/localtags.)
960 960 # They constitute the in-memory cache of tags.
961 961 self.tags = self.tagtypes = None
962 962
963 963 self.nodetagscache = self.tagslist = None
964 964
965 965 cache = tagscache()
966 966 cache.tags, cache.tagtypes = self._findtags()
967 967
968 968 return cache
969 969
970 970 def tags(self):
971 971 '''return a mapping of tag to node'''
972 972 t = {}
973 973 if self.changelog.filteredrevs:
974 974 tags, tt = self._findtags()
975 975 else:
976 976 tags = self._tagscache.tags
977 977 for k, v in tags.iteritems():
978 978 try:
979 979 # ignore tags to unknown nodes
980 980 self.changelog.rev(v)
981 981 t[k] = v
982 982 except (error.LookupError, ValueError):
983 983 pass
984 984 return t
985 985
986 986 def _findtags(self):
987 987 '''Do the hard work of finding tags. Return a pair of dicts
988 988 (tags, tagtypes) where tags maps tag name to node, and tagtypes
989 989 maps tag name to a string like \'global\' or \'local\'.
990 990 Subclasses or extensions are free to add their own tags, but
991 991 should be aware that the returned dicts will be retained for the
992 992 duration of the localrepo object.'''
993 993
994 994 # XXX what tagtype should subclasses/extensions use? Currently
995 995 # mq and bookmarks add tags, but do not set the tagtype at all.
996 996 # Should each extension invent its own tag type? Should there
997 997 # be one tagtype for all such "virtual" tags? Or is the status
998 998 # quo fine?
999 999
1000 1000
1001 1001 # map tag name to (node, hist)
1002 1002 alltags = tagsmod.findglobaltags(self.ui, self)
1003 1003 # map tag name to tag type
1004 1004 tagtypes = dict((tag, 'global') for tag in alltags)
1005 1005
1006 1006 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1007 1007
1008 1008 # Build the return dicts. Have to re-encode tag names because
1009 1009 # the tags module always uses UTF-8 (in order not to lose info
1010 1010 # writing to the cache), but the rest of Mercurial wants them in
1011 1011 # local encoding.
1012 1012 tags = {}
1013 1013 for (name, (node, hist)) in alltags.iteritems():
1014 1014 if node != nullid:
1015 1015 tags[encoding.tolocal(name)] = node
1016 1016 tags['tip'] = self.changelog.tip()
1017 1017 tagtypes = dict([(encoding.tolocal(name), value)
1018 1018 for (name, value) in tagtypes.iteritems()])
1019 1019 return (tags, tagtypes)
1020 1020
1021 1021 def tagtype(self, tagname):
1022 1022 '''
1023 1023 return the type of the given tag. result can be:
1024 1024
1025 1025 'local' : a local tag
1026 1026 'global' : a global tag
1027 1027 None : tag does not exist
1028 1028 '''
1029 1029
1030 1030 return self._tagscache.tagtypes.get(tagname)
1031 1031
1032 1032 def tagslist(self):
1033 1033 '''return a list of tags ordered by revision'''
1034 1034 if not self._tagscache.tagslist:
1035 1035 l = []
1036 1036 for t, n in self.tags().iteritems():
1037 1037 l.append((self.changelog.rev(n), t, n))
1038 1038 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1039 1039
1040 1040 return self._tagscache.tagslist
1041 1041
1042 1042 def nodetags(self, node):
1043 1043 '''return the tags associated with a node'''
1044 1044 if not self._tagscache.nodetagscache:
1045 1045 nodetagscache = {}
1046 1046 for t, n in self._tagscache.tags.iteritems():
1047 1047 nodetagscache.setdefault(n, []).append(t)
1048 1048 for tags in nodetagscache.itervalues():
1049 1049 tags.sort()
1050 1050 self._tagscache.nodetagscache = nodetagscache
1051 1051 return self._tagscache.nodetagscache.get(node, [])
1052 1052
1053 1053 def nodebookmarks(self, node):
1054 1054 """return the list of bookmarks pointing to the specified node"""
1055 1055 return self._bookmarks.names(node)
1056 1056
1057 1057 def branchmap(self):
1058 1058 '''returns a dictionary {branch: [branchheads]} with branchheads
1059 1059 ordered by increasing revision number'''
1060 1060 branchmap.updatecache(self)
1061 1061 return self._branchcaches[self.filtername]
1062 1062
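    # Usage sketch (hedged): the returned cache behaves like a dict of
    # branch name -> list of head nodes, with branchtip() as used below:
    #
    #     heads = repo.branchmap()['default']
    #     tipnode = repo.branchmap().branchtip('default')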
1063 1063 @unfilteredmethod
1064 1064 def revbranchcache(self):
1065 1065 if not self._revbranchcache:
1066 1066 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1067 1067 return self._revbranchcache
1068 1068
1069 1069 def branchtip(self, branch, ignoremissing=False):
1070 1070 '''return the tip node for a given branch
1071 1071
1072 1072 If ignoremissing is True, then this method will not raise an error.
1073 1073 This is helpful for callers that only expect None for a missing branch
1074 1074 (e.g. namespace).
1075 1075
1076 1076 '''
1077 1077 try:
1078 1078 return self.branchmap().branchtip(branch)
1079 1079 except KeyError:
1080 1080 if not ignoremissing:
1081 1081 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1082 1082 else:
1083 1083 pass
1084 1084
1085 1085 def lookup(self, key):
1086 1086 return scmutil.revsymbol(self, key).node()
1087 1087
1088 1088 def lookupbranch(self, key):
1089 1089 if key in self.branchmap():
1090 1090 return key
1091 1091
1092 1092 return scmutil.revsymbol(self, key).branch()
1093 1093
1094 1094 def known(self, nodes):
1095 1095 cl = self.changelog
1096 1096 nm = cl.nodemap
1097 1097 filtered = cl.filteredrevs
1098 1098 result = []
1099 1099 for n in nodes:
1100 1100 r = nm.get(n)
1101 1101 resp = not (r is None or r in filtered)
1102 1102 result.append(resp)
1103 1103 return result
1104 1104
1105 1105 def local(self):
1106 1106 return self
1107 1107
1108 1108 def publishing(self):
1109 1109 # it's safe (and desirable) to trust the publish flag unconditionally
1110 1110 # so that we don't finalize changes shared between users via ssh or nfs
1111 1111 return self.ui.configbool('phases', 'publish', untrusted=True)
1112 1112
1113 1113 def cancopy(self):
1114 1114 # so statichttprepo's override of local() works
1115 1115 if not self.local():
1116 1116 return False
1117 1117 if not self.publishing():
1118 1118 return True
1119 1119 # if publishing we can't copy if there is filtered content
1120 1120 return not self.filtered('visible').changelog.filteredrevs
1121 1121
1122 1122 def shared(self):
1123 1123 '''the type of shared repository (None if not shared)'''
1124 1124 if self.sharedpath != self.path:
1125 1125 return 'store'
1126 1126 return None
1127 1127
1128 1128 def wjoin(self, f, *insidef):
1129 1129 return self.vfs.reljoin(self.root, f, *insidef)
1130 1130
1131 1131 def file(self, f):
1132 1132 if f[0] == '/':
1133 1133 f = f[1:]
1134 1134 return filelog.filelog(self.svfs, f)
1135 1135
1136 1136 def setparents(self, p1, p2=nullid):
1137 1137 with self.dirstate.parentchange():
1138 1138 copies = self.dirstate.setparents(p1, p2)
1139 1139 pctx = self[p1]
1140 1140 if copies:
1141 1141 # Adjust copy records; the dirstate cannot do it, as it
1142 1142 # requires access to the parents' manifests. Preserve them
1143 1143 # only for entries added to the first parent.
1144 1144 for f in copies:
1145 1145 if f not in pctx and copies[f] in pctx:
1146 1146 self.dirstate.copy(copies[f], f)
1147 1147 if p2 == nullid:
1148 1148 for f, s in sorted(self.dirstate.copies().items()):
1149 1149 if f not in pctx and s not in pctx:
1150 1150 self.dirstate.copy(None, f)
1151 1151
1152 1152 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1153 1153 """changeid can be a changeset revision, node, or tag.
1154 1154 fileid can be a file revision or node."""
1155 1155 return context.filectx(self, path, changeid, fileid,
1156 1156 changectx=changectx)
1157 1157
1158 1158 def getcwd(self):
1159 1159 return self.dirstate.getcwd()
1160 1160
1161 1161 def pathto(self, f, cwd=None):
1162 1162 return self.dirstate.pathto(f, cwd)
1163 1163
1164 1164 def _loadfilter(self, filter):
1165 1165 if filter not in self._filterpats:
1166 1166 l = []
1167 1167 for pat, cmd in self.ui.configitems(filter):
1168 1168 if cmd == '!':
1169 1169 continue
1170 1170 mf = matchmod.match(self.root, '', [pat])
1171 1171 fn = None
1172 1172 params = cmd
1173 1173 for name, filterfn in self._datafilters.iteritems():
1174 1174 if cmd.startswith(name):
1175 1175 fn = filterfn
1176 1176 params = cmd[len(name):].lstrip()
1177 1177 break
1178 1178 if not fn:
1179 1179 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1180 1180 # Wrap old filters not supporting keyword arguments
1181 1181 if not pycompat.getargspec(fn)[2]:
1182 1182 oldfn = fn
1183 1183 fn = lambda s, c, **kwargs: oldfn(s, c)
1184 1184 l.append((mf, fn, params))
1185 1185 self._filterpats[filter] = l
1186 1186 return self._filterpats[filter]
1187 1187
1188 1188 def _filter(self, filterpats, filename, data):
1189 1189 for mf, fn, cmd in filterpats:
1190 1190 if mf(filename):
1191 1191 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1192 1192 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1193 1193 break
1194 1194
1195 1195 return data
1196 1196
1197 1197 @unfilteredpropertycache
1198 1198 def _encodefilterpats(self):
1199 1199 return self._loadfilter('encode')
1200 1200
1201 1201 @unfilteredpropertycache
1202 1202 def _decodefilterpats(self):
1203 1203 return self._loadfilter('decode')
1204 1204
1205 1205 def adddatafilter(self, name, filter):
1206 1206 self._datafilters[name] = filter
1207 1207
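    # Hedged sketch: an extension can register a named data filter and wire
    # it up from the [encode]/[decode] config sections read by _loadfilter()
    # (the 'upper:' name and the pattern below are hypothetical):
    #
    #     def upperfilter(s, cmd, **kwargs):
    #         return s.upper()
    #     repo.adddatafilter('upper:', upperfilter)
    #
    #     # hgrc:
    #     #   [encode]
    #     #   **.txt = upper: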
1208 1208 def wread(self, filename):
1209 1209 if self.wvfs.islink(filename):
1210 1210 data = self.wvfs.readlink(filename)
1211 1211 else:
1212 1212 data = self.wvfs.read(filename)
1213 1213 return self._filter(self._encodefilterpats, filename, data)
1214 1214
1215 1215 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1216 1216 """write ``data`` into ``filename`` in the working directory
1217 1217
1218 1218 This returns the length of the written (maybe decoded) data.
1219 1219 """
1220 1220 data = self._filter(self._decodefilterpats, filename, data)
1221 1221 if 'l' in flags:
1222 1222 self.wvfs.symlink(data, filename)
1223 1223 else:
1224 1224 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1225 1225 **kwargs)
1226 1226 if 'x' in flags:
1227 1227 self.wvfs.setflags(filename, False, True)
1228 1228 else:
1229 1229 self.wvfs.setflags(filename, False, False)
1230 1230 return len(data)
1231 1231
1232 1232 def wwritedata(self, filename, data):
1233 1233 return self._filter(self._decodefilterpats, filename, data)
1234 1234
1235 1235 def currenttransaction(self):
1236 1236 """return the current transaction or None if none exists"""
1237 1237 if self._transref:
1238 1238 tr = self._transref()
1239 1239 else:
1240 1240 tr = None
1241 1241
1242 1242 if tr and tr.running():
1243 1243 return tr
1244 1244 return None
1245 1245
1246 1246 def transaction(self, desc, report=None):
1247 1247 if (self.ui.configbool('devel', 'all-warnings')
1248 1248 or self.ui.configbool('devel', 'check-locks')):
1249 1249 if self._currentlock(self._lockref) is None:
1250 1250 raise error.ProgrammingError('transaction requires locking')
1251 1251 tr = self.currenttransaction()
1252 1252 if tr is not None:
1253 1253 return tr.nest(name=desc)
1254 1254
1255 1255 # abort here if the journal already exists
1256 1256 if self.svfs.exists("journal"):
1257 1257 raise error.RepoError(
1258 1258 _("abandoned transaction found"),
1259 1259 hint=_("run 'hg recover' to clean up transaction"))
1260 1260
1261 1261 idbase = "%.40f#%f" % (random.random(), time.time())
1262 1262 ha = hex(hashlib.sha1(idbase).digest())
1263 1263 txnid = 'TXN:' + ha
1264 1264 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1265 1265
1266 1266 self._writejournal(desc)
1267 1267 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1268 1268 if report:
1269 1269 rp = report
1270 1270 else:
1271 1271 rp = self.ui.warn
1272 1272 vfsmap = {'plain': self.vfs} # root of .hg/
1273 1273 # we must avoid a cyclic reference between repo and transaction.
1274 1274 reporef = weakref.ref(self)
1275 1275 # Code to track tag movement
1276 1276 #
1277 1277 # Since tags are all handled as file content, it is actually quite hard
1278 1278 # to track these movements from a code perspective. So we fall back to
1279 1279 # tracking at the repository level. One could envision tracking changes
1280 1280 # to the '.hgtags' file through changegroup application, but that fails to
1281 1281 # cope with cases where a transaction exposes new heads without a changegroup
1282 1282 # being involved (eg: phase movement).
1283 1283 #
1284 1284 # For now, we gate the feature behind a flag since this likely comes
1285 1285 # with performance impacts. The current code runs more often than needed
1286 1286 # and does not use caches as much as it could. The current focus is on
1287 1287 # the behavior of the feature so we disable it by default. The flag
1288 1288 # will be removed when we are happy with the performance impact.
1289 1289 #
1290 1290 # Once this feature is no longer experimental move the following
1291 1291 # documentation to the appropriate help section:
1292 1292 #
1293 1293 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1294 1294 # tags (new or changed or deleted tags). In addition the details of
1295 1295 # these changes are made available in a file at:
1296 1296 # ``REPOROOT/.hg/changes/tags.changes``.
1297 1297 # Make sure you check for HG_TAG_MOVED before reading that file as it
1298 1298 # might exist from a previous transaction even if no tags were touched
1299 1299 # in this one. Changes are recorded in a line-based format::
1300 1300 #
1301 1301 # <action> <hex-node> <tag-name>\n
1302 1302 #
1303 1303 # Actions are defined as follows:
1304 1304 # "-R": tag is removed,
1305 1305 # "+A": tag is added,
1306 1306 # "-M": tag is moved (old value),
1307 1307 # "+M": tag is moved (new value),
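        # For illustration, a resulting tags.changes file could contain
        # (hypothetical nodes and tag names):
        #
        #     +A 3434343434343434343434343434343434343434 v1.1
        #     -M 5656565656565656565656565656565656565656 moving-tag
        #     +M 7878787878787878787878787878787878787878 moving-tag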
1308 1308 tracktags = lambda x: None
1309 1309 # experimental config: experimental.hook-track-tags
1310 1310 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1311 1311 if desc != 'strip' and shouldtracktags:
1312 1312 oldheads = self.changelog.headrevs()
1313 1313 def tracktags(tr2):
1314 1314 repo = reporef()
1315 1315 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1316 1316 newheads = repo.changelog.headrevs()
1317 1317 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1318 1318 # note: we compare lists here.
1319 1319 # As we do it only once, building a set would not be cheaper
1320 1320 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1321 1321 if changes:
1322 1322 tr2.hookargs['tag_moved'] = '1'
1323 1323 with repo.vfs('changes/tags.changes', 'w',
1324 1324 atomictemp=True) as changesfile:
1325 1325 # note: we do not register the file with the transaction
1326 1326 # because we need it to still exist when the transaction
1327 1327 # is closed (for txnclose hooks)
1328 1328 tagsmod.writediff(changesfile, changes)
1329 1329 def validate(tr2):
1330 1330 """will run pre-closing hooks"""
1331 1331 # XXX the transaction API is a bit lacking here so we take a hacky
1332 1332 # path for now
1333 1333 #
1334 1334 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1335 1335 # dict is copied before these run. In addition we need the data
1336 1336 # available to in-memory hooks too.
1337 1337 #
1338 1338 # Moreover, we also need to make sure this runs before txnclose
1339 1339 # hooks and there is no "pending" mechanism that would execute
1340 1340 # logic only if hooks are about to run.
1341 1341 #
1342 1342 # Fixing this limitation of the transaction is also needed to track
1343 1343 # other families of changes (bookmarks, phases, obsolescence).
1344 1344 #
1345 1345 # This will have to be fixed before we remove the experimental
1346 1346 # gating.
1347 1347 tracktags(tr2)
1348 1348 repo = reporef()
1349 1349 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1350 1350 scmutil.enforcesinglehead(repo, tr2, desc)
1351 1351 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1352 1352 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1353 1353 args = tr.hookargs.copy()
1354 1354 args.update(bookmarks.preparehookargs(name, old, new))
1355 1355 repo.hook('pretxnclose-bookmark', throw=True,
1356 1356 txnname=desc,
1357 1357 **pycompat.strkwargs(args))
1358 1358 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1359 1359 cl = repo.unfiltered().changelog
1360 1360 for rev, (old, new) in tr.changes['phases'].items():
1361 1361 args = tr.hookargs.copy()
1362 1362 node = hex(cl.node(rev))
1363 1363 args.update(phases.preparehookargs(node, old, new))
1364 1364 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1365 1365 **pycompat.strkwargs(args))
1366 1366
1367 1367 repo.hook('pretxnclose', throw=True,
1368 1368 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1369 1369 def releasefn(tr, success):
1370 1370 repo = reporef()
1371 1371 if success:
1372 1372 # this should be explicitly invoked here, because
1373 1373 # in-memory changes aren't written out when closing the
1374 1374 # transaction, if tr.addfilegenerator (via
1375 1375 # dirstate.write or so) isn't invoked while the
1376 1376 # transaction is running
1377 1377 repo.dirstate.write(None)
1378 1378 else:
1379 1379 # discard all changes (including ones already written
1380 1380 # out) in this transaction
1381 1381 narrowspec.restorebackup(self, 'journal.narrowspec')
1382 1382 repo.dirstate.restorebackup(None, 'journal.dirstate')
1383 1383
1384 1384 repo.invalidate(clearfilecache=True)
1385 1385
1386 1386 tr = transaction.transaction(rp, self.svfs, vfsmap,
1387 1387 "journal",
1388 1388 "undo",
1389 1389 aftertrans(renames),
1390 1390 self.store.createmode,
1391 1391 validator=validate,
1392 1392 releasefn=releasefn,
1393 1393 checkambigfiles=_cachedfiles,
1394 1394 name=desc)
1395 1395 tr.changes['revs'] = pycompat.xrange(0, 0)
1396 1396 tr.changes['obsmarkers'] = set()
1397 1397 tr.changes['phases'] = {}
1398 1398 tr.changes['bookmarks'] = {}
1399 1399
1400 1400 tr.hookargs['txnid'] = txnid
1401 1401 # note: writing the fncache only during finalize means that the file is
1402 1402 # outdated when running hooks. As fncache is used for streaming clones,
1403 1403 # this is not expected to break anything that happens during the hooks.
1404 1404 tr.addfinalize('flush-fncache', self.store.write)
1405 1405 def txnclosehook(tr2):
1406 1406 """To be run if the transaction is successful; schedules a hook run
1407 1407 """
1408 1408 # Don't reference tr2 in hook() so we don't hold a reference.
1409 1409 # This reduces memory consumption when there are multiple
1410 1410 # transactions per lock. This can likely go away if issue5045
1411 1411 # fixes the function accumulation.
1412 1412 hookargs = tr2.hookargs
1413 1413
1414 1414 def hookfunc():
1415 1415 repo = reporef()
1416 1416 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1417 1417 bmchanges = sorted(tr.changes['bookmarks'].items())
1418 1418 for name, (old, new) in bmchanges:
1419 1419 args = tr.hookargs.copy()
1420 1420 args.update(bookmarks.preparehookargs(name, old, new))
1421 1421 repo.hook('txnclose-bookmark', throw=False,
1422 1422 txnname=desc, **pycompat.strkwargs(args))
1423 1423
1424 1424 if hook.hashook(repo.ui, 'txnclose-phase'):
1425 1425 cl = repo.unfiltered().changelog
1426 1426 phasemv = sorted(tr.changes['phases'].items())
1427 1427 for rev, (old, new) in phasemv:
1428 1428 args = tr.hookargs.copy()
1429 1429 node = hex(cl.node(rev))
1430 1430 args.update(phases.preparehookargs(node, old, new))
1431 1431 repo.hook('txnclose-phase', throw=False, txnname=desc,
1432 1432 **pycompat.strkwargs(args))
1433 1433
1434 1434 repo.hook('txnclose', throw=False, txnname=desc,
1435 1435 **pycompat.strkwargs(hookargs))
1436 1436 reporef()._afterlock(hookfunc)
1437 1437 tr.addfinalize('txnclose-hook', txnclosehook)
1438 1438 # Include a leading "-" to make it happen before the transaction summary
1439 1439 # reports registered via scmutil.registersummarycallback() whose names
1440 1440 # are 00-txnreport etc. That way, the caches will be warm when the
1441 1441 # callbacks run.
1442 1442 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1443 1443 def txnaborthook(tr2):
1444 1444 """To be run if the transaction is aborted
1445 1445 """
1446 1446 reporef().hook('txnabort', throw=False, txnname=desc,
1447 1447 **pycompat.strkwargs(tr2.hookargs))
1448 1448 tr.addabort('txnabort-hook', txnaborthook)
1449 1449 # avoid eager cache invalidation. in-memory data should be identical
1450 1450 # to stored data if the transaction has no error.
1451 1451 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1452 1452 self._transref = weakref.ref(tr)
1453 1453 scmutil.registersummarycallback(self, tr, desc)
1454 1454 return tr
1455 1455
1456 1456 def _journalfiles(self):
1457 1457 return ((self.svfs, 'journal'),
1458 1458 (self.vfs, 'journal.dirstate'),
1459 1459 (self.vfs, 'journal.branch'),
1460 1460 (self.vfs, 'journal.desc'),
1461 1461 (self.vfs, 'journal.bookmarks'),
1462 1462 (self.svfs, 'journal.phaseroots'))
1463 1463
1464 1464 def undofiles(self):
1465 1465 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1466 1466
1467 1467 @unfilteredmethod
1468 1468 def _writejournal(self, desc):
1469 1469 self.dirstate.savebackup(None, 'journal.dirstate')
1470 1470 narrowspec.savebackup(self, 'journal.narrowspec')
1471 1471 self.vfs.write("journal.branch",
1472 1472 encoding.fromlocal(self.dirstate.branch()))
1473 1473 self.vfs.write("journal.desc",
1474 1474 "%d\n%s\n" % (len(self), desc))
1475 1475 self.vfs.write("journal.bookmarks",
1476 1476 self.vfs.tryread("bookmarks"))
1477 1477 self.svfs.write("journal.phaseroots",
1478 1478 self.svfs.tryread("phaseroots"))
1479 1479
1480 1480 def recover(self):
1481 1481 with self.lock():
1482 1482 if self.svfs.exists("journal"):
1483 1483 self.ui.status(_("rolling back interrupted transaction\n"))
1484 1484 vfsmap = {'': self.svfs,
1485 1485 'plain': self.vfs,}
1486 1486 transaction.rollback(self.svfs, vfsmap, "journal",
1487 1487 self.ui.warn,
1488 1488 checkambigfiles=_cachedfiles)
1489 1489 self.invalidate()
1490 1490 return True
1491 1491 else:
1492 1492 self.ui.warn(_("no interrupted transaction available\n"))
1493 1493 return False
1494 1494
1495 1495 def rollback(self, dryrun=False, force=False):
1496 1496 wlock = lock = dsguard = None
1497 1497 try:
1498 1498 wlock = self.wlock()
1499 1499 lock = self.lock()
1500 1500 if self.svfs.exists("undo"):
1501 1501 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1502 1502
1503 1503 return self._rollback(dryrun, force, dsguard)
1504 1504 else:
1505 1505 self.ui.warn(_("no rollback information available\n"))
1506 1506 return 1
1507 1507 finally:
1508 1508 release(dsguard, lock, wlock)
1509 1509
1510 1510 @unfilteredmethod # Until we get smarter cache management
1511 1511 def _rollback(self, dryrun, force, dsguard):
1512 1512 ui = self.ui
1513 1513 try:
1514 1514 args = self.vfs.read('undo.desc').splitlines()
1515 1515 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1516 1516 if len(args) >= 3:
1517 1517 detail = args[2]
1518 1518 oldtip = oldlen - 1
1519 1519
1520 1520 if detail and ui.verbose:
1521 1521 msg = (_('repository tip rolled back to revision %d'
1522 1522 ' (undo %s: %s)\n')
1523 1523 % (oldtip, desc, detail))
1524 1524 else:
1525 1525 msg = (_('repository tip rolled back to revision %d'
1526 1526 ' (undo %s)\n')
1527 1527 % (oldtip, desc))
1528 1528 except IOError:
1529 1529 msg = _('rolling back unknown transaction\n')
1530 1530 desc = None
1531 1531
1532 1532 if not force and self['.'] != self['tip'] and desc == 'commit':
1533 1533 raise error.Abort(
1534 1534 _('rollback of last commit while not checked out '
1535 1535 'may lose data'), hint=_('use -f to force'))
1536 1536
1537 1537 ui.status(msg)
1538 1538 if dryrun:
1539 1539 return 0
1540 1540
1541 1541 parents = self.dirstate.parents()
1542 1542 self.destroying()
1543 1543 vfsmap = {'plain': self.vfs, '': self.svfs}
1544 1544 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1545 1545 checkambigfiles=_cachedfiles)
1546 1546 if self.vfs.exists('undo.bookmarks'):
1547 1547 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1548 1548 if self.svfs.exists('undo.phaseroots'):
1549 1549 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1550 1550 self.invalidate()
1551 1551
1552 1552 parentgone = (parents[0] not in self.changelog.nodemap or
1553 1553 parents[1] not in self.changelog.nodemap)
1554 1554 if parentgone:
1555 1555 # prevent dirstateguard from overwriting already restored one
1556 1556 dsguard.close()
1557 1557
1558 1558 narrowspec.restorebackup(self, 'undo.narrowspec')
1559 1559 self.dirstate.restorebackup(None, 'undo.dirstate')
1560 1560 try:
1561 1561 branch = self.vfs.read('undo.branch')
1562 1562 self.dirstate.setbranch(encoding.tolocal(branch))
1563 1563 except IOError:
1564 1564 ui.warn(_('named branch could not be reset: '
1565 1565 'current branch is still \'%s\'\n')
1566 1566 % self.dirstate.branch())
1567 1567
1568 1568 parents = tuple([p.rev() for p in self[None].parents()])
1569 1569 if len(parents) > 1:
1570 1570 ui.status(_('working directory now based on '
1571 1571 'revisions %d and %d\n') % parents)
1572 1572 else:
1573 1573 ui.status(_('working directory now based on '
1574 1574 'revision %d\n') % parents)
1575 1575 mergemod.mergestate.clean(self, self['.'].node())
1576 1576
1577 1577 # TODO: if we know which new heads may result from this rollback, pass
1578 1578 # them to destroy(), which will prevent the branchhead cache from being
1579 1579 # invalidated.
1580 1580 self.destroyed()
1581 1581 return 0
1582 1582
1583 1583 def _buildcacheupdater(self, newtransaction):
1584 1584 """called during transaction to build the callback updating cache
1585 1585
1586 1586 Lives on the repository to help extension who might want to augment
1587 1587 this logic. For this purpose, the created transaction is passed to the
1588 1588 method.
1589 1589 """
1590 1590 # we must avoid cyclic reference between repo and transaction.
1591 1591 reporef = weakref.ref(self)
1592 1592 def updater(tr):
1593 1593 repo = reporef()
1594 1594 repo.updatecaches(tr)
1595 1595 return updater
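# Illustrative sketch, not part of this changeset: as the docstring above
# suggests, an extension could wrap _buildcacheupdater to warm its own
# cache after the built-in ones. `_warmmycache` is a hypothetical helper.
#
#     def wrapper(orig, self, newtransaction):
#         updater = orig(self, newtransaction)
#         def extendedupdater(tr):
#             updater(tr)
#             _warmmycache(self, tr)
#         return extendedupdater
#     extensions.wrapfunction(localrepo.localrepository,
#                             '_buildcacheupdater', wrapper)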
1596 1596
1597 1597 @unfilteredmethod
1598 1598 def updatecaches(self, tr=None, full=False):
1599 1599 """warm appropriate caches
1600 1600
1601 1601 If this function is called after a transaction closed, the transaction
1602 1602 will be available in the 'tr' argument. This can be used to selectively
1603 1603 update caches relevant to the changes in that transaction.
1604 1604
1605 1605 If 'full' is set, make sure all caches the function knows about have
1606 1606 up-to-date data, even the ones usually loaded lazily.
1607 1607 """
1608 1608 if tr is not None and tr.hookargs.get('source') == 'strip':
1609 1609 # During strip, many caches are invalid but
1610 1610 # later call to `destroyed` will refresh them.
1611 1611 return
1612 1612
1613 1613 if tr is None or tr.changes['revs']:
1614 1614 # updating the unfiltered branchmap should refresh all the others.
1615 1615 self.ui.debug('updating the branch cache\n')
1616 1616 branchmap.updatecache(self.filtered('served'))
1617 1617
1618 1618 if full:
1619 1619 rbc = self.revbranchcache()
1620 1620 for r in self.changelog:
1621 1621 rbc.branchinfo(r)
1622 1622 rbc.write()
1623 1623
1624 1624 # ensure the working copy parents are in the manifestfulltextcache
1625 1625 for ctx in self['.'].parents():
1626 1626 ctx.manifest() # accessing the manifest is enough
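# Illustrative sketch, not part of this changeset: warming every cache
# outside of any transaction, roughly what 'hg debugupdatecaches' does
# (assuming that debug command's behavior in this release):
#
#     with repo.wlock(), repo.lock():
#         repo.updatecaches(tr=None, full=True)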
1627 1627
1628 1628 def invalidatecaches(self):
1629 1629
1630 1630 if '_tagscache' in vars(self):
1631 1631 # can't use delattr on proxy
1632 1632 del self.__dict__['_tagscache']
1633 1633
1634 1634 self.unfiltered()._branchcaches.clear()
1635 1635 self.invalidatevolatilesets()
1636 1636 self._sparsesignaturecache.clear()
1637 1637
1638 1638 def invalidatevolatilesets(self):
1639 1639 self.filteredrevcache.clear()
1640 1640 obsolete.clearobscaches(self)
1641 1641
1642 1642 def invalidatedirstate(self):
1643 1643 '''Invalidates the dirstate, causing the next call to dirstate
1644 1644 to check if it was modified since the last time it was read,
1645 1645 rereading it if it has.
1646 1646
1647 1647 This differs from dirstate.invalidate() in that it doesn't always
1648 1648 reread the dirstate. Use dirstate.invalidate() if you want to
1649 1649 explicitly reread the dirstate (i.e. restore it to a previously
1650 1650 known good state).'''
1651 1651 if hasunfilteredcache(self, 'dirstate'):
1652 1652 for k in self.dirstate._filecache:
1653 1653 try:
1654 1654 delattr(self.dirstate, k)
1655 1655 except AttributeError:
1656 1656 pass
1657 1657 delattr(self.unfiltered(), 'dirstate')
1658 1658
1659 1659 def invalidate(self, clearfilecache=False):
1660 1660 '''Invalidates both store and non-store parts other than dirstate
1661 1661
1662 1662 If a transaction is running, invalidation of store is omitted,
1663 1663 because discarding in-memory changes might cause inconsistency
1664 1664 (e.g. an incomplete fncache causes unintentional failure, but
1665 1665 a redundant one doesn't).
1666 1666 '''
1667 1667 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1668 1668 for k in list(self._filecache.keys()):
1669 1669 # dirstate is invalidated separately in invalidatedirstate()
1670 1670 if k == 'dirstate':
1671 1671 continue
1672 1672 if (k == 'changelog' and
1673 1673 self.currenttransaction() and
1674 1674 self.changelog._delayed):
1675 1675 # The changelog object may store unwritten revisions. We don't
1676 1676 # want to lose them.
1677 1677 # TODO: Solve the problem instead of working around it.
1678 1678 continue
1679 1679
1680 1680 if clearfilecache:
1681 1681 del self._filecache[k]
1682 1682 try:
1683 1683 delattr(unfiltered, k)
1684 1684 except AttributeError:
1685 1685 pass
1686 1686 self.invalidatecaches()
1687 1687 if not self.currenttransaction():
1688 1688 # TODO: Changing contents of store outside transaction
1689 1689 # causes inconsistency. We should make in-memory store
1690 1690 # changes detectable, and abort if changed.
1691 1691 self.store.invalidatecaches()
1692 1692
1693 1693 def invalidateall(self):
1694 1694 '''Fully invalidates both store and non-store parts, causing the
1695 1695 subsequent operation to reread any outside changes.'''
1696 1696 # extensions should hook this to invalidate their caches
1697 1697 self.invalidate()
1698 1698 self.invalidatedirstate()
1699 1699
1700 1700 @unfilteredmethod
1701 1701 def _refreshfilecachestats(self, tr):
1702 1702 """Reload stats of cached files so that they are flagged as valid"""
1703 1703 for k, ce in self._filecache.items():
1704 1704 k = pycompat.sysstr(k)
1705 1705 if k == r'dirstate' or k not in self.__dict__:
1706 1706 continue
1707 1707 ce.refresh()
1708 1708
1709 1709 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1710 1710 inheritchecker=None, parentenvvar=None):
1711 1711 parentlock = None
1712 1712 # the contents of parentenvvar are used by the underlying lock to
1713 1713 # determine whether it can be inherited
1714 1714 if parentenvvar is not None:
1715 1715 parentlock = encoding.environ.get(parentenvvar)
1716 1716
1717 1717 timeout = 0
1718 1718 warntimeout = 0
1719 1719 if wait:
1720 1720 timeout = self.ui.configint("ui", "timeout")
1721 1721 warntimeout = self.ui.configint("ui", "timeout.warn")
1722 1722 # internal config: ui.signal-safe-lock
1723 1723 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1724 1724
1725 1725 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1726 1726 releasefn=releasefn,
1727 1727 acquirefn=acquirefn, desc=desc,
1728 1728 inheritchecker=inheritchecker,
1729 1729 parentlock=parentlock,
1730 1730 signalsafe=signalsafe)
1731 1731 return l
1732 1732
1733 1733 def _afterlock(self, callback):
1734 1734 """add a callback to be run when the repository is fully unlocked
1735 1735
1736 1736 The callback will be executed when the outermost lock is released
1737 1737 (with wlock being higher level than 'lock')."""
1738 1738 for ref in (self._wlockref, self._lockref):
1739 1739 l = ref and ref()
1740 1740 if l and l.held:
1741 1741 l.postrelease.append(callback)
1742 1742 break
1743 1743 else: # no lock has been found.
1744 1744 callback()
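# Illustrative sketch, not part of this changeset: _afterlock defers work
# until the outermost lock is released; with no lock held, the callback
# runs immediately. `notify` is a hypothetical function.
#
#     def notify():
#         repo.ui.note('all locks released\n')
#     repo._afterlock(notify)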
1745 1745
1746 1746 def lock(self, wait=True):
1747 1747 '''Lock the repository store (.hg/store) and return a weak reference
1748 1748 to the lock. Use this before modifying the store (e.g. committing or
1749 1749 stripping). If you are opening a transaction, get a lock as well.
1750 1750
1751 1751 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1752 1752 'wlock' first to avoid a deadlock hazard.'''
1753 1753 l = self._currentlock(self._lockref)
1754 1754 if l is not None:
1755 1755 l.lock()
1756 1756 return l
1757 1757
1758 1758 l = self._lock(self.svfs, "lock", wait, None,
1759 1759 self.invalidate, _('repository %s') % self.origroot)
1760 1760 self._lockref = weakref.ref(l)
1761 1761 return l
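# Illustrative sketch, not part of this changeset: the acquisition order
# prescribed here when both locks and a transaction are needed (locks and
# transactions support the context-manager protocol):
#
#     with repo.wlock(), repo.lock(), repo.transaction('my-operation'):
#         pass  # modify the store and .hg metadata here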
1762 1762
1763 1763 def _wlockchecktransaction(self):
1764 1764 if self.currenttransaction() is not None:
1765 1765 raise error.LockInheritanceContractViolation(
1766 1766 'wlock cannot be inherited in the middle of a transaction')
1767 1767
1768 1768 def wlock(self, wait=True):
1769 1769 '''Lock the non-store parts of the repository (everything under
1770 1770 .hg except .hg/store) and return a weak reference to the lock.
1771 1771
1772 1772 Use this before modifying files in .hg.
1773 1773
1774 1774 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1775 1775 'wlock' first to avoid a deadlock hazard.'''
1776 1776 l = self._wlockref and self._wlockref()
1777 1777 if l is not None and l.held:
1778 1778 l.lock()
1779 1779 return l
1780 1780
1781 1781 # We do not need to check for non-waiting lock acquisition. Such
1782 1782 # acquisitions would not cause deadlock, as they would just fail.
1783 1783 if wait and (self.ui.configbool('devel', 'all-warnings')
1784 1784 or self.ui.configbool('devel', 'check-locks')):
1785 1785 if self._currentlock(self._lockref) is not None:
1786 1786 self.ui.develwarn('"wlock" acquired after "lock"')
1787 1787
1788 1788 def unlock():
1789 1789 if self.dirstate.pendingparentchange():
1790 1790 self.dirstate.invalidate()
1791 1791 else:
1792 1792 self.dirstate.write(None)
1793 1793
1794 1794 self._filecache['dirstate'].refresh()
1795 1795
1796 1796 l = self._lock(self.vfs, "wlock", wait, unlock,
1797 1797 self.invalidatedirstate, _('working directory of %s') %
1798 1798 self.origroot,
1799 1799 inheritchecker=self._wlockchecktransaction,
1800 1800 parentenvvar='HG_WLOCK_LOCKER')
1801 1801 self._wlockref = weakref.ref(l)
1802 1802 return l
1803 1803
1804 1804 def _currentlock(self, lockref):
1805 1805 """Returns the lock if it's held, or None if it's not."""
1806 1806 if lockref is None:
1807 1807 return None
1808 1808 l = lockref()
1809 1809 if l is None or not l.held:
1810 1810 return None
1811 1811 return l
1812 1812
1813 1813 def currentwlock(self):
1814 1814 """Returns the wlock if it's held, or None if it's not."""
1815 1815 return self._currentlock(self._wlockref)
1816 1816
1817 1817 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1818 1818 """
1819 1819 commit an individual file as part of a larger transaction
1820 1820 """
1821 1821
1822 1822 fname = fctx.path()
1823 1823 fparent1 = manifest1.get(fname, nullid)
1824 1824 fparent2 = manifest2.get(fname, nullid)
1825 1825 if isinstance(fctx, context.filectx):
1826 1826 node = fctx.filenode()
1827 1827 if node in [fparent1, fparent2]:
1828 1828 self.ui.debug('reusing %s filelog entry\n' % fname)
1829 1829 if manifest1.flags(fname) != fctx.flags():
1830 1830 changelist.append(fname)
1831 1831 return node
1832 1832
1833 1833 flog = self.file(fname)
1834 1834 meta = {}
1835 1835 copy = fctx.renamed()
1836 1836 if copy and copy[0] != fname:
1837 1837 # Mark the new revision of this file as a copy of another
1838 1838 # file. This copy data will effectively act as a parent
1839 1839 # of this new revision. If this is a merge, the first
1840 1840 # parent will be the nullid (meaning "look up the copy data")
1841 1841 # and the second one will be the other parent. For example:
1842 1842 #
1843 1843 # 0 --- 1 --- 3 rev1 changes file foo
1844 1844 # \ / rev2 renames foo to bar and changes it
1845 1845 # \- 2 -/ rev3 should have bar with all changes and
1846 1846 # should record that bar descends from
1847 1847 # bar in rev2 and foo in rev1
1848 1848 #
1849 1849 # this allows this merge to succeed:
1850 1850 #
1851 1851 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1852 1852 # \ / merging rev3 and rev4 should use bar@rev2
1853 1853 # \- 2 --- 4 as the merge base
1854 1854 #
1855 1855
1856 1856 cfname = copy[0]
1857 1857 crev = manifest1.get(cfname)
1858 1858 newfparent = fparent2
1859 1859
1860 1860 if manifest2: # branch merge
1861 1861 if fparent2 == nullid or crev is None: # copied on remote side
1862 1862 if cfname in manifest2:
1863 1863 crev = manifest2[cfname]
1864 1864 newfparent = fparent1
1865 1865
1866 1866 # Here, we used to search backwards through history to try to find
1867 1867 # where the file copy came from if the source of a copy was not in
1868 1868 # the parent directory. However, this doesn't actually make sense to
1869 1869 # do (what does a copy from something not in your working copy even
1870 1870 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1871 1871 # the user that copy information was dropped, so if they didn't
1872 1872 # expect this outcome it can be fixed, but this is the correct
1873 1873 # behavior in this circumstance.
1874 1874
1875 1875 if crev:
1876 1876 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1877 1877 meta["copy"] = cfname
1878 1878 meta["copyrev"] = hex(crev)
1879 1879 fparent1, fparent2 = nullid, newfparent
1880 1880 else:
1881 1881 self.ui.warn(_("warning: can't find ancestor for '%s' "
1882 1882 "copied from '%s'!\n") % (fname, cfname))
1883 1883
1884 1884 elif fparent1 == nullid:
1885 1885 fparent1, fparent2 = fparent2, nullid
1886 1886 elif fparent2 != nullid:
1887 1887 # is one parent an ancestor of the other?
1888 1888 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1889 1889 if fparent1 in fparentancestors:
1890 1890 fparent1, fparent2 = fparent2, nullid
1891 1891 elif fparent2 in fparentancestors:
1892 1892 fparent2 = nullid
1893 1893
1894 1894 # is the file changed?
1895 1895 text = fctx.data()
1896 1896 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1897 1897 changelist.append(fname)
1898 1898 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1899 1899 # are just the flags changed during merge?
1900 1900 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1901 1901 changelist.append(fname)
1902 1902
1903 1903 return fparent1
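# Illustrative sketch, not part of this changeset: inspecting the copy
# metadata that _filecommit records, via the filectx API used above
# ('bar' is a hypothetical file name):
#
#     fctx = repo['.']['bar']
#     copied = fctx.renamed()    # falsy, or (source path, source filenode)
#     if copied and copied[0] != fctx.path():
#         repo.ui.write('copied from %s\n' % copied[0])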
1904 1904
1905 1905 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1906 1906 """check for commit arguments that aren't committable"""
1907 1907 if match.isexact() or match.prefix():
1908 1908 matched = set(status.modified + status.added + status.removed)
1909 1909
1910 1910 for f in match.files():
1911 1911 f = self.dirstate.normalize(f)
1912 1912 if f == '.' or f in matched or f in wctx.substate:
1913 1913 continue
1914 1914 if f in status.deleted:
1915 1915 fail(f, _('file not found!'))
1916 1916 if f in vdirs: # visited directory
1917 1917 d = f + '/'
1918 1918 for mf in matched:
1919 1919 if mf.startswith(d):
1920 1920 break
1921 1921 else:
1922 1922 fail(f, _("no match under directory!"))
1923 1923 elif f not in self.dirstate:
1924 1924 fail(f, _("file not tracked!"))
1925 1925
1926 1926 @unfilteredmethod
1927 1927 def commit(self, text="", user=None, date=None, match=None, force=False,
1928 1928 editor=False, extra=None):
1929 1929 """Add a new revision to current repository.
1930 1930
1931 1931 Revision information is gathered from the working directory,
1932 1932 match can be used to filter the committed files. If editor is
1933 1933 supplied, it is called to get a commit message.
1934 1934 """
1935 1935 if extra is None:
1936 1936 extra = {}
1937 1937
1938 1938 def fail(f, msg):
1939 1939 raise error.Abort('%s: %s' % (f, msg))
1940 1940
1941 1941 if not match:
1942 1942 match = matchmod.always(self.root, '')
1943 1943
1944 1944 if not force:
1945 1945 vdirs = []
1946 1946 match.explicitdir = vdirs.append
1947 1947 match.bad = fail
1948 1948
1949 1949 wlock = lock = tr = None
1950 1950 try:
1951 1951 wlock = self.wlock()
1952 1952 lock = self.lock() # for recent changelog (see issue4368)
1953 1953
1954 1954 wctx = self[None]
1955 1955 merge = len(wctx.parents()) > 1
1956 1956
1957 1957 if not force and merge and not match.always():
1958 1958 raise error.Abort(_('cannot partially commit a merge '
1959 1959 '(do not specify files or patterns)'))
1960 1960
1961 1961 status = self.status(match=match, clean=force)
1962 1962 if force:
1963 1963 status.modified.extend(status.clean) # mq may commit clean files
1964 1964
1965 1965 # check subrepos
1966 1966 subs, commitsubs, newstate = subrepoutil.precommit(
1967 1967 self.ui, wctx, status, match, force=force)
1968 1968
1969 1969 # make sure all explicit patterns are matched
1970 1970 if not force:
1971 1971 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1972 1972
1973 1973 cctx = context.workingcommitctx(self, status,
1974 1974 text, user, date, extra)
1975 1975
1976 1976 # internal config: ui.allowemptycommit
1977 1977 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1978 1978 or extra.get('close') or merge or cctx.files()
1979 1979 or self.ui.configbool('ui', 'allowemptycommit'))
1980 1980 if not allowemptycommit:
1981 1981 return None
1982 1982
1983 1983 if merge and cctx.deleted():
1984 1984 raise error.Abort(_("cannot commit merge with missing files"))
1985 1985
1986 1986 ms = mergemod.mergestate.read(self)
1987 1987 mergeutil.checkunresolved(ms)
1988 1988
1989 1989 if editor:
1990 1990 cctx._text = editor(self, cctx, subs)
1991 1991 edited = (text != cctx._text)
1992 1992
1993 1993 # Save commit message in case this transaction gets rolled back
1994 1994 # (e.g. by a pretxncommit hook). Leave the content alone on
1995 1995 # the assumption that the user will use the same editor again.
1996 1996 msgfn = self.savecommitmessage(cctx._text)
1997 1997
1998 1998 # commit subs and write new state
1999 1999 if subs:
2000 2000 for s in sorted(commitsubs):
2001 2001 sub = wctx.sub(s)
2002 2002 self.ui.status(_('committing subrepository %s\n') %
2003 2003 subrepoutil.subrelpath(sub))
2004 2004 sr = sub.commit(cctx._text, user, date)
2005 2005 newstate[s] = (newstate[s][0], sr)
2006 2006 subrepoutil.writestate(self, newstate)
2007 2007
2008 2008 p1, p2 = self.dirstate.parents()
2009 2009 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2010 2010 try:
2011 2011 self.hook("precommit", throw=True, parent1=hookp1,
2012 2012 parent2=hookp2)
2013 2013 tr = self.transaction('commit')
2014 2014 ret = self.commitctx(cctx, True)
2015 2015 except: # re-raises
2016 2016 if edited:
2017 2017 self.ui.write(
2018 2018 _('note: commit message saved in %s\n') % msgfn)
2019 2019 raise
2020 2020 # update bookmarks, dirstate and mergestate
2021 2021 bookmarks.update(self, [p1, p2], ret)
2022 2022 cctx.markcommitted(ret)
2023 2023 ms.reset()
2024 2024 tr.close()
2025 2025
2026 2026 finally:
2027 2027 lockmod.release(tr, lock, wlock)
2028 2028
2029 2029 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2030 2030 # hack for commands that use a temporary commit (e.g. histedit):
2031 2031 # the temporary commit may have been stripped before the hook runs
2032 2032 if self.changelog.hasnode(ret):
2033 2033 self.hook("commit", node=node, parent1=parent1,
2034 2034 parent2=parent2)
2035 2035 self._afterlock(commithook)
2036 2036 return ret
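# Illustrative sketch, not part of this changeset: a minimal programmatic
# commit of all pending working-directory changes (commit() acquires its
# own locks, as shown above):
#
#     node = repo.commit(text='example message',
#                        user='alice <alice@example.com>')
#     if node is None:
#         repo.ui.status('nothing changed\n')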
2037 2037
2038 2038 @unfilteredmethod
2039 2039 def commitctx(self, ctx, error=False):
2040 2040 """Add a new revision to current repository.
2041 2041 Revision information is passed via the context argument.
2042 2042 """
2043 2043
2044 2044 tr = None
2045 2045 p1, p2 = ctx.p1(), ctx.p2()
2046 2046 user = ctx.user()
2047 2047
2048 2048 lock = self.lock()
2049 2049 try:
2050 2050 tr = self.transaction("commit")
2051 2051 trp = weakref.proxy(tr)
2052 2052
2053 2053 if ctx.manifestnode():
2054 2054 # reuse an existing manifest revision
2055 self.ui.debug('reusing known manifest\n')
2055 2056 mn = ctx.manifestnode()
2056 2057 files = ctx.files()
2057 2058 elif ctx.files():
2058 2059 m1ctx = p1.manifestctx()
2059 2060 m2ctx = p2.manifestctx()
2060 2061 mctx = m1ctx.copy()
2061 2062
2062 2063 m = mctx.read()
2063 2064 m1 = m1ctx.read()
2064 2065 m2 = m2ctx.read()
2065 2066
2066 2067 # check in files
2067 2068 added = []
2068 2069 changed = []
2069 2070 removed = list(ctx.removed())
2070 2071 linkrev = len(self)
2071 2072 self.ui.note(_("committing files:\n"))
2072 2073 for f in sorted(ctx.modified() + ctx.added()):
2073 2074 self.ui.note(f + "\n")
2074 2075 try:
2075 2076 fctx = ctx[f]
2076 2077 if fctx is None:
2077 2078 removed.append(f)
2078 2079 else:
2079 2080 added.append(f)
2080 2081 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2081 2082 trp, changed)
2082 2083 m.setflag(f, fctx.flags())
2083 2084 except OSError as inst:
2084 2085 self.ui.warn(_("trouble committing %s!\n") % f)
2085 2086 raise
2086 2087 except IOError as inst:
2087 2088 errcode = getattr(inst, 'errno', errno.ENOENT)
2088 2089 if error or errcode and errcode != errno.ENOENT:
2089 2090 self.ui.warn(_("trouble committing %s!\n") % f)
2090 2091 raise
2091 2092
2092 2093 # update manifest
2093 2094 self.ui.note(_("committing manifest\n"))
2094 2095 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2095 2096 drop = [f for f in removed if f in m]
2096 2097 for f in drop:
2097 2098 del m[f]
2098 2099 mn = mctx.write(trp, linkrev,
2099 2100 p1.manifestnode(), p2.manifestnode(),
2100 2101 added, drop)
2101 2102 files = changed + removed
2102 2103 else:
2104 self.ui.debug('reusing manifest from p1 (no file change)\n')
2103 2105 mn = p1.manifestnode()
2104 2106 files = []
2105 2107
2106 2108 # update changelog
2107 2109 self.ui.note(_("committing changelog\n"))
2108 2110 self.changelog.delayupdate(tr)
2109 2111 n = self.changelog.add(mn, files, ctx.description(),
2110 2112 trp, p1.node(), p2.node(),
2111 2113 user, ctx.date(), ctx.extra().copy())
2112 2114 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2113 2115 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2114 2116 parent2=xp2)
2116 2118 # set the new commit in its proper phase
2116 2118 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2117 2119 if targetphase:
2119 2121 # retracting the phase boundary does not alter parent changesets.
2120 2122 # if a parent has a higher phase, the resulting phase will
2121 2123 # be compliant anyway
2122 2124 #
2123 2125 # if the minimal phase was 0, we don't need to retract anything
2123 2125 phases.registernew(self, tr, targetphase, [n])
2124 2126 tr.close()
2125 2127 return n
2126 2128 finally:
2127 2129 if tr:
2128 2130 tr.release()
2129 2131 lock.release()
2130 2132
2131 2133 @unfilteredmethod
2132 2134 def destroying(self):
2133 2135 '''Inform the repository that nodes are about to be destroyed.
2134 2136 Intended for use by strip and rollback, so there's a common
2135 2137 place for anything that has to be done before destroying history.
2136 2138
2137 2139 This is mostly useful for saving state that is in memory and waiting
2138 2140 to be flushed when the current lock is released. Because a call to
2139 2141 destroyed is imminent, the repo will be invalidated causing those
2140 2142 changes to stay in memory (waiting for the next unlock), or vanish
2141 2143 completely.
2142 2144 '''
2143 2145 # When using the same lock to commit and strip, the phasecache is left
2144 2146 # dirty after committing. Then when we strip, the repo is invalidated,
2145 2147 # causing those changes to disappear.
2146 2148 if '_phasecache' in vars(self):
2147 2149 self._phasecache.write()
2148 2150
2149 2151 @unfilteredmethod
2150 2152 def destroyed(self):
2151 2153 '''Inform the repository that nodes have been destroyed.
2152 2154 Intended for use by strip and rollback, so there's a common
2153 2155 place for anything that has to be done after destroying history.
2154 2156 '''
2155 2157 # When one tries to:
2156 2158 # 1) destroy nodes thus calling this method (e.g. strip)
2157 2159 # 2) use phasecache somewhere (e.g. commit)
2158 2160 #
2159 2161 # then 2) will fail because the phasecache contains nodes that were
2160 2162 # removed. We can either remove phasecache from the filecache,
2161 2163 # causing it to reload next time it is accessed, or simply filter
2162 2164 # the removed nodes now and write the updated cache.
2163 2165 self._phasecache.filterunknown(self)
2164 2166 self._phasecache.write()
2165 2167
2166 2168 # refresh all repository caches
2167 2169 self.updatecaches()
2168 2170
2169 2171 # Ensure the persistent tag cache is updated. Doing it now
2170 2172 # means that the tag cache only has to worry about destroyed
2171 2173 # heads immediately after a strip/rollback. That in turn
2172 2174 # guarantees that "cachetip == currenttip" (comparing both rev
2173 2175 # and node) always means no nodes have been added or destroyed.
2174 2176
2175 2177 # XXX this is suboptimal when qrefresh'ing: we strip the current
2176 2178 # head, refresh the tag cache, then immediately add a new head.
2177 2179 # But I think doing it this way is necessary for the "instant
2178 2180 # tag cache retrieval" case to work.
2179 2181 self.invalidate()
2180 2182
2181 2183 def status(self, node1='.', node2=None, match=None,
2182 2184 ignored=False, clean=False, unknown=False,
2183 2185 listsubrepos=False):
2184 2186 '''a convenience method that calls node1.status(node2)'''
2185 2187 return self[node1].status(node2, match, ignored, clean, unknown,
2186 2188 listsubrepos)
2187 2189
2188 2190 def addpostdsstatus(self, ps):
2189 2191 """Add a callback to run within the wlock, at the point at which status
2190 2192 fixups happen.
2191 2193
2192 2194 On status completion, callback(wctx, status) will be called with the
2193 2195 wlock held, unless the dirstate has changed from underneath or the wlock
2194 2196 couldn't be grabbed.
2195 2197
2196 2198 Callbacks should not capture and use a cached copy of the dirstate --
2197 2199 it might change in the meantime. Instead, they should access the
2198 2200 dirstate via wctx.repo().dirstate.
2199 2201
2200 2202 This list is emptied out after each status run -- extensions should
2201 2203 make sure they add to this list each time dirstate.status is called.
2202 2204 Extensions should also make sure they don't call this for statuses
2203 2205 that don't involve the dirstate.
2204 2206 """
2205 2207
2206 2208 # The list is located here for uniqueness reasons -- it is actually
2207 2209 # managed by the workingctx, but that isn't unique per-repo.
2208 2210 self._postdsstatus.append(ps)
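# Illustrative sketch, not part of this changeset: a callback matching
# the (wctx, status) protocol described above. `fixup` is hypothetical.
#
#     def fixup(wctx, status):
#         wctx.repo().ui.note('%d files modified\n' % len(status.modified))
#     repo.addpostdsstatus(fixup)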
2209 2211
2210 2212 def postdsstatus(self):
2211 2213 """Used by workingctx to get the list of post-dirstate-status hooks."""
2212 2214 return self._postdsstatus
2213 2215
2214 2216 def clearpostdsstatus(self):
2215 2217 """Used by workingctx to clear post-dirstate-status hooks."""
2216 2218 del self._postdsstatus[:]
2217 2219
2218 2220 def heads(self, start=None):
2219 2221 if start is None:
2220 2222 cl = self.changelog
2221 2223 headrevs = reversed(cl.headrevs())
2222 2224 return [cl.node(rev) for rev in headrevs]
2223 2225
2224 2226 heads = self.changelog.heads(start)
2225 2227 # sort the output in rev descending order
2226 2228 return sorted(heads, key=self.changelog.rev, reverse=True)
2227 2229
2228 2230 def branchheads(self, branch=None, start=None, closed=False):
2229 2231 '''return a (possibly filtered) list of heads for the given branch
2230 2232
2231 2233 Heads are returned in topological order, from newest to oldest.
2232 2234 If branch is None, use the dirstate branch.
2233 2235 If start is not None, return only heads reachable from start.
2234 2236 If closed is True, return heads that are marked as closed as well.
2235 2237 '''
2236 2238 if branch is None:
2237 2239 branch = self[None].branch()
2238 2240 branches = self.branchmap()
2239 2241 if branch not in branches:
2240 2242 return []
2241 2243 # the cache returns heads ordered lowest to highest
2242 2244 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2243 2245 if start is not None:
2244 2246 # filter out the heads that cannot be reached from startrev
2245 2247 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2246 2248 bheads = [h for h in bheads if h in fbheads]
2247 2249 return bheads
2248 2250
2249 2251 def branches(self, nodes):
2250 2252 if not nodes:
2251 2253 nodes = [self.changelog.tip()]
2252 2254 b = []
2253 2255 for n in nodes:
2254 2256 t = n
2255 2257 while True:
2256 2258 p = self.changelog.parents(n)
2257 2259 if p[1] != nullid or p[0] == nullid:
2258 2260 b.append((t, n, p[0], p[1]))
2259 2261 break
2260 2262 n = p[0]
2261 2263 return b
2262 2264
2263 2265 def between(self, pairs):
2264 2266 r = []
2265 2267
2266 2268 for top, bottom in pairs:
2267 2269 n, l, i = top, [], 0
2268 2270 f = 1
2269 2271
2270 2272 while n != bottom and n != nullid:
2271 2273 p = self.changelog.parents(n)[0]
2272 2274 if i == f:
2273 2275 l.append(n)
2274 2276 f = f * 2
2275 2277 n = p
2276 2278 i += 1
2277 2279
2278 2280 r.append(l)
2279 2281
2280 2282 return r
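# Illustrative note, not part of this changeset: for each (top, bottom)
# pair, between() walks first parents from top and collects the nodes at
# distances 1, 2, 4, 8, ... -- the logarithmically spaced samples used by
# the legacy 'between' wire-protocol command.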
2281 2283
2282 2284 def checkpush(self, pushop):
2283 2285 """Extensions can override this function if additional checks have
2284 2286 to be performed before pushing, or call it if they override push
2285 2287 command.
2286 2288 """
2287 2289
2288 2290 @unfilteredpropertycache
2289 2291 def prepushoutgoinghooks(self):
2290 2292 """Return util.hooks consists of a pushop with repo, remote, outgoing
2291 2293 methods, which are called before pushing changesets.
2292 2294 """
2293 2295 return util.hooks()
2294 2296
2295 2297 def pushkey(self, namespace, key, old, new):
2296 2298 try:
2297 2299 tr = self.currenttransaction()
2298 2300 hookargs = {}
2299 2301 if tr is not None:
2300 2302 hookargs.update(tr.hookargs)
2301 2303 hookargs = pycompat.strkwargs(hookargs)
2302 2304 hookargs[r'namespace'] = namespace
2303 2305 hookargs[r'key'] = key
2304 2306 hookargs[r'old'] = old
2305 2307 hookargs[r'new'] = new
2306 2308 self.hook('prepushkey', throw=True, **hookargs)
2307 2309 except error.HookAbort as exc:
2308 2310 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2309 2311 if exc.hint:
2310 2312 self.ui.write_err(_("(%s)\n") % exc.hint)
2311 2313 return False
2312 2314 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2313 2315 ret = pushkey.push(self, namespace, key, old, new)
2314 2316 def runhook():
2315 2317 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2316 2318 ret=ret)
2317 2319 self._afterlock(runhook)
2318 2320 return ret
2319 2321
2320 2322 def listkeys(self, namespace):
2321 2323 self.hook('prelistkeys', throw=True, namespace=namespace)
2322 2324 self.ui.debug('listing keys for "%s"\n' % namespace)
2323 2325 values = pushkey.list(self, namespace)
2324 2326 self.hook('listkeys', namespace=namespace, values=values)
2325 2327 return values
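# Illustrative sketch, not part of this changeset: bookmarks are one
# built-in pushkey namespace; keys map bookmark names to hex nodes, and
# '' stands for "no previous value". `mybook` and `newnode` are
# hypothetical.
#
#     keys = repo.listkeys('bookmarks')
#     repo.pushkey('bookmarks', 'mybook',
#                  keys.get('mybook', ''), hex(newnode))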
2326 2328
2327 2329 def debugwireargs(self, one, two, three=None, four=None, five=None):
2328 2330 '''used to test argument passing over the wire'''
2329 2331 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2330 2332 pycompat.bytestr(four),
2331 2333 pycompat.bytestr(five))
2332 2334
2333 2335 def savecommitmessage(self, text):
2334 2336 fp = self.vfs('last-message.txt', 'wb')
2335 2337 try:
2336 2338 fp.write(text)
2337 2339 finally:
2338 2340 fp.close()
2339 2341 return self.pathto(fp.name[len(self.root) + 1:])
2340 2342
2341 2343 # used to avoid circular references so destructors work
2342 2344 def aftertrans(files):
2343 2345 renamefiles = [tuple(t) for t in files]
2344 2346 def a():
2345 2347 for vfs, src, dest in renamefiles:
2346 2348 # if src and dest refer to the same file, vfs.rename is a no-op,
2347 2349 # leaving both src and dest on disk. delete dest to make sure
2348 2350 # the rename couldn't be such a no-op.
2349 2351 vfs.tryunlink(dest)
2350 2352 try:
2351 2353 vfs.rename(src, dest)
2352 2354 except OSError: # journal file does not yet exist
2353 2355 pass
2354 2356 return a
2355 2357
2356 2358 def undoname(fn):
2357 2359 base, name = os.path.split(fn)
2358 2360 assert name.startswith('journal')
2359 2361 return os.path.join(base, name.replace('journal', 'undo', 1))
2360 2362
2361 2363 def instance(ui, path, create, intents=None):
2362 2364 return localrepository(ui, util.urllocalpath(path), create,
2363 2365 intents=intents)
2364 2366
2365 2367 def islocal(path):
2366 2368 return True
2367 2369
2368 2370 def newreporequirements(repo):
2369 2371 """Determine the set of requirements for a new local repository.
2370 2372
2371 2373 Extensions can wrap this function to specify custom requirements for
2372 2374 new repositories.
2373 2375 """
2374 2376 ui = repo.ui
2375 2377 requirements = {'revlogv1'}
2376 2378 if ui.configbool('format', 'usestore'):
2377 2379 requirements.add('store')
2378 2380 if ui.configbool('format', 'usefncache'):
2379 2381 requirements.add('fncache')
2380 2382 if ui.configbool('format', 'dotencode'):
2381 2383 requirements.add('dotencode')
2382 2384
2383 2385 compengine = ui.config('experimental', 'format.compression')
2384 2386 if compengine not in util.compengines:
2385 2387 raise error.Abort(_('compression engine %s defined by '
2386 2388 'experimental.format.compression not available') %
2387 2389 compengine,
2388 2390 hint=_('run "hg debuginstall" to list available '
2389 2391 'compression engines'))
2390 2392
2391 2393 # zlib is the historical default and doesn't need an explicit requirement.
2392 2394 if compengine != 'zlib':
2393 2395 requirements.add('exp-compression-%s' % compengine)
2394 2396
2395 2397 if scmutil.gdinitconfig(ui):
2396 2398 requirements.add('generaldelta')
2397 2399 if ui.configbool('experimental', 'treemanifest'):
2398 2400 requirements.add('treemanifest')
2399 2401 # experimental config: format.sparse-revlog
2400 2402 if ui.configbool('format', 'sparse-revlog'):
2401 2403 requirements.add(SPARSEREVLOG_REQUIREMENT)
2402 2404
2403 2405 revlogv2 = ui.config('experimental', 'revlogv2')
2404 2406 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2405 2407 requirements.remove('revlogv1')
2406 2408 # generaldelta is implied by revlogv2.
2407 2409 requirements.discard('generaldelta')
2408 2410 requirements.add(REVLOGV2_REQUIREMENT)
2409 2411
2410 2412 return requirements
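# Illustrative sketch, not part of this changeset: an extension wrapping
# newreporequirements to register its own requirement, as the docstring
# above suggests ('exp-myfeature' is hypothetical):
#
#     def wrapped(orig, repo):
#         requirements = orig(repo)
#         requirements.add('exp-myfeature')
#         return requirements
#     extensions.wrapfunction(localrepo, 'newreporequirements', wrapped)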
@@ -1,151 +1,154
1 1 #require svn svn-bindings
2 2
3 3 $ cat >> $HGRCPATH <<EOF
4 4 > [extensions]
5 5 > convert =
6 6 > EOF
7 7
8 8 $ svnadmin create svn-repo
9 9 $ svnadmin load -q svn-repo < "$TESTDIR/svn/encoding.svndump"
10 10
11 11 Convert while testing all possible outputs
12 12
13 13 $ hg --debug convert svn-repo A-hg --config progress.debug=1
14 14 initializing destination A-hg repository
15 15 reparent to file:/*/$TESTTMP/svn-repo (glob)
16 16 run hg sink pre-conversion action
17 17 scanning source...
18 18 found trunk at 'trunk'
19 19 found tags at 'tags'
20 20 found branches at 'branches'
21 21 found branch branch\xc3\xa9 at 5 (esc)
22 22 found branch branch\xc3\xa9e at 6 (esc)
23 23 scanning: 1/4 revisions (25.00%)
24 24 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
25 25 fetching revision log for "/trunk" from 4 to 0
26 26 parsing revision 4 (2 changes)
27 27 parsing revision 3 (4 changes)
28 28 parsing revision 2 (3 changes)
29 29 parsing revision 1 (3 changes)
30 30 no copyfrom path, don't know what to do.
31 31 '/branches' is not under '/trunk', ignoring
32 32 '/tags' is not under '/trunk', ignoring
33 33 scanning: 2/4 revisions (50.00%)
34 34 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob)
35 35 fetching revision log for "/branches/branch\xc3\xa9" from 5 to 0 (esc)
36 36 parsing revision 5 (1 changes)
37 37 reparent to file:/*/$TESTTMP/svn-repo (glob)
38 38 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob)
39 39 found parent of branch /branches/branch\xc3\xa9 at 4: /trunk (esc)
40 40 scanning: 3/4 revisions (75.00%)
41 41 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
42 42 fetching revision log for "/branches/branch\xc3\xa9e" from 6 to 0 (esc)
43 43 parsing revision 6 (1 changes)
44 44 reparent to file:/*/$TESTTMP/svn-repo (glob)
45 45 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
46 46 found parent of branch /branches/branch\xc3\xa9e at 5: /branches/branch\xc3\xa9 (esc)
47 47 scanning: 4/4 revisions (100.00%)
48 48 scanning: 5/4 revisions (125.00%)
49 49 scanning: 6/4 revisions (150.00%)
50 50 sorting...
51 51 converting...
52 52 5 init projA
53 53 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@1
54 54 converting: 0/6 revisions (0.00%)
55 reusing manifest from p1 (no file change)
55 56 committing changelog
56 57 updating the branch cache
57 58 4 hello
58 59 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@2
59 60 converting: 1/6 revisions (16.67%)
60 61 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
61 62 scanning paths: /trunk/\xc3\xa0 0/3 paths (0.00%) (esc)
62 63 scanning paths: /trunk/\xc3\xa0/e\xcc\x81 1/3 paths (33.33%) (esc)
63 64 scanning paths: /trunk/\xc3\xa9 2/3 paths (66.67%) (esc)
64 65 committing files:
65 66 \xc3\xa0/e\xcc\x81 (esc)
66 67 getting files: \xc3\xa0/e\xcc\x81 1/2 files (50.00%) (esc)
67 68 \xc3\xa9 (esc)
68 69 getting files: \xc3\xa9 2/2 files (100.00%) (esc)
69 70 committing manifest
70 71 committing changelog
71 72 updating the branch cache
72 73 3 copy files
73 74 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@3
74 75 converting: 2/6 revisions (33.33%)
75 76 scanning paths: /trunk/\xc3\xa0 0/4 paths (0.00%) (esc)
76 77 gone from -1
77 78 reparent to file:/*/$TESTTMP/svn-repo (glob)
78 79 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
79 80 scanning paths: /trunk/\xc3\xa8 1/4 paths (25.00%) (esc)
80 81 copied to \xc3\xa8 from \xc3\xa9@2 (esc)
81 82 scanning paths: /trunk/\xc3\xa9 2/4 paths (50.00%) (esc)
82 83 gone from -1
83 84 reparent to file:/*/$TESTTMP/svn-repo (glob)
84 85 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
85 86 scanning paths: /trunk/\xc3\xb9 3/4 paths (75.00%) (esc)
86 87 mark /trunk/\xc3\xb9 came from \xc3\xa0:2 (esc)
87 88 getting files: \xc3\xa0/e\xcc\x81 1/4 files (25.00%) (esc)
88 89 getting files: \xc3\xa9 2/4 files (50.00%) (esc)
89 90 committing files:
90 91 \xc3\xa8 (esc)
91 92 getting files: \xc3\xa8 3/4 files (75.00%) (esc)
92 93 \xc3\xa8: copy \xc3\xa9:6b67ccefd5ce6de77e7ead4f5292843a0255329f (esc)
93 94 \xc3\xb9/e\xcc\x81 (esc)
94 95 getting files: \xc3\xb9/e\xcc\x81 4/4 files (100.00%) (esc)
95 96 \xc3\xb9/e\xcc\x81: copy \xc3\xa0/e\xcc\x81:a9092a3d84a37b9993b5c73576f6de29b7ea50f6 (esc)
96 97 committing manifest
97 98 committing changelog
98 99 updating the branch cache
99 100 2 remove files
100 101 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@4
101 102 converting: 3/6 revisions (50.00%)
102 103 scanning paths: /trunk/\xc3\xa8 0/2 paths (0.00%) (esc)
103 104 gone from -1
104 105 reparent to file:/*/$TESTTMP/svn-repo (glob)
105 106 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
106 107 scanning paths: /trunk/\xc3\xb9 1/2 paths (50.00%) (esc)
107 108 gone from -1
108 109 reparent to file:/*/$TESTTMP/svn-repo (glob)
109 110 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
110 111 getting files: \xc3\xa8 1/2 files (50.00%) (esc)
111 112 getting files: \xc3\xb9/e\xcc\x81 2/2 files (100.00%) (esc)
112 113 committing files:
113 114 committing manifest
114 115 committing changelog
115 116 updating the branch cache
116 117 1 branch to branch?
117 118 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/branches/branch?@5
118 119 converting: 4/6 revisions (66.67%)
119 120 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob)
120 121 scanning paths: /branches/branch\xc3\xa9 0/1 paths (0.00%) (esc)
122 reusing manifest from p1 (no file change)
121 123 committing changelog
122 124 updating the branch cache
123 125 0 branch to branch?e
124 126 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/branches/branch?e@6
125 127 converting: 5/6 revisions (83.33%)
126 128 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
127 129 scanning paths: /branches/branch\xc3\xa9e 0/1 paths (0.00%) (esc)
130 reusing manifest from p1 (no file change)
128 131 committing changelog
129 132 updating the branch cache
130 133 reparent to file:/*/$TESTTMP/svn-repo (glob)
131 134 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
132 135 reparent to file:/*/$TESTTMP/svn-repo (glob)
133 136 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
134 137 updating tags
135 138 committing files:
136 139 .hgtags
137 140 committing manifest
138 141 committing changelog
139 142 updating the branch cache
140 143 run hg sink post-conversion action
141 144 $ cd A-hg
142 145 $ hg up
143 146 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
144 147
145 148 Check tags are in UTF-8
146 149
147 150 $ cat .hgtags
148 151 e94e4422020e715add80525e8f0f46c9968689f1 branch\xc3\xa9e (esc)
149 152 f7e66f98380ed1e53a797c5c7a7a2616a7ab377d branch\xc3\xa9 (esc)
150 153
151 154 $ cd ..