wireproto: implement command executor interface for version 1 peers...
Gregory Szorc
r37648:e1b32dc4 default
@@ -1,2345 +1,2392 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 import sys
14 15 import time
15 16 import weakref
16 17
17 18 from .i18n import _
18 19 from .node import (
19 20 hex,
20 21 nullid,
21 22 short,
22 23 )
23 24 from .thirdparty.zope import (
24 25 interface as zi,
25 26 )
26 27 from . import (
27 28 bookmarks,
28 29 branchmap,
29 30 bundle2,
30 31 changegroup,
31 32 changelog,
32 33 color,
33 34 context,
34 35 dirstate,
35 36 dirstateguard,
36 37 discovery,
37 38 encoding,
38 39 error,
39 40 exchange,
40 41 extensions,
41 42 filelog,
42 43 hook,
43 44 lock as lockmod,
44 45 manifest,
45 46 match as matchmod,
46 47 merge as mergemod,
47 48 mergeutil,
48 49 namespaces,
49 50 narrowspec,
50 51 obsolete,
51 52 pathutil,
52 53 phases,
53 54 pushkey,
54 55 pycompat,
55 56 repository,
56 57 repoview,
57 58 revset,
58 59 revsetlang,
59 60 scmutil,
60 61 sparse,
61 62 store,
62 63 subrepoutil,
63 64 tags as tagsmod,
64 65 transaction,
65 66 txnutil,
66 67 util,
67 68 vfs as vfsmod,
68 69 wireprotov1peer,
69 70 )
70 71 from .utils import (
71 72 procutil,
72 73 stringutil,
73 74 )
74 75
75 76 release = lockmod.release
76 77 urlerr = util.urlerr
77 78 urlreq = util.urlreq
78 79
79 80 # set of (path, vfs-location) tuples. vfs-location is:
80 81 # - 'plain' for vfs relative paths
81 82 # - '' for svfs relative paths
82 83 _cachedfiles = set()
83 84
84 85 class _basefilecache(scmutil.filecache):
85 86 """All filecache usage on repo are done for logic that should be unfiltered
86 87 """
87 88 def __get__(self, repo, type=None):
88 89 if repo is None:
89 90 return self
90 91 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
91 92 def __set__(self, repo, value):
92 93 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
93 94 def __delete__(self, repo):
94 95 return super(_basefilecache, self).__delete__(repo.unfiltered())
95 96
96 97 class repofilecache(_basefilecache):
97 98 """filecache for files in .hg but outside of .hg/store"""
98 99 def __init__(self, *paths):
99 100 super(repofilecache, self).__init__(*paths)
100 101 for path in paths:
101 102 _cachedfiles.add((path, 'plain'))
102 103
103 104 def join(self, obj, fname):
104 105 return obj.vfs.join(fname)
105 106
106 107 class storecache(_basefilecache):
107 108 """filecache for files in the store"""
108 109 def __init__(self, *paths):
109 110 super(storecache, self).__init__(*paths)
110 111 for path in paths:
111 112 _cachedfiles.add((path, ''))
112 113
113 114 def join(self, obj, fname):
114 115 return obj.sjoin(fname)
115 116
116 117 def isfilecached(repo, name):
117 118 """check if a repo has already cached "name" filecache-ed property
118 119
119 120 This returns (cachedobj-or-None, iscached) tuple.
120 121 """
121 122 cacheentry = repo.unfiltered()._filecache.get(name, None)
122 123 if not cacheentry:
123 124 return None, False
124 125 return cacheentry.obj, True
125 126
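A short usage sketch for the helper above, assuming repo is a localrepository instance; 'changelog' is simply one of the filecache-ed properties defined later in this module:

    obj, cached = isfilecached(repo, 'changelog')
    if cached:
        # obj is the already-loaded changelog; nothing new is loaded here
        pass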
126 127 class unfilteredpropertycache(util.propertycache):
127 128 """propertycache that apply to unfiltered repo only"""
128 129
129 130 def __get__(self, repo, type=None):
130 131 unfi = repo.unfiltered()
131 132 if unfi is repo:
132 133 return super(unfilteredpropertycache, self).__get__(unfi)
133 134 return getattr(unfi, self.name)
134 135
135 136 class filteredpropertycache(util.propertycache):
136 137 """propertycache that must take filtering in account"""
137 138
138 139 def cachevalue(self, obj, value):
139 140 object.__setattr__(obj, self.name, value)
140 141
141 142
142 143 def hasunfilteredcache(repo, name):
143 144 """check if a repo has an unfilteredpropertycache value for <name>"""
144 145 return name in vars(repo.unfiltered())
145 146
146 147 def unfilteredmethod(orig):
147 148 """decorate method that always need to be run on unfiltered version"""
148 149 def wrapper(repo, *args, **kwargs):
149 150 return orig(repo.unfiltered(), *args, **kwargs)
150 151 return wrapper
151 152
152 153 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
153 154 'unbundle'}
154 155 legacycaps = moderncaps.union({'changegroupsubset'})
155 156
156 157 class localiterbatcher(wireprotov1peer.iterbatcher):
157 158 def __init__(self, local):
158 159 super(localiterbatcher, self).__init__()
159 160 self.local = local
160 161
161 162 def submit(self):
162 163 # submit for a local iter batcher is a noop
163 164 pass
164 165
165 166 def results(self):
166 167 for name, args, opts, resref in self.calls:
167 168 resref.set(getattr(self.local, name)(*args, **opts))
168 169 yield resref.value
169 170
171 @zi.implementer(repository.ipeercommandexecutor)
172 class localcommandexecutor(object):
173 def __init__(self, peer):
174 self._peer = peer
175 self._sent = False
176 self._closed = False
177
178 def __enter__(self):
179 return self
180
181 def __exit__(self, exctype, excvalue, exctb):
182 self.close()
183
184 def callcommand(self, command, args):
185 if self._sent:
186 raise error.ProgrammingError('callcommand() cannot be used after '
187 'sendcommands()')
188
189 if self._closed:
190 raise error.ProgrammingError('callcommand() cannot be used after '
191 'close()')
192
193 # We don't need to support anything fancy. Just call the named
194 # method on the peer and return a resolved future.
195 fn = getattr(self._peer, pycompat.sysstr(command))
196
197 f = pycompat.futures.Future()
198
199 try:
200 result = fn(**args)
201 except Exception:
202 f.set_exception_info(*sys.exc_info()[1:])
203 else:
204 f.set_result(result)
205
206 return f
207
208 def sendcommands(self):
209 self._sent = True
210
211 def close(self):
212 self._closed = True
213
170 214 class localpeer(repository.peer):
171 215 '''peer for a local repo; reflects only the most recent API'''
172 216
173 217 def __init__(self, repo, caps=None):
174 218 super(localpeer, self).__init__()
175 219
176 220 if caps is None:
177 221 caps = moderncaps.copy()
178 222 self._repo = repo.filtered('served')
179 223 self.ui = repo.ui
180 224 self._caps = repo._restrictcapabilities(caps)
181 225
182 226 # Begin of _basepeer interface.
183 227
184 228 def url(self):
185 229 return self._repo.url()
186 230
187 231 def local(self):
188 232 return self._repo
189 233
190 234 def peer(self):
191 235 return self
192 236
193 237 def canpush(self):
194 238 return True
195 239
196 240 def close(self):
197 241 self._repo.close()
198 242
199 243 # End of _basepeer interface.
200 244
201 245 # Begin of _basewirecommands interface.
202 246
203 247 def branchmap(self):
204 248 return self._repo.branchmap()
205 249
206 250 def capabilities(self):
207 251 return self._caps
208 252
209 253 def debugwireargs(self, one, two, three=None, four=None, five=None):
210 254 """Used to test argument passing over the wire"""
211 255 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
212 256 pycompat.bytestr(four),
213 257 pycompat.bytestr(five))
214 258
215 259 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
216 260 **kwargs):
217 261 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
218 262 common=common, bundlecaps=bundlecaps,
219 263 **kwargs)[1]
220 264 cb = util.chunkbuffer(chunks)
221 265
222 266 if exchange.bundle2requested(bundlecaps):
223 267 # When requesting a bundle2, getbundle returns a stream to make the
224 268 # wire level function happier. We need to build a proper object
225 269 # from it in the local peer.
226 270 return bundle2.getunbundler(self.ui, cb)
227 271 else:
228 272 return changegroup.getunbundler('01', cb, None)
229 273
230 274 def heads(self):
231 275 return self._repo.heads()
232 276
233 277 def known(self, nodes):
234 278 return self._repo.known(nodes)
235 279
236 280 def listkeys(self, namespace):
237 281 return self._repo.listkeys(namespace)
238 282
239 283 def lookup(self, key):
240 284 return self._repo.lookup(key)
241 285
242 286 def pushkey(self, namespace, key, old, new):
243 287 return self._repo.pushkey(namespace, key, old, new)
244 288
245 289 def stream_out(self):
246 290 raise error.Abort(_('cannot perform stream clone against local '
247 291 'peer'))
248 292
249 293 def unbundle(self, cg, heads, url):
250 294 """apply a bundle on a repo
251 295
252 296 This function handles the repo locking itself."""
253 297 try:
254 298 try:
255 299 cg = exchange.readbundle(self.ui, cg, None)
256 300 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
257 301 if util.safehasattr(ret, 'getchunks'):
258 302 # This is a bundle20 object, turn it into an unbundler.
259 303 # This little dance should be dropped eventually when the
260 304 # API is finally improved.
261 305 stream = util.chunkbuffer(ret.getchunks())
262 306 ret = bundle2.getunbundler(self.ui, stream)
263 307 return ret
264 308 except Exception as exc:
265 309 # If the exception contains output salvaged from a bundle2
266 310 # reply, we need to make sure it is printed before continuing
267 311 # to fail. So we build a bundle2 with such output and consume
268 312 # it directly.
269 313 #
270 314 # This is not very elegant but allows a "simple" solution for
271 315 # issue4594
272 316 output = getattr(exc, '_bundle2salvagedoutput', ())
273 317 if output:
274 318 bundler = bundle2.bundle20(self._repo.ui)
275 319 for out in output:
276 320 bundler.addpart(out)
277 321 stream = util.chunkbuffer(bundler.getchunks())
278 322 b = bundle2.getunbundler(self.ui, stream)
279 323 bundle2.processbundle(self._repo, b)
280 324 raise
281 325 except error.PushRaced as exc:
282 326 raise error.ResponseError(_('push failed:'),
283 327 stringutil.forcebytestr(exc))
284 328
285 329 # End of _basewirecommands interface.
286 330
287 331 # Begin of peer interface.
288 332
333 def commandexecutor(self):
334 return localcommandexecutor(self)
335
289 336 def iterbatch(self):
290 337 return localiterbatcher(self)
291 338
292 339 # End of peer interface.
293 340
294 341 class locallegacypeer(repository.legacypeer, localpeer):
295 342 '''peer extension which implements legacy methods too; used for tests with
296 343 restricted capabilities'''
297 344
298 345 def __init__(self, repo):
299 346 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
300 347
301 348 # Begin of baselegacywirecommands interface.
302 349
303 350 def between(self, pairs):
304 351 return self._repo.between(pairs)
305 352
306 353 def branches(self, nodes):
307 354 return self._repo.branches(nodes)
308 355
309 356 def changegroup(self, basenodes, source):
310 357 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
311 358 missingheads=self._repo.heads())
312 359 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
313 360
314 361 def changegroupsubset(self, bases, heads, source):
315 362 outgoing = discovery.outgoing(self._repo, missingroots=bases,
316 363 missingheads=heads)
317 364 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
318 365
319 366 # End of baselegacywirecommands interface.
320 367
321 368 # Increment the sub-version when the revlog v2 format changes to lock out old
322 369 # clients.
323 370 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
324 371
325 372 # Functions receiving (ui, features) that extensions can register to impact
326 373 # the ability to load repositories with custom requirements. Only
327 374 # functions defined in loaded extensions are called.
328 375 #
329 376 # The function receives a set of requirement strings that the repository
330 377 # is capable of opening. Functions will typically add elements to the
331 378 # set to reflect that the extension knows how to handle those requirements.
332 379 featuresetupfuncs = set()
333 380
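To sketch the registration described in the comment above: an extension module could add a callable to featuresetupfuncs so that repositories carrying its custom requirement can still be opened. The requirement string and module layout are hypothetical; note that only functions defined in loaded extensions are actually invoked.

    from mercurial import localrepo

    def featuresetup(ui, supported):
        # declare that this extension knows how to handle the requirement
        supported.add('exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)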
334 381 @zi.implementer(repository.completelocalrepository)
335 382 class localrepository(object):
336 383
337 384 # obsolete experimental requirements:
338 385 # - manifestv2: An experimental new manifest format that allowed
339 386 # for stem compression of long paths. Experiment ended up not
340 387 # being successful (repository sizes went up due to worse delta
341 388 # chains), and the code was deleted in 4.6.
342 389 supportedformats = {
343 390 'revlogv1',
344 391 'generaldelta',
345 392 'treemanifest',
346 393 REVLOGV2_REQUIREMENT,
347 394 }
348 395 _basesupported = supportedformats | {
349 396 'store',
350 397 'fncache',
351 398 'shared',
352 399 'relshared',
353 400 'dotencode',
354 401 'exp-sparse',
355 402 }
356 403 openerreqs = {
357 404 'revlogv1',
358 405 'generaldelta',
359 406 'treemanifest',
360 407 }
361 408
362 409 # list of prefixes for files which can be written without 'wlock'
363 410 # Extensions should extend this list when needed
364 411 _wlockfreeprefix = {
365 412 # We might consider requiring 'wlock' for the next
366 413 # two, but pretty much all the existing code assume
367 414 # wlock is not needed so we keep them excluded for
368 415 # now.
369 416 'hgrc',
370 417 'requires',
371 418 # XXX cache is a complicated business; someone
372 419 # should investigate this in depth at some point
373 420 'cache/',
374 421 # XXX shouldn't dirstate be covered by the wlock?
375 422 'dirstate',
376 423 # XXX bisect was still a bit too messy at the time
377 424 # this changeset was introduced. Someone should fix
378 425 # the remaining bit and drop this line
379 426 'bisect.state',
380 427 }
381 428
382 429 def __init__(self, baseui, path, create=False):
383 430 self.requirements = set()
384 431 self.filtername = None
385 432 # wvfs: rooted at the repository root, used to access the working copy
386 433 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
387 434 # vfs: rooted at .hg, used to access repo files outside of .hg/store
388 435 self.vfs = None
389 436 # svfs: usually rooted at .hg/store, used to access repository history
390 437 # If this is a shared repository, this vfs may point to another
391 438 # repository's .hg/store directory.
392 439 self.svfs = None
393 440 self.root = self.wvfs.base
394 441 self.path = self.wvfs.join(".hg")
395 442 self.origroot = path
396 443 # This is only used by context.workingctx.match in order to
397 444 # detect files in subrepos.
398 445 self.auditor = pathutil.pathauditor(
399 446 self.root, callback=self._checknested)
400 447 # This is only used by context.basectx.match in order to detect
401 448 # files in subrepos.
402 449 self.nofsauditor = pathutil.pathauditor(
403 450 self.root, callback=self._checknested, realfs=False, cached=True)
404 451 self.baseui = baseui
405 452 self.ui = baseui.copy()
406 453 self.ui.copy = baseui.copy # prevent copying repo configuration
407 454 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
408 455 if (self.ui.configbool('devel', 'all-warnings') or
409 456 self.ui.configbool('devel', 'check-locks')):
410 457 self.vfs.audit = self._getvfsward(self.vfs.audit)
411 458 # A list of callbacks to shape the phase if no data were found.
412 459 # Callbacks are in the form: func(repo, roots) --> processed root.
413 460 # This list is to be filled by extensions during repo setup
414 461 self._phasedefaults = []
415 462 try:
416 463 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
417 464 self._loadextensions()
418 465 except IOError:
419 466 pass
420 467
421 468 if featuresetupfuncs:
422 469 self.supported = set(self._basesupported) # use private copy
423 470 extmods = set(m.__name__ for n, m
424 471 in extensions.extensions(self.ui))
425 472 for setupfunc in featuresetupfuncs:
426 473 if setupfunc.__module__ in extmods:
427 474 setupfunc(self.ui, self.supported)
428 475 else:
429 476 self.supported = self._basesupported
430 477 color.setup(self.ui)
431 478
432 479 # Add compression engines.
433 480 for name in util.compengines:
434 481 engine = util.compengines[name]
435 482 if engine.revlogheader():
436 483 self.supported.add('exp-compression-%s' % name)
437 484
438 485 if not self.vfs.isdir():
439 486 if create:
440 487 self.requirements = newreporequirements(self)
441 488
442 489 if not self.wvfs.exists():
443 490 self.wvfs.makedirs()
444 491 self.vfs.makedir(notindexed=True)
445 492
446 493 if 'store' in self.requirements:
447 494 self.vfs.mkdir("store")
448 495
449 496 # create an invalid changelog
450 497 self.vfs.append(
451 498 "00changelog.i",
452 499 '\0\0\0\2' # represents revlogv2
453 500 ' dummy changelog to prevent using the old repo layout'
454 501 )
455 502 else:
456 503 raise error.RepoError(_("repository %s not found") % path)
457 504 elif create:
458 505 raise error.RepoError(_("repository %s already exists") % path)
459 506 else:
460 507 try:
461 508 self.requirements = scmutil.readrequires(
462 509 self.vfs, self.supported)
463 510 except IOError as inst:
464 511 if inst.errno != errno.ENOENT:
465 512 raise
466 513
467 514 cachepath = self.vfs.join('cache')
468 515 self.sharedpath = self.path
469 516 try:
470 517 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
471 518 if 'relshared' in self.requirements:
472 519 sharedpath = self.vfs.join(sharedpath)
473 520 vfs = vfsmod.vfs(sharedpath, realpath=True)
474 521 cachepath = vfs.join('cache')
475 522 s = vfs.base
476 523 if not vfs.exists():
477 524 raise error.RepoError(
478 525 _('.hg/sharedpath points to nonexistent directory %s') % s)
479 526 self.sharedpath = s
480 527 except IOError as inst:
481 528 if inst.errno != errno.ENOENT:
482 529 raise
483 530
484 531 if 'exp-sparse' in self.requirements and not sparse.enabled:
485 532 raise error.RepoError(_('repository is using sparse feature but '
486 533 'sparse is not enabled; enable the '
487 534 '"sparse" extensions to access'))
488 535
489 536 self.store = store.store(
490 537 self.requirements, self.sharedpath,
491 538 lambda base: vfsmod.vfs(base, cacheaudited=True))
492 539 self.spath = self.store.path
493 540 self.svfs = self.store.vfs
494 541 self.sjoin = self.store.join
495 542 self.vfs.createmode = self.store.createmode
496 543 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
497 544 self.cachevfs.createmode = self.store.createmode
498 545 if (self.ui.configbool('devel', 'all-warnings') or
499 546 self.ui.configbool('devel', 'check-locks')):
500 547 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
501 548 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
502 549 else: # standard vfs
503 550 self.svfs.audit = self._getsvfsward(self.svfs.audit)
504 551 self._applyopenerreqs()
505 552 if create:
506 553 self._writerequirements()
507 554
508 555 self._dirstatevalidatewarned = False
509 556
510 557 self._branchcaches = {}
511 558 self._revbranchcache = None
512 559 self._filterpats = {}
513 560 self._datafilters = {}
514 561 self._transref = self._lockref = self._wlockref = None
515 562
516 563 # A cache for various files under .hg/ that tracks file changes,
517 564 # (used by the filecache decorator)
518 565 #
519 566 # Maps a property name to its util.filecacheentry
520 567 self._filecache = {}
521 568
522 569 # hold sets of revision to be filtered
523 570 # should be cleared when something might have changed the filter value:
524 571 # - new changesets,
525 572 # - phase change,
526 573 # - new obsolescence marker,
527 574 # - working directory parent change,
528 575 # - bookmark changes
529 576 self.filteredrevcache = {}
530 577
531 578 # post-dirstate-status hooks
532 579 self._postdsstatus = []
533 580
534 581 # generic mapping between names and nodes
535 582 self.names = namespaces.namespaces()
536 583
537 584 # Key to signature value.
538 585 self._sparsesignaturecache = {}
539 586 # Signature to cached matcher instance.
540 587 self._sparsematchercache = {}
541 588
542 589 def _getvfsward(self, origfunc):
543 590 """build a ward for self.vfs"""
544 591 rref = weakref.ref(self)
545 592 def checkvfs(path, mode=None):
546 593 ret = origfunc(path, mode=mode)
547 594 repo = rref()
548 595 if (repo is None
549 596 or not util.safehasattr(repo, '_wlockref')
550 597 or not util.safehasattr(repo, '_lockref')):
551 598 return
552 599 if mode in (None, 'r', 'rb'):
553 600 return
554 601 if path.startswith(repo.path):
555 602 # truncate name relative to the repository (.hg)
556 603 path = path[len(repo.path) + 1:]
557 604 if path.startswith('cache/'):
558 605 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
559 606 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
560 607 if path.startswith('journal.'):
561 608 # journal is covered by 'lock'
562 609 if repo._currentlock(repo._lockref) is None:
563 610 repo.ui.develwarn('write with no lock: "%s"' % path,
564 611 stacklevel=2, config='check-locks')
565 612 elif repo._currentlock(repo._wlockref) is None:
566 613 # rest of vfs files are covered by 'wlock'
567 614 #
568 615 # exclude special files
569 616 for prefix in self._wlockfreeprefix:
570 617 if path.startswith(prefix):
571 618 return
572 619 repo.ui.develwarn('write with no wlock: "%s"' % path,
573 620 stacklevel=2, config='check-locks')
574 621 return ret
575 622 return checkvfs
576 623
577 624 def _getsvfsward(self, origfunc):
578 625 """build a ward for self.svfs"""
579 626 rref = weakref.ref(self)
580 627 def checksvfs(path, mode=None):
581 628 ret = origfunc(path, mode=mode)
582 629 repo = rref()
583 630 if repo is None or not util.safehasattr(repo, '_lockref'):
584 631 return
585 632 if mode in (None, 'r', 'rb'):
586 633 return
587 634 if path.startswith(repo.sharedpath):
588 635 # truncate name relative to the repository (.hg)
589 636 path = path[len(repo.sharedpath) + 1:]
590 637 if repo._currentlock(repo._lockref) is None:
591 638 repo.ui.develwarn('write with no lock: "%s"' % path,
592 639 stacklevel=3)
593 640 return ret
594 641 return checksvfs
595 642
596 643 def close(self):
597 644 self._writecaches()
598 645
599 646 def _loadextensions(self):
600 647 extensions.loadall(self.ui)
601 648
602 649 def _writecaches(self):
603 650 if self._revbranchcache:
604 651 self._revbranchcache.write()
605 652
606 653 def _restrictcapabilities(self, caps):
607 654 if self.ui.configbool('experimental', 'bundle2-advertise'):
608 655 caps = set(caps)
609 656 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
610 657 role='client'))
611 658 caps.add('bundle2=' + urlreq.quote(capsblob))
612 659 return caps
613 660
614 661 def _applyopenerreqs(self):
615 662 self.svfs.options = dict((r, 1) for r in self.requirements
616 663 if r in self.openerreqs)
617 664 # experimental config: format.chunkcachesize
618 665 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
619 666 if chunkcachesize is not None:
620 667 self.svfs.options['chunkcachesize'] = chunkcachesize
621 668 # experimental config: format.maxchainlen
622 669 maxchainlen = self.ui.configint('format', 'maxchainlen')
623 670 if maxchainlen is not None:
624 671 self.svfs.options['maxchainlen'] = maxchainlen
625 672 # experimental config: format.manifestcachesize
626 673 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
627 674 if manifestcachesize is not None:
628 675 self.svfs.options['manifestcachesize'] = manifestcachesize
629 676 # experimental config: format.aggressivemergedeltas
630 677 aggressivemergedeltas = self.ui.configbool('format',
631 678 'aggressivemergedeltas')
632 679 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
633 680 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
634 681 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
635 682 if 0 <= chainspan:
636 683 self.svfs.options['maxdeltachainspan'] = chainspan
637 684 mmapindexthreshold = self.ui.configbytes('experimental',
638 685 'mmapindexthreshold')
639 686 if mmapindexthreshold is not None:
640 687 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
641 688 withsparseread = self.ui.configbool('experimental', 'sparse-read')
642 689 srdensitythres = float(self.ui.config('experimental',
643 690 'sparse-read.density-threshold'))
644 691 srmingapsize = self.ui.configbytes('experimental',
645 692 'sparse-read.min-gap-size')
646 693 self.svfs.options['with-sparse-read'] = withsparseread
647 694 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
648 695 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
649 696
650 697 for r in self.requirements:
651 698 if r.startswith('exp-compression-'):
652 699 self.svfs.options['compengine'] = r[len('exp-compression-'):]
653 700
654 701 # TODO move "revlogv2" to openerreqs once finalized.
655 702 if REVLOGV2_REQUIREMENT in self.requirements:
656 703 self.svfs.options['revlogv2'] = True
657 704
658 705 def _writerequirements(self):
659 706 scmutil.writerequires(self.vfs, self.requirements)
660 707
661 708 def _checknested(self, path):
662 709 """Determine if path is a legal nested repository."""
663 710 if not path.startswith(self.root):
664 711 return False
665 712 subpath = path[len(self.root) + 1:]
666 713 normsubpath = util.pconvert(subpath)
667 714
668 715 # XXX: Checking against the current working copy is wrong in
669 716 # the sense that it can reject things like
670 717 #
671 718 # $ hg cat -r 10 sub/x.txt
672 719 #
673 720 # if sub/ is no longer a subrepository in the working copy
674 721 # parent revision.
675 722 #
676 723 # However, it can of course also allow things that would have
677 724 # been rejected before, such as the above cat command if sub/
678 725 # is a subrepository now, but was a normal directory before.
679 726 # The old path auditor would have rejected by mistake since it
680 727 # panics when it sees sub/.hg/.
681 728 #
682 729 # All in all, checking against the working copy seems sensible
683 730 # since we want to prevent access to nested repositories on
684 731 # the filesystem *now*.
685 732 ctx = self[None]
686 733 parts = util.splitpath(subpath)
687 734 while parts:
688 735 prefix = '/'.join(parts)
689 736 if prefix in ctx.substate:
690 737 if prefix == normsubpath:
691 738 return True
692 739 else:
693 740 sub = ctx.sub(prefix)
694 741 return sub.checknested(subpath[len(prefix) + 1:])
695 742 else:
696 743 parts.pop()
697 744 return False
698 745
699 746 def peer(self):
700 747 return localpeer(self) # not cached to avoid reference cycle
701 748
702 749 def unfiltered(self):
703 750 """Return unfiltered version of the repository
704 751
705 752 Intended to be overwritten by filtered repo."""
706 753 return self
707 754
708 755 def filtered(self, name, visibilityexceptions=None):
709 756 """Return a filtered version of a repository"""
710 757 cls = repoview.newtype(self.unfiltered().__class__)
711 758 return cls(self, name, visibilityexceptions)
712 759
713 760 @repofilecache('bookmarks', 'bookmarks.current')
714 761 def _bookmarks(self):
715 762 return bookmarks.bmstore(self)
716 763
717 764 @property
718 765 def _activebookmark(self):
719 766 return self._bookmarks.active
720 767
721 768 # _phasesets depend on changelog. What we need is to call
722 769 # _phasecache.invalidate() if '00changelog.i' was changed, but it
723 770 # can't be easily expressed in the filecache mechanism.
724 771 @storecache('phaseroots', '00changelog.i')
725 772 def _phasecache(self):
726 773 return phases.phasecache(self, self._phasedefaults)
727 774
728 775 @storecache('obsstore')
729 776 def obsstore(self):
730 777 return obsolete.makestore(self.ui, self)
731 778
732 779 @storecache('00changelog.i')
733 780 def changelog(self):
734 781 return changelog.changelog(self.svfs,
735 782 trypending=txnutil.mayhavepending(self.root))
736 783
737 784 def _constructmanifest(self):
738 785 # This is a temporary function while we migrate from manifest to
739 786 # manifestlog. It allows bundlerepo and unionrepo to intercept the
740 787 # manifest creation.
741 788 return manifest.manifestrevlog(self.svfs)
742 789
743 790 @storecache('00manifest.i')
744 791 def manifestlog(self):
745 792 return manifest.manifestlog(self.svfs, self)
746 793
747 794 @repofilecache('dirstate')
748 795 def dirstate(self):
749 796 sparsematchfn = lambda: sparse.matcher(self)
750 797
751 798 return dirstate.dirstate(self.vfs, self.ui, self.root,
752 799 self._dirstatevalidate, sparsematchfn)
753 800
754 801 def _dirstatevalidate(self, node):
755 802 try:
756 803 self.changelog.rev(node)
757 804 return node
758 805 except error.LookupError:
759 806 if not self._dirstatevalidatewarned:
760 807 self._dirstatevalidatewarned = True
761 808 self.ui.warn(_("warning: ignoring unknown"
762 809 " working parent %s!\n") % short(node))
763 810 return nullid
764 811
765 812 @repofilecache(narrowspec.FILENAME)
766 813 def narrowpats(self):
767 814 """matcher patterns for this repository's narrowspec
768 815
769 816 A tuple of (includes, excludes).
770 817 """
771 818 source = self
772 819 if self.shared():
773 820 from . import hg
774 821 source = hg.sharedreposource(self)
775 822 return narrowspec.load(source)
776 823
777 824 @repofilecache(narrowspec.FILENAME)
778 825 def _narrowmatch(self):
779 826 if changegroup.NARROW_REQUIREMENT not in self.requirements:
780 827 return matchmod.always(self.root, '')
781 828 include, exclude = self.narrowpats
782 829 return narrowspec.match(self.root, include=include, exclude=exclude)
783 830
784 831 # TODO(martinvonz): make this property-like instead?
785 832 def narrowmatch(self):
786 833 return self._narrowmatch
787 834
788 835 def setnarrowpats(self, newincludes, newexcludes):
789 836 target = self
790 837 if self.shared():
791 838 from . import hg
792 839 target = hg.sharedreposource(self)
793 840 narrowspec.save(target, newincludes, newexcludes)
794 841 self.invalidate(clearfilecache=True)
795 842
796 843 def __getitem__(self, changeid):
797 844 if changeid is None:
798 845 return context.workingctx(self)
799 846 if isinstance(changeid, context.basectx):
800 847 return changeid
801 848 if isinstance(changeid, slice):
802 849 # wdirrev isn't contiguous so the slice shouldn't include it
803 850 return [context.changectx(self, i)
804 851 for i in xrange(*changeid.indices(len(self)))
805 852 if i not in self.changelog.filteredrevs]
806 853 try:
807 854 return context.changectx(self, changeid)
808 855 except error.WdirUnsupported:
809 856 return context.workingctx(self)
810 857
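For illustration, the kinds of lookup supported by __getitem__ above (the identifiers are examples only):

    wctx = repo[None]     # workingctx for the working directory
    ctx = repo['tip']     # changectx from a symbol, node or revision number
    last = repo[-3:]      # slice: list of changectx, filtered revs skipped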
811 858 def __contains__(self, changeid):
812 859 """True if the given changeid exists
813 860
814 861 error.LookupError is raised if an ambiguous node is specified.
815 862 """
816 863 try:
817 864 self[changeid]
818 865 return True
819 866 except (error.RepoLookupError, error.FilteredIndexError,
820 867 error.FilteredLookupError):
821 868 return False
822 869
823 870 def __nonzero__(self):
824 871 return True
825 872
826 873 __bool__ = __nonzero__
827 874
828 875 def __len__(self):
829 876 # no need to pay the cost of repoview.changelog
830 877 unfi = self.unfiltered()
831 878 return len(unfi.changelog)
832 879
833 880 def __iter__(self):
834 881 return iter(self.changelog)
835 882
836 883 def revs(self, expr, *args):
837 884 '''Find revisions matching a revset.
838 885
839 886 The revset is specified as a string ``expr`` that may contain
840 887 %-formatting to escape certain types. See ``revsetlang.formatspec``.
841 888
842 889 Revset aliases from the configuration are not expanded. To expand
843 890 user aliases, consider calling ``scmutil.revrange()`` or
844 891 ``repo.anyrevs([expr], user=True)``.
845 892
846 893 Returns a revset.abstractsmartset, which is a list-like interface
847 894 that contains integer revisions.
848 895 '''
849 896 expr = revsetlang.formatspec(expr, *args)
850 897 m = revset.match(None, expr)
851 898 return m(self)
852 899
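Two hedged examples of the %-formatting mentioned in the docstring (the expressions and values are illustrative):

    # merge commits on a named branch
    merges = repo.revs('merge() and branch(%s)', 'default')
    # descendants of a revision number, returned as an abstractsmartset
    descendants = repo.revs('%d::', 1000)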
853 900 def set(self, expr, *args):
854 901 '''Find revisions matching a revset and emit changectx instances.
855 902
856 903 This is a convenience wrapper around ``revs()`` that iterates the
857 904 result and is a generator of changectx instances.
858 905
859 906 Revset aliases from the configuration are not expanded. To expand
860 907 user aliases, consider calling ``scmutil.revrange()``.
861 908 '''
862 909 for r in self.revs(expr, *args):
863 910 yield self[r]
864 911
865 912 def anyrevs(self, specs, user=False, localalias=None):
866 913 '''Find revisions matching one of the given revsets.
867 914
868 915 Revset aliases from the configuration are not expanded by default. To
869 916 expand user aliases, specify ``user=True``. To provide some local
870 917 definitions overriding user aliases, set ``localalias`` to
871 918 ``{name: definitionstring}``.
872 919 '''
873 920 if user:
874 921 m = revset.matchany(self.ui, specs, repo=self,
875 922 localalias=localalias)
876 923 else:
877 924 m = revset.matchany(None, specs, localalias=localalias)
878 925 return m(self)
879 926
880 927 def url(self):
881 928 return 'file:' + self.root
882 929
883 930 def hook(self, name, throw=False, **args):
884 931 """Call a hook, passing this repo instance.
885 932
886 933 This is a convenience method to aid invoking hooks. Extensions likely
887 934 won't call this unless they have registered a custom hook or are
888 935 replacing code that is expected to call a hook.
889 936 """
890 937 return hook.hook(self.ui, self, name, throw, **args)
891 938
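For example, calling code might fire a hook like this; the hook name and argument are illustrative, and keyword arguments are exposed to shell hooks as HG_* environment variables:

    # abort the current operation if any configured preoutgoing hook fails
    repo.hook('preoutgoing', throw=True, source='pull')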
892 939 @filteredpropertycache
893 940 def _tagscache(self):
894 941 '''Returns a tagscache object that contains various tags related
895 942 caches.'''
896 943
897 944 # This simplifies its cache management by having one decorated
898 945 # function (this one) and the rest simply fetch things from it.
899 946 class tagscache(object):
900 947 def __init__(self):
901 948 # These two define the set of tags for this repository. tags
902 949 # maps tag name to node; tagtypes maps tag name to 'global' or
903 950 # 'local'. (Global tags are defined by .hgtags across all
904 951 # heads, and local tags are defined in .hg/localtags.)
905 952 # They constitute the in-memory cache of tags.
906 953 self.tags = self.tagtypes = None
907 954
908 955 self.nodetagscache = self.tagslist = None
909 956
910 957 cache = tagscache()
911 958 cache.tags, cache.tagtypes = self._findtags()
912 959
913 960 return cache
914 961
915 962 def tags(self):
916 963 '''return a mapping of tag to node'''
917 964 t = {}
918 965 if self.changelog.filteredrevs:
919 966 tags, tt = self._findtags()
920 967 else:
921 968 tags = self._tagscache.tags
922 969 for k, v in tags.iteritems():
923 970 try:
924 971 # ignore tags to unknown nodes
925 972 self.changelog.rev(v)
926 973 t[k] = v
927 974 except (error.LookupError, ValueError):
928 975 pass
929 976 return t
930 977
931 978 def _findtags(self):
932 979 '''Do the hard work of finding tags. Return a pair of dicts
933 980 (tags, tagtypes) where tags maps tag name to node, and tagtypes
934 981 maps tag name to a string like \'global\' or \'local\'.
935 982 Subclasses or extensions are free to add their own tags, but
936 983 should be aware that the returned dicts will be retained for the
937 984 duration of the localrepo object.'''
938 985
939 986 # XXX what tagtype should subclasses/extensions use? Currently
940 987 # mq and bookmarks add tags, but do not set the tagtype at all.
941 988 # Should each extension invent its own tag type? Should there
942 989 # be one tagtype for all such "virtual" tags? Or is the status
943 990 # quo fine?
944 991
945 992
946 993 # map tag name to (node, hist)
947 994 alltags = tagsmod.findglobaltags(self.ui, self)
948 995 # map tag name to tag type
949 996 tagtypes = dict((tag, 'global') for tag in alltags)
950 997
951 998 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
952 999
953 1000 # Build the return dicts. Have to re-encode tag names because
954 1001 # the tags module always uses UTF-8 (in order not to lose info
955 1002 # writing to the cache), but the rest of Mercurial wants them in
956 1003 # local encoding.
957 1004 tags = {}
958 1005 for (name, (node, hist)) in alltags.iteritems():
959 1006 if node != nullid:
960 1007 tags[encoding.tolocal(name)] = node
961 1008 tags['tip'] = self.changelog.tip()
962 1009 tagtypes = dict([(encoding.tolocal(name), value)
963 1010 for (name, value) in tagtypes.iteritems()])
964 1011 return (tags, tagtypes)
965 1012
966 1013 def tagtype(self, tagname):
967 1014 '''
968 1015 return the type of the given tag. result can be:
969 1016
970 1017 'local' : a local tag
971 1018 'global' : a global tag
972 1019 None : tag does not exist
973 1020 '''
974 1021
975 1022 return self._tagscache.tagtypes.get(tagname)
976 1023
977 1024 def tagslist(self):
978 1025 '''return a list of tags ordered by revision'''
979 1026 if not self._tagscache.tagslist:
980 1027 l = []
981 1028 for t, n in self.tags().iteritems():
982 1029 l.append((self.changelog.rev(n), t, n))
983 1030 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
984 1031
985 1032 return self._tagscache.tagslist
986 1033
987 1034 def nodetags(self, node):
988 1035 '''return the tags associated with a node'''
989 1036 if not self._tagscache.nodetagscache:
990 1037 nodetagscache = {}
991 1038 for t, n in self._tagscache.tags.iteritems():
992 1039 nodetagscache.setdefault(n, []).append(t)
993 1040 for tags in nodetagscache.itervalues():
994 1041 tags.sort()
995 1042 self._tagscache.nodetagscache = nodetagscache
996 1043 return self._tagscache.nodetagscache.get(node, [])
997 1044
998 1045 def nodebookmarks(self, node):
999 1046 """return the list of bookmarks pointing to the specified node"""
1000 1047 marks = []
1001 1048 for bookmark, n in self._bookmarks.iteritems():
1002 1049 if n == node:
1003 1050 marks.append(bookmark)
1004 1051 return sorted(marks)
1005 1052
1006 1053 def branchmap(self):
1007 1054 '''returns a dictionary {branch: [branchheads]} with branchheads
1008 1055 ordered by increasing revision number'''
1009 1056 branchmap.updatecache(self)
1010 1057 return self._branchcaches[self.filtername]
1011 1058
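A small sketch using the mapping described above; since heads are ordered by increasing revision number, the last entry is the highest-revision head of each branch:

    for branch, heads in sorted(repo.branchmap().iteritems()):
        newest = heads[-1]  # node of the newest head on this branch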
1012 1059 @unfilteredmethod
1013 1060 def revbranchcache(self):
1014 1061 if not self._revbranchcache:
1015 1062 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1016 1063 return self._revbranchcache
1017 1064
1018 1065 def branchtip(self, branch, ignoremissing=False):
1019 1066 '''return the tip node for a given branch
1020 1067
1021 1068 If ignoremissing is True, then this method will not raise an error.
1022 1069 This is helpful for callers that only expect None for a missing branch
1023 1070 (e.g. namespace).
1024 1071
1025 1072 '''
1026 1073 try:
1027 1074 return self.branchmap().branchtip(branch)
1028 1075 except KeyError:
1029 1076 if not ignoremissing:
1030 1077 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1031 1078 else:
1032 1079 pass
1033 1080
1034 1081 def lookup(self, key):
1035 1082 return scmutil.revsymbol(self, key).node()
1036 1083
1037 1084 def lookupbranch(self, key):
1038 1085 if key in self.branchmap():
1039 1086 return key
1040 1087
1041 1088 return scmutil.revsymbol(self, key).branch()
1042 1089
1043 1090 def known(self, nodes):
1044 1091 cl = self.changelog
1045 1092 nm = cl.nodemap
1046 1093 filtered = cl.filteredrevs
1047 1094 result = []
1048 1095 for n in nodes:
1049 1096 r = nm.get(n)
1050 1097 resp = not (r is None or r in filtered)
1051 1098 result.append(resp)
1052 1099 return result
1053 1100
1054 1101 def local(self):
1055 1102 return self
1056 1103
1057 1104 def publishing(self):
1058 1105 # it's safe (and desirable) to trust the publish flag unconditionally
1059 1106 # so that we don't finalize changes shared between users via ssh or nfs
1060 1107 return self.ui.configbool('phases', 'publish', untrusted=True)
1061 1108
1062 1109 def cancopy(self):
1063 1110 # so statichttprepo's override of local() works
1064 1111 if not self.local():
1065 1112 return False
1066 1113 if not self.publishing():
1067 1114 return True
1068 1115 # if publishing we can't copy if there is filtered content
1069 1116 return not self.filtered('visible').changelog.filteredrevs
1070 1117
1071 1118 def shared(self):
1072 1119 '''the type of shared repository (None if not shared)'''
1073 1120 if self.sharedpath != self.path:
1074 1121 return 'store'
1075 1122 return None
1076 1123
1077 1124 def wjoin(self, f, *insidef):
1078 1125 return self.vfs.reljoin(self.root, f, *insidef)
1079 1126
1080 1127 def file(self, f):
1081 1128 if f[0] == '/':
1082 1129 f = f[1:]
1083 1130 return filelog.filelog(self.svfs, f)
1084 1131
1085 1132 def setparents(self, p1, p2=nullid):
1086 1133 with self.dirstate.parentchange():
1087 1134 copies = self.dirstate.setparents(p1, p2)
1088 1135 pctx = self[p1]
1089 1136 if copies:
1090 1137 # Adjust copy records; the dirstate cannot do it, as it
1091 1138 # requires access to the parents' manifests. Preserve them
1092 1139 # only for entries added to the first parent.
1093 1140 for f in copies:
1094 1141 if f not in pctx and copies[f] in pctx:
1095 1142 self.dirstate.copy(copies[f], f)
1096 1143 if p2 == nullid:
1097 1144 for f, s in sorted(self.dirstate.copies().items()):
1098 1145 if f not in pctx and s not in pctx:
1099 1146 self.dirstate.copy(None, f)
1100 1147
1101 1148 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1102 1149 """changeid can be a changeset revision, node, or tag.
1103 1150 fileid can be a file revision or node."""
1104 1151 return context.filectx(self, path, changeid, fileid,
1105 1152 changectx=changectx)
1106 1153
1107 1154 def getcwd(self):
1108 1155 return self.dirstate.getcwd()
1109 1156
1110 1157 def pathto(self, f, cwd=None):
1111 1158 return self.dirstate.pathto(f, cwd)
1112 1159
1113 1160 def _loadfilter(self, filter):
1114 1161 if filter not in self._filterpats:
1115 1162 l = []
1116 1163 for pat, cmd in self.ui.configitems(filter):
1117 1164 if cmd == '!':
1118 1165 continue
1119 1166 mf = matchmod.match(self.root, '', [pat])
1120 1167 fn = None
1121 1168 params = cmd
1122 1169 for name, filterfn in self._datafilters.iteritems():
1123 1170 if cmd.startswith(name):
1124 1171 fn = filterfn
1125 1172 params = cmd[len(name):].lstrip()
1126 1173 break
1127 1174 if not fn:
1128 1175 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1129 1176 # Wrap old filters not supporting keyword arguments
1130 1177 if not pycompat.getargspec(fn)[2]:
1131 1178 oldfn = fn
1132 1179 fn = lambda s, c, **kwargs: oldfn(s, c)
1133 1180 l.append((mf, fn, params))
1134 1181 self._filterpats[filter] = l
1135 1182 return self._filterpats[filter]
1136 1183
1137 1184 def _filter(self, filterpats, filename, data):
1138 1185 for mf, fn, cmd in filterpats:
1139 1186 if mf(filename):
1140 1187 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1141 1188 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1142 1189 break
1143 1190
1144 1191 return data
1145 1192
1146 1193 @unfilteredpropertycache
1147 1194 def _encodefilterpats(self):
1148 1195 return self._loadfilter('encode')
1149 1196
1150 1197 @unfilteredpropertycache
1151 1198 def _decodefilterpats(self):
1152 1199 return self._loadfilter('decode')
1153 1200
1154 1201 def adddatafilter(self, name, filter):
1155 1202 self._datafilters[name] = filter
1156 1203
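A hedged sketch of registering a data filter through the method above; the filter name and transformation are hypothetical. The callable must accept extra keyword arguments because _filter() passes ui, repo and filename along:

    def crlftolf(s, cmd, **kwargs):
        # normalize line endings in data flowing through the filter
        return s.replace('\r\n', '\n')

    repo.adddatafilter('crlftolf:', crlftolf)

An [encode] or [decode] configuration command starting with 'crlftolf:' would then be routed to this callable by _loadfilter().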
1157 1204 def wread(self, filename):
1158 1205 if self.wvfs.islink(filename):
1159 1206 data = self.wvfs.readlink(filename)
1160 1207 else:
1161 1208 data = self.wvfs.read(filename)
1162 1209 return self._filter(self._encodefilterpats, filename, data)
1163 1210
1164 1211 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1165 1212 """write ``data`` into ``filename`` in the working directory
1166 1213
1167 1214 This returns the length of the written (maybe decoded) data.
1168 1215 """
1169 1216 data = self._filter(self._decodefilterpats, filename, data)
1170 1217 if 'l' in flags:
1171 1218 self.wvfs.symlink(data, filename)
1172 1219 else:
1173 1220 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1174 1221 **kwargs)
1175 1222 if 'x' in flags:
1176 1223 self.wvfs.setflags(filename, False, True)
1177 1224 else:
1178 1225 self.wvfs.setflags(filename, False, False)
1179 1226 return len(data)
1180 1227
1181 1228 def wwritedata(self, filename, data):
1182 1229 return self._filter(self._decodefilterpats, filename, data)
1183 1230
1184 1231 def currenttransaction(self):
1185 1232 """return the current transaction or None if non exists"""
1186 1233 if self._transref:
1187 1234 tr = self._transref()
1188 1235 else:
1189 1236 tr = None
1190 1237
1191 1238 if tr and tr.running():
1192 1239 return tr
1193 1240 return None
1194 1241
1195 1242 def transaction(self, desc, report=None):
1196 1243 if (self.ui.configbool('devel', 'all-warnings')
1197 1244 or self.ui.configbool('devel', 'check-locks')):
1198 1245 if self._currentlock(self._lockref) is None:
1199 1246 raise error.ProgrammingError('transaction requires locking')
1200 1247 tr = self.currenttransaction()
1201 1248 if tr is not None:
1202 1249 return tr.nest(name=desc)
1203 1250
1204 1251 # abort here if the journal already exists
1205 1252 if self.svfs.exists("journal"):
1206 1253 raise error.RepoError(
1207 1254 _("abandoned transaction found"),
1208 1255 hint=_("run 'hg recover' to clean up transaction"))
1209 1256
1210 1257 idbase = "%.40f#%f" % (random.random(), time.time())
1211 1258 ha = hex(hashlib.sha1(idbase).digest())
1212 1259 txnid = 'TXN:' + ha
1213 1260 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1214 1261
1215 1262 self._writejournal(desc)
1216 1263 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1217 1264 if report:
1218 1265 rp = report
1219 1266 else:
1220 1267 rp = self.ui.warn
1221 1268 vfsmap = {'plain': self.vfs} # root of .hg/
1222 1269 # we must avoid cyclic reference between repo and transaction.
1223 1270 reporef = weakref.ref(self)
1224 1271 # Code to track tag movement
1225 1272 #
1226 1273 # Since tags are all handled as file content, it is actually quite hard
1227 1274 # to track these movements from a code perspective. So we fall back to
1228 1275 # tracking at the repository level. One could envision tracking changes
1229 1276 # to the '.hgtags' file through changegroup application, but that fails to
1230 1277 # cope with cases where a transaction exposes new heads without a changegroup
1231 1278 # being involved (e.g. phase movement).
1232 1279 #
1233 1280 # For now, we gate the feature behind a flag since it likely comes
1234 1281 # with performance impacts. The current code runs more often than needed
1235 1282 # and does not use caches as much as it could. The current focus is on
1236 1283 # the behavior of the feature so we disable it by default. The flag
1237 1284 # will be removed when we are happy with the performance impact.
1238 1285 #
1239 1286 # Once this feature is no longer experimental move the following
1240 1287 # documentation to the appropriate help section:
1241 1288 #
1242 1289 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1243 1290 # tags (new or changed or deleted tags). In addition the details of
1244 1291 # these changes are made available in a file at:
1245 1292 # ``REPOROOT/.hg/changes/tags.changes``.
1246 1293 # Make sure you check for HG_TAG_MOVED before reading that file as it
1247 1294 # might exist from a previous transaction even if no tags were touched
1248 1295 # in this one. Changes are recorded in a line-based format::
1249 1296 #
1250 1297 # <action> <hex-node> <tag-name>\n
1251 1298 #
1252 1299 # Actions are defined as follows:
1253 1300 # "-R": tag is removed,
1254 1301 # "+A": tag is added,
1255 1302 # "-M": tag is moved (old value),
1256 1303 # "+M": tag is moved (new value),
1257 1304 tracktags = lambda x: None
1258 1305 # experimental config: experimental.hook-track-tags
1259 1306 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1260 1307 if desc != 'strip' and shouldtracktags:
1261 1308 oldheads = self.changelog.headrevs()
1262 1309 def tracktags(tr2):
1263 1310 repo = reporef()
1264 1311 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1265 1312 newheads = repo.changelog.headrevs()
1266 1313 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1267 1314 # notes: we compare lists here.
1268 1315 # As we do it only once, building a set would not be cheaper
1269 1316 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1270 1317 if changes:
1271 1318 tr2.hookargs['tag_moved'] = '1'
1272 1319 with repo.vfs('changes/tags.changes', 'w',
1273 1320 atomictemp=True) as changesfile:
1274 1321 # note: we do not register the file with the transaction
1275 1322 # because we need it to still exist when the transaction
1276 1323 # is closed (for txnclose hooks)
1277 1324 tagsmod.writediff(changesfile, changes)
1278 1325 def validate(tr2):
1279 1326 """will run pre-closing hooks"""
1280 1327 # XXX the transaction API is a bit lacking here so we take a hacky
1281 1328 # path for now
1282 1329 #
1283 1330 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1284 1331 # dict is copied before these run. In addition, we need the data
1285 1332 # available to in-memory hooks too.
1286 1333 #
1287 1334 # Moreover, we also need to make sure this runs before txnclose
1288 1335 # hooks and there is no "pending" mechanism that would execute
1289 1336 # logic only if hooks are about to run.
1290 1337 #
1291 1338 # Fixing this limitation of the transaction is also needed to track
1292 1339 # other families of changes (bookmarks, phases, obsolescence).
1293 1340 #
1294 1341 # This will have to be fixed before we remove the experimental
1295 1342 # gating.
1296 1343 tracktags(tr2)
1297 1344 repo = reporef()
1298 1345 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1299 1346 scmutil.enforcesinglehead(repo, tr2, desc)
1300 1347 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1301 1348 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1302 1349 args = tr.hookargs.copy()
1303 1350 args.update(bookmarks.preparehookargs(name, old, new))
1304 1351 repo.hook('pretxnclose-bookmark', throw=True,
1305 1352 txnname=desc,
1306 1353 **pycompat.strkwargs(args))
1307 1354 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1308 1355 cl = repo.unfiltered().changelog
1309 1356 for rev, (old, new) in tr.changes['phases'].items():
1310 1357 args = tr.hookargs.copy()
1311 1358 node = hex(cl.node(rev))
1312 1359 args.update(phases.preparehookargs(node, old, new))
1313 1360 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1314 1361 **pycompat.strkwargs(args))
1315 1362
1316 1363 repo.hook('pretxnclose', throw=True,
1317 1364 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1318 1365 def releasefn(tr, success):
1319 1366 repo = reporef()
1320 1367 if success:
1321 1368 # this should be explicitly invoked here, because
1322 1369 # in-memory changes aren't written out when closing the
1323 1370 # transaction, if tr.addfilegenerator (via
1324 1371 # dirstate.write or so) isn't invoked while the
1325 1372 # transaction is running
1326 1373 repo.dirstate.write(None)
1327 1374 else:
1328 1375 # discard all changes (including ones already written
1329 1376 # out) in this transaction
1330 1377 repo.dirstate.restorebackup(None, 'journal.dirstate')
1331 1378
1332 1379 repo.invalidate(clearfilecache=True)
1333 1380
1334 1381 tr = transaction.transaction(rp, self.svfs, vfsmap,
1335 1382 "journal",
1336 1383 "undo",
1337 1384 aftertrans(renames),
1338 1385 self.store.createmode,
1339 1386 validator=validate,
1340 1387 releasefn=releasefn,
1341 1388 checkambigfiles=_cachedfiles,
1342 1389 name=desc)
1343 1390 tr.changes['revs'] = xrange(0, 0)
1344 1391 tr.changes['obsmarkers'] = set()
1345 1392 tr.changes['phases'] = {}
1346 1393 tr.changes['bookmarks'] = {}
1347 1394
1348 1395 tr.hookargs['txnid'] = txnid
1349 1396 # note: writing the fncache only during finalize means that the file is
1350 1397 # outdated when running hooks. As fncache is used for streaming clone,
1351 1398 # this is not expected to break anything that happens during the hooks.
1352 1399 tr.addfinalize('flush-fncache', self.store.write)
1353 1400 def txnclosehook(tr2):
1354 1401 """To be run if transaction is successful, will schedule a hook run
1355 1402 """
1356 1403 # Don't reference tr2 in hook() so we don't hold a reference.
1357 1404 # This reduces memory consumption when there are multiple
1358 1405 # transactions per lock. This can likely go away if issue5045
1359 1406 # fixes the function accumulation.
1360 1407 hookargs = tr2.hookargs
1361 1408
1362 1409 def hookfunc():
1363 1410 repo = reporef()
1364 1411 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1365 1412 bmchanges = sorted(tr.changes['bookmarks'].items())
1366 1413 for name, (old, new) in bmchanges:
1367 1414 args = tr.hookargs.copy()
1368 1415 args.update(bookmarks.preparehookargs(name, old, new))
1369 1416 repo.hook('txnclose-bookmark', throw=False,
1370 1417 txnname=desc, **pycompat.strkwargs(args))
1371 1418
1372 1419 if hook.hashook(repo.ui, 'txnclose-phase'):
1373 1420 cl = repo.unfiltered().changelog
1374 1421 phasemv = sorted(tr.changes['phases'].items())
1375 1422 for rev, (old, new) in phasemv:
1376 1423 args = tr.hookargs.copy()
1377 1424 node = hex(cl.node(rev))
1378 1425 args.update(phases.preparehookargs(node, old, new))
1379 1426 repo.hook('txnclose-phase', throw=False, txnname=desc,
1380 1427 **pycompat.strkwargs(args))
1381 1428
1382 1429 repo.hook('txnclose', throw=False, txnname=desc,
1383 1430 **pycompat.strkwargs(hookargs))
1384 1431 reporef()._afterlock(hookfunc)
1385 1432 tr.addfinalize('txnclose-hook', txnclosehook)
1386 1433 # Include a leading "-" to make it happen before the transaction summary
1387 1434 # reports registered via scmutil.registersummarycallback() whose names
1388 1435 # are 00-txnreport etc. That way, the caches will be warm when the
1389 1436 # callbacks run.
1390 1437 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1391 1438 def txnaborthook(tr2):
1392 1439 """To be run if transaction is aborted
1393 1440 """
1394 1441 reporef().hook('txnabort', throw=False, txnname=desc,
1395 1442 **pycompat.strkwargs(tr2.hookargs))
1396 1443 tr.addabort('txnabort-hook', txnaborthook)
1397 1444 # avoid eager cache invalidation. in-memory data should be identical
1398 1445 # to stored data if transaction has no error.
1399 1446 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1400 1447 self._transref = weakref.ref(tr)
1401 1448 scmutil.registersummarycallback(self, tr, desc)
1402 1449 return tr
1403 1450
1404 1451 def _journalfiles(self):
1405 1452 return ((self.svfs, 'journal'),
1406 1453 (self.vfs, 'journal.dirstate'),
1407 1454 (self.vfs, 'journal.branch'),
1408 1455 (self.vfs, 'journal.desc'),
1409 1456 (self.vfs, 'journal.bookmarks'),
1410 1457 (self.svfs, 'journal.phaseroots'))
1411 1458
1412 1459 def undofiles(self):
1413 1460 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1414 1461
1415 1462 @unfilteredmethod
1416 1463 def _writejournal(self, desc):
1417 1464 self.dirstate.savebackup(None, 'journal.dirstate')
1418 1465 self.vfs.write("journal.branch",
1419 1466 encoding.fromlocal(self.dirstate.branch()))
1420 1467 self.vfs.write("journal.desc",
1421 1468 "%d\n%s\n" % (len(self), desc))
1422 1469 self.vfs.write("journal.bookmarks",
1423 1470 self.vfs.tryread("bookmarks"))
1424 1471 self.svfs.write("journal.phaseroots",
1425 1472 self.svfs.tryread("phaseroots"))
1426 1473
1427 1474 def recover(self):
1428 1475 with self.lock():
1429 1476 if self.svfs.exists("journal"):
1430 1477 self.ui.status(_("rolling back interrupted transaction\n"))
1431 1478 vfsmap = {'': self.svfs,
1432 1479 'plain': self.vfs,}
1433 1480 transaction.rollback(self.svfs, vfsmap, "journal",
1434 1481 self.ui.warn,
1435 1482 checkambigfiles=_cachedfiles)
1436 1483 self.invalidate()
1437 1484 return True
1438 1485 else:
1439 1486 self.ui.warn(_("no interrupted transaction available\n"))
1440 1487 return False
1441 1488
1442 1489 def rollback(self, dryrun=False, force=False):
1443 1490 wlock = lock = dsguard = None
1444 1491 try:
1445 1492 wlock = self.wlock()
1446 1493 lock = self.lock()
1447 1494 if self.svfs.exists("undo"):
1448 1495 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1449 1496
1450 1497 return self._rollback(dryrun, force, dsguard)
1451 1498 else:
1452 1499 self.ui.warn(_("no rollback information available\n"))
1453 1500 return 1
1454 1501 finally:
1455 1502 release(dsguard, lock, wlock)
1456 1503
1457 1504 @unfilteredmethod # Until we get smarter cache management
1458 1505 def _rollback(self, dryrun, force, dsguard):
1459 1506 ui = self.ui
1460 1507 try:
1461 1508 args = self.vfs.read('undo.desc').splitlines()
1462 1509 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1463 1510 if len(args) >= 3:
1464 1511 detail = args[2]
1465 1512 oldtip = oldlen - 1
1466 1513
1467 1514 if detail and ui.verbose:
1468 1515 msg = (_('repository tip rolled back to revision %d'
1469 1516 ' (undo %s: %s)\n')
1470 1517 % (oldtip, desc, detail))
1471 1518 else:
1472 1519 msg = (_('repository tip rolled back to revision %d'
1473 1520 ' (undo %s)\n')
1474 1521 % (oldtip, desc))
1475 1522 except IOError:
1476 1523 msg = _('rolling back unknown transaction\n')
1477 1524 desc = None
1478 1525
1479 1526 if not force and self['.'] != self['tip'] and desc == 'commit':
1480 1527 raise error.Abort(
1481 1528 _('rollback of last commit while not checked out '
1482 1529 'may lose data'), hint=_('use -f to force'))
1483 1530
1484 1531 ui.status(msg)
1485 1532 if dryrun:
1486 1533 return 0
1487 1534
1488 1535 parents = self.dirstate.parents()
1489 1536 self.destroying()
1490 1537 vfsmap = {'plain': self.vfs, '': self.svfs}
1491 1538 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1492 1539 checkambigfiles=_cachedfiles)
1493 1540 if self.vfs.exists('undo.bookmarks'):
1494 1541 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1495 1542 if self.svfs.exists('undo.phaseroots'):
1496 1543 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1497 1544 self.invalidate()
1498 1545
1499 1546 parentgone = (parents[0] not in self.changelog.nodemap or
1500 1547 parents[1] not in self.changelog.nodemap)
1501 1548 if parentgone:
1502 1549 # prevent dirstateguard from overwriting already restored one
1503 1550 dsguard.close()
1504 1551
1505 1552 self.dirstate.restorebackup(None, 'undo.dirstate')
1506 1553 try:
1507 1554 branch = self.vfs.read('undo.branch')
1508 1555 self.dirstate.setbranch(encoding.tolocal(branch))
1509 1556 except IOError:
1510 1557 ui.warn(_('named branch could not be reset: '
1511 1558 'current branch is still \'%s\'\n')
1512 1559 % self.dirstate.branch())
1513 1560
1514 1561 parents = tuple([p.rev() for p in self[None].parents()])
1515 1562 if len(parents) > 1:
1516 1563 ui.status(_('working directory now based on '
1517 1564 'revisions %d and %d\n') % parents)
1518 1565 else:
1519 1566 ui.status(_('working directory now based on '
1520 1567 'revision %d\n') % parents)
1521 1568 mergemod.mergestate.clean(self, self['.'].node())
1522 1569
1523 1570 # TODO: if we know which new heads may result from this rollback, pass
1524 1571 # them to destroy(), which will prevent the branchhead cache from being
1525 1572 # invalidated.
1526 1573 self.destroyed()
1527 1574 return 0
1528 1575
1529 1576 def _buildcacheupdater(self, newtransaction):
1530 1577 """called during transaction to build the callback updating cache
1531 1578
1532 1579         Lives on the repository to help extensions that might want to augment
1533 1580 this logic. For this purpose, the created transaction is passed to the
1534 1581 method.
1535 1582 """
1536 1583 # we must avoid cyclic reference between repo and transaction.
1537 1584 reporef = weakref.ref(self)
1538 1585 def updater(tr):
1539 1586 repo = reporef()
1540 1587 repo.updatecaches(tr)
1541 1588 return updater
1542 1589
1543 1590 @unfilteredmethod
1544 1591 def updatecaches(self, tr=None, full=False):
1545 1592 """warm appropriate caches
1546 1593
1547 1594         If this function is called after a transaction has closed, the transaction
1548 1595 will be available in the 'tr' argument. This can be used to selectively
1549 1596 update caches relevant to the changes in that transaction.
1550 1597
1551 1598 If 'full' is set, make sure all caches the function knows about have
1552 1599         up-to-date data, even the ones that are usually loaded more lazily.
1553 1600 """
1554 1601 if tr is not None and tr.hookargs.get('source') == 'strip':
1555 1602 # During strip, many caches are invalid but
1556 1603 # later call to `destroyed` will refresh them.
1557 1604 return
1558 1605
1559 1606 if tr is None or tr.changes['revs']:
1560 1607 # updating the unfiltered branchmap should refresh all the others,
1561 1608 self.ui.debug('updating the branch cache\n')
1562 1609 branchmap.updatecache(self.filtered('served'))
1563 1610
1564 1611 if full:
1565 1612 rbc = self.revbranchcache()
1566 1613 for r in self.changelog:
1567 1614 rbc.branchinfo(r)
1568 1615 rbc.write()
1569 1616
1570 1617 def invalidatecaches(self):
1571 1618
1572 1619 if '_tagscache' in vars(self):
1573 1620 # can't use delattr on proxy
1574 1621 del self.__dict__['_tagscache']
1575 1622
1576 1623 self.unfiltered()._branchcaches.clear()
1577 1624 self.invalidatevolatilesets()
1578 1625 self._sparsesignaturecache.clear()
1579 1626
1580 1627 def invalidatevolatilesets(self):
1581 1628 self.filteredrevcache.clear()
1582 1629 obsolete.clearobscaches(self)
1583 1630
1584 1631 def invalidatedirstate(self):
1585 1632 '''Invalidates the dirstate, causing the next call to dirstate
1586 1633 to check if it was modified since the last time it was read,
1587 1634 rereading it if it has.
1588 1635
1589 1636         This is different from dirstate.invalidate() in that it doesn't always
1590 1637         reread the dirstate. Use dirstate.invalidate() if you want to
1591 1638         explicitly read the dirstate again (i.e. restore it to a previously
1592 1639         known good state).'''
1593 1640 if hasunfilteredcache(self, 'dirstate'):
1594 1641 for k in self.dirstate._filecache:
1595 1642 try:
1596 1643 delattr(self.dirstate, k)
1597 1644 except AttributeError:
1598 1645 pass
1599 1646 delattr(self.unfiltered(), 'dirstate')
1600 1647
1601 1648 def invalidate(self, clearfilecache=False):
1602 1649 '''Invalidates both store and non-store parts other than dirstate
1603 1650
1604 1651 If a transaction is running, invalidation of store is omitted,
1605 1652 because discarding in-memory changes might cause inconsistency
1606 1653 (e.g. incomplete fncache causes unintentional failure, but
1607 1654 redundant one doesn't).
1608 1655 '''
1609 1656 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1610 1657 for k in list(self._filecache.keys()):
1611 1658 # dirstate is invalidated separately in invalidatedirstate()
1612 1659 if k == 'dirstate':
1613 1660 continue
1614 1661 if (k == 'changelog' and
1615 1662 self.currenttransaction() and
1616 1663 self.changelog._delayed):
1617 1664 # The changelog object may store unwritten revisions. We don't
1618 1665 # want to lose them.
1619 1666 # TODO: Solve the problem instead of working around it.
1620 1667 continue
1621 1668
1622 1669 if clearfilecache:
1623 1670 del self._filecache[k]
1624 1671 try:
1625 1672 delattr(unfiltered, k)
1626 1673 except AttributeError:
1627 1674 pass
1628 1675 self.invalidatecaches()
1629 1676 if not self.currenttransaction():
1630 1677 # TODO: Changing contents of store outside transaction
1631 1678 # causes inconsistency. We should make in-memory store
1632 1679 # changes detectable, and abort if changed.
1633 1680 self.store.invalidatecaches()
1634 1681
1635 1682 def invalidateall(self):
1636 1683 '''Fully invalidates both store and non-store parts, causing the
1637 1684 subsequent operation to reread any outside changes.'''
1638 1685         # extensions should hook this to invalidate their caches
1639 1686 self.invalidate()
1640 1687 self.invalidatedirstate()
1641 1688
1642 1689 @unfilteredmethod
1643 1690 def _refreshfilecachestats(self, tr):
1644 1691 """Reload stats of cached files so that they are flagged as valid"""
1645 1692 for k, ce in self._filecache.items():
1646 1693 k = pycompat.sysstr(k)
1647 1694 if k == r'dirstate' or k not in self.__dict__:
1648 1695 continue
1649 1696 ce.refresh()
1650 1697
1651 1698 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1652 1699 inheritchecker=None, parentenvvar=None):
1653 1700 parentlock = None
1654 1701 # the contents of parentenvvar are used by the underlying lock to
1655 1702 # determine whether it can be inherited
1656 1703 if parentenvvar is not None:
1657 1704 parentlock = encoding.environ.get(parentenvvar)
1658 1705
1659 1706 timeout = 0
1660 1707 warntimeout = 0
1661 1708 if wait:
1662 1709 timeout = self.ui.configint("ui", "timeout")
1663 1710 warntimeout = self.ui.configint("ui", "timeout.warn")
1664 1711
1665 1712 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1666 1713 releasefn=releasefn,
1667 1714 acquirefn=acquirefn, desc=desc,
1668 1715 inheritchecker=inheritchecker,
1669 1716 parentlock=parentlock)
1670 1717 return l
1671 1718
1672 1719 def _afterlock(self, callback):
1673 1720 """add a callback to be run when the repository is fully unlocked
1674 1721
1675 1722 The callback will be executed when the outermost lock is released
1676 1723 (with wlock being higher level than 'lock')."""
1677 1724 for ref in (self._wlockref, self._lockref):
1678 1725 l = ref and ref()
1679 1726 if l and l.held:
1680 1727 l.postrelease.append(callback)
1681 1728 break
1682 1729         else: # no lock has been found.
1683 1730 callback()
1684 1731
1685 1732 def lock(self, wait=True):
1686 1733 '''Lock the repository store (.hg/store) and return a weak reference
1687 1734 to the lock. Use this before modifying the store (e.g. committing or
1688 1735         stripping). If you are opening a transaction, get a lock as well.
1689 1736
1690 1737         If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1691 1738 'wlock' first to avoid a dead-lock hazard.'''
1692 1739 l = self._currentlock(self._lockref)
1693 1740 if l is not None:
1694 1741 l.lock()
1695 1742 return l
1696 1743
1697 1744 l = self._lock(self.svfs, "lock", wait, None,
1698 1745 self.invalidate, _('repository %s') % self.origroot)
1699 1746 self._lockref = weakref.ref(l)
1700 1747 return l
1701 1748
1702 1749 def _wlockchecktransaction(self):
1703 1750 if self.currenttransaction() is not None:
1704 1751 raise error.LockInheritanceContractViolation(
1705 1752 'wlock cannot be inherited in the middle of a transaction')
1706 1753
1707 1754 def wlock(self, wait=True):
1708 1755 '''Lock the non-store parts of the repository (everything under
1709 1756 .hg except .hg/store) and return a weak reference to the lock.
1710 1757
1711 1758 Use this before modifying files in .hg.
1712 1759
1713 1760         If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1714 1761 'wlock' first to avoid a dead-lock hazard.'''
1715 1762 l = self._wlockref and self._wlockref()
1716 1763 if l is not None and l.held:
1717 1764 l.lock()
1718 1765 return l
1719 1766
1720 1767 # We do not need to check for non-waiting lock acquisition. Such
1721 1768         # acquisition would not cause a dead-lock as it would just fail.
1722 1769 if wait and (self.ui.configbool('devel', 'all-warnings')
1723 1770 or self.ui.configbool('devel', 'check-locks')):
1724 1771 if self._currentlock(self._lockref) is not None:
1725 1772 self.ui.develwarn('"wlock" acquired after "lock"')
1726 1773
1727 1774 def unlock():
1728 1775 if self.dirstate.pendingparentchange():
1729 1776 self.dirstate.invalidate()
1730 1777 else:
1731 1778 self.dirstate.write(None)
1732 1779
1733 1780 self._filecache['dirstate'].refresh()
1734 1781
1735 1782 l = self._lock(self.vfs, "wlock", wait, unlock,
1736 1783 self.invalidatedirstate, _('working directory of %s') %
1737 1784 self.origroot,
1738 1785 inheritchecker=self._wlockchecktransaction,
1739 1786 parentenvvar='HG_WLOCK_LOCKER')
1740 1787 self._wlockref = weakref.ref(l)
1741 1788 return l
1742 1789
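    # Added illustration (not part of the original source): the lock-ordering
    # rule stated in the lock() and wlock() docstrings above means callers
    # needing both locks should take 'wlock' before 'lock', roughly:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         # ... modify .hg and .hg/store under both locks ...
    #     finally:
    #         release(lock, wlock)
    #
    # 'repo' stands for any localrepository instance; commit() below follows
    # the same pattern.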
1743 1790 def _currentlock(self, lockref):
1744 1791 """Returns the lock if it's held, or None if it's not."""
1745 1792 if lockref is None:
1746 1793 return None
1747 1794 l = lockref()
1748 1795 if l is None or not l.held:
1749 1796 return None
1750 1797 return l
1751 1798
1752 1799 def currentwlock(self):
1753 1800 """Returns the wlock if it's held, or None if it's not."""
1754 1801 return self._currentlock(self._wlockref)
1755 1802
1756 1803 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1757 1804 """
1758 1805 commit an individual file as part of a larger transaction
1759 1806 """
1760 1807
1761 1808 fname = fctx.path()
1762 1809 fparent1 = manifest1.get(fname, nullid)
1763 1810 fparent2 = manifest2.get(fname, nullid)
1764 1811 if isinstance(fctx, context.filectx):
1765 1812 node = fctx.filenode()
1766 1813 if node in [fparent1, fparent2]:
1767 1814 self.ui.debug('reusing %s filelog entry\n' % fname)
1768 1815 if manifest1.flags(fname) != fctx.flags():
1769 1816 changelist.append(fname)
1770 1817 return node
1771 1818
1772 1819 flog = self.file(fname)
1773 1820 meta = {}
1774 1821 copy = fctx.renamed()
1775 1822 if copy and copy[0] != fname:
1776 1823 # Mark the new revision of this file as a copy of another
1777 1824 # file. This copy data will effectively act as a parent
1778 1825 # of this new revision. If this is a merge, the first
1779 1826 # parent will be the nullid (meaning "look up the copy data")
1780 1827 # and the second one will be the other parent. For example:
1781 1828 #
1782 1829 # 0 --- 1 --- 3 rev1 changes file foo
1783 1830 # \ / rev2 renames foo to bar and changes it
1784 1831 # \- 2 -/ rev3 should have bar with all changes and
1785 1832 # should record that bar descends from
1786 1833 # bar in rev2 and foo in rev1
1787 1834 #
1788 1835 # this allows this merge to succeed:
1789 1836 #
1790 1837 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1791 1838 # \ / merging rev3 and rev4 should use bar@rev2
1792 1839 # \- 2 --- 4 as the merge base
1793 1840 #
1794 1841
1795 1842 cfname = copy[0]
1796 1843 crev = manifest1.get(cfname)
1797 1844 newfparent = fparent2
1798 1845
1799 1846 if manifest2: # branch merge
1800 1847 if fparent2 == nullid or crev is None: # copied on remote side
1801 1848 if cfname in manifest2:
1802 1849 crev = manifest2[cfname]
1803 1850 newfparent = fparent1
1804 1851
1805 1852 # Here, we used to search backwards through history to try to find
1806 1853 # where the file copy came from if the source of a copy was not in
1807 1854 # the parent directory. However, this doesn't actually make sense to
1808 1855 # do (what does a copy from something not in your working copy even
1809 1856 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1810 1857 # the user that copy information was dropped, so if they didn't
1811 1858 # expect this outcome it can be fixed, but this is the correct
1812 1859 # behavior in this circumstance.
1813 1860
1814 1861 if crev:
1815 1862 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1816 1863 meta["copy"] = cfname
1817 1864 meta["copyrev"] = hex(crev)
1818 1865 fparent1, fparent2 = nullid, newfparent
1819 1866 else:
1820 1867 self.ui.warn(_("warning: can't find ancestor for '%s' "
1821 1868 "copied from '%s'!\n") % (fname, cfname))
1822 1869
1823 1870 elif fparent1 == nullid:
1824 1871 fparent1, fparent2 = fparent2, nullid
1825 1872 elif fparent2 != nullid:
1826 1873 # is one parent an ancestor of the other?
1827 1874 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1828 1875 if fparent1 in fparentancestors:
1829 1876 fparent1, fparent2 = fparent2, nullid
1830 1877 elif fparent2 in fparentancestors:
1831 1878 fparent2 = nullid
1832 1879
1833 1880 # is the file changed?
1834 1881 text = fctx.data()
1835 1882 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1836 1883 changelist.append(fname)
1837 1884 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1838 1885 # are just the flags changed during merge?
1839 1886 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1840 1887 changelist.append(fname)
1841 1888
1842 1889 return fparent1
1843 1890
1844 1891 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1845 1892 """check for commit arguments that aren't committable"""
1846 1893 if match.isexact() or match.prefix():
1847 1894 matched = set(status.modified + status.added + status.removed)
1848 1895
1849 1896 for f in match.files():
1850 1897 f = self.dirstate.normalize(f)
1851 1898 if f == '.' or f in matched or f in wctx.substate:
1852 1899 continue
1853 1900 if f in status.deleted:
1854 1901 fail(f, _('file not found!'))
1855 1902 if f in vdirs: # visited directory
1856 1903 d = f + '/'
1857 1904 for mf in matched:
1858 1905 if mf.startswith(d):
1859 1906 break
1860 1907 else:
1861 1908 fail(f, _("no match under directory!"))
1862 1909 elif f not in self.dirstate:
1863 1910 fail(f, _("file not tracked!"))
1864 1911
1865 1912 @unfilteredmethod
1866 1913 def commit(self, text="", user=None, date=None, match=None, force=False,
1867 1914 editor=False, extra=None):
1868 1915 """Add a new revision to current repository.
1869 1916
1870 1917 Revision information is gathered from the working directory,
1871 1918 match can be used to filter the committed files. If editor is
1872 1919 supplied, it is called to get a commit message.
1873 1920 """
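        # Hedged usage sketch (added; not in the original source): committing
        # all pending working directory changes could look like
        #
        #     node = repo.commit(text=b'message', user=b'alice')
        #
        # where 'repo' is a hypothetical localrepository instance. The return
        # value is the node of the new changeset, or None when there is
        # nothing to commit (see the allowemptycommit handling below).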
1874 1921 if extra is None:
1875 1922 extra = {}
1876 1923
1877 1924 def fail(f, msg):
1878 1925 raise error.Abort('%s: %s' % (f, msg))
1879 1926
1880 1927 if not match:
1881 1928 match = matchmod.always(self.root, '')
1882 1929
1883 1930 if not force:
1884 1931 vdirs = []
1885 1932 match.explicitdir = vdirs.append
1886 1933 match.bad = fail
1887 1934
1888 1935 wlock = lock = tr = None
1889 1936 try:
1890 1937 wlock = self.wlock()
1891 1938 lock = self.lock() # for recent changelog (see issue4368)
1892 1939
1893 1940 wctx = self[None]
1894 1941 merge = len(wctx.parents()) > 1
1895 1942
1896 1943 if not force and merge and not match.always():
1897 1944 raise error.Abort(_('cannot partially commit a merge '
1898 1945 '(do not specify files or patterns)'))
1899 1946
1900 1947 status = self.status(match=match, clean=force)
1901 1948 if force:
1902 1949 status.modified.extend(status.clean) # mq may commit clean files
1903 1950
1904 1951 # check subrepos
1905 1952 subs, commitsubs, newstate = subrepoutil.precommit(
1906 1953 self.ui, wctx, status, match, force=force)
1907 1954
1908 1955 # make sure all explicit patterns are matched
1909 1956 if not force:
1910 1957 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1911 1958
1912 1959 cctx = context.workingcommitctx(self, status,
1913 1960 text, user, date, extra)
1914 1961
1915 1962 # internal config: ui.allowemptycommit
1916 1963 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1917 1964 or extra.get('close') or merge or cctx.files()
1918 1965 or self.ui.configbool('ui', 'allowemptycommit'))
1919 1966 if not allowemptycommit:
1920 1967 return None
1921 1968
1922 1969 if merge and cctx.deleted():
1923 1970 raise error.Abort(_("cannot commit merge with missing files"))
1924 1971
1925 1972 ms = mergemod.mergestate.read(self)
1926 1973 mergeutil.checkunresolved(ms)
1927 1974
1928 1975 if editor:
1929 1976 cctx._text = editor(self, cctx, subs)
1930 1977 edited = (text != cctx._text)
1931 1978
1932 1979 # Save commit message in case this transaction gets rolled back
1933 1980 # (e.g. by a pretxncommit hook). Leave the content alone on
1934 1981 # the assumption that the user will use the same editor again.
1935 1982 msgfn = self.savecommitmessage(cctx._text)
1936 1983
1937 1984 # commit subs and write new state
1938 1985 if subs:
1939 1986 for s in sorted(commitsubs):
1940 1987 sub = wctx.sub(s)
1941 1988 self.ui.status(_('committing subrepository %s\n') %
1942 1989 subrepoutil.subrelpath(sub))
1943 1990 sr = sub.commit(cctx._text, user, date)
1944 1991 newstate[s] = (newstate[s][0], sr)
1945 1992 subrepoutil.writestate(self, newstate)
1946 1993
1947 1994 p1, p2 = self.dirstate.parents()
1948 1995 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1949 1996 try:
1950 1997 self.hook("precommit", throw=True, parent1=hookp1,
1951 1998 parent2=hookp2)
1952 1999 tr = self.transaction('commit')
1953 2000 ret = self.commitctx(cctx, True)
1954 2001 except: # re-raises
1955 2002 if edited:
1956 2003 self.ui.write(
1957 2004 _('note: commit message saved in %s\n') % msgfn)
1958 2005 raise
1959 2006 # update bookmarks, dirstate and mergestate
1960 2007 bookmarks.update(self, [p1, p2], ret)
1961 2008 cctx.markcommitted(ret)
1962 2009 ms.reset()
1963 2010 tr.close()
1964 2011
1965 2012 finally:
1966 2013 lockmod.release(tr, lock, wlock)
1967 2014
1968 2015 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1969 2016             # hack for commands that use a temporary commit (e.g. histedit):
1970 2017             # the temporary commit may have been stripped before the hook runs
1971 2018 if self.changelog.hasnode(ret):
1972 2019 self.hook("commit", node=node, parent1=parent1,
1973 2020 parent2=parent2)
1974 2021 self._afterlock(commithook)
1975 2022 return ret
1976 2023
1977 2024 @unfilteredmethod
1978 2025 def commitctx(self, ctx, error=False):
1979 2026 """Add a new revision to current repository.
1980 2027 Revision information is passed via the context argument.
1981 2028 """
1982 2029
1983 2030 tr = None
1984 2031 p1, p2 = ctx.p1(), ctx.p2()
1985 2032 user = ctx.user()
1986 2033
1987 2034 lock = self.lock()
1988 2035 try:
1989 2036 tr = self.transaction("commit")
1990 2037 trp = weakref.proxy(tr)
1991 2038
1992 2039 if ctx.manifestnode():
1993 2040 # reuse an existing manifest revision
1994 2041 mn = ctx.manifestnode()
1995 2042 files = ctx.files()
1996 2043 elif ctx.files():
1997 2044 m1ctx = p1.manifestctx()
1998 2045 m2ctx = p2.manifestctx()
1999 2046 mctx = m1ctx.copy()
2000 2047
2001 2048 m = mctx.read()
2002 2049 m1 = m1ctx.read()
2003 2050 m2 = m2ctx.read()
2004 2051
2005 2052 # check in files
2006 2053 added = []
2007 2054 changed = []
2008 2055 removed = list(ctx.removed())
2009 2056 linkrev = len(self)
2010 2057 self.ui.note(_("committing files:\n"))
2011 2058 for f in sorted(ctx.modified() + ctx.added()):
2012 2059 self.ui.note(f + "\n")
2013 2060 try:
2014 2061 fctx = ctx[f]
2015 2062 if fctx is None:
2016 2063 removed.append(f)
2017 2064 else:
2018 2065 added.append(f)
2019 2066 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2020 2067 trp, changed)
2021 2068 m.setflag(f, fctx.flags())
2022 2069 except OSError as inst:
2023 2070 self.ui.warn(_("trouble committing %s!\n") % f)
2024 2071 raise
2025 2072 except IOError as inst:
2026 2073 errcode = getattr(inst, 'errno', errno.ENOENT)
2027 2074 if error or errcode and errcode != errno.ENOENT:
2028 2075 self.ui.warn(_("trouble committing %s!\n") % f)
2029 2076 raise
2030 2077
2031 2078 # update manifest
2032 2079 self.ui.note(_("committing manifest\n"))
2033 2080 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2034 2081 drop = [f for f in removed if f in m]
2035 2082 for f in drop:
2036 2083 del m[f]
2037 2084 mn = mctx.write(trp, linkrev,
2038 2085 p1.manifestnode(), p2.manifestnode(),
2039 2086 added, drop)
2040 2087 files = changed + removed
2041 2088 else:
2042 2089 mn = p1.manifestnode()
2043 2090 files = []
2044 2091
2045 2092 # update changelog
2046 2093 self.ui.note(_("committing changelog\n"))
2047 2094 self.changelog.delayupdate(tr)
2048 2095 n = self.changelog.add(mn, files, ctx.description(),
2049 2096 trp, p1.node(), p2.node(),
2050 2097 user, ctx.date(), ctx.extra().copy())
2051 2098 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2052 2099 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2053 2100 parent2=xp2)
2054 2101             # set the new commit to its proper phase
2055 2102 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2056 2103 if targetphase:
2057 2104                 # retracting the boundary does not alter parent changesets.
2058 2105                 # if a parent has a higher phase, the resulting phase will
2059 2106                 # be compliant anyway
2060 2107                 #
2061 2108                 # if the minimal phase was 0 we don't need to retract anything
2062 2109 phases.registernew(self, tr, targetphase, [n])
2063 2110 tr.close()
2064 2111 return n
2065 2112 finally:
2066 2113 if tr:
2067 2114 tr.release()
2068 2115 lock.release()
2069 2116
2070 2117 @unfilteredmethod
2071 2118 def destroying(self):
2072 2119 '''Inform the repository that nodes are about to be destroyed.
2073 2120 Intended for use by strip and rollback, so there's a common
2074 2121 place for anything that has to be done before destroying history.
2075 2122
2076 2123 This is mostly useful for saving state that is in memory and waiting
2077 2124 to be flushed when the current lock is released. Because a call to
2078 2125 destroyed is imminent, the repo will be invalidated causing those
2079 2126 changes to stay in memory (waiting for the next unlock), or vanish
2080 2127 completely.
2081 2128 '''
2082 2129 # When using the same lock to commit and strip, the phasecache is left
2083 2130 # dirty after committing. Then when we strip, the repo is invalidated,
2084 2131 # causing those changes to disappear.
2085 2132 if '_phasecache' in vars(self):
2086 2133 self._phasecache.write()
2087 2134
2088 2135 @unfilteredmethod
2089 2136 def destroyed(self):
2090 2137 '''Inform the repository that nodes have been destroyed.
2091 2138 Intended for use by strip and rollback, so there's a common
2092 2139 place for anything that has to be done after destroying history.
2093 2140 '''
2094 2141 # When one tries to:
2095 2142 # 1) destroy nodes thus calling this method (e.g. strip)
2096 2143 # 2) use phasecache somewhere (e.g. commit)
2097 2144 #
2098 2145 # then 2) will fail because the phasecache contains nodes that were
2099 2146 # removed. We can either remove phasecache from the filecache,
2100 2147 # causing it to reload next time it is accessed, or simply filter
2101 2148 # the removed nodes now and write the updated cache.
2102 2149 self._phasecache.filterunknown(self)
2103 2150 self._phasecache.write()
2104 2151
2105 2152 # refresh all repository caches
2106 2153 self.updatecaches()
2107 2154
2108 2155 # Ensure the persistent tag cache is updated. Doing it now
2109 2156 # means that the tag cache only has to worry about destroyed
2110 2157 # heads immediately after a strip/rollback. That in turn
2111 2158 # guarantees that "cachetip == currenttip" (comparing both rev
2112 2159 # and node) always means no nodes have been added or destroyed.
2113 2160
2114 2161 # XXX this is suboptimal when qrefresh'ing: we strip the current
2115 2162 # head, refresh the tag cache, then immediately add a new head.
2116 2163 # But I think doing it this way is necessary for the "instant
2117 2164 # tag cache retrieval" case to work.
2118 2165 self.invalidate()
2119 2166
2120 2167 def status(self, node1='.', node2=None, match=None,
2121 2168 ignored=False, clean=False, unknown=False,
2122 2169 listsubrepos=False):
2123 2170 '''a convenience method that calls node1.status(node2)'''
2124 2171 return self[node1].status(node2, match, ignored, clean, unknown,
2125 2172 listsubrepos)
2126 2173
2127 2174 def addpostdsstatus(self, ps):
2128 2175 """Add a callback to run within the wlock, at the point at which status
2129 2176 fixups happen.
2130 2177
2131 2178 On status completion, callback(wctx, status) will be called with the
2132 2179 wlock held, unless the dirstate has changed from underneath or the wlock
2133 2180 couldn't be grabbed.
2134 2181
2135 2182 Callbacks should not capture and use a cached copy of the dirstate --
2136 2183 it might change in the meanwhile. Instead, they should access the
2137 2184 dirstate via wctx.repo().dirstate.
2138 2185
2139 2186 This list is emptied out after each status run -- extensions should
2140 2187         make sure they add to this list each time dirstate.status is called.
2141 2188 Extensions should also make sure they don't call this for statuses
2142 2189 that don't involve the dirstate.
2143 2190 """
2144 2191
2145 2192 # The list is located here for uniqueness reasons -- it is actually
2146 2193 # managed by the workingctx, but that isn't unique per-repo.
2147 2194 self._postdsstatus.append(ps)
2148 2195
2149 2196 def postdsstatus(self):
2150 2197 """Used by workingctx to get the list of post-dirstate-status hooks."""
2151 2198 return self._postdsstatus
2152 2199
2153 2200 def clearpostdsstatus(self):
2154 2201 """Used by workingctx to clear post-dirstate-status hooks."""
2155 2202 del self._postdsstatus[:]
2156 2203
2157 2204 def heads(self, start=None):
2158 2205 if start is None:
2159 2206 cl = self.changelog
2160 2207 headrevs = reversed(cl.headrevs())
2161 2208 return [cl.node(rev) for rev in headrevs]
2162 2209
2163 2210 heads = self.changelog.heads(start)
2164 2211 # sort the output in rev descending order
2165 2212 return sorted(heads, key=self.changelog.rev, reverse=True)
2166 2213
2167 2214 def branchheads(self, branch=None, start=None, closed=False):
2168 2215 '''return a (possibly filtered) list of heads for the given branch
2169 2216
2170 2217 Heads are returned in topological order, from newest to oldest.
2171 2218 If branch is None, use the dirstate branch.
2172 2219 If start is not None, return only heads reachable from start.
2173 2220 If closed is True, return heads that are marked as closed as well.
2174 2221 '''
2175 2222 if branch is None:
2176 2223 branch = self[None].branch()
2177 2224 branches = self.branchmap()
2178 2225 if branch not in branches:
2179 2226 return []
2180 2227 # the cache returns heads ordered lowest to highest
2181 2228 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2182 2229 if start is not None:
2183 2230 # filter out the heads that cannot be reached from startrev
2184 2231 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2185 2232 bheads = [h for h in bheads if h in fbheads]
2186 2233 return bheads
2187 2234
2188 2235 def branches(self, nodes):
2189 2236 if not nodes:
2190 2237 nodes = [self.changelog.tip()]
2191 2238 b = []
2192 2239 for n in nodes:
2193 2240 t = n
2194 2241 while True:
2195 2242 p = self.changelog.parents(n)
2196 2243 if p[1] != nullid or p[0] == nullid:
2197 2244 b.append((t, n, p[0], p[1]))
2198 2245 break
2199 2246 n = p[0]
2200 2247 return b
2201 2248
2202 2249 def between(self, pairs):
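        # Descriptive note (added; not in the original source): for each
        # (top, bottom) pair this walks first parents from 'top' towards
        # 'bottom', recording the nodes at exponentially growing distances
        # 1, 2, 4, 8, ... from 'top', and stops at 'bottom' or the null
        # revision. This sampling backs the legacy 'between' wire protocol
        # command (see ipeerlegacycommands.between later in this changeset).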
2203 2250 r = []
2204 2251
2205 2252 for top, bottom in pairs:
2206 2253 n, l, i = top, [], 0
2207 2254 f = 1
2208 2255
2209 2256 while n != bottom and n != nullid:
2210 2257 p = self.changelog.parents(n)[0]
2211 2258 if i == f:
2212 2259 l.append(n)
2213 2260 f = f * 2
2214 2261 n = p
2215 2262 i += 1
2216 2263
2217 2264 r.append(l)
2218 2265
2219 2266 return r
2220 2267
2221 2268 def checkpush(self, pushop):
2222 2269 """Extensions can override this function if additional checks have
2223 2270 to be performed before pushing, or call it if they override push
2224 2271 command.
2225 2272 """
2226 2273
2227 2274 @unfilteredpropertycache
2228 2275 def prepushoutgoinghooks(self):
2229 2276         """Return util.hooks consisting of hooks that take a pushop (with repo,
2230 2277         remote and outgoing attributes) and are called before pushing changesets.
2231 2278 """
2232 2279 return util.hooks()
2233 2280
2234 2281 def pushkey(self, namespace, key, old, new):
2235 2282 try:
2236 2283 tr = self.currenttransaction()
2237 2284 hookargs = {}
2238 2285 if tr is not None:
2239 2286 hookargs.update(tr.hookargs)
2240 2287 hookargs = pycompat.strkwargs(hookargs)
2241 2288 hookargs[r'namespace'] = namespace
2242 2289 hookargs[r'key'] = key
2243 2290 hookargs[r'old'] = old
2244 2291 hookargs[r'new'] = new
2245 2292 self.hook('prepushkey', throw=True, **hookargs)
2246 2293 except error.HookAbort as exc:
2247 2294 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2248 2295 if exc.hint:
2249 2296 self.ui.write_err(_("(%s)\n") % exc.hint)
2250 2297 return False
2251 2298 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2252 2299 ret = pushkey.push(self, namespace, key, old, new)
2253 2300 def runhook():
2254 2301 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2255 2302 ret=ret)
2256 2303 self._afterlock(runhook)
2257 2304 return ret
2258 2305
2259 2306 def listkeys(self, namespace):
2260 2307 self.hook('prelistkeys', throw=True, namespace=namespace)
2261 2308 self.ui.debug('listing keys for "%s"\n' % namespace)
2262 2309 values = pushkey.list(self, namespace)
2263 2310 self.hook('listkeys', namespace=namespace, values=values)
2264 2311 return values
2265 2312
2266 2313 def debugwireargs(self, one, two, three=None, four=None, five=None):
2267 2314 '''used to test argument passing over the wire'''
2268 2315 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2269 2316 pycompat.bytestr(four),
2270 2317 pycompat.bytestr(five))
2271 2318
2272 2319 def savecommitmessage(self, text):
2273 2320 fp = self.vfs('last-message.txt', 'wb')
2274 2321 try:
2275 2322 fp.write(text)
2276 2323 finally:
2277 2324 fp.close()
2278 2325 return self.pathto(fp.name[len(self.root) + 1:])
2279 2326
2280 2327 # used to avoid circular references so destructors work
2281 2328 def aftertrans(files):
2282 2329 renamefiles = [tuple(t) for t in files]
2283 2330 def a():
2284 2331 for vfs, src, dest in renamefiles:
2285 2332             # if src and dest refer to the same file, vfs.rename is a no-op,
2286 2333             # leaving both src and dest on disk. delete dest to make sure
2287 2334             # the rename cannot be such a no-op.
2288 2335 vfs.tryunlink(dest)
2289 2336 try:
2290 2337 vfs.rename(src, dest)
2291 2338 except OSError: # journal file does not yet exist
2292 2339 pass
2293 2340 return a
2294 2341
2295 2342 def undoname(fn):
2296 2343 base, name = os.path.split(fn)
2297 2344 assert name.startswith('journal')
2298 2345 return os.path.join(base, name.replace('journal', 'undo', 1))
2299 2346
2300 2347 def instance(ui, path, create):
2301 2348 return localrepository(ui, util.urllocalpath(path), create)
2302 2349
2303 2350 def islocal(path):
2304 2351 return True
2305 2352
2306 2353 def newreporequirements(repo):
2307 2354 """Determine the set of requirements for a new local repository.
2308 2355
2309 2356 Extensions can wrap this function to specify custom requirements for
2310 2357 new repositories.
2311 2358 """
2312 2359 ui = repo.ui
2313 2360 requirements = {'revlogv1'}
2314 2361 if ui.configbool('format', 'usestore'):
2315 2362 requirements.add('store')
2316 2363 if ui.configbool('format', 'usefncache'):
2317 2364 requirements.add('fncache')
2318 2365 if ui.configbool('format', 'dotencode'):
2319 2366 requirements.add('dotencode')
2320 2367
2321 2368 compengine = ui.config('experimental', 'format.compression')
2322 2369 if compengine not in util.compengines:
2323 2370 raise error.Abort(_('compression engine %s defined by '
2324 2371 'experimental.format.compression not available') %
2325 2372 compengine,
2326 2373 hint=_('run "hg debuginstall" to list available '
2327 2374 'compression engines'))
2328 2375
2329 2376 # zlib is the historical default and doesn't need an explicit requirement.
2330 2377 if compengine != 'zlib':
2331 2378 requirements.add('exp-compression-%s' % compengine)
2332 2379
2333 2380 if scmutil.gdinitconfig(ui):
2334 2381 requirements.add('generaldelta')
2335 2382 if ui.configbool('experimental', 'treemanifest'):
2336 2383 requirements.add('treemanifest')
2337 2384
2338 2385 revlogv2 = ui.config('experimental', 'revlogv2')
2339 2386 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2340 2387 requirements.remove('revlogv1')
2341 2388 # generaldelta is implied by revlogv2.
2342 2389 requirements.discard('generaldelta')
2343 2390 requirements.add(REVLOGV2_REQUIREMENT)
2344 2391
2345 2392 return requirements
@@ -1,1018 +1,1019 b''
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .thirdparty.zope import (
12 12 interface as zi,
13 13 )
14 14 from . import (
15 15 error,
16 16 )
17 17
18 18 class ipeerconnection(zi.Interface):
19 19 """Represents a "connection" to a repository.
20 20
21 21 This is the base interface for representing a connection to a repository.
22 22 It holds basic properties and methods applicable to all peer types.
23 23
24 24 This is not a complete interface definition and should not be used
25 25 outside of this module.
26 26 """
27 27 ui = zi.Attribute("""ui.ui instance""")
28 28
29 29 def url():
30 30 """Returns a URL string representing this peer.
31 31
32 32 Currently, implementations expose the raw URL used to construct the
33 33 instance. It may contain credentials as part of the URL. The
34 34 expectations of the value aren't well-defined and this could lead to
35 35 data leakage.
36 36
37 37 TODO audit/clean consumers and more clearly define the contents of this
38 38 value.
39 39 """
40 40
41 41 def local():
42 42 """Returns a local repository instance.
43 43
44 44 If the peer represents a local repository, returns an object that
45 45 can be used to interface with it. Otherwise returns ``None``.
46 46 """
47 47
48 48 def peer():
49 49 """Returns an object conforming to this interface.
50 50
51 51 Most implementations will ``return self``.
52 52 """
53 53
54 54 def canpush():
55 55 """Returns a boolean indicating if this peer can be pushed to."""
56 56
57 57 def close():
58 58 """Close the connection to this peer.
59 59
60 60 This is called when the peer will no longer be used. Resources
61 61 associated with the peer should be cleaned up.
62 62 """
63 63
64 64 class ipeercapabilities(zi.Interface):
65 65 """Peer sub-interface related to capabilities."""
66 66
67 67 def capable(name):
68 68 """Determine support for a named capability.
69 69
70 70 Returns ``False`` if capability not supported.
71 71
72 72 Returns ``True`` if boolean capability is supported. Returns a string
73 73 if capability support is non-boolean.
74 74
75 75 Capability strings may or may not map to wire protocol capabilities.
76 76 """
77 77
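    # Hedged example (added; not in the original source), matching the
    # concrete implementation in the ``peer`` base class later in this file:
    #
    #     peer.capable(b'getbundle')   # -> True for a boolean capability
    #     peer.capable(b'bundle2')     # -> the text after 'bundle2=' in the
    #                                  #    advertised capability string
    #     peer.capable(b'unknown-cap') # -> False
    #
    # The capability names shown are only examples.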
78 78 def requirecap(name, purpose):
79 79 """Require a capability to be present.
80 80
81 81 Raises a ``CapabilityError`` if the capability isn't present.
82 82 """
83 83
84 84 class ipeercommands(zi.Interface):
85 85 """Client-side interface for communicating over the wire protocol.
86 86
87 87 This interface is used as a gateway to the Mercurial wire protocol.
88 88     Methods commonly call wire protocol commands of the same name.
89 89 """
90 90
91 91 def branchmap():
92 92 """Obtain heads in named branches.
93 93
94 94 Returns a dict mapping branch name to an iterable of nodes that are
95 95 heads on that branch.
96 96 """
97 97
98 98 def capabilities():
99 99 """Obtain capabilities of the peer.
100 100
101 101 Returns a set of string capabilities.
102 102 """
103 103
104 104 def debugwireargs(one, two, three=None, four=None, five=None):
105 105 """Used to facilitate debugging of arguments passed over the wire."""
106 106
107 107 def getbundle(source, **kwargs):
108 108 """Obtain remote repository data as a bundle.
109 109
110 110 This command is how the bulk of repository data is transferred from
111 111         the peer to the local repository.
112 112
113 113 Returns a generator of bundle data.
114 114 """
115 115
116 116 def heads():
117 117 """Determine all known head revisions in the peer.
118 118
119 119 Returns an iterable of binary nodes.
120 120 """
121 121
122 122 def known(nodes):
123 123 """Determine whether multiple nodes are known.
124 124
125 125 Accepts an iterable of nodes whose presence to check for.
126 126
127 127         Returns an iterable of booleans indicating whether the corresponding node
128 128 at that index is known to the peer.
129 129 """
130 130
131 131 def listkeys(namespace):
132 132 """Obtain all keys in a pushkey namespace.
133 133
134 134 Returns an iterable of key names.
135 135 """
136 136
137 137 def lookup(key):
138 138 """Resolve a value to a known revision.
139 139
140 140 Returns a binary node of the resolved revision on success.
141 141 """
142 142
143 143 def pushkey(namespace, key, old, new):
144 144 """Set a value using the ``pushkey`` protocol.
145 145
146 146 Arguments correspond to the pushkey namespace and key to operate on and
147 147 the old and new values for that key.
148 148
149 149 Returns a string with the peer result. The value inside varies by the
150 150 namespace.
151 151 """
152 152
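    # Hedged example (added; not in the original source): setting a bookmark
    # through the pushkey protocol might look like
    #
    #     result = peer.pushkey(b'bookmarks', b'mybookmark', b'', newhex)
    #
    # where 'newhex' is a hypothetical hex node and 'peer' any object
    # providing this interface. As noted above, the meaning of the returned
    # value depends on the namespace.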
153 153 def stream_out():
154 154 """Obtain streaming clone data.
155 155
156 156 Successful result should be a generator of data chunks.
157 157 """
158 158
159 159 def unbundle(bundle, heads, url):
160 160 """Transfer repository data to the peer.
161 161
162 162 This is how the bulk of data during a push is transferred.
163 163
164 164 Returns the integer number of heads added to the peer.
165 165 """
166 166
167 167 class ipeerlegacycommands(zi.Interface):
168 168 """Interface for implementing support for legacy wire protocol commands.
169 169
170 170 Wire protocol commands transition to legacy status when they are no longer
171 171 used by modern clients. To facilitate identifying which commands are
172 172 legacy, the interfaces are split.
173 173 """
174 174
175 175 def between(pairs):
176 176 """Obtain nodes between pairs of nodes.
177 177
178 178 ``pairs`` is an iterable of node pairs.
179 179
180 180 Returns an iterable of iterables of nodes corresponding to each
181 181 requested pair.
182 182 """
183 183
184 184 def branches(nodes):
185 185 """Obtain ancestor changesets of specific nodes back to a branch point.
186 186
187 187 For each requested node, the peer finds the first ancestor node that is
188 188 a DAG root or is a merge.
189 189
190 190 Returns an iterable of iterables with the resolved values for each node.
191 191 """
192 192
193 193 def changegroup(nodes, kind):
194 194 """Obtain a changegroup with data for descendants of specified nodes."""
195 195
196 196 def changegroupsubset(bases, heads, kind):
197 197 pass
198 198
199 199 class ipeercommandexecutor(zi.Interface):
200 200 """Represents a mechanism to execute remote commands.
201 201
202 202 This is the primary interface for requesting that wire protocol commands
203 203 be executed. Instances of this interface are active in a context manager
204 204 and have a well-defined lifetime. When the context manager exits, all
205 205 outstanding requests are waited on.
206 206 """
207 207
208 208 def callcommand(name, args):
209 209 """Request that a named command be executed.
210 210
211 211 Receives the command name and a dictionary of command arguments.
212 212
213 213 Returns a ``concurrent.futures.Future`` that will resolve to the
214 214 result of that command request. That exact value is left up to
215 215 the implementation and possibly varies by command.
216 216
217 217 Not all commands can coexist with other commands in an executor
218 218 instance: it depends on the underlying wire protocol transport being
219 219 used and the command itself.
220 220
221 221 Implementations MAY call ``sendcommands()`` automatically if the
222 222         requested command cannot coexist with other commands in this executor.
223 223
224 224 Implementations MAY call ``sendcommands()`` automatically when the
225 225 future's ``result()`` is called. So, consumers using multiple
226 226 commands with an executor MUST ensure that ``result()`` is not called
227 227 until all command requests have been issued.
228 228 """
229 229
230 230 def sendcommands():
231 231 """Trigger submission of queued command requests.
232 232
233 233 Not all transports submit commands as soon as they are requested to
234 234 run. When called, this method forces queued command requests to be
235 235 issued. It will no-op if all commands have already been sent.
236 236
237 237 When called, no more new commands may be issued with this executor.
238 238 """
239 239
240 240 def close():
241 241 """Signal that this command request is finished.
242 242
243 243 When called, no more new commands may be issued. All outstanding
244 244 commands that have previously been issued are waited on before
245 245 returning. This not only includes waiting for the futures to resolve,
246 246 but also waiting for all response data to arrive. In other words,
247 247 calling this waits for all on-wire state for issued command requests
248 248 to finish.
249 249
250 250 When used as a context manager, this method is called when exiting the
251 251 context manager.
252 252
253 253 This method may call ``sendcommands()`` if there are buffered commands.
254 254 """
255 255
256 256 class ipeerrequests(zi.Interface):
257 257 """Interface for executing commands on a peer."""
258 258
259 259 def commandexecutor():
260 260 """A context manager that resolves to an ipeercommandexecutor.
261 261
262 262 The object this resolves to can be used to issue command requests
263 263 to the peer.
264 264
265 265 Callers should call its ``callcommand`` method to issue command
266 266 requests.
267 267
268 268 A new executor should be obtained for each distinct set of commands
269 269 (possibly just a single command) that the consumer wants to execute
270 270 as part of a single operation or round trip. This is because some
271 271 peers are half-duplex and/or don't support persistent connections.
272 272 e.g. in the case of HTTP peers, commands sent to an executor represent
273 273 a single HTTP request. While some peers may support multiple command
274 274 sends over the wire per executor, consumers need to code to the least
275 275 capable peer. So it should be assumed that command executors buffer
276 276 called commands until they are told to send them and that each
277 277 command executor could result in a new connection or wire-level request
278 278 being issued.
279 279 """
280 280
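# Hedged usage sketch (added; not in the original source): the calling
# convention for commandexecutor() described above is roughly
#
#     with peer.commandexecutor() as executor:
#         future = executor.callcommand(b'heads', {})
#         # further callcommand() requests may be queued here
#     heads = future.result()
#
# where 'peer' is any object providing ipeerrequests and b'heads' is just an
# example command name. Exiting the context manager sends any buffered
# commands and waits for their responses, per ipeercommandexecutor.close().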
281 class ipeerbase(ipeerconnection, ipeercapabilities, ipeercommands):
281 class ipeerbase(ipeerconnection, ipeercapabilities, ipeercommands,
282 ipeerrequests):
282 283 """Unified interface for peer repositories.
283 284
284 285 All peer instances must conform to this interface.
285 286 """
286 287 def iterbatch():
287 288 """Obtain an object to be used for multiple method calls.
288 289
289 290 Various operations call several methods on peer instances. If each
290 291 method call were performed immediately and serially, this would
291 292 require round trips to remote peers and/or would slow down execution.
292 293
293 294 Some peers have the ability to "batch" method calls to avoid costly
294 295 round trips or to facilitate concurrent execution.
295 296
296 297 This method returns an object that can be used to indicate intent to
297 298 perform batched method calls.
298 299
299 300 The returned object is a proxy of this peer. It intercepts calls to
300 301 batchable methods and queues them instead of performing them
301 302 immediately. This proxy object has a ``submit`` method that will
302 303 perform all queued batchable method calls. A ``results()`` method
303 304 exposes the results of queued/batched method calls. It is a generator
304 305 of results in the order they were called.
305 306
306 307 Not all peers or wire protocol implementations may actually batch method
307 308 calls. However, they must all support this API.
308 309 """
309 310
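# Hedged usage sketch (added; not in the original source): iterbatch() as
# described above is typically driven along these lines, with 'peer' being
# any ipeerbase implementation and heads()/known() example batchable calls:
#
#     batch = peer.iterbatch()
#     batch.heads()
#     batch.known([somenode])
#     batch.submit()
#     heads, known = batch.results()
#
# results() yields one value per queued call, in call order, as stated in
# the docstring above; 'somenode' is a hypothetical binary node.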
310 311 class ipeerbaselegacycommands(ipeerbase, ipeerlegacycommands):
311 312 """Unified peer interface that supports legacy commands."""
312 313
313 314 @zi.implementer(ipeerbase)
314 315 class peer(object):
315 316 """Base class for peer repositories."""
316 317
317 318 def capable(self, name):
318 319 caps = self.capabilities()
319 320 if name in caps:
320 321 return True
321 322
322 323 name = '%s=' % name
323 324 for cap in caps:
324 325 if cap.startswith(name):
325 326 return cap[len(name):]
326 327
327 328 return False
328 329
329 330 def requirecap(self, name, purpose):
330 331 if self.capable(name):
331 332 return
332 333
333 334 raise error.CapabilityError(
334 335 _('cannot %s; remote repository does not support the %r '
335 336 'capability') % (purpose, name))
336 337
337 338 @zi.implementer(ipeerbaselegacycommands)
338 339 class legacypeer(peer):
339 340 """peer but with support for legacy wire protocol commands."""
340 341
341 342 class ifilerevisionssequence(zi.Interface):
342 343 """Contains index data for all revisions of a file.
343 344
344 345 Types implementing this behave like lists of tuples. The index
345 346 in the list corresponds to the revision number. The values contain
346 347 index metadata.
347 348
348 349 The *null* revision (revision number -1) is always the last item
349 350 in the index.
350 351 """
351 352
352 353 def __len__():
353 354 """The total number of revisions."""
354 355
355 356 def __getitem__(rev):
356 357 """Returns the object having a specific revision number.
357 358
358 359 Returns an 8-tuple with the following fields:
359 360
360 361 offset+flags
361 362 Contains the offset and flags for the revision. 64-bit unsigned
362 363 integer where first 6 bytes are the offset and the next 2 bytes
363 364 are flags. The offset can be 0 if it is not used by the store.
364 365 compressed size
365 366 Size of the revision data in the store. It can be 0 if it isn't
366 367 needed by the store.
367 368 uncompressed size
368 369 Fulltext size. It can be 0 if it isn't needed by the store.
369 370 base revision
370 371 Revision number of revision the delta for storage is encoded
371 372 against. -1 indicates not encoded against a base revision.
372 373 link revision
373 374 Revision number of changelog revision this entry is related to.
374 375 p1 revision
375 376 Revision number of 1st parent. -1 if no 1st parent.
376 377 p2 revision
377 378             Revision number of 2nd parent. -1 if no 2nd parent.
378 379 node
379 380 Binary node value for this revision number.
380 381
381 382 Negative values should index off the end of the sequence. ``-1``
382 383 should return the null revision. ``-2`` should return the most
383 384 recent revision.
384 385 """
385 386
386 387 def __contains__(rev):
387 388 """Whether a revision number exists."""
388 389
389 390 def insert(self, i, entry):
390 391 """Add an item to the index at specific revision."""
391 392
392 393 class ifileindex(zi.Interface):
393 394 """Storage interface for index data of a single file.
394 395
395 396 File storage data is divided into index metadata and data storage.
396 397 This interface defines the index portion of the interface.
397 398
398 399 The index logically consists of:
399 400
400 401 * A mapping between revision numbers and nodes.
401 402 * DAG data (storing and querying the relationship between nodes).
402 403 * Metadata to facilitate storage.
403 404 """
404 405 index = zi.Attribute(
405 406 """An ``ifilerevisionssequence`` instance.""")
406 407
407 408 def __len__():
408 409 """Obtain the number of revisions stored for this file."""
409 410
410 411 def __iter__():
411 412 """Iterate over revision numbers for this file."""
412 413
413 414 def revs(start=0, stop=None):
414 415 """Iterate over revision numbers for this file, with control."""
415 416
416 417 def parents(node):
417 418 """Returns a 2-tuple of parent nodes for a revision.
418 419
419 420 Values will be ``nullid`` if the parent is empty.
420 421 """
421 422
422 423 def parentrevs(rev):
423 424 """Like parents() but operates on revision numbers."""
424 425
425 426 def rev(node):
426 427 """Obtain the revision number given a node.
427 428
428 429 Raises ``error.LookupError`` if the node is not known.
429 430 """
430 431
431 432 def node(rev):
432 433 """Obtain the node value given a revision number.
433 434
434 435 Raises ``IndexError`` if the node is not known.
435 436 """
436 437
437 438 def lookup(node):
438 439 """Attempt to resolve a value to a node.
439 440
440 441 Value can be a binary node, hex node, revision number, or a string
441 442 that can be converted to an integer.
442 443
443 444 Raises ``error.LookupError`` if a node could not be resolved.
444 445 """
445 446
446 447 def linkrev(rev):
447 448 """Obtain the changeset revision number a revision is linked to."""
448 449
449 450 def flags(rev):
450 451 """Obtain flags used to affect storage of a revision."""
451 452
452 453 def iscensored(rev):
453 454 """Return whether a revision's content has been censored."""
454 455
455 456 def commonancestorsheads(node1, node2):
456 457 """Obtain an iterable of nodes containing heads of common ancestors.
457 458
458 459 See ``ancestor.commonancestorsheads()``.
459 460 """
460 461
461 462 def descendants(revs):
462 463 """Obtain descendant revision numbers for a set of revision numbers.
463 464
464 465 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
465 466 """
466 467
467 468 def headrevs():
468 469 """Obtain a list of revision numbers that are DAG heads.
469 470
470 471 The list is sorted oldest to newest.
471 472
472 473 TODO determine if sorting is required.
473 474 """
474 475
475 476 def heads(start=None, stop=None):
476 477 """Obtain a list of nodes that are DAG heads, with control.
477 478
478 479 The set of revisions examined can be limited by specifying
479 480 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
480 481 iterable of nodes. DAG traversal starts at earlier revision
481 482 ``start`` and iterates forward until any node in ``stop`` is
482 483 encountered.
483 484 """
484 485
485 486 def children(node):
486 487 """Obtain nodes that are children of a node.
487 488
488 489 Returns a list of nodes.
489 490 """
490 491
491 492 def deltaparent(rev):
492 493         """Return the revision that is a suitable parent to delta against."""
493 494
494 495 def candelta(baserev, rev):
495 496         """Whether a delta can be generated between two revisions."""
496 497
497 498 class ifiledata(zi.Interface):
498 499 """Storage interface for data storage of a specific file.
499 500
500 501 This complements ``ifileindex`` and provides an interface for accessing
501 502 data for a tracked file.
502 503 """
503 504 def rawsize(rev):
504 505 """The size of the fulltext data for a revision as stored."""
505 506
506 507 def size(rev):
507 508 """Obtain the fulltext size of file data.
508 509
509 510 Any metadata is excluded from size measurements. Use ``rawsize()`` if
510 511 metadata size is important.
511 512 """
512 513
513 514 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
514 515 """Validate the stored hash of a given fulltext and node.
515 516
516 517         Raises ``error.RevlogError`` if hash validation fails.
517 518 """
518 519
519 520 def revision(node, raw=False):
520 521         """Obtain fulltext data for a node.
521 522
522 523 By default, any storage transformations are applied before the data
523 524 is returned. If ``raw`` is True, non-raw storage transformations
524 525 are not applied.
525 526
526 527 The fulltext data may contain a header containing metadata. Most
527 528 consumers should use ``read()`` to obtain the actual file data.
528 529 """
529 530
530 531 def read(node):
531 532 """Resolve file fulltext data.
532 533
533 534 This is similar to ``revision()`` except any metadata in the data
534 535 headers is stripped.
535 536 """
536 537
537 538 def renamed(node):
538 539 """Obtain copy metadata for a node.
539 540
540 541 Returns ``False`` if no copy metadata is stored or a 2-tuple of
541 542 (path, node) from which this revision was copied.
542 543 """
543 544
544 545 def cmp(node, fulltext):
545 546 """Compare fulltext to another revision.
546 547
547 548 Returns True if the fulltext is different from what is stored.
548 549
549 550 This takes copy metadata into account.
550 551
551 552 TODO better document the copy metadata and censoring logic.
552 553 """
553 554
554 555 def revdiff(rev1, rev2):
555 556 """Obtain a delta between two revision numbers.
556 557
557 558 Operates on raw data in the store (``revision(node, raw=True)``).
558 559
559 560 The returned data is the result of ``bdiff.bdiff`` on the raw
560 561 revision data.
561 562 """
562 563
563 564 class ifilemutation(zi.Interface):
564 565 """Storage interface for mutation events of a tracked file."""
565 566
566 567 def add(filedata, meta, transaction, linkrev, p1, p2):
567 568 """Add a new revision to the store.
568 569
569 570 Takes file data, dictionary of metadata, a transaction, linkrev,
570 571 and parent nodes.
571 572
572 573 Returns the node that was added.
573 574
574 575 May no-op if a revision matching the supplied data is already stored.
575 576 """
576 577
577 578 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
578 579 flags=0, cachedelta=None):
579 580 """Add a new revision to the store.
580 581
581 582 This is similar to ``add()`` except it operates at a lower level.
582 583
583 584 The data passed in already contains a metadata header, if any.
584 585
585 586 ``node`` and ``flags`` can be used to define the expected node and
586 587 the flags to use with storage.
587 588
588 589 ``add()`` is usually called when adding files from e.g. the working
589 590 directory. ``addrevision()`` is often called by ``add()`` and for
590 591 scenarios where revision data has already been computed, such as when
591 592 applying raw data from a peer repo.
592 593 """
593 594
594 595 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
595 596 """Process a series of deltas for storage.
596 597
597 598 ``deltas`` is an iterable of 7-tuples of
598 599 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
599 600 to add.
600 601
601 602 The ``delta`` field contains ``mpatch`` data to apply to a base
602 603 revision, identified by ``deltabase``. The base node can be
603 604 ``nullid``, in which case the header from the delta can be ignored
604 605 and the delta used as the fulltext.
605 606
606 607 ``addrevisioncb`` should be called for each node as it is committed.
607 608
608 609 Returns a list of nodes that were processed. A node will be in the list
609 610 even if it existed in the store previously.
610 611 """
611 612
612 613 def getstrippoint(minlink):
613 614 """Find the minimum revision that must be stripped to strip a linkrev.
614 615
615 616 Returns a 2-tuple containing the minimum revision number and a set
616 617 of all revision numbers that would be broken by this strip.
617 618
618 619 TODO this is highly revlog centric and should be abstracted into
619 620 a higher-level deletion API. ``repair.strip()`` relies on this.
620 621 """
621 622
622 623 def strip(minlink, transaction):
623 624 """Remove storage of items starting at a linkrev.
624 625
625 626 This uses ``getstrippoint()`` to determine the first node to remove.
626 627 Then it effectively truncates storage for all revisions after that.
627 628
628 629 TODO this is highly revlog centric and should be abstracted into a
629 630 higher-level deletion API.
630 631 """
631 632
632 633 class ifilestorage(ifileindex, ifiledata, ifilemutation):
633 634 """Complete storage interface for a single tracked file."""
634 635
635 636 version = zi.Attribute(
636 637 """Version number of storage.
637 638
638 639 TODO this feels revlog centric and could likely be removed.
639 640 """)
640 641
641 642 storedeltachains = zi.Attribute(
642 643 """Whether the store stores deltas.
643 644
644 645 TODO deltachains are revlog centric. This can probably be removed
645 646 once there are better abstractions for obtaining/writing
646 647 data.
647 648 """)
648 649
649 650 _generaldelta = zi.Attribute(
650 651 """Whether deltas can be against any parent revision.
651 652
652 653 TODO this is used by changegroup code and it could probably be
653 654 folded into another API.
654 655 """)
655 656
656 657 def files():
657 658 """Obtain paths that are backing storage for this file.
658 659
659 660 TODO this is used heavily by verify code and there should probably
660 661 be a better API for that.
661 662 """
662 663
663 664 def checksize():
664 665 """Obtain the expected sizes of backing files.
665 666
666 667 TODO this is used by verify and it should not be part of the interface.
667 668 """
668 669
669 670 class completelocalrepository(zi.Interface):
670 671 """Monolithic interface for local repositories.
671 672
672 673 This currently captures the reality of things - not how things should be.
673 674 """
674 675
675 676 supportedformats = zi.Attribute(
676 677 """Set of requirements that apply to stream clone.
677 678
678 679 This is actually a class attribute and is shared among all instances.
679 680 """)
680 681
681 682 openerreqs = zi.Attribute(
682 683 """Set of requirements that are passed to the opener.
683 684
684 685 This is actually a class attribute and is shared among all instances.
685 686 """)
686 687
687 688 supported = zi.Attribute(
688 689 """Set of requirements that this repo is capable of opening.""")
689 690
690 691 requirements = zi.Attribute(
691 692 """Set of requirements this repo uses.""")
692 693
693 694 filtername = zi.Attribute(
694 695 """Name of the repoview that is active on this repo.""")
695 696
696 697 wvfs = zi.Attribute(
697 698 """VFS used to access the working directory.""")
698 699
699 700 vfs = zi.Attribute(
700 701 """VFS rooted at the .hg directory.
701 702
702 703 Used to access repository data not in the store.
703 704 """)
704 705
705 706 svfs = zi.Attribute(
706 707 """VFS rooted at the store.
707 708
708 709 Used to access repository data in the store. Typically .hg/store.
709 710 But can point elsewhere if the store is shared.
710 711 """)
711 712
712 713 root = zi.Attribute(
713 714 """Path to the root of the working directory.""")
714 715
715 716 path = zi.Attribute(
716 717 """Path to the .hg directory.""")
717 718
718 719 origroot = zi.Attribute(
719 720 """The filesystem path that was used to construct the repo.""")
720 721
721 722 auditor = zi.Attribute(
722 723 """A pathauditor for the working directory.
723 724
724 725 This checks if a path refers to a nested repository.
725 726
726 727 Operates on the filesystem.
727 728 """)
728 729
729 730 nofsauditor = zi.Attribute(
730 731 """A pathauditor for the working directory.
731 732
732 733 This is like ``auditor`` except it doesn't do filesystem checks.
733 734 """)
734 735
735 736 baseui = zi.Attribute(
736 737 """Original ui instance passed into constructor.""")
737 738
738 739 ui = zi.Attribute(
739 740 """Main ui instance for this instance.""")
740 741
741 742 sharedpath = zi.Attribute(
742 743 """Path to the .hg directory of the repo this repo was shared from.""")
743 744
744 745 store = zi.Attribute(
745 746 """A store instance.""")
746 747
747 748 spath = zi.Attribute(
748 749 """Path to the store.""")
749 750
750 751 sjoin = zi.Attribute(
751 752 """Alias to self.store.join.""")
752 753
753 754 cachevfs = zi.Attribute(
754 755 """A VFS used to access the cache directory.
755 756
756 757 Typically .hg/cache.
757 758 """)
758 759
759 760 filteredrevcache = zi.Attribute(
760 761 """Holds sets of revisions to be filtered.""")
761 762
762 763 names = zi.Attribute(
763 764 """A ``namespaces`` instance.""")
764 765
765 766 def close():
766 767 """Close the handle on this repository."""
767 768
768 769 def peer():
769 770 """Obtain an object conforming to the ``peer`` interface."""
770 771
771 772 def unfiltered():
772 773 """Obtain an unfiltered/raw view of this repo."""
773 774
774 775 def filtered(name, visibilityexceptions=None):
775 776 """Obtain a named view of this repository."""
776 777
777 778 obsstore = zi.Attribute(
778 779 """A store of obsolescence data.""")
779 780
780 781 changelog = zi.Attribute(
781 782 """A handle on the changelog revlog.""")
782 783
783 784 manifestlog = zi.Attribute(
784 785 """A handle on the root manifest revlog.""")
785 786
786 787 dirstate = zi.Attribute(
787 788 """Working directory state.""")
788 789
789 790 narrowpats = zi.Attribute(
790 791 """Matcher patterns for this repository's narrowspec.""")
791 792
792 793 def narrowmatch():
793 794 """Obtain a matcher for the narrowspec."""
794 795
795 796 def setnarrowpats(newincludes, newexcludes):
796 797 """Define the narrowspec for this repository."""
797 798
798 799 def __getitem__(changeid):
799 800 """Try to resolve a changectx."""
800 801
801 802 def __contains__(changeid):
802 803 """Whether a changeset exists."""
803 804
804 805 def __nonzero__():
805 806 """Always returns True."""
806 807 return True
807 808
808 809 __bool__ = __nonzero__
809 810
810 811 def __len__():
811 812 """Returns the number of changesets in the repo."""
812 813
813 814 def __iter__():
814 815 """Iterate over revisions in the changelog."""
815 816
816 817 def revs(expr, *args):
817 818 """Evaluate a revset.
818 819
819 820 Emits revisions.
820 821 """
821 822
822 823 def set(expr, *args):
823 824 """Evaluate a revset.
824 825
825 826 Emits changectx instances.
826 827 """
827 828
828 829 def anyrevs(specs, user=False, localalias=None):
829 830 """Find revisions matching one of the given revsets."""
830 831
831 832 def url():
832 833 """Returns a string representing the location of this repo."""
833 834
834 835 def hook(name, throw=False, **args):
835 836 """Call a hook."""
836 837
837 838 def tags():
838 839 """Return a mapping of tag to node."""
839 840
840 841 def tagtype(tagname):
841 842 """Return the type of a given tag."""
842 843
843 844 def tagslist():
844 845 """Return a list of tags ordered by revision."""
845 846
846 847 def nodetags(node):
847 848 """Return the tags associated with a node."""
848 849
849 850 def nodebookmarks(node):
850 851 """Return the list of bookmarks pointing to the specified node."""
851 852
852 853 def branchmap():
853 854 """Return a mapping of branch to heads in that branch."""
854 855
855 856 def revbranchcache():
856 857 pass
857 858
858 859 def branchtip(branchtip, ignoremissing=False):
859 860 """Return the tip node for a given branch."""
860 861
861 862 def lookup(key):
862 863 """Resolve the node for a revision."""
863 864
864 865 def lookupbranch(key):
865 866 """Look up the branch name of the given revision or branch name."""
866 867
867 868 def known(nodes):
868 869 """Determine whether a series of nodes is known.
869 870
870 871 Returns a list of bools.
871 872 """
872 873
873 874 def local():
874 875 """Whether the repository is local."""
875 876 return True
876 877
877 878 def publishing():
878 879 """Whether the repository is a publishing repository."""
879 880
880 881 def cancopy():
881 882 pass
882 883
883 884 def shared():
884 885 """The type of shared repository or None."""
885 886
886 887 def wjoin(f, *insidef):
887 888 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
888 889
889 890 def file(f):
890 891 """Obtain a filelog for a tracked path."""
891 892
892 893 def setparents(p1, p2):
893 894 """Set the parent nodes of the working directory."""
894 895
895 896 def filectx(path, changeid=None, fileid=None):
896 897 """Obtain a filectx for the given file revision."""
897 898
898 899 def getcwd():
899 900 """Obtain the current working directory from the dirstate."""
900 901
901 902 def pathto(f, cwd=None):
902 903 """Obtain the relative path to a file."""
903 904
904 905 def adddatafilter(name, fltr):
905 906 pass
906 907
907 908 def wread(filename):
908 909 """Read a file from wvfs, using data filters."""
909 910
910 911 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
911 912 """Write data to a file in the wvfs, using data filters."""
912 913
913 914 def wwritedata(filename, data):
914 915 """Resolve data for writing to the wvfs, using data filters."""
915 916
916 917 def currenttransaction():
917 918 """Obtain the current transaction instance or None."""
918 919
919 920 def transaction(desc, report=None):
920 921 """Open a new transaction to write to the repository."""
921 922
922 923 def undofiles():
923 924 """Returns a list of (vfs, path) for files to undo transactions."""
924 925
925 926 def recover():
926 927 """Roll back an interrupted transaction."""
927 928
928 929 def rollback(dryrun=False, force=False):
929 930 """Undo the last transaction.
930 931
931 932 DANGEROUS.
932 933 """
933 934
934 935 def updatecaches(tr=None, full=False):
935 936 """Warm repo caches."""
936 937
937 938 def invalidatecaches():
938 939 """Invalidate cached data due to the repository mutating."""
939 940
940 941 def invalidatevolatilesets():
941 942 pass
942 943
943 944 def invalidatedirstate():
944 945 """Invalidate the dirstate."""
945 946
946 947 def invalidate(clearfilecache=False):
947 948 pass
948 949
949 950 def invalidateall():
950 951 pass
951 952
952 953 def lock(wait=True):
953 954 """Lock the repository store and return a lock instance."""
954 955
955 956 def wlock(wait=True):
956 957 """Lock the non-store parts of the repository."""
957 958
958 959 def currentwlock():
959 960 """Return the wlock if it's held or None."""
960 961
961 962 def checkcommitpatterns(wctx, vdirs, match, status, fail):
962 963 pass
963 964
964 965 def commit(text='', user=None, date=None, match=None, force=False,
965 966 editor=False, extra=None):
966 967 """Add a new revision to the repository."""
967 968
968 969 def commitctx(ctx, error=False):
969 970 """Commit a commitctx instance to the repository."""
970 971
971 972 def destroying():
972 973 """Inform the repository that nodes are about to be destroyed."""
973 974
974 975 def destroyed():
975 976 """Inform the repository that nodes have been destroyed."""
976 977
977 978 def status(node1='.', node2=None, match=None, ignored=False,
978 979 clean=False, unknown=False, listsubrepos=False):
979 980 """Convenience method to call repo[x].status()."""
980 981
981 982 def addpostdsstatus(ps):
982 983 pass
983 984
984 985 def postdsstatus():
985 986 pass
986 987
987 988 def clearpostdsstatus():
988 989 pass
989 990
990 991 def heads(start=None):
991 992 """Obtain list of nodes that are DAG heads."""
992 993
993 994 def branchheads(branch=None, start=None, closed=False):
994 995 pass
995 996
996 997 def branches(nodes):
997 998 pass
998 999
999 1000 def between(pairs):
1000 1001 pass
1001 1002
1002 1003 def checkpush(pushop):
1003 1004 pass
1004 1005
1005 1006 prepushoutgoinghooks = zi.Attribute(
1006 1007 """util.hooks instance.""")
1007 1008
1008 1009 def pushkey(namespace, key, old, new):
1009 1010 pass
1010 1011
1011 1012 def listkeys(namespace):
1012 1013 pass
1013 1014
1014 1015 def debugwireargs(one, two, three=None, four=None, five=None):
1015 1016 pass
1016 1017
1017 1018 def savecommitmessage(text):
1018 1019 pass
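These interfaces are enforced by the test suite rather than at runtime: ``test-check-interfaces.py`` (shown later in this change) runs zope.interface's ``verifyClass``/``verifyObject`` against each implementation. For example, conformance of the local repository class is checked like this:

from mercurial.thirdparty.zope.interface import verify as ziverify
from mercurial import localrepo, repository

# Raises if localrepository stops providing a declared attribute or method.
ziverify.verifyClass(repository.completelocalrepository,
                     localrepo.localrepository)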
@@ -1,262 +1,267 b''
1 1 # setdiscovery.py - improved discovery of common nodeset for mercurial
2 2 #
3 3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
4 4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """
9 9 The algorithm works in the following way. You have two repositories: local and
10 10 remote. They both contain a DAG of changelists.
11 11
12 12 The goal of the discovery protocol is to find one set of nodes, *common*,
13 13 the set of nodes shared by local and remote.
14 14
15 15 One of the issues with the original protocol was latency: it could
16 16 potentially require lots of roundtrips to discover that the local repo was a
17 17 subset of the remote (a very common case; you usually have few changes
18 18 compared to upstream, while upstream probably has lots of development).
19 19
20 20 The new protocol only requires one interface for the remote repo: `known()`,
21 21 which, given a set of changelists, tells you if they are present in the DAG.
22 22
23 23 The algorithm then works as follows:
24 24
25 25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
26 26 all nodes are in `unknown`.
27 27 - Take a sample from `unknown`, call `remote.known(sample)`
28 28 - For each node that remote knows, move it and all its ancestors to `common`
29 29 - For each node that remote doesn't know, move it and all its descendants
30 30 to `missing`
31 31 - Iterate until `unknown` is empty
32 32
33 33 There are a couple of optimizations: the first is that instead of starting with a random
34 34 sample of missing, we start by sending all heads; in the case where the local
35 35 repo is a subset, you compute the answer in one round trip.
36 36
37 37 Then you can do something similar to the bisecting strategy used when
38 38 finding faulty changesets. Instead of random samples, you can try picking
39 39 nodes that will maximize the number of nodes that will be
40 40 classified with them (since all ancestors or descendants will be marked as well).
41 41 """
42 42
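As a compact, hypothetical sketch of the loop described above (the real implementation below adds sampling heuristics, progress output and round-trip accounting; ``ancestors``, ``descendants`` and ``samplefn`` are assumed helpers, not part of this module):

def classify(unknown, remote, samplefn, ancestors, descendants):
    common, missing = set(), set()
    while unknown:
        sample = list(samplefn(unknown))
        yesno = remote.known(sample)
        for node, known in zip(sample, yesno):
            if known:
                common |= ancestors(node) | {node}
            else:
                missing |= descendants(node) | {node}
        # Everything classified so far no longer needs to be queried.
        unknown -= common | missing
    return common, missing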
43 43 from __future__ import absolute_import
44 44
45 45 import collections
46 46 import random
47 47
48 48 from .i18n import _
49 49 from .node import (
50 50 nullid,
51 51 nullrev,
52 52 )
53 53 from . import (
54 54 dagutil,
55 55 error,
56 56 util,
57 57 )
58 58
59 59 def _updatesample(dag, nodes, sample, quicksamplesize=0):
60 60 """update an existing sample to match the expected size
61 61
62 62 The sample is updated with nodes exponentially distant from each head of the
63 63 <nodes> set. (H~1, H~2, H~4, H~8, etc).
64 64
65 65 If a target size is specified, the sampling will stop once this size is
66 66 reached. Otherwise sampling will happen until roots of the <nodes> set are
67 67 reached.
68 68
69 69 :dag: a dag object from dagutil
70 70 :nodes: set of nodes we want to discover (if None, assume the whole dag)
71 71 :sample: a sample to update
72 72 :quicksamplesize: optional target size of the sample"""
73 73 # if nodes is empty we scan the entire graph
74 74 if nodes:
75 75 heads = dag.headsetofconnecteds(nodes)
76 76 else:
77 77 heads = dag.heads()
78 78 dist = {}
79 79 visit = collections.deque(heads)
80 80 seen = set()
81 81 factor = 1
82 82 while visit:
83 83 curr = visit.popleft()
84 84 if curr in seen:
85 85 continue
86 86 d = dist.setdefault(curr, 1)
87 87 if d > factor:
88 88 factor *= 2
89 89 if d == factor:
90 90 sample.add(curr)
91 91 if quicksamplesize and (len(sample) >= quicksamplesize):
92 92 return
93 93 seen.add(curr)
94 94 for p in dag.parents(curr):
95 95 if not nodes or p in nodes:
96 96 dist.setdefault(p, d + 1)
97 97 visit.append(p)
98 98
99 99 def _takequicksample(dag, nodes, size):
100 100 """takes a quick sample of size <size>
101 101
102 102 It is meant for initial sampling and focuses on querying heads and close
103 103 ancestors of heads.
104 104
105 105 :dag: a dag object
106 106 :nodes: set of nodes to discover
107 107 :size: the maximum size of the sample"""
108 108 sample = dag.headsetofconnecteds(nodes)
109 109 if len(sample) >= size:
110 110 return _limitsample(sample, size)
111 111 _updatesample(dag, None, sample, quicksamplesize=size)
112 112 return sample
113 113
114 114 def _takefullsample(dag, nodes, size):
115 115 sample = dag.headsetofconnecteds(nodes)
116 116 # update from heads
117 117 _updatesample(dag, nodes, sample)
118 118 # update from roots
119 119 _updatesample(dag.inverse(), nodes, sample)
120 120 assert sample
121 121 sample = _limitsample(sample, size)
122 122 if len(sample) < size:
123 123 more = size - len(sample)
124 124 sample.update(random.sample(list(nodes - sample), more))
125 125 return sample
126 126
127 127 def _limitsample(sample, desiredlen):
128 128 """return a random subset of sample of at most desiredlen item"""
129 129 if len(sample) > desiredlen:
130 130 sample = set(random.sample(sample, desiredlen))
131 131 return sample
132 132
133 133 def findcommonheads(ui, local, remote,
134 134 initialsamplesize=100,
135 135 fullsamplesize=200,
136 136 abortwhenunrelated=True,
137 137 ancestorsof=None):
138 138 '''Return a tuple (common, anyincoming, remoteheads) used to identify
139 139 missing nodes from or in remote.
140 140 '''
141 141 start = util.timer()
142 142
143 143 roundtrips = 0
144 144 cl = local.changelog
145 145 localsubset = None
146 146 if ancestorsof is not None:
147 147 rev = local.changelog.rev
148 148 localsubset = [rev(n) for n in ancestorsof]
149 149 dag = dagutil.revlogdag(cl, localsubset=localsubset)
150 150
151 151 # early exit if we know all the specified remote heads already
152 152 ui.debug("query 1; heads\n")
153 153 roundtrips += 1
154 154 ownheads = dag.heads()
155 155 sample = _limitsample(ownheads, initialsamplesize)
156 156 # indices between sample and externalized version must match
157 157 sample = list(sample)
158 158 batch = remote.iterbatch()
159 159 batch.heads()
160 160 batch.known(dag.externalizeall(sample))
161 161 batch.submit()
162 162 srvheadhashes, yesno = batch.results()
163 163
164 164 if cl.tip() == nullid:
165 165 if srvheadhashes != [nullid]:
166 166 return [nullid], True, srvheadhashes
167 167 return [nullid], False, []
168 168
169 169 # start actual discovery (we note this before the next "if" for
170 170 # compatibility reasons)
171 171 ui.status(_("searching for changes\n"))
172 172
173 173 srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
174 174 if len(srvheads) == len(srvheadhashes):
175 175 ui.debug("all remote heads known locally\n")
176 176 return (srvheadhashes, False, srvheadhashes,)
177 177
178 178 if len(sample) == len(ownheads) and all(yesno):
179 179 ui.note(_("all local heads known remotely\n"))
180 180 ownheadhashes = dag.externalizeall(ownheads)
181 181 return (ownheadhashes, True, srvheadhashes,)
182 182
183 183 # full blown discovery
184 184
185 185 # own nodes I know we both know
186 186 # treat remote heads (and maybe own heads) as a first implicit sample
187 187 # response
188 188 common = cl.incrementalmissingrevs(srvheads)
189 189 commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
190 190 common.addbases(commoninsample)
191 191 # own nodes where I don't know if remote knows them
192 192 undecided = set(common.missingancestors(ownheads))
193 193 # own nodes I know remote lacks
194 194 missing = set()
195 195
196 196 full = False
197 197 while undecided:
198 198
199 199 if sample:
200 200 missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
201 201 missing.update(dag.descendantset(missinginsample, missing))
202 202
203 203 undecided.difference_update(missing)
204 204
205 205 if not undecided:
206 206 break
207 207
208 208 if full or common.hasbases():
209 209 if full:
210 210 ui.note(_("sampling from both directions\n"))
211 211 else:
212 212 ui.debug("taking initial sample\n")
213 213 samplefunc = _takefullsample
214 214 targetsize = fullsamplesize
215 215 else:
216 216 # use even cheaper initial sample
217 217 ui.debug("taking quick initial sample\n")
218 218 samplefunc = _takequicksample
219 219 targetsize = initialsamplesize
220 220 if len(undecided) < targetsize:
221 221 sample = list(undecided)
222 222 else:
223 223 sample = samplefunc(dag, undecided, targetsize)
224 224
225 225 roundtrips += 1
226 226 ui.progress(_('searching'), roundtrips, unit=_('queries'))
227 227 ui.debug("query %i; still undecided: %i, sample size is: %i\n"
228 228 % (roundtrips, len(undecided), len(sample)))
229 229 # indices between sample and externalized version must match
230 230 sample = list(sample)
231 yesno = remote.known(dag.externalizeall(sample))
231
232 with remote.commandexecutor() as e:
233 yesno = e.callcommand('known', {
234 'nodes': dag.externalizeall(sample),
235 }).result()
236
232 237 full = True
233 238
234 239 if sample:
235 240 commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
236 241 common.addbases(commoninsample)
237 242 common.removeancestorsfrom(undecided)
238 243
239 244 # heads(common) == heads(common.bases) since common represents common.bases
240 245 # and all its ancestors
241 246 result = dag.headsetofconnecteds(common.bases)
242 247 # common.bases can include nullrev, but our contract requires us to not
243 248 # return any heads in that case, so discard that
244 249 result.discard(nullrev)
245 250 elapsed = util.timer() - start
246 251 ui.progress(_('searching'), None)
247 252 ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
248 253 msg = ('found %d common and %d unknown server heads,'
249 254 ' %d roundtrips in %.4fs\n')
250 255 missing = set(result) - set(srvheads)
251 256 ui.log('discovery', msg, len(result), len(missing), roundtrips,
252 257 elapsed)
253 258
254 259 if not result and srvheadhashes != [nullid]:
255 260 if abortwhenunrelated:
256 261 raise error.Abort(_("repository is unrelated"))
257 262 else:
258 263 ui.warn(_("warning: repository is unrelated\n"))
259 264 return ({nullid}, True, srvheadhashes,)
260 265
261 266 anyincoming = (srvheadhashes != [nullid])
262 267 return dag.externalizeall(result), anyincoming, srvheadhashes
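The only functional change in this file is the hunk above: the direct ``remote.known(...)`` call is replaced by the new command executor interface. For a single command the two forms are equivalent; roughly:

# Before: call the peer method directly.
yesno = remote.known(dag.externalizeall(sample))

# After: issue the command through an executor, which hands back a
# future whose result() is the decoded response.
with remote.commandexecutor() as e:
    yesno = e.callcommand('known', {
        'nodes': dag.externalizeall(sample),
    }).result()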
@@ -1,483 +1,576 b''
1 1 # wireprotov1peer.py - Client-side functionality for wire protocol version 1.
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 import sys
11 12
12 13 from .i18n import _
13 14 from .node import (
14 15 bin,
15 16 )
16
17 from .thirdparty.zope import (
18 interface as zi,
19 )
17 20 from . import (
18 21 bundle2,
19 22 changegroup as changegroupmod,
20 23 encoding,
21 24 error,
22 25 pushkey as pushkeymod,
23 26 pycompat,
24 27 repository,
25 28 util,
26 29 wireprototypes,
27 30 )
28 31
29 32 urlreq = util.urlreq
30 33
31 34 def batchable(f):
32 35 '''annotation for batchable methods
33 36
34 37 Such methods must implement a coroutine as follows:
35 38
36 39 @batchable
37 40 def sample(self, one, two=None):
38 41 # Build list of encoded arguments suitable for your wire protocol:
39 42 encargs = [('one', encode(one),), ('two', encode(two),)]
40 43 # Create future for injection of encoded result:
41 44 encresref = future()
42 45 # Return encoded arguments and future:
43 46 yield encargs, encresref
44 47 # Assuming the future to be filled with the result from the batched
45 48 # request now. Decode it:
46 49 yield decode(encresref.value)
47 50
48 51 The decorator returns a function which wraps this coroutine as a plain
49 52 method, but adds the original method as an attribute called "batchable",
50 53 which is used by remotebatch to split the call into separate encoding and
51 54 decoding phases.
52 55 '''
53 56 def plain(*args, **opts):
54 57 batchable = f(*args, **opts)
55 58 encargsorres, encresref = next(batchable)
56 59 if not encresref:
57 60 return encargsorres # a local result in this case
58 61 self = args[0]
59 62 cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
60 63 encresref.set(self._submitone(cmd, encargsorres))
61 64 return next(batchable)
62 65 setattr(plain, 'batchable', f)
63 66 return plain
64 67
65 68 class future(object):
66 69 '''placeholder for a value to be set later'''
67 70 def set(self, value):
68 71 if util.safehasattr(self, 'value'):
69 72 raise error.RepoError("future is already set")
70 73 self.value = value
71 74
72 75 class batcher(object):
73 76 '''base class for batches of commands submittable in a single request
74 77
75 78 All methods invoked on instances of this class are simply queued and
76 79 return a future for the result. Once you call submit(), all the queued
77 80 calls are performed and the results set in their respective futures.
78 81 '''
79 82 def __init__(self):
80 83 self.calls = []
81 84 def __getattr__(self, name):
82 85 def call(*args, **opts):
83 86 resref = future()
84 87 # Please don't invent non-ascii method names, or you will
85 88 # give core hg a very sad time.
86 89 self.calls.append((name.encode('ascii'), args, opts, resref,))
87 90 return resref
88 91 return call
89 92 def submit(self):
90 93 raise NotImplementedError()
91 94
92 95 class iterbatcher(batcher):
93 96
94 97 def submit(self):
95 98 raise NotImplementedError()
96 99
97 100 def results(self):
98 101 raise NotImplementedError()
99 102
100 103 class remoteiterbatcher(iterbatcher):
101 104 def __init__(self, remote):
102 105 super(remoteiterbatcher, self).__init__()
103 106 self._remote = remote
104 107
105 108 def __getattr__(self, name):
106 109 # Validate this method is batchable, since submit() only supports
107 110 # batchable methods.
108 111 fn = getattr(self._remote, name)
109 112 if not getattr(fn, 'batchable', None):
110 113 raise error.ProgrammingError('Attempted to batch a non-batchable '
111 114 'call to %r' % name)
112 115
113 116 return super(remoteiterbatcher, self).__getattr__(name)
114 117
115 118 def submit(self):
116 119 """Break the batch request into many patch calls and pipeline them.
117 120
118 121 This is mostly valuable over http where request sizes can be
119 122 limited, but can be used in other places as well.
120 123 """
121 124 # 2-tuple of (command, arguments) that represents what will be
122 125 # sent over the wire.
123 126 requests = []
124 127
125 128 # 4-tuple of (command, final future, @batchable generator, remote
126 129 # future).
127 130 results = []
128 131
129 132 for command, args, opts, finalfuture in self.calls:
130 133 mtd = getattr(self._remote, command)
131 134 batchable = mtd.batchable(mtd.__self__, *args, **opts)
132 135
133 136 commandargs, fremote = next(batchable)
134 137 assert fremote
135 138 requests.append((command, commandargs))
136 139 results.append((command, finalfuture, batchable, fremote))
137 140
138 141 if requests:
139 142 self._resultiter = self._remote._submitbatch(requests)
140 143
141 144 self._results = results
142 145
143 146 def results(self):
144 147 for command, finalfuture, batchable, remotefuture in self._results:
145 148 # Get the raw result, set it in the remote future, feed it
146 149 # back into the @batchable generator so it can be decoded, and
147 150 # set the result on the final future to this value.
148 151 remoteresult = next(self._resultiter)
149 152 remotefuture.set(remoteresult)
150 153 finalfuture.set(next(batchable))
151 154
152 155 # Verify our @batchable generators only emit 2 values.
153 156 try:
154 157 next(batchable)
155 158 except StopIteration:
156 159 pass
157 160 else:
158 161 raise error.ProgrammingError('%s @batchable generator emitted '
159 162 'unexpected value count' % command)
160 163
161 164 yield finalfuture.value
162 165
163 166 def encodebatchcmds(req):
164 167 """Return a ``cmds`` argument value for the ``batch`` command."""
165 168 escapearg = wireprototypes.escapebatcharg
166 169
167 170 cmds = []
168 171 for op, argsdict in req:
169 172 # Old servers didn't properly unescape argument names. So prevent
170 173 # the sending of argument names that may not be decoded properly by
171 174 # servers.
172 175 assert all(escapearg(k) == k for k in argsdict)
173 176
174 177 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
175 178 for k, v in argsdict.iteritems())
176 179 cmds.append('%s %s' % (op, args))
177 180
178 181 return ';'.join(cmds)
179 182
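For illustration, this is roughly what ``encodebatchcmds()`` produces for a two-command request (node values invented); commands are joined with ``;`` and arguments with ``,``, with characters that would clash with this framing escaped by ``escapebatcharg``:

req = [('heads', {}),
       ('known', {'nodes': 'deadbeef cafef00d'})]

encodebatchcmds(req)
# -> 'heads ;known nodes=deadbeef cafef00d'
# (the trailing space after 'heads' is its empty argument list)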
183 @zi.implementer(repository.ipeercommandexecutor)
184 class peerexecutor(object):
185 def __init__(self, peer):
186 self._peer = peer
187 self._sent = False
188 self._closed = False
189 self._calls = []
190
191 def __enter__(self):
192 return self
193
194 def __exit__(self, exctype, excvalue, exctb):
195 self.close()
196
197 def callcommand(self, command, args):
198 if self._sent:
199 raise error.ProgrammingError('callcommand() cannot be used '
200 'after commands are sent')
201
202 if self._closed:
203 raise error.ProgrammingError('callcommand() cannot be used '
204 'after close()')
205
206 # Commands are dispatched through methods on the peer.
207 fn = getattr(self._peer, pycompat.sysstr(command), None)
208
209 if not fn:
210 raise error.ProgrammingError(
211 'cannot call command %s: method of same name not available '
212 'on peer' % command)
213
214 # Commands are either batchable or they aren't. If a command
215 # isn't batchable, we send it immediately because the executor
216 # can no longer accept new commands after a non-batchable command.
217 # If a command is batchable, we queue it for later.
218
219 if getattr(fn, 'batchable', False):
220 pass
221 else:
222 if self._calls:
223 raise error.ProgrammingError(
224 '%s is not batchable and cannot be called on a command '
225 'executor along with other commands' % command)
226
227 # We don't support batching yet. So resolve it immediately.
228 f = pycompat.futures.Future()
229 self._calls.append((command, args, fn, f))
230 self.sendcommands()
231 return f
232
233 def sendcommands(self):
234 if self._sent:
235 return
236
237 if not self._calls:
238 return
239
240 self._sent = True
241
242 calls = self._calls
243 # Mainly to destroy references to futures.
244 self._calls = None
245
246 if len(calls) == 1:
247 command, args, fn, f = calls[0]
248
249 # Future was cancelled. Ignore it.
250 if not f.set_running_or_notify_cancel():
251 return
252
253 try:
254 result = fn(**pycompat.strkwargs(args))
255 except Exception:
256 f.set_exception_info(*sys.exc_info()[1:])
257 else:
258 f.set_result(result)
259
260 return
261
262 raise error.ProgrammingError('support for multiple commands not '
263 'yet implemented')
264
265 def close(self):
266 self.sendcommands()
267
268 self._closed = True
269
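Tying the executor to the ``wirepeer`` class below: at this point in the series ``callcommand()`` dispatches each command immediately (batching within the executor comes later), so callers see an already resolved future. A minimal usage sketch, with ``peer`` standing for any version 1 peer:

with peer.commandexecutor() as e:
    f = e.callcommand('heads', {})
    # The command was sent synchronously, so the result is ready.
    remoteheads = f.result()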
180 270 class wirepeer(repository.legacypeer):
181 271 """Client-side interface for communicating with a peer repository.
182 272
183 273 Methods commonly call wire protocol commands of the same name.
184 274
185 275 See also httppeer.py and sshpeer.py for protocol-specific
186 276 implementations of this interface.
187 277 """
278 def commandexecutor(self):
279 return peerexecutor(self)
280
188 281 # Begin of ipeercommands interface.
189 282
190 283 def iterbatch(self):
191 284 return remoteiterbatcher(self)
192 285
193 286 @batchable
194 287 def lookup(self, key):
195 288 self.requirecap('lookup', _('look up remote revision'))
196 289 f = future()
197 290 yield {'key': encoding.fromlocal(key)}, f
198 291 d = f.value
199 292 success, data = d[:-1].split(" ", 1)
200 293 if int(success):
201 294 yield bin(data)
202 295 else:
203 296 self._abort(error.RepoError(data))
204 297
205 298 @batchable
206 299 def heads(self):
207 300 f = future()
208 301 yield {}, f
209 302 d = f.value
210 303 try:
211 304 yield wireprototypes.decodelist(d[:-1])
212 305 except ValueError:
213 306 self._abort(error.ResponseError(_("unexpected response:"), d))
214 307
215 308 @batchable
216 309 def known(self, nodes):
217 310 f = future()
218 311 yield {'nodes': wireprototypes.encodelist(nodes)}, f
219 312 d = f.value
220 313 try:
221 314 yield [bool(int(b)) for b in d]
222 315 except ValueError:
223 316 self._abort(error.ResponseError(_("unexpected response:"), d))
224 317
225 318 @batchable
226 319 def branchmap(self):
227 320 f = future()
228 321 yield {}, f
229 322 d = f.value
230 323 try:
231 324 branchmap = {}
232 325 for branchpart in d.splitlines():
233 326 branchname, branchheads = branchpart.split(' ', 1)
234 327 branchname = encoding.tolocal(urlreq.unquote(branchname))
235 328 branchheads = wireprototypes.decodelist(branchheads)
236 329 branchmap[branchname] = branchheads
237 330 yield branchmap
238 331 except TypeError:
239 332 self._abort(error.ResponseError(_("unexpected response:"), d))
240 333
241 334 @batchable
242 335 def listkeys(self, namespace):
243 336 if not self.capable('pushkey'):
244 337 yield {}, None
245 338 f = future()
246 339 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
247 340 yield {'namespace': encoding.fromlocal(namespace)}, f
248 341 d = f.value
249 342 self.ui.debug('received listkey for "%s": %i bytes\n'
250 343 % (namespace, len(d)))
251 344 yield pushkeymod.decodekeys(d)
252 345
253 346 @batchable
254 347 def pushkey(self, namespace, key, old, new):
255 348 if not self.capable('pushkey'):
256 349 yield False, None
257 350 f = future()
258 351 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
259 352 yield {'namespace': encoding.fromlocal(namespace),
260 353 'key': encoding.fromlocal(key),
261 354 'old': encoding.fromlocal(old),
262 355 'new': encoding.fromlocal(new)}, f
263 356 d = f.value
264 357 d, output = d.split('\n', 1)
265 358 try:
266 359 d = bool(int(d))
267 360 except ValueError:
268 361 raise error.ResponseError(
269 362 _('push failed (unexpected response):'), d)
270 363 for l in output.splitlines(True):
271 364 self.ui.status(_('remote: '), l)
272 365 yield d
273 366
274 367 def stream_out(self):
275 368 return self._callstream('stream_out')
276 369
277 370 def getbundle(self, source, **kwargs):
278 371 kwargs = pycompat.byteskwargs(kwargs)
279 372 self.requirecap('getbundle', _('look up remote changes'))
280 373 opts = {}
281 374 bundlecaps = kwargs.get('bundlecaps') or set()
282 375 for key, value in kwargs.iteritems():
283 376 if value is None:
284 377 continue
285 378 keytype = wireprototypes.GETBUNDLE_ARGUMENTS.get(key)
286 379 if keytype is None:
287 380 raise error.ProgrammingError(
288 381 'Unexpectedly None keytype for key %s' % key)
289 382 elif keytype == 'nodes':
290 383 value = wireprototypes.encodelist(value)
291 384 elif keytype == 'csv':
292 385 value = ','.join(value)
293 386 elif keytype == 'scsv':
294 387 value = ','.join(sorted(value))
295 388 elif keytype == 'boolean':
296 389 value = '%i' % bool(value)
297 390 elif keytype != 'plain':
298 391 raise KeyError('unknown getbundle option type %s'
299 392 % keytype)
300 393 opts[key] = value
301 394 f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
302 395 if any((cap.startswith('HG2') for cap in bundlecaps)):
303 396 return bundle2.getunbundler(self.ui, f)
304 397 else:
305 398 return changegroupmod.cg1unpacker(f, 'UN')
306 399
307 400 def unbundle(self, cg, heads, url):
308 401 '''Send cg (a readable file-like object representing the
309 402 changegroup to push, typically a chunkbuffer object) to the
310 403 remote server as a bundle.
311 404
312 405 When pushing a bundle10 stream, return an integer indicating the
313 406 result of the push (see changegroup.apply()).
314 407
315 408 When pushing a bundle20 stream, return a bundle20 stream.
316 409
317 410 `url` is the url the client thinks it's pushing to, which is
318 411 visible to hooks.
319 412 '''
320 413
321 414 if heads != ['force'] and self.capable('unbundlehash'):
322 415 heads = wireprototypes.encodelist(
323 416 ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()])
324 417 else:
325 418 heads = wireprototypes.encodelist(heads)
326 419
327 420 if util.safehasattr(cg, 'deltaheader'):
328 421 # this is a bundle10, do the old style call sequence
329 422 ret, output = self._callpush("unbundle", cg, heads=heads)
330 423 if ret == "":
331 424 raise error.ResponseError(
332 425 _('push failed:'), output)
333 426 try:
334 427 ret = int(ret)
335 428 except ValueError:
336 429 raise error.ResponseError(
337 430 _('push failed (unexpected response):'), ret)
338 431
339 432 for l in output.splitlines(True):
340 433 self.ui.status(_('remote: '), l)
341 434 else:
342 435 # bundle2 push. Send a stream, fetch a stream.
343 436 stream = self._calltwowaystream('unbundle', cg, heads=heads)
344 437 ret = bundle2.getunbundler(self.ui, stream)
345 438 return ret
346 439
347 440 # End of ipeercommands interface.
348 441
349 442 # Begin of ipeerlegacycommands interface.
350 443
351 444 def branches(self, nodes):
352 445 n = wireprototypes.encodelist(nodes)
353 446 d = self._call("branches", nodes=n)
354 447 try:
355 448 br = [tuple(wireprototypes.decodelist(b)) for b in d.splitlines()]
356 449 return br
357 450 except ValueError:
358 451 self._abort(error.ResponseError(_("unexpected response:"), d))
359 452
360 453 def between(self, pairs):
361 454 batch = 8 # avoid giant requests
362 455 r = []
363 456 for i in xrange(0, len(pairs), batch):
364 457 n = " ".join([wireprototypes.encodelist(p, '-')
365 458 for p in pairs[i:i + batch]])
366 459 d = self._call("between", pairs=n)
367 460 try:
368 461 r.extend(l and wireprototypes.decodelist(l) or []
369 462 for l in d.splitlines())
370 463 except ValueError:
371 464 self._abort(error.ResponseError(_("unexpected response:"), d))
372 465 return r
373 466
374 467 def changegroup(self, nodes, kind):
375 468 n = wireprototypes.encodelist(nodes)
376 469 f = self._callcompressable("changegroup", roots=n)
377 470 return changegroupmod.cg1unpacker(f, 'UN')
378 471
379 472 def changegroupsubset(self, bases, heads, kind):
380 473 self.requirecap('changegroupsubset', _('look up remote changes'))
381 474 bases = wireprototypes.encodelist(bases)
382 475 heads = wireprototypes.encodelist(heads)
383 476 f = self._callcompressable("changegroupsubset",
384 477 bases=bases, heads=heads)
385 478 return changegroupmod.cg1unpacker(f, 'UN')
386 479
387 480 # End of ipeerlegacycommands interface.
388 481
389 482 def _submitbatch(self, req):
390 483 """run batch request <req> on the server
391 484
392 485 Returns an iterator of the raw responses from the server.
393 486 """
394 487 ui = self.ui
395 488 if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
396 489 ui.debug('devel-peer-request: batched-content\n')
397 490 for op, args in req:
398 491 msg = 'devel-peer-request: - %s (%d arguments)\n'
399 492 ui.debug(msg % (op, len(args)))
400 493
401 494 unescapearg = wireprototypes.unescapebatcharg
402 495
403 496 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
404 497 chunk = rsp.read(1024)
405 498 work = [chunk]
406 499 while chunk:
407 500 while ';' not in chunk and chunk:
408 501 chunk = rsp.read(1024)
409 502 work.append(chunk)
410 503 merged = ''.join(work)
411 504 while ';' in merged:
412 505 one, merged = merged.split(';', 1)
413 506 yield unescapearg(one)
414 507 chunk = rsp.read(1024)
415 508 work = [merged, chunk]
416 509 yield unescapearg(''.join(work))
417 510
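As a rough illustration of the framing handled above (values invented, chunked 1024-byte reads ignored): the server answers a batched request with the individual command responses joined by ``;``, which are unescaped and yielded in request order.

from mercurial import wireprototypes

def parsebatch(raw):
    # raw is the full response body, e.g. '101;deadbeef cafef00d'
    # for a ('known', ...) request followed by ('heads', {}).
    for part in raw.split(';'):
        yield wireprototypes.unescapebatcharg(part)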
418 511 def _submitone(self, op, args):
419 512 return self._call(op, **pycompat.strkwargs(args))
420 513
421 514 def debugwireargs(self, one, two, three=None, four=None, five=None):
422 515 # don't pass optional arguments left at their default value
423 516 opts = {}
424 517 if three is not None:
425 518 opts[r'three'] = three
426 519 if four is not None:
427 520 opts[r'four'] = four
428 521 return self._call('debugwireargs', one=one, two=two, **opts)
429 522
430 523 def _call(self, cmd, **args):
431 524 """execute <cmd> on the server
432 525
433 526 The command is expected to return a simple string.
434 527
435 528 returns the server reply as a string."""
436 529 raise NotImplementedError()
437 530
438 531 def _callstream(self, cmd, **args):
439 532 """execute <cmd> on the server
440 533
441 534 The command is expected to return a stream. Note that if the
442 535 command doesn't return a stream, _callstream behaves
443 536 differently for ssh and http peers.
444 537
445 538 returns the server reply as a file like object.
446 539 """
447 540 raise NotImplementedError()
448 541
449 542 def _callcompressable(self, cmd, **args):
450 543 """execute <cmd> on the server
451 544
452 545 The command is expected to return a stream.
453 546
454 547 The stream may have been compressed in some implementations. This
455 548 function takes care of the decompression. This is the only difference
456 549 with _callstream.
457 550
458 551 returns the server reply as a file like object.
459 552 """
460 553 raise NotImplementedError()
461 554
462 555 def _callpush(self, cmd, fp, **args):
463 556 """execute a <cmd> on server
464 557
465 558 The command is expected to be related to a push. Push has a special
466 559 return method.
467 560
468 561 returns the server reply as a (ret, output) tuple. ret is either
469 562 empty (error) or a stringified int.
470 563 """
471 564 raise NotImplementedError()
472 565
473 566 def _calltwowaystream(self, cmd, fp, **args):
474 567 """execute <cmd> on server
475 568
476 569 The command will send a stream to the server and get a stream in reply.
477 570 """
478 571 raise NotImplementedError()
479 572
480 573 def _abort(self, exception):
481 574 """clearly abort the wire protocol connection and raise the exception
482 575 """
483 576 raise NotImplementedError()
@@ -1,154 +1,163 b''
1 1 # Test that certain objects conform to well-defined interfaces.
2 2
3 3 from __future__ import absolute_import, print_function
4 4
5 5 import os
6 6
7 7 from mercurial.thirdparty.zope import (
8 8 interface as zi,
9 9 )
10 10 from mercurial.thirdparty.zope.interface import (
11 11 verify as ziverify,
12 12 )
13 13 from mercurial import (
14 14 bundlerepo,
15 15 filelog,
16 16 httppeer,
17 17 localrepo,
18 18 repository,
19 19 sshpeer,
20 20 statichttprepo,
21 21 ui as uimod,
22 22 unionrepo,
23 23 vfs as vfsmod,
24 24 wireprotoserver,
25 25 wireprototypes,
26 wireprotov1peer,
26 27 wireprotov2server,
27 28 )
28 29
29 30 rootdir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
30 31
31 32 def checkzobject(o, allowextra=False):
32 33 """Verify an object with a zope interface."""
33 34 ifaces = zi.providedBy(o)
34 35 if not ifaces:
35 36 print('%r does not provide any zope interfaces' % o)
36 37 return
37 38
38 39 # Run zope.interface's built-in verification routine. This verifies that
39 40 # everything that is supposed to be present is present.
40 41 for iface in ifaces:
41 42 ziverify.verifyObject(iface, o)
42 43
43 44 if allowextra:
44 45 return
45 46
46 47 # Now verify that the object provides no extra public attributes that
47 48 # aren't declared as part of interfaces.
48 49 allowed = set()
49 50 for iface in ifaces:
50 51 allowed |= set(iface.names(all=True))
51 52
52 53 public = {a for a in dir(o) if not a.startswith('_')}
53 54
54 55 for attr in sorted(public - allowed):
55 56 print('public attribute not declared in interfaces: %s.%s' % (
56 57 o.__class__.__name__, attr))
57 58
58 59 # Facilitates testing localpeer.
59 60 class dummyrepo(object):
60 61 def __init__(self):
61 62 self.ui = uimod.ui()
62 63 def filtered(self, name):
63 64 pass
64 65 def _restrictcapabilities(self, caps):
65 66 pass
66 67
67 68 class dummyopener(object):
68 69 handlers = []
69 70
70 71 # Facilitates testing sshpeer without requiring a server.
71 72 class badpeer(httppeer.httppeer):
72 73 def __init__(self):
73 74 super(badpeer, self).__init__(None, None, None, dummyopener(), None,
74 75 None)
75 76 self.badattribute = True
76 77
77 78 def badmethod(self):
78 79 pass
79 80
80 81 class dummypipe(object):
81 82 def close(self):
82 83 pass
83 84
84 85 def main():
85 86 ui = uimod.ui()
86 87 # Needed so we can open a local repo with obsstore without a warning.
87 88 ui.setconfig('experimental', 'evolution.createmarkers', True)
88 89
89 90 checkzobject(badpeer())
90 91
91 92 ziverify.verifyClass(repository.ipeerbaselegacycommands,
92 93 httppeer.httppeer)
93 94 checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
94 95
95 96 ziverify.verifyClass(repository.ipeerconnection,
96 97 httppeer.httpv2peer)
97 98 ziverify.verifyClass(repository.ipeercapabilities,
98 99 httppeer.httpv2peer)
99 100 checkzobject(httppeer.httpv2peer(None, '', None, None, None, None))
100 101
101 102 ziverify.verifyClass(repository.ipeerbase,
102 103 localrepo.localpeer)
103 104 checkzobject(localrepo.localpeer(dummyrepo()))
104 105
106 ziverify.verifyClass(repository.ipeercommandexecutor,
107 localrepo.localcommandexecutor)
108 checkzobject(localrepo.localcommandexecutor(None))
109
110 ziverify.verifyClass(repository.ipeercommandexecutor,
111 wireprotov1peer.peerexecutor)
112 checkzobject(wireprotov1peer.peerexecutor(None))
113
105 114 ziverify.verifyClass(repository.ipeerbaselegacycommands,
106 115 sshpeer.sshv1peer)
107 116 checkzobject(sshpeer.sshv1peer(ui, 'ssh://localhost/foo', None, dummypipe(),
108 117 dummypipe(), None, None))
109 118
110 119 ziverify.verifyClass(repository.ipeerbaselegacycommands,
111 120 sshpeer.sshv2peer)
112 121 checkzobject(sshpeer.sshv2peer(ui, 'ssh://localhost/foo', None, dummypipe(),
113 122 dummypipe(), None, None))
114 123
115 124 ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
116 125 checkzobject(bundlerepo.bundlepeer(dummyrepo()))
117 126
118 127 ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
119 128 checkzobject(statichttprepo.statichttppeer(dummyrepo()))
120 129
121 130 ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
122 131 checkzobject(unionrepo.unionpeer(dummyrepo()))
123 132
124 133 ziverify.verifyClass(repository.completelocalrepository,
125 134 localrepo.localrepository)
126 135 repo = localrepo.localrepository(ui, rootdir)
127 136 checkzobject(repo)
128 137
129 138 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
130 139 wireprotoserver.sshv1protocolhandler)
131 140 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
132 141 wireprotoserver.sshv2protocolhandler)
133 142 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
134 143 wireprotoserver.httpv1protocolhandler)
135 144 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
136 145 wireprotov2server.httpv2protocolhandler)
137 146
138 147 sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
139 148 checkzobject(sshv1)
140 149 sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
141 150 checkzobject(sshv2)
142 151
143 152 httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
144 153 checkzobject(httpv1)
145 154 httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
146 155 checkzobject(httpv2)
147 156
148 157 ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
149 158
150 159 vfs = vfsmod.vfs('.')
151 160 fl = filelog.filelog(vfs, 'dummy.i')
152 161 checkzobject(fl, allowextra=True)
153 162
154 163 main()