##// END OF EJS Templates
localrepo: better error when a repo exists but we lack permissions...
Valentin Gatien-Baron -
r39022:ac0a8716 default
parent child Browse files
Show More
@@ -1,2405 +1,2410
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
import errno
import functools
import hashlib
import os
import random
import sys
import time
import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
# Convenience aliases for names re-exported from sibling modules.
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
81 81
class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """
    # Every descriptor operation is redirected to the unfiltered repo so
    # that all filtered views of a repository share one cache entry.
    def __get__(self, repo, type=None):
        # class-level access returns the descriptor itself, like property
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())
93 93
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        # record every watched file as living in the 'plain' (.hg) vfs
        _cachedfiles.update((p, 'plain') for p in paths)

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg directory
        return obj.vfs.join(fname)
103 103
class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        # record every watched file as living in the store vfs ('')
        _cachedfiles.update((p, '') for p in paths)

    def join(self, obj, fname):
        # resolve fname relative to the repository store
        return obj.sjoin(fname)
113 113
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    # always consult the unfiltered repo; that is where filecache state lives
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
123 123
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: compute and cache on this instance
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: look the value up on the unfiltered repo so it is
        # only ever computed and stored there
        return getattr(unfi, self.name)
132 132
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store directly on the (possibly filtered) instance, bypassing any
        # __setattr__ override
        object.__setattr__(obj, self.name, value)
138 138
139 139
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # a cached value lives directly in the unfiltered repo's instance dict
    unfi = repo.unfiltered()
    return name in unfi.__dict__
143 143
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    # functools.wraps preserves the wrapped function's __name__, __doc__ and
    # __module__, keeping tracebacks, profiles and introspection meaningful
    # (the bare closure previously reported as "wrapper").
    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
149 149
# capabilities advertised by a modern local peer
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
# legacy peers additionally expose the pre-getbundle 'changegroupsubset'
legacycaps = moderncaps.union({'changegroupsubset'})
153 153
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    """Command executor that runs commands synchronously on a local peer.

    Each command is invoked immediately and its outcome is wrapped in an
    already-resolved future.
    """
    def __init__(self, peer):
        self._peer = peer
        # lifecycle flags; no commands may be issued after either is set
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        # enforce the executor lifecycle contract
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            # propagate the failure through the future rather than raising
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
196 196
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # operate on the 'served' view of the repository
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # translate a push race into the wire-level response error
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
324 324
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        # build a changegroup from the given roots up to all repo heads
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        # like changegroup(), but with explicit heads instead of all heads
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.
352 352
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
374 374
@interfaceutil.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    # full set of requirements this version can open; supportedformats plus
    # requirements that do not affect the revlog format
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    # requirements forwarded to svfs.options in _applyopenerreqs()
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefix for file which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }
423 423
    def __init__(self, baseui, path, create=False, intents=None):
        """Open (or, with create=True, initialize) the repository at ``path``.

        ``baseui`` is copied so repository-level configuration does not leak
        back into the caller's ui. ``intents`` is accepted but not used in
        this method. Raises error.RepoError when the repository is missing,
        already exists (while creating), or uses unsupported features.
        """
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            # a missing or unreadable .hg/hgrc is not fatal
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            # only run feature setup hooks registered by loaded extensions
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                # Distinguish a genuinely missing repository from one we
                # merely cannot read: re-raise stat() failures other than
                # ENOENT (e.g. EACCES) instead of a misleading "not found".
                try:
                    self.vfs.stat()
                except OSError as inst:
                    if inst.errno != errno.ENOENT:
                        raise
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                # a repo without a requires file is treated as empty-requires
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            # no sharedpath file: this is not a shared repository
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}
583 588
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        The returned wrapper around the vfs audit function emits develwarn
        messages when .hg/ files are written without the appropriate lock.
        """
        # weakref so the ward stored on the vfs does not keep the repo alive
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # bail out if the repo is gone or not fully constructed yet
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            # read-only accesses need no lock
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
618 623
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        Like _getvfsward, but store files are all covered by 'lock'.
        """
        # weakref so the ward stored on the vfs does not keep the repo alive
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            # read-only accesses need no lock
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs
637 642
    def close(self):
        # flush in-memory caches (rev branch cache) to disk on close
        self._writecaches()
640 645
    def _loadextensions(self):
        """Load all extensions enabled in this repository's configuration."""
        extensions.loadall(self.ui)
643 648
    def _writecaches(self):
        # only write the rev branch cache if it was actually instantiated
        if self._revbranchcache:
            self._revbranchcache.write()
647 652
    def _restrictcapabilities(self, caps):
        """Return the capability set to advertise for this repository."""
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            # advertise bundle2 support with an URL-quoted caps blob
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps
655 660
    def _applyopenerreqs(self):
        """Translate requirements and config into svfs.options, which the
        revlog layer reads when opening store files."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
            'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            # the sparserevlog requirement forces generaldelta on
            self.svfs.options['generaldelta'] = True

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True
702 707
    def _writerequirements(self):
        # persist self.requirements to the repository's requires file
        scmutil.writerequires(self.vfs, self.requirements)
705 710
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        ``path`` is an absolute filesystem path; returns a boolean.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # walk prefixes from the longest down, looking for a subrepo
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # path is deeper inside a subrepo: ask that subrepo
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
743 748
    def peer(self):
        """Return a localpeer view of this repository."""
        return localpeer(self) # not cached to avoid reference cycle
746 751
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        # the base class is always unfiltered, so return self
        return self
752 757
    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        # build a repoview subclass of the unfiltered class on the fly
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
757 762
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # bookmark store; recomputed when the backing files change
        return bookmarks.bmstore(self)
761 766
    @property
    def _activebookmark(self):
        # delegate to the bookmark store's notion of the active bookmark
        return self._bookmarks.active
765 770
    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # phase information cache, seeded with registered phase defaults
        return phases.phasecache(self, self._phasedefaults)
772 777
    @storecache('obsstore')
    def obsstore(self):
        # obsolescence marker store
        return obsolete.makestore(self.ui, self)
776 781
    @storecache('00changelog.i')
    def changelog(self):
        # trypending: honor a pending changelog left by an open
        # transaction, when txnutil says one may exist
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))
781 786
    def _constructmanifest(self):
        """Build the manifest revlog (override point for derived repos)."""
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)
787 792
    @storecache('00manifest.i')
    def manifestlog(self):
        # higher-level interface over the manifest storage
        return manifest.manifestlog(self.svfs, self)
791 796
    @repofilecache('dirstate')
    def dirstate(self):
        # built via a hook so extensions can wrap construction
        return self._makedirstate()
795 800
    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        # deferred via a lambda so the sparse matcher is only computed
        # when the dirstate actually needs it
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)
802 807
    def _dirstatevalidate(self, node):
        """Return ``node`` if it is a known changeset; otherwise warn once
        and fall back to ``nullid``."""
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                # warn only once per repo instance
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
813 818
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            # the narrowspec lives in the share source's store
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)
825 830
    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        # repositories without the narrow requirement match everything
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)
832 837
    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        """Return the matcher corresponding to this repo's narrowspec."""
        return self._narrowmatch
836 841
    def setnarrowpats(self, newincludes, newexcludes):
        """Persist a new narrowspec and drop caches derived from the old
        one."""
        target = self
        if self.shared():
            # the narrowspec is stored in the share source
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)
844 849
    def __getitem__(self, changeid):
        """Return the context for ``changeid``.

        ``None`` means the working directory; a changectx passes through
        unchanged; a slice yields the changectxs in that revision range,
        skipping filtered revisions.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
859 864
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            # only plain lookup failures mean "absent"; other errors
            # (including ambiguity) propagate to the caller
            return False
871 876
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no revisions
        return True

    __bool__ = __nonzero__
876 881
    def __len__(self):
        """Return the number of changesets in the unfiltered repository."""
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)
881 886
    def __iter__(self):
        """Iterate over the revision numbers of the (filtered) changelog."""
        return iter(self.changelog)
884 889
885 890 def revs(self, expr, *args):
886 891 '''Find revisions matching a revset.
887 892
888 893 The revset is specified as a string ``expr`` that may contain
889 894 %-formatting to escape certain types. See ``revsetlang.formatspec``.
890 895
891 896 Revset aliases from the configuration are not expanded. To expand
892 897 user aliases, consider calling ``scmutil.revrange()`` or
893 898 ``repo.anyrevs([expr], user=True)``.
894 899
895 900 Returns a revset.abstractsmartset, which is a list-like interface
896 901 that contains integer revisions.
897 902 '''
898 903 expr = revsetlang.formatspec(expr, *args)
899 904 m = revset.match(None, expr)
900 905 return m(self)
901 906
902 907 def set(self, expr, *args):
903 908 '''Find revisions matching a revset and emit changectx instances.
904 909
905 910 This is a convenience wrapper around ``revs()`` that iterates the
906 911 result and is a generator of changectx instances.
907 912
908 913 Revset aliases from the configuration are not expanded. To expand
909 914 user aliases, consider calling ``scmutil.revrange()``.
910 915 '''
911 916 for r in self.revs(expr, *args):
912 917 yield self[r]
913 918
914 919 def anyrevs(self, specs, user=False, localalias=None):
915 920 '''Find revisions matching one of the given revsets.
916 921
917 922 Revset aliases from the configuration are not expanded by default. To
918 923 expand user aliases, specify ``user=True``. To provide some local
919 924 definitions overriding user aliases, set ``localalias`` to
920 925 ``{name: definitionstring}``.
921 926 '''
922 927 if user:
923 928 m = revset.matchany(self.ui, specs,
924 929 lookup=revset.lookupfn(self),
925 930 localalias=localalias)
926 931 else:
927 932 m = revset.matchany(None, specs, localalias=localalias)
928 933 return m(self)
929 934
930 935 def url(self):
931 936 return 'file:' + self.root
932 937
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.

        ``throw`` makes a failing hook raise instead of merely reporting;
        extra keyword arguments are forwarded to the hook environment.
        """
        return hook.hook(self.ui, self, name, throw, **args)
941 946
942 947 @filteredpropertycache
943 948 def _tagscache(self):
944 949 '''Returns a tagscache object that contains various tags related
945 950 caches.'''
946 951
947 952 # This simplifies its cache management by having one decorated
948 953 # function (this one) and the rest simply fetch things from it.
949 954 class tagscache(object):
950 955 def __init__(self):
951 956 # These two define the set of tags for this repository. tags
952 957 # maps tag name to node; tagtypes maps tag name to 'global' or
953 958 # 'local'. (Global tags are defined by .hgtags across all
954 959 # heads, and local tags are defined in .hg/localtags.)
955 960 # They constitute the in-memory cache of tags.
956 961 self.tags = self.tagtypes = None
957 962
958 963 self.nodetagscache = self.tagslist = None
959 964
960 965 cache = tagscache()
961 966 cache.tags, cache.tagtypes = self._findtags()
962 967
963 968 return cache
964 969
965 970 def tags(self):
966 971 '''return a mapping of tag to node'''
967 972 t = {}
968 973 if self.changelog.filteredrevs:
969 974 tags, tt = self._findtags()
970 975 else:
971 976 tags = self._tagscache.tags
972 977 for k, v in tags.iteritems():
973 978 try:
974 979 # ignore tags to unknown nodes
975 980 self.changelog.rev(v)
976 981 t[k] = v
977 982 except (error.LookupError, ValueError):
978 983 pass
979 984 return t
980 985
981 986 def _findtags(self):
982 987 '''Do the hard work of finding tags. Return a pair of dicts
983 988 (tags, tagtypes) where tags maps tag name to node, and tagtypes
984 989 maps tag name to a string like \'global\' or \'local\'.
985 990 Subclasses or extensions are free to add their own tags, but
986 991 should be aware that the returned dicts will be retained for the
987 992 duration of the localrepo object.'''
988 993
989 994 # XXX what tagtype should subclasses/extensions use? Currently
990 995 # mq and bookmarks add tags, but do not set the tagtype at all.
991 996 # Should each extension invent its own tag type? Should there
992 997 # be one tagtype for all such "virtual" tags? Or is the status
993 998 # quo fine?
994 999
995 1000
996 1001 # map tag name to (node, hist)
997 1002 alltags = tagsmod.findglobaltags(self.ui, self)
998 1003 # map tag name to tag type
999 1004 tagtypes = dict((tag, 'global') for tag in alltags)
1000 1005
1001 1006 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1002 1007
1003 1008 # Build the return dicts. Have to re-encode tag names because
1004 1009 # the tags module always uses UTF-8 (in order not to lose info
1005 1010 # writing to the cache), but the rest of Mercurial wants them in
1006 1011 # local encoding.
1007 1012 tags = {}
1008 1013 for (name, (node, hist)) in alltags.iteritems():
1009 1014 if node != nullid:
1010 1015 tags[encoding.tolocal(name)] = node
1011 1016 tags['tip'] = self.changelog.tip()
1012 1017 tagtypes = dict([(encoding.tolocal(name), value)
1013 1018 for (name, value) in tagtypes.iteritems()])
1014 1019 return (tags, tagtypes)
1015 1020
1016 1021 def tagtype(self, tagname):
1017 1022 '''
1018 1023 return the type of the given tag. result can be:
1019 1024
1020 1025 'local' : a local tag
1021 1026 'global' : a global tag
1022 1027 None : tag does not exist
1023 1028 '''
1024 1029
1025 1030 return self._tagscache.tagtypes.get(tagname)
1026 1031
1027 1032 def tagslist(self):
1028 1033 '''return a list of tags ordered by revision'''
1029 1034 if not self._tagscache.tagslist:
1030 1035 l = []
1031 1036 for t, n in self.tags().iteritems():
1032 1037 l.append((self.changelog.rev(n), t, n))
1033 1038 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1034 1039
1035 1040 return self._tagscache.tagslist
1036 1041
1037 1042 def nodetags(self, node):
1038 1043 '''return the tags associated with a node'''
1039 1044 if not self._tagscache.nodetagscache:
1040 1045 nodetagscache = {}
1041 1046 for t, n in self._tagscache.tags.iteritems():
1042 1047 nodetagscache.setdefault(n, []).append(t)
1043 1048 for tags in nodetagscache.itervalues():
1044 1049 tags.sort()
1045 1050 self._tagscache.nodetagscache = nodetagscache
1046 1051 return self._tagscache.nodetagscache.get(node, [])
1047 1052
1048 1053 def nodebookmarks(self, node):
1049 1054 """return the list of bookmarks pointing to the specified node"""
1050 1055 return self._bookmarks.names(node)
1051 1056
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the cache for this repo view before handing it out
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
1057 1062
    @unfilteredmethod
    def revbranchcache(self):
        """Return the rev -> branch info cache, creating it lazily.

        Created against the unfiltered repo so all views share one instance.
        """
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
1063 1068
1064 1069 def branchtip(self, branch, ignoremissing=False):
1065 1070 '''return the tip node for a given branch
1066 1071
1067 1072 If ignoremissing is True, then this method will not raise an error.
1068 1073 This is helpful for callers that only expect None for a missing branch
1069 1074 (e.g. namespace).
1070 1075
1071 1076 '''
1072 1077 try:
1073 1078 return self.branchmap().branchtip(branch)
1074 1079 except KeyError:
1075 1080 if not ignoremissing:
1076 1081 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1077 1082 else:
1078 1083 pass
1079 1084
    def lookup(self, key):
        """Resolve the revision symbol ``key`` to its binary node id."""
        return scmutil.revsymbol(self, key).node()
1082 1087
1083 1088 def lookupbranch(self, key):
1084 1089 if key in self.branchmap():
1085 1090 return key
1086 1091
1087 1092 return scmutil.revsymbol(self, key).branch()
1088 1093
1089 1094 def known(self, nodes):
1090 1095 cl = self.changelog
1091 1096 nm = cl.nodemap
1092 1097 filtered = cl.filteredrevs
1093 1098 result = []
1094 1099 for n in nodes:
1095 1100 r = nm.get(n)
1096 1101 resp = not (r is None or r in filtered)
1097 1102 result.append(resp)
1098 1103 return result
1099 1104
    def local(self):
        # a truthy return marks this as a local repository; other repo
        # classes override this (see the statichttprepo note in cancopy)
        return self
1102 1107
1103 1108 def publishing(self):
1104 1109 # it's safe (and desirable) to trust the publish flag unconditionally
1105 1110 # so that we don't finalize changes shared between users via ssh or nfs
1106 1111 return self.ui.configbool('phases', 'publish', untrusted=True)
1107 1112
1108 1113 def cancopy(self):
1109 1114 # so statichttprepo's override of local() works
1110 1115 if not self.local():
1111 1116 return False
1112 1117 if not self.publishing():
1113 1118 return True
1114 1119 # if publishing we can't copy if there is filtered content
1115 1120 return not self.filtered('visible').changelog.filteredrevs
1116 1121
1117 1122 def shared(self):
1118 1123 '''the type of shared repository (None if not shared)'''
1119 1124 if self.sharedpath != self.path:
1120 1125 return 'store'
1121 1126 return None
1122 1127
1123 1128 def wjoin(self, f, *insidef):
1124 1129 return self.vfs.reljoin(self.root, f, *insidef)
1125 1130
1126 1131 def file(self, f):
1127 1132 if f[0] == '/':
1128 1133 f = f[1:]
1129 1134 return filelog.filelog(self.svfs, f)
1130 1135
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents and fix up copy records.

        Copy information is preserved only for entries added on top of the
        first parent.
        """
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # not merging: drop copy records where neither side exists
                # in the first parent
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1146 1151
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """Return a filectx for ``path``.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.
        """
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)
1152 1157
1153 1158 def getcwd(self):
1154 1159 return self.dirstate.getcwd()
1155 1160
1156 1161 def pathto(self, f, cwd=None):
1157 1162 return self.dirstate.pathto(f, cwd)
1158 1163
    def _loadfilter(self, filter):
        """Build (and cache) the list of (matcher, fn, params) triples for
        the config section ``filter`` (e.g. 'encode' or 'decode')."""
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # a registered in-process filter takes precedence
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise pipe the data through an external command
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
1182 1187
1183 1188 def _filter(self, filterpats, filename, data):
1184 1189 for mf, fn, cmd in filterpats:
1185 1190 if mf(filename):
1186 1191 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1187 1192 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1188 1193 break
1189 1194
1190 1195 return data
1191 1196
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading data from the working directory
        return self._loadfilter('encode')
1195 1200
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing data out to the working directory
        return self._loadfilter('decode')
1199 1204
1200 1205 def adddatafilter(self, name, filter):
1201 1206 self._datafilters[name] = filter
1202 1207
1203 1208 def wread(self, filename):
1204 1209 if self.wvfs.islink(filename):
1205 1210 data = self.wvfs.readlink(filename)
1206 1211 else:
1207 1212 data = self.wvfs.read(filename)
1208 1213 return self._filter(self._encodefilterpats, filename, data)
1209 1214
1210 1215 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1211 1216 """write ``data`` into ``filename`` in the working directory
1212 1217
1213 1218 This returns length of written (maybe decoded) data.
1214 1219 """
1215 1220 data = self._filter(self._decodefilterpats, filename, data)
1216 1221 if 'l' in flags:
1217 1222 self.wvfs.symlink(data, filename)
1218 1223 else:
1219 1224 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1220 1225 **kwargs)
1221 1226 if 'x' in flags:
1222 1227 self.wvfs.setflags(filename, False, True)
1223 1228 else:
1224 1229 self.wvfs.setflags(filename, False, False)
1225 1230 return len(data)
1226 1231
1227 1232 def wwritedata(self, filename, data):
1228 1233 return self._filter(self._decodefilterpats, filename, data)
1229 1234
1230 1235 def currenttransaction(self):
1231 1236 """return the current transaction or None if non exists"""
1232 1237 if self._transref:
1233 1238 tr = self._transref()
1234 1239 else:
1235 1240 tr = None
1236 1241
1237 1242 if tr and tr.running():
1238 1243 return tr
1239 1244 return None
1240 1245
1241 1246 def transaction(self, desc, report=None):
1242 1247 if (self.ui.configbool('devel', 'all-warnings')
1243 1248 or self.ui.configbool('devel', 'check-locks')):
1244 1249 if self._currentlock(self._lockref) is None:
1245 1250 raise error.ProgrammingError('transaction requires locking')
1246 1251 tr = self.currenttransaction()
1247 1252 if tr is not None:
1248 1253 return tr.nest(name=desc)
1249 1254
1250 1255 # abort here if the journal already exists
1251 1256 if self.svfs.exists("journal"):
1252 1257 raise error.RepoError(
1253 1258 _("abandoned transaction found"),
1254 1259 hint=_("run 'hg recover' to clean up transaction"))
1255 1260
1256 1261 idbase = "%.40f#%f" % (random.random(), time.time())
1257 1262 ha = hex(hashlib.sha1(idbase).digest())
1258 1263 txnid = 'TXN:' + ha
1259 1264 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1260 1265
1261 1266 self._writejournal(desc)
1262 1267 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1263 1268 if report:
1264 1269 rp = report
1265 1270 else:
1266 1271 rp = self.ui.warn
1267 1272 vfsmap = {'plain': self.vfs} # root of .hg/
1268 1273 # we must avoid cyclic reference between repo and transaction.
1269 1274 reporef = weakref.ref(self)
1270 1275 # Code to track tag movement
1271 1276 #
1272 1277 # Since tags are all handled as file content, it is actually quite hard
1273 1278 # to track these movement from a code perspective. So we fallback to a
1274 1279 # tracking at the repository level. One could envision to track changes
1275 1280 # to the '.hgtags' file through changegroup apply but that fails to
1276 1281 # cope with case where transaction expose new heads without changegroup
1277 1282 # being involved (eg: phase movement).
1278 1283 #
1279 1284 # For now, We gate the feature behind a flag since this likely comes
1280 1285 # with performance impacts. The current code run more often than needed
1281 1286 # and do not use caches as much as it could. The current focus is on
1282 1287 # the behavior of the feature so we disable it by default. The flag
1283 1288 # will be removed when we are happy with the performance impact.
1284 1289 #
1285 1290 # Once this feature is no longer experimental move the following
1286 1291 # documentation to the appropriate help section:
1287 1292 #
1288 1293 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1289 1294 # tags (new or changed or deleted tags). In addition the details of
1290 1295 # these changes are made available in a file at:
1291 1296 # ``REPOROOT/.hg/changes/tags.changes``.
1292 1297 # Make sure you check for HG_TAG_MOVED before reading that file as it
1293 1298 # might exist from a previous transaction even if no tag were touched
1294 1299 # in this one. Changes are recorded in a line base format::
1295 1300 #
1296 1301 # <action> <hex-node> <tag-name>\n
1297 1302 #
1298 1303 # Actions are defined as follow:
1299 1304 # "-R": tag is removed,
1300 1305 # "+A": tag is added,
1301 1306 # "-M": tag is moved (old value),
1302 1307 # "+M": tag is moved (new value),
1303 1308 tracktags = lambda x: None
1304 1309 # experimental config: experimental.hook-track-tags
1305 1310 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1306 1311 if desc != 'strip' and shouldtracktags:
1307 1312 oldheads = self.changelog.headrevs()
1308 1313 def tracktags(tr2):
1309 1314 repo = reporef()
1310 1315 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1311 1316 newheads = repo.changelog.headrevs()
1312 1317 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1313 1318 # notes: we compare lists here.
1314 1319 # As we do it only once buiding set would not be cheaper
1315 1320 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1316 1321 if changes:
1317 1322 tr2.hookargs['tag_moved'] = '1'
1318 1323 with repo.vfs('changes/tags.changes', 'w',
1319 1324 atomictemp=True) as changesfile:
1320 1325 # note: we do not register the file to the transaction
1321 1326 # because we needs it to still exist on the transaction
1322 1327 # is close (for txnclose hooks)
1323 1328 tagsmod.writediff(changesfile, changes)
1324 1329 def validate(tr2):
1325 1330 """will run pre-closing hooks"""
1326 1331 # XXX the transaction API is a bit lacking here so we take a hacky
1327 1332 # path for now
1328 1333 #
1329 1334 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1330 1335 # dict is copied before these run. In addition we needs the data
1331 1336 # available to in memory hooks too.
1332 1337 #
1333 1338 # Moreover, we also need to make sure this runs before txnclose
1334 1339 # hooks and there is no "pending" mechanism that would execute
1335 1340 # logic only if hooks are about to run.
1336 1341 #
1337 1342 # Fixing this limitation of the transaction is also needed to track
1338 1343 # other families of changes (bookmarks, phases, obsolescence).
1339 1344 #
1340 1345 # This will have to be fixed before we remove the experimental
1341 1346 # gating.
1342 1347 tracktags(tr2)
1343 1348 repo = reporef()
1344 1349 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1345 1350 scmutil.enforcesinglehead(repo, tr2, desc)
1346 1351 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1347 1352 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1348 1353 args = tr.hookargs.copy()
1349 1354 args.update(bookmarks.preparehookargs(name, old, new))
1350 1355 repo.hook('pretxnclose-bookmark', throw=True,
1351 1356 txnname=desc,
1352 1357 **pycompat.strkwargs(args))
1353 1358 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1354 1359 cl = repo.unfiltered().changelog
1355 1360 for rev, (old, new) in tr.changes['phases'].items():
1356 1361 args = tr.hookargs.copy()
1357 1362 node = hex(cl.node(rev))
1358 1363 args.update(phases.preparehookargs(node, old, new))
1359 1364 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1360 1365 **pycompat.strkwargs(args))
1361 1366
1362 1367 repo.hook('pretxnclose', throw=True,
1363 1368 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1364 1369 def releasefn(tr, success):
1365 1370 repo = reporef()
1366 1371 if success:
1367 1372 # this should be explicitly invoked here, because
1368 1373 # in-memory changes aren't written out at closing
1369 1374 # transaction, if tr.addfilegenerator (via
1370 1375 # dirstate.write or so) isn't invoked while
1371 1376 # transaction running
1372 1377 repo.dirstate.write(None)
1373 1378 else:
1374 1379 # discard all changes (including ones already written
1375 1380 # out) in this transaction
1376 1381 narrowspec.restorebackup(self, 'journal.narrowspec')
1377 1382 repo.dirstate.restorebackup(None, 'journal.dirstate')
1378 1383
1379 1384 repo.invalidate(clearfilecache=True)
1380 1385
1381 1386 tr = transaction.transaction(rp, self.svfs, vfsmap,
1382 1387 "journal",
1383 1388 "undo",
1384 1389 aftertrans(renames),
1385 1390 self.store.createmode,
1386 1391 validator=validate,
1387 1392 releasefn=releasefn,
1388 1393 checkambigfiles=_cachedfiles,
1389 1394 name=desc)
1390 1395 tr.changes['revs'] = pycompat.xrange(0, 0)
1391 1396 tr.changes['obsmarkers'] = set()
1392 1397 tr.changes['phases'] = {}
1393 1398 tr.changes['bookmarks'] = {}
1394 1399
1395 1400 tr.hookargs['txnid'] = txnid
1396 1401 # note: writing the fncache only during finalize mean that the file is
1397 1402 # outdated when running hooks. As fncache is used for streaming clone,
1398 1403 # this is not expected to break anything that happen during the hooks.
1399 1404 tr.addfinalize('flush-fncache', self.store.write)
1400 1405 def txnclosehook(tr2):
1401 1406 """To be run if transaction is successful, will schedule a hook run
1402 1407 """
1403 1408 # Don't reference tr2 in hook() so we don't hold a reference.
1404 1409 # This reduces memory consumption when there are multiple
1405 1410 # transactions per lock. This can likely go away if issue5045
1406 1411 # fixes the function accumulation.
1407 1412 hookargs = tr2.hookargs
1408 1413
1409 1414 def hookfunc():
1410 1415 repo = reporef()
1411 1416 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1412 1417 bmchanges = sorted(tr.changes['bookmarks'].items())
1413 1418 for name, (old, new) in bmchanges:
1414 1419 args = tr.hookargs.copy()
1415 1420 args.update(bookmarks.preparehookargs(name, old, new))
1416 1421 repo.hook('txnclose-bookmark', throw=False,
1417 1422 txnname=desc, **pycompat.strkwargs(args))
1418 1423
1419 1424 if hook.hashook(repo.ui, 'txnclose-phase'):
1420 1425 cl = repo.unfiltered().changelog
1421 1426 phasemv = sorted(tr.changes['phases'].items())
1422 1427 for rev, (old, new) in phasemv:
1423 1428 args = tr.hookargs.copy()
1424 1429 node = hex(cl.node(rev))
1425 1430 args.update(phases.preparehookargs(node, old, new))
1426 1431 repo.hook('txnclose-phase', throw=False, txnname=desc,
1427 1432 **pycompat.strkwargs(args))
1428 1433
1429 1434 repo.hook('txnclose', throw=False, txnname=desc,
1430 1435 **pycompat.strkwargs(hookargs))
1431 1436 reporef()._afterlock(hookfunc)
1432 1437 tr.addfinalize('txnclose-hook', txnclosehook)
1433 1438 # Include a leading "-" to make it happen before the transaction summary
1434 1439 # reports registered via scmutil.registersummarycallback() whose names
1435 1440 # are 00-txnreport etc. That way, the caches will be warm when the
1436 1441 # callbacks run.
1437 1442 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1438 1443 def txnaborthook(tr2):
1439 1444 """To be run if transaction is aborted
1440 1445 """
1441 1446 reporef().hook('txnabort', throw=False, txnname=desc,
1442 1447 **pycompat.strkwargs(tr2.hookargs))
1443 1448 tr.addabort('txnabort-hook', txnaborthook)
1444 1449 # avoid eager cache invalidation. in-memory data should be identical
1445 1450 # to stored data if transaction has no error.
1446 1451 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1447 1452 self._transref = weakref.ref(tr)
1448 1453 scmutil.registersummarycallback(self, tr, desc)
1449 1454 return tr
1450 1455
1451 1456 def _journalfiles(self):
1452 1457 return ((self.svfs, 'journal'),
1453 1458 (self.vfs, 'journal.dirstate'),
1454 1459 (self.vfs, 'journal.branch'),
1455 1460 (self.vfs, 'journal.desc'),
1456 1461 (self.vfs, 'journal.bookmarks'),
1457 1462 (self.svfs, 'journal.phaseroots'))
1458 1463
    def undofiles(self):
        """Return (vfs, name) pairs for the undo files of the last
        transaction (the journal files renamed via ``undoname``)."""
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1461 1466
    @unfilteredmethod
    def _writejournal(self, desc):
        """Snapshot dirstate, narrowspec, branch, bookmarks and phaseroots
        into journal.* files before a transaction starts."""
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        # journal.desc records the pre-transaction length and description
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1474 1479
1475 1480 def recover(self):
1476 1481 with self.lock():
1477 1482 if self.svfs.exists("journal"):
1478 1483 self.ui.status(_("rolling back interrupted transaction\n"))
1479 1484 vfsmap = {'': self.svfs,
1480 1485 'plain': self.vfs,}
1481 1486 transaction.rollback(self.svfs, vfsmap, "journal",
1482 1487 self.ui.warn,
1483 1488 checkambigfiles=_cachedfiles)
1484 1489 self.invalidate()
1485 1490 return True
1486 1491 else:
1487 1492 self.ui.warn(_("no interrupted transaction available\n"))
1488 1493 return False
1489 1494
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction.

        Returns 0 on success and 1 when there is nothing to roll back.
        """
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                # guard the dirstate while _rollback rewrites it
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1504 1509
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Restore the pre-transaction state from the undo.* files.

        Returns 0 (including for dry runs). Raises Abort when undoing the
        last commit could lose working copy data, unless ``force``.
        """
        ui = self.ui
        try:
            # undo.desc holds: old changelog length, description, detail
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # did the rollback remove a working directory parent?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            # an interrupted merge cannot survive the parents changing
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1577 1582
1578 1583 def _buildcacheupdater(self, newtransaction):
1579 1584 """called during transaction to build the callback updating cache
1580 1585
1581 1586 Lives on the repository to help extension who might want to augment
1582 1587 this logic. For this purpose, the created transaction is passed to the
1583 1588 method.
1584 1589 """
1585 1590 # we must avoid cyclic reference between repo and transaction.
1586 1591 reporef = weakref.ref(self)
1587 1592 def updater(tr):
1588 1593 repo = reporef()
1589 1594 repo.updatecaches(tr)
1590 1595 return updater
1591 1596
1592 1597 @unfilteredmethod
1593 1598 def updatecaches(self, tr=None, full=False):
1594 1599 """warm appropriate caches
1595 1600
1596 1601 If this function is called after a transaction closed. The transaction
1597 1602 will be available in the 'tr' argument. This can be used to selectively
1598 1603 update caches relevant to the changes in that transaction.
1599 1604
1600 1605 If 'full' is set, make sure all caches the function knows about have
1601 1606 up-to-date data. Even the ones usually loaded more lazily.
1602 1607 """
1603 1608 if tr is not None and tr.hookargs.get('source') == 'strip':
1604 1609 # During strip, many caches are invalid but
1605 1610 # later call to `destroyed` will refresh them.
1606 1611 return
1607 1612
1608 1613 if tr is None or tr.changes['revs']:
1609 1614 # updating the unfiltered branchmap should refresh all the others,
1610 1615 self.ui.debug('updating the branch cache\n')
1611 1616 branchmap.updatecache(self.filtered('served'))
1612 1617
1613 1618 if full:
1614 1619 rbc = self.revbranchcache()
1615 1620 for r in self.changelog:
1616 1621 rbc.branchinfo(r)
1617 1622 rbc.write()
1618 1623
1619 1624 # ensure the working copy parents are in the manifestfulltextcache
1620 1625 for ctx in self['.'].parents():
1621 1626 ctx.manifest() # accessing the manifest is enough
1622 1627
1623 1628 def invalidatecaches(self):
1624 1629
1625 1630 if '_tagscache' in vars(self):
1626 1631 # can't use delattr on proxy
1627 1632 del self.__dict__['_tagscache']
1628 1633
1629 1634 self.unfiltered()._branchcaches.clear()
1630 1635 self.invalidatevolatilesets()
1631 1636 self._sparsesignaturecache.clear()
1632 1637
    def invalidatevolatilesets(self):
        """Drop caches that depend on the set of filtered/obsolete revisions."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1636 1641
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the dirstate's own cached file entries first so they are
            # re-validated on next access
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1653 1658
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).

        ``clearfilecache`` also drops the filecache entries themselves,
        forcing a full re-read from disk on next access.
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            # drop the cached attribute; it is reloaded on next access
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
1687 1692
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        # dirstate is handled separately from the other file caches
        self.invalidatedirstate()
1694 1699
1695 1700 @unfilteredmethod
1696 1701 def _refreshfilecachestats(self, tr):
1697 1702 """Reload stats of cached files so that they are flagged as valid"""
1698 1703 for k, ce in self._filecache.items():
1699 1704 k = pycompat.sysstr(k)
1700 1705 if k == r'dirstate' or k not in self.__dict__:
1701 1706 continue
1702 1707 ce.refresh()
1703 1708
1704 1709 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1705 1710 inheritchecker=None, parentenvvar=None):
1706 1711 parentlock = None
1707 1712 # the contents of parentenvvar are used by the underlying lock to
1708 1713 # determine whether it can be inherited
1709 1714 if parentenvvar is not None:
1710 1715 parentlock = encoding.environ.get(parentenvvar)
1711 1716
1712 1717 timeout = 0
1713 1718 warntimeout = 0
1714 1719 if wait:
1715 1720 timeout = self.ui.configint("ui", "timeout")
1716 1721 warntimeout = self.ui.configint("ui", "timeout.warn")
1717 1722 # internal config: ui.signal-safe-lock
1718 1723 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1719 1724
1720 1725 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1721 1726 releasefn=releasefn,
1722 1727 acquirefn=acquirefn, desc=desc,
1723 1728 inheritchecker=inheritchecker,
1724 1729 parentlock=parentlock,
1725 1730 signalsafe=signalsafe)
1726 1731 return l
1727 1732
1728 1733 def _afterlock(self, callback):
1729 1734 """add a callback to be run when the repository is fully unlocked
1730 1735
1731 1736 The callback will be executed when the outermost lock is released
1732 1737 (with wlock being higher level than 'lock')."""
1733 1738 for ref in (self._wlockref, self._lockref):
1734 1739 l = ref and ref()
1735 1740 if l and l.held:
1736 1741 l.postrelease.append(callback)
1737 1742 break
1738 1743 else: # no lock have been found.
1739 1744 callback()
1740 1745
1741 1746 def lock(self, wait=True):
1742 1747 '''Lock the repository store (.hg/store) and return a weak reference
1743 1748 to the lock. Use this before modifying the store (e.g. committing or
1744 1749 stripping). If you are opening a transaction, get a lock as well.)
1745 1750
1746 1751 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1747 1752 'wlock' first to avoid a dead-lock hazard.'''
1748 1753 l = self._currentlock(self._lockref)
1749 1754 if l is not None:
1750 1755 l.lock()
1751 1756 return l
1752 1757
1753 1758 l = self._lock(self.svfs, "lock", wait, None,
1754 1759 self.invalidate, _('repository %s') % self.origroot)
1755 1760 self._lockref = weakref.ref(l)
1756 1761 return l
1757 1762
1758 1763 def _wlockchecktransaction(self):
1759 1764 if self.currenttransaction() is not None:
1760 1765 raise error.LockInheritanceContractViolation(
1761 1766 'wlock cannot be inherited in the middle of a transaction')
1762 1767
1763 1768 def wlock(self, wait=True):
1764 1769 '''Lock the non-store parts of the repository (everything under
1765 1770 .hg except .hg/store) and return a weak reference to the lock.
1766 1771
1767 1772 Use this before modifying files in .hg.
1768 1773
1769 1774 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1770 1775 'wlock' first to avoid a dead-lock hazard.'''
1771 1776 l = self._wlockref and self._wlockref()
1772 1777 if l is not None and l.held:
1773 1778 l.lock()
1774 1779 return l
1775 1780
1776 1781 # We do not need to check for non-waiting lock acquisition. Such
1777 1782 # acquisition would not cause dead-lock as they would just fail.
1778 1783 if wait and (self.ui.configbool('devel', 'all-warnings')
1779 1784 or self.ui.configbool('devel', 'check-locks')):
1780 1785 if self._currentlock(self._lockref) is not None:
1781 1786 self.ui.develwarn('"wlock" acquired after "lock"')
1782 1787
1783 1788 def unlock():
1784 1789 if self.dirstate.pendingparentchange():
1785 1790 self.dirstate.invalidate()
1786 1791 else:
1787 1792 self.dirstate.write(None)
1788 1793
1789 1794 self._filecache['dirstate'].refresh()
1790 1795
1791 1796 l = self._lock(self.vfs, "wlock", wait, unlock,
1792 1797 self.invalidatedirstate, _('working directory of %s') %
1793 1798 self.origroot,
1794 1799 inheritchecker=self._wlockchecktransaction,
1795 1800 parentenvvar='HG_WLOCK_LOCKER')
1796 1801 self._wlockref = weakref.ref(l)
1797 1802 return l
1798 1803
1799 1804 def _currentlock(self, lockref):
1800 1805 """Returns the lock if it's held, or None if it's not."""
1801 1806 if lockref is None:
1802 1807 return None
1803 1808 l = lockref()
1804 1809 if l is None or not l.held:
1805 1810 return None
1806 1811 return l
1807 1812
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        # wlock covers the non-store parts of the repository (see wlock())
        return self._currentlock(self._wlockref)
1811 1816
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit; a filectx whose node already
            matches a parent manifest entry is reused without writing a
            new filelog revision
        manifest1, manifest2: manifests of the first and second commit parent
        linkrev: changelog revision the new filelog revision will link to
        tr: the active transaction
        changelist: mutated in place; fname is appended when the file
            content or its flags changed

        Returns the filelog node to record in the manifest for this file.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the file already lives in a filelog revision; reuse it when it
            # matches one of the parents
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            # file is new on this side: its only parent is the other side's
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1899 1904
1900 1905 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1901 1906 """check for commit arguments that aren't committable"""
1902 1907 if match.isexact() or match.prefix():
1903 1908 matched = set(status.modified + status.added + status.removed)
1904 1909
1905 1910 for f in match.files():
1906 1911 f = self.dirstate.normalize(f)
1907 1912 if f == '.' or f in matched or f in wctx.substate:
1908 1913 continue
1909 1914 if f in status.deleted:
1910 1915 fail(f, _('file not found!'))
1911 1916 if f in vdirs: # visited directory
1912 1917 d = f + '/'
1913 1918 for mf in matched:
1914 1919 if mf.startswith(d):
1915 1920 break
1916 1921 else:
1917 1922 fail(f, _("no match under directory!"))
1918 1923 elif f not in self.dirstate:
1919 1924 fail(f, _("file not tracked!"))
1920 1925
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new revision, or None when there is
        nothing to commit and empty commits are not enabled.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            # also installed as match.bad below when not forcing
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record explicitly visited directories so they can be checked
            # against the status results in checkcommitpatterns()
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        # run the 'commit' hook only once all locks are released
        self._afterlock(commithook)
        return ret
2032 2037
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        If ``error`` is true, an IOError raised while committing an
        individual file is always re-raised; otherwise ENOENT IOErrors
        are swallowed.

        Returns the node of the new changelog revision.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                # no file changed: reuse the first parent's manifest untouched
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
2125 2130
2126 2131 @unfilteredmethod
2127 2132 def destroying(self):
2128 2133 '''Inform the repository that nodes are about to be destroyed.
2129 2134 Intended for use by strip and rollback, so there's a common
2130 2135 place for anything that has to be done before destroying history.
2131 2136
2132 2137 This is mostly useful for saving state that is in memory and waiting
2133 2138 to be flushed when the current lock is released. Because a call to
2134 2139 destroyed is imminent, the repo will be invalidated causing those
2135 2140 changes to stay in memory (waiting for the next unlock), or vanish
2136 2141 completely.
2137 2142 '''
2138 2143 # When using the same lock to commit and strip, the phasecache is left
2139 2144 # dirty after committing. Then when we strip, the repo is invalidated,
2140 2145 # causing those changes to disappear.
2141 2146 if '_phasecache' in vars(self):
2142 2147 self._phasecache.write()
2143 2148
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        # Finally discard everything cached in memory.
        self.invalidate()
2175 2180
2176 2181 def status(self, node1='.', node2=None, match=None,
2177 2182 ignored=False, clean=False, unknown=False,
2178 2183 listsubrepos=False):
2179 2184 '''a convenience method that calls node1.status(node2)'''
2180 2185 return self[node1].status(node2, match, ignored, clean, unknown,
2181 2186 listsubrepos)
2182 2187
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        ``ps`` is a callable taking ``(wctx, status)``.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure it adds to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
2204 2209
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        # the mutable list itself is returned; see addpostdsstatus()
        return self._postdsstatus
2208 2213
2209 2214 def clearpostdsstatus(self):
2210 2215 """Used by workingctx to clear post-dirstate-status hooks."""
2211 2216 del self._postdsstatus[:]
2212 2217
2213 2218 def heads(self, start=None):
2214 2219 if start is None:
2215 2220 cl = self.changelog
2216 2221 headrevs = reversed(cl.headrevs())
2217 2222 return [cl.node(rev) for rev in headrevs]
2218 2223
2219 2224 heads = self.changelog.heads(start)
2220 2225 # sort the output in rev descending order
2221 2226 return sorted(heads, key=self.changelog.rev, reverse=True)
2222 2227
2223 2228 def branchheads(self, branch=None, start=None, closed=False):
2224 2229 '''return a (possibly filtered) list of heads for the given branch
2225 2230
2226 2231 Heads are returned in topological order, from newest to oldest.
2227 2232 If branch is None, use the dirstate branch.
2228 2233 If start is not None, return only heads reachable from start.
2229 2234 If closed is True, return heads that are marked as closed as well.
2230 2235 '''
2231 2236 if branch is None:
2232 2237 branch = self[None].branch()
2233 2238 branches = self.branchmap()
2234 2239 if branch not in branches:
2235 2240 return []
2236 2241 # the cache returns heads ordered lowest to highest
2237 2242 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2238 2243 if start is not None:
2239 2244 # filter out the heads that cannot be reached from startrev
2240 2245 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2241 2246 bheads = [h for h in bheads if h in fbheads]
2242 2247 return bheads
2243 2248
2244 2249 def branches(self, nodes):
2245 2250 if not nodes:
2246 2251 nodes = [self.changelog.tip()]
2247 2252 b = []
2248 2253 for n in nodes:
2249 2254 t = n
2250 2255 while True:
2251 2256 p = self.changelog.parents(n)
2252 2257 if p[1] != nullid or p[0] == nullid:
2253 2258 b.append((t, n, p[0], p[1]))
2254 2259 break
2255 2260 n = p[0]
2256 2261 return b
2257 2262
2258 2263 def between(self, pairs):
2259 2264 r = []
2260 2265
2261 2266 for top, bottom in pairs:
2262 2267 n, l, i = top, [], 0
2263 2268 f = 1
2264 2269
2265 2270 while n != bottom and n != nullid:
2266 2271 p = self.changelog.parents(n)[0]
2267 2272 if i == f:
2268 2273 l.append(n)
2269 2274 f = f * 2
2270 2275 n = p
2271 2276 i += 1
2272 2277
2273 2278 r.append(l)
2274 2279
2275 2280 return r
2276 2281
2277 2282 def checkpush(self, pushop):
2278 2283 """Extensions can override this function if additional checks have
2279 2284 to be performed before pushing, or call it if they override push
2280 2285 command.
2281 2286 """
2282 2287
2283 2288 @unfilteredpropertycache
2284 2289 def prepushoutgoinghooks(self):
2285 2290 """Return util.hooks consists of a pushop with repo, remote, outgoing
2286 2291 methods, which are called before pushing changesets.
2287 2292 """
2288 2293 return util.hooks()
2289 2294
2290 2295 def pushkey(self, namespace, key, old, new):
2291 2296 try:
2292 2297 tr = self.currenttransaction()
2293 2298 hookargs = {}
2294 2299 if tr is not None:
2295 2300 hookargs.update(tr.hookargs)
2296 2301 hookargs = pycompat.strkwargs(hookargs)
2297 2302 hookargs[r'namespace'] = namespace
2298 2303 hookargs[r'key'] = key
2299 2304 hookargs[r'old'] = old
2300 2305 hookargs[r'new'] = new
2301 2306 self.hook('prepushkey', throw=True, **hookargs)
2302 2307 except error.HookAbort as exc:
2303 2308 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2304 2309 if exc.hint:
2305 2310 self.ui.write_err(_("(%s)\n") % exc.hint)
2306 2311 return False
2307 2312 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2308 2313 ret = pushkey.push(self, namespace, key, old, new)
2309 2314 def runhook():
2310 2315 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2311 2316 ret=ret)
2312 2317 self._afterlock(runhook)
2313 2318 return ret
2314 2319
2315 2320 def listkeys(self, namespace):
2316 2321 self.hook('prelistkeys', throw=True, namespace=namespace)
2317 2322 self.ui.debug('listing keys for "%s"\n' % namespace)
2318 2323 values = pushkey.list(self, namespace)
2319 2324 self.hook('listkeys', namespace=namespace, values=values)
2320 2325 return values
2321 2326
2322 2327 def debugwireargs(self, one, two, three=None, four=None, five=None):
2323 2328 '''used to test argument passing over the wire'''
2324 2329 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2325 2330 pycompat.bytestr(four),
2326 2331 pycompat.bytestr(five))
2327 2332
2328 2333 def savecommitmessage(self, text):
2329 2334 fp = self.vfs('last-message.txt', 'wb')
2330 2335 try:
2331 2336 fp.write(text)
2332 2337 finally:
2333 2338 fp.close()
2334 2339 return self.pathto(fp.name[len(self.root) + 1:])
2335 2340
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback performing the ``(vfs, src, dest)`` renames.

    For each triple the callback removes ``dest`` first and then renames
    ``src`` to ``dest``, ignoring a missing source file.
    """
    renames = [tuple(entry) for entry in files]
    def runrenames():
        for vfs, src, dest in renames:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return runrenames
2350 2355
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2355 2360
def instance(ui, path, create, intents=None):
    """Instantiate a localrepository for ``path`` (a local repo URL)."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create, intents=intents)
2359 2364
def islocal(path):
    # a localrepository path is local by definition, whatever it contains
    return True
2362 2367
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    # store layout knobs, each gated by its own format.* boolean
    for configkey, requirement in [('usestore', 'store'),
                                   ('usefncache', 'fncache'),
                                   ('dotencode', 'dotencode')]:
        if ui.configbool('format', configkey):
            requirements.add(requirement)

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
@@ -1,1296 +1,1296
1 1 #testcases sshv1 sshv2
2 2
3 3 #if sshv2
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [experimental]
6 6 > sshpeer.advertise-v2 = true
7 7 > sshserver.support-v2 = true
8 8 > EOF
9 9 #endif
10 10
11 11 Prepare repo a:
12 12
13 13 $ hg init a
14 14 $ cd a
15 15 $ echo a > a
16 16 $ hg add a
17 17 $ hg commit -m test
18 18 $ echo first line > b
19 19 $ hg add b
20 20
21 21 Create a non-inlined filelog:
22 22
23 23 $ $PYTHON -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 25 > cat data1 >> b
26 26 > hg commit -m test
27 27 > done
28 28
29 29 List files in store/data (should show a 'b.d'):
30 30
31 31 #if reporevlogstore
32 32 $ for i in .hg/store/data/*; do
33 33 > echo $i
34 34 > done
35 35 .hg/store/data/a.i
36 36 .hg/store/data/b.d
37 37 .hg/store/data/b.i
38 38 #endif
39 39
40 40 Trigger branchcache creation:
41 41
42 42 $ hg branches
43 43 default 10:a7949464abda
44 44 $ ls .hg/cache
45 45 branch2-served
46 46 checkisexec (execbit !)
47 47 checklink (symlink !)
48 48 checklink-target (symlink !)
49 49 checknoexec (execbit !)
50 50 manifestfulltextcache
51 51 rbc-names-v1
52 52 rbc-revs-v1
53 53
54 54 Default operation:
55 55
56 56 $ hg clone . ../b
57 57 updating to branch default
58 58 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
59 59 $ cd ../b
60 60
61 61 Ensure branchcache got copied over:
62 62
63 63 $ ls .hg/cache
64 64 branch2-served
65 65 checkisexec (execbit !)
66 66 checklink (symlink !)
67 67 checklink-target (symlink !)
68 68 rbc-names-v1
69 69 rbc-revs-v1
70 70
71 71 $ cat a
72 72 a
73 73 $ hg verify
74 74 checking changesets
75 75 checking manifests
76 76 crosschecking files in changesets and manifests
77 77 checking files
78 78 2 files, 11 changesets, 11 total revisions
79 79
80 80 Invalid dest '' must abort:
81 81
82 82 $ hg clone . ''
83 83 abort: empty destination path is not valid
84 84 [255]
85 85
86 86 No update, with debug option:
87 87
88 88 #if hardlink
89 89 $ hg --debug clone -U . ../c --config progress.debug=true
90 90 linking: 1
91 91 linking: 2
92 92 linking: 3
93 93 linking: 4
94 94 linking: 5
95 95 linking: 6
96 96 linking: 7
97 97 linking: 8
98 98 linked 8 files (reporevlogstore !)
99 99 linking: 9 (reposimplestore !)
100 100 linking: 10 (reposimplestore !)
101 101 linking: 11 (reposimplestore !)
102 102 linking: 12 (reposimplestore !)
103 103 linking: 13 (reposimplestore !)
104 104 linking: 14 (reposimplestore !)
105 105 linking: 15 (reposimplestore !)
106 106 linking: 16 (reposimplestore !)
107 107 linking: 17 (reposimplestore !)
108 108 linking: 18 (reposimplestore !)
109 109 linked 18 files (reposimplestore !)
110 110 #else
111 111 $ hg --debug clone -U . ../c --config progress.debug=true
112 112 linking: 1
113 113 copying: 2
114 114 copying: 3
115 115 copying: 4
116 116 copying: 5
117 117 copying: 6
118 118 copying: 7
119 119 copying: 8
120 120 copied 8 files (reporevlogstore !)
121 121 copying: 9 (reposimplestore !)
122 122 copying: 10 (reposimplestore !)
123 123 copying: 11 (reposimplestore !)
124 124 copying: 12 (reposimplestore !)
125 125 copying: 13 (reposimplestore !)
126 126 copying: 14 (reposimplestore !)
127 127 copying: 15 (reposimplestore !)
128 128 copying: 16 (reposimplestore !)
129 129 copying: 17 (reposimplestore !)
130 130 copying: 18 (reposimplestore !)
131 131 copied 18 files (reposimplestore !)
132 132 #endif
133 133 $ cd ../c
134 134
135 135 Ensure branchcache got copied over:
136 136
137 137 $ ls .hg/cache
138 138 branch2-served
139 139 rbc-names-v1
140 140 rbc-revs-v1
141 141
142 142 $ cat a 2>/dev/null || echo "a not present"
143 143 a not present
144 144 $ hg verify
145 145 checking changesets
146 146 checking manifests
147 147 crosschecking files in changesets and manifests
148 148 checking files
149 149 2 files, 11 changesets, 11 total revisions
150 150
151 151 Default destination:
152 152
153 153 $ mkdir ../d
154 154 $ cd ../d
155 155 $ hg clone ../a
156 156 destination directory: a
157 157 updating to branch default
158 158 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
159 159 $ cd a
160 160 $ hg cat a
161 161 a
162 162 $ cd ../..
163 163
164 164 Check that we drop the 'file:' from the path before writing the .hgrc:
165 165
166 166 $ hg clone file:a e
167 167 updating to branch default
168 168 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
169 169 $ grep 'file:' e/.hg/hgrc
170 170 [1]
171 171
172 172 Check that path aliases are expanded:
173 173
174 174 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
175 175 $ hg -R f showconfig paths.default
176 176 $TESTTMP/a#0
177 177
178 178 Use --pull:
179 179
180 180 $ hg clone --pull a g
181 181 requesting all changes
182 182 adding changesets
183 183 adding manifests
184 184 adding file changes
185 185 added 11 changesets with 11 changes to 2 files
186 186 new changesets acb14030fe0a:a7949464abda
187 187 updating to branch default
188 188 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
189 189 $ hg -R g verify
190 190 checking changesets
191 191 checking manifests
192 192 crosschecking files in changesets and manifests
193 193 checking files
194 194 2 files, 11 changesets, 11 total revisions
195 195
196 196 Invalid dest '' with --pull must abort (issue2528):
197 197
198 198 $ hg clone --pull a ''
199 199 abort: empty destination path is not valid
200 200 [255]
201 201
202 202 Clone to '.':
203 203
204 204 $ mkdir h
205 205 $ cd h
206 206 $ hg clone ../a .
207 207 updating to branch default
208 208 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
209 209 $ cd ..
210 210
211 211
212 212 *** Tests for option -u ***
213 213
214 214 Adding some more history to repo a:
215 215
216 216 $ cd a
217 217 $ hg tag ref1
218 218 $ echo the quick brown fox >a
219 219 $ hg ci -m "hacked default"
220 220 $ hg up ref1
221 221 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
222 222 $ hg branch stable
223 223 marked working directory as branch stable
224 224 (branches are permanent and global, did you want a bookmark?)
225 225 $ echo some text >a
226 226 $ hg ci -m "starting branch stable"
227 227 $ hg tag ref2
228 228 $ echo some more text >a
229 229 $ hg ci -m "another change for branch stable"
230 230 $ hg up ref2
231 231 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
232 232 $ hg parents
233 233 changeset: 13:e8ece76546a6
234 234 branch: stable
235 235 tag: ref2
236 236 parent: 10:a7949464abda
237 237 user: test
238 238 date: Thu Jan 01 00:00:00 1970 +0000
239 239 summary: starting branch stable
240 240
241 241
242 242 Repo a has two heads:
243 243
244 244 $ hg heads
245 245 changeset: 15:0aae7cf88f0d
246 246 branch: stable
247 247 tag: tip
248 248 user: test
249 249 date: Thu Jan 01 00:00:00 1970 +0000
250 250 summary: another change for branch stable
251 251
252 252 changeset: 12:f21241060d6a
253 253 user: test
254 254 date: Thu Jan 01 00:00:00 1970 +0000
255 255 summary: hacked default
256 256
257 257
258 258 $ cd ..
259 259
260 260
261 261 Testing --noupdate with --updaterev (must abort):
262 262
263 263 $ hg clone --noupdate --updaterev 1 a ua
264 264 abort: cannot specify both --noupdate and --updaterev
265 265 [255]
266 266
267 267
268 268 Testing clone -u:
269 269
270 270 $ hg clone -u . a ua
271 271 updating to branch stable
272 272 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
273 273
274 274 Repo ua has both heads:
275 275
276 276 $ hg -R ua heads
277 277 changeset: 15:0aae7cf88f0d
278 278 branch: stable
279 279 tag: tip
280 280 user: test
281 281 date: Thu Jan 01 00:00:00 1970 +0000
282 282 summary: another change for branch stable
283 283
284 284 changeset: 12:f21241060d6a
285 285 user: test
286 286 date: Thu Jan 01 00:00:00 1970 +0000
287 287 summary: hacked default
288 288
289 289
290 290 Same revision checked out in repo a and ua:
291 291
292 292 $ hg -R a parents --template "{node|short}\n"
293 293 e8ece76546a6
294 294 $ hg -R ua parents --template "{node|short}\n"
295 295 e8ece76546a6
296 296
297 297 $ rm -r ua
298 298
299 299
300 300 Testing clone --pull -u:
301 301
302 302 $ hg clone --pull -u . a ua
303 303 requesting all changes
304 304 adding changesets
305 305 adding manifests
306 306 adding file changes
307 307 added 16 changesets with 16 changes to 3 files (+1 heads)
308 308 new changesets acb14030fe0a:0aae7cf88f0d
309 309 updating to branch stable
310 310 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
311 311
312 312 Repo ua has both heads:
313 313
314 314 $ hg -R ua heads
315 315 changeset: 15:0aae7cf88f0d
316 316 branch: stable
317 317 tag: tip
318 318 user: test
319 319 date: Thu Jan 01 00:00:00 1970 +0000
320 320 summary: another change for branch stable
321 321
322 322 changeset: 12:f21241060d6a
323 323 user: test
324 324 date: Thu Jan 01 00:00:00 1970 +0000
325 325 summary: hacked default
326 326
327 327
328 328 Same revision checked out in repo a and ua:
329 329
330 330 $ hg -R a parents --template "{node|short}\n"
331 331 e8ece76546a6
332 332 $ hg -R ua parents --template "{node|short}\n"
333 333 e8ece76546a6
334 334
335 335 $ rm -r ua
336 336
337 337
338 338 Testing clone -u <branch>:
339 339
340 340 $ hg clone -u stable a ua
341 341 updating to branch stable
342 342 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
343 343
344 344 Repo ua has both heads:
345 345
346 346 $ hg -R ua heads
347 347 changeset: 15:0aae7cf88f0d
348 348 branch: stable
349 349 tag: tip
350 350 user: test
351 351 date: Thu Jan 01 00:00:00 1970 +0000
352 352 summary: another change for branch stable
353 353
354 354 changeset: 12:f21241060d6a
355 355 user: test
356 356 date: Thu Jan 01 00:00:00 1970 +0000
357 357 summary: hacked default
358 358
359 359
360 360 Branch 'stable' is checked out:
361 361
362 362 $ hg -R ua parents
363 363 changeset: 15:0aae7cf88f0d
364 364 branch: stable
365 365 tag: tip
366 366 user: test
367 367 date: Thu Jan 01 00:00:00 1970 +0000
368 368 summary: another change for branch stable
369 369
370 370
371 371 $ rm -r ua
372 372
373 373
374 374 Testing default checkout:
375 375
376 376 $ hg clone a ua
377 377 updating to branch default
378 378 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
379 379
380 380 Repo ua has both heads:
381 381
382 382 $ hg -R ua heads
383 383 changeset: 15:0aae7cf88f0d
384 384 branch: stable
385 385 tag: tip
386 386 user: test
387 387 date: Thu Jan 01 00:00:00 1970 +0000
388 388 summary: another change for branch stable
389 389
390 390 changeset: 12:f21241060d6a
391 391 user: test
392 392 date: Thu Jan 01 00:00:00 1970 +0000
393 393 summary: hacked default
394 394
395 395
396 396 Branch 'default' is checked out:
397 397
398 398 $ hg -R ua parents
399 399 changeset: 12:f21241060d6a
400 400 user: test
401 401 date: Thu Jan 01 00:00:00 1970 +0000
402 402 summary: hacked default
403 403
404 404 Test clone with a branch named "@" (issue3677)
405 405
406 406 $ hg -R ua branch @
407 407 marked working directory as branch @
408 408 $ hg -R ua commit -m 'created branch @'
409 409 $ hg clone ua atbranch
410 410 updating to branch default
411 411 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
412 412 $ hg -R atbranch heads
413 413 changeset: 16:798b6d97153e
414 414 branch: @
415 415 tag: tip
416 416 parent: 12:f21241060d6a
417 417 user: test
418 418 date: Thu Jan 01 00:00:00 1970 +0000
419 419 summary: created branch @
420 420
421 421 changeset: 15:0aae7cf88f0d
422 422 branch: stable
423 423 user: test
424 424 date: Thu Jan 01 00:00:00 1970 +0000
425 425 summary: another change for branch stable
426 426
427 427 changeset: 12:f21241060d6a
428 428 user: test
429 429 date: Thu Jan 01 00:00:00 1970 +0000
430 430 summary: hacked default
431 431
432 432 $ hg -R atbranch parents
433 433 changeset: 12:f21241060d6a
434 434 user: test
435 435 date: Thu Jan 01 00:00:00 1970 +0000
436 436 summary: hacked default
437 437
438 438
439 439 $ rm -r ua atbranch
440 440
441 441
442 442 Testing #<branch>:
443 443
444 444 $ hg clone -u . a#stable ua
445 445 adding changesets
446 446 adding manifests
447 447 adding file changes
448 448 added 14 changesets with 14 changes to 3 files
449 449 new changesets acb14030fe0a:0aae7cf88f0d
450 450 updating to branch stable
451 451 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
452 452
453 453 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
454 454
455 455 $ hg -R ua heads
456 456 changeset: 13:0aae7cf88f0d
457 457 branch: stable
458 458 tag: tip
459 459 user: test
460 460 date: Thu Jan 01 00:00:00 1970 +0000
461 461 summary: another change for branch stable
462 462
463 463 changeset: 10:a7949464abda
464 464 user: test
465 465 date: Thu Jan 01 00:00:00 1970 +0000
466 466 summary: test
467 467
468 468
469 469 Same revision checked out in repo a and ua:
470 470
471 471 $ hg -R a parents --template "{node|short}\n"
472 472 e8ece76546a6
473 473 $ hg -R ua parents --template "{node|short}\n"
474 474 e8ece76546a6
475 475
476 476 $ rm -r ua
477 477
478 478
479 479 Testing -u -r <branch>:
480 480
481 481 $ hg clone -u . -r stable a ua
482 482 adding changesets
483 483 adding manifests
484 484 adding file changes
485 485 added 14 changesets with 14 changes to 3 files
486 486 new changesets acb14030fe0a:0aae7cf88f0d
487 487 updating to branch stable
488 488 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
489 489
490 490 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
491 491
492 492 $ hg -R ua heads
493 493 changeset: 13:0aae7cf88f0d
494 494 branch: stable
495 495 tag: tip
496 496 user: test
497 497 date: Thu Jan 01 00:00:00 1970 +0000
498 498 summary: another change for branch stable
499 499
500 500 changeset: 10:a7949464abda
501 501 user: test
502 502 date: Thu Jan 01 00:00:00 1970 +0000
503 503 summary: test
504 504
505 505
506 506 Same revision checked out in repo a and ua:
507 507
508 508 $ hg -R a parents --template "{node|short}\n"
509 509 e8ece76546a6
510 510 $ hg -R ua parents --template "{node|short}\n"
511 511 e8ece76546a6
512 512
513 513 $ rm -r ua
514 514
515 515
516 516 Testing -r <branch>:
517 517
518 518 $ hg clone -r stable a ua
519 519 adding changesets
520 520 adding manifests
521 521 adding file changes
522 522 added 14 changesets with 14 changes to 3 files
523 523 new changesets acb14030fe0a:0aae7cf88f0d
524 524 updating to branch stable
525 525 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
526 526
527 527 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
528 528
529 529 $ hg -R ua heads
530 530 changeset: 13:0aae7cf88f0d
531 531 branch: stable
532 532 tag: tip
533 533 user: test
534 534 date: Thu Jan 01 00:00:00 1970 +0000
535 535 summary: another change for branch stable
536 536
537 537 changeset: 10:a7949464abda
538 538 user: test
539 539 date: Thu Jan 01 00:00:00 1970 +0000
540 540 summary: test
541 541
542 542
543 543 Branch 'stable' is checked out:
544 544
545 545 $ hg -R ua parents
546 546 changeset: 13:0aae7cf88f0d
547 547 branch: stable
548 548 tag: tip
549 549 user: test
550 550 date: Thu Jan 01 00:00:00 1970 +0000
551 551 summary: another change for branch stable
552 552
553 553
554 554 $ rm -r ua
555 555
556 556
557 557 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
558 558 iterable in addbranchrevs()
559 559
560 560 $ cat <<EOF > simpleclone.py
561 561 > from mercurial import ui, hg
562 562 > myui = ui.ui.load()
563 563 > repo = hg.repository(myui, b'a')
564 564 > hg.clone(myui, {}, repo, dest=b"ua")
565 565 > EOF
566 566
567 567 $ $PYTHON simpleclone.py
568 568 updating to branch default
569 569 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
570 570
571 571 $ rm -r ua
572 572
573 573 $ cat <<EOF > branchclone.py
574 574 > from mercurial import ui, hg, extensions
575 575 > myui = ui.ui.load()
576 576 > extensions.loadall(myui)
577 577 > repo = hg.repository(myui, b'a')
578 578 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable",])
579 579 > EOF
580 580
581 581 $ $PYTHON branchclone.py
582 582 adding changesets
583 583 adding manifests
584 584 adding file changes
585 585 added 14 changesets with 14 changes to 3 files
586 586 new changesets acb14030fe0a:0aae7cf88f0d
587 587 updating to branch stable
588 588 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
589 589 $ rm -r ua
590 590
591 591
592 592 Test clone with special '@' bookmark:
593 593 $ cd a
594 594 $ hg bookmark -r a7949464abda @ # branch point of stable from default
595 595 $ hg clone . ../i
596 596 updating to bookmark @
597 597 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
598 598 $ hg id -i ../i
599 599 a7949464abda
600 600 $ rm -r ../i
601 601
602 602 $ hg bookmark -f -r stable @
603 603 $ hg bookmarks
604 604 @ 15:0aae7cf88f0d
605 605 $ hg clone . ../i
606 606 updating to bookmark @ on branch stable
607 607 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
608 608 $ hg id -i ../i
609 609 0aae7cf88f0d
610 610 $ cd "$TESTTMP"
611 611
612 612
613 613 Testing failures:
614 614
615 615 $ mkdir fail
616 616 $ cd fail
617 617
618 618 No local source
619 619
620 620 $ hg clone a b
621 621 abort: repository a not found!
622 622 [255]
623 623
624 624 No remote source
625 625
626 626 #if windows
627 627 $ hg clone http://$LOCALIP:3121/a b
628 628 abort: error: * (glob)
629 629 [255]
630 630 #else
631 631 $ hg clone http://$LOCALIP:3121/a b
632 632 abort: error: *refused* (glob)
633 633 [255]
634 634 #endif
635 635 $ rm -rf b # work around bug with http clone
636 636
637 637
638 638 #if unix-permissions no-root
639 639
640 640 Inaccessible source
641 641
642 642 $ mkdir a
643 643 $ chmod 000 a
644 644 $ hg clone a b
645 abort: repository a not found!
645 abort: Permission denied: '$TESTTMP/fail/a/.hg'
646 646 [255]
647 647
648 648 Inaccessible destination
649 649
650 650 $ hg init b
651 651 $ cd b
652 652 $ hg clone . ../a
653 653 abort: Permission denied: '../a'
654 654 [255]
655 655 $ cd ..
656 656 $ chmod 700 a
657 657 $ rm -r a b
658 658
659 659 #endif
660 660
661 661
662 662 #if fifo
663 663
664 664 Source of wrong type
665 665
666 666 $ mkfifo a
667 667 $ hg clone a b
668 abort: repository a not found!
668 abort: $ENOTDIR$: '$TESTTMP/fail/a/.hg'
669 669 [255]
670 670 $ rm a
671 671
672 672 #endif
673 673
674 674 Default destination, same directory
675 675
676 676 $ hg init q
677 677 $ hg clone q
678 678 destination directory: q
679 679 abort: destination 'q' is not empty
680 680 [255]
681 681
682 682 destination directory not empty
683 683
684 684 $ mkdir a
685 685 $ echo stuff > a/a
686 686 $ hg clone q a
687 687 abort: destination 'a' is not empty
688 688 [255]
689 689
690 690
691 691 #if unix-permissions no-root
692 692
693 693 leave existing directory in place after clone failure
694 694
695 695 $ hg init c
696 696 $ cd c
697 697 $ echo c > c
698 698 $ hg commit -A -m test
699 699 adding c
700 700 $ chmod -rx .hg/store/data
701 701 $ cd ..
702 702 $ mkdir d
703 703 $ hg clone c d 2> err
704 704 [255]
705 705 $ test -d d
706 706 $ test -d d/.hg
707 707 [1]
708 708
709 709 re-enable perm to allow deletion
710 710
711 711 $ chmod +rx c/.hg/store/data
712 712
713 713 #endif
714 714
715 715 $ cd ..
716 716
717 717 Test clone from the repository in (emulated) revlog format 0 (issue4203):
718 718
719 719 $ mkdir issue4203
720 720 $ mkdir -p src/.hg
721 721 $ echo foo > src/foo
722 722 $ hg -R src add src/foo
723 723 $ hg -R src commit -m '#0'
724 724 $ hg -R src log -q
725 725 0:e1bab28bca43
726 726 $ hg clone -U -q src dst
727 727 $ hg -R dst log -q
728 728 0:e1bab28bca43
729 729
730 730 Create repositories to test auto sharing functionality
731 731
732 732 $ cat >> $HGRCPATH << EOF
733 733 > [extensions]
734 734 > share=
735 735 > EOF
736 736
737 737 $ hg init empty
738 738 $ hg init source1a
739 739 $ cd source1a
740 740 $ echo initial1 > foo
741 741 $ hg -q commit -A -m initial
742 742 $ echo second > foo
743 743 $ hg commit -m second
744 744 $ cd ..
745 745
746 746 $ hg init filteredrev0
747 747 $ cd filteredrev0
748 748 $ cat >> .hg/hgrc << EOF
749 749 > [experimental]
750 750 > evolution.createmarkers=True
751 751 > EOF
752 752 $ echo initial1 > foo
753 753 $ hg -q commit -A -m initial0
754 754 $ hg -q up -r null
755 755 $ echo initial2 > foo
756 756 $ hg -q commit -A -m initial1
757 757 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
758 758 obsoleted 1 changesets
759 759 $ cd ..
760 760
761 761 $ hg -q clone --pull source1a source1b
762 762 $ cd source1a
763 763 $ hg bookmark bookA
764 764 $ echo 1a > foo
765 765 $ hg commit -m 1a
766 766 $ cd ../source1b
767 767 $ hg -q up -r 0
768 768 $ echo head1 > foo
769 769 $ hg commit -m head1
770 770 created new head
771 771 $ hg bookmark head1
772 772 $ hg -q up -r 0
773 773 $ echo head2 > foo
774 774 $ hg commit -m head2
775 775 created new head
776 776 $ hg bookmark head2
777 777 $ hg -q up -r 0
778 778 $ hg branch branch1
779 779 marked working directory as branch branch1
780 780 (branches are permanent and global, did you want a bookmark?)
781 781 $ echo branch1 > foo
782 782 $ hg commit -m branch1
783 783 $ hg -q up -r 0
784 784 $ hg branch branch2
785 785 marked working directory as branch branch2
786 786 $ echo branch2 > foo
787 787 $ hg commit -m branch2
788 788 $ cd ..
789 789 $ hg init source2
790 790 $ cd source2
791 791 $ echo initial2 > foo
792 792 $ hg -q commit -A -m initial2
793 793 $ echo second > foo
794 794 $ hg commit -m second
795 795 $ cd ..
796 796
797 797 Clone with auto share from an empty repo should not result in share
798 798
799 799 $ mkdir share
800 800 $ hg --config share.pool=share clone empty share-empty
801 801 (not using pooled storage: remote appears to be empty)
802 802 updating to branch default
803 803 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
804 804 $ ls share
805 805 $ test -d share-empty/.hg/store
806 806 $ test -f share-empty/.hg/sharedpath
807 807 [1]
808 808
809 809 Clone with auto share from a repo with filtered revision 0 should not result in share
810 810
811 811 $ hg --config share.pool=share clone filteredrev0 share-filtered
812 812 (not using pooled storage: unable to resolve identity of remote)
813 813 requesting all changes
814 814 adding changesets
815 815 adding manifests
816 816 adding file changes
817 817 added 1 changesets with 1 changes to 1 files
818 818 new changesets e082c1832e09
819 819 updating to branch default
820 820 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
821 821
822 822 Clone from repo with content should result in shared store being created
823 823
824 824 $ hg --config share.pool=share clone source1a share-dest1a
825 825 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
826 826 requesting all changes
827 827 adding changesets
828 828 adding manifests
829 829 adding file changes
830 830 added 3 changesets with 3 changes to 1 files
831 831 new changesets b5f04eac9d8f:e5bfe23c0b47
832 832 searching for changes
833 833 no changes found
834 834 adding remote bookmark bookA
835 835 updating working directory
836 836 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
837 837
838 838 The shared repo should have been created
839 839
840 840 $ ls share
841 841 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
842 842
843 843 The destination should point to it
844 844
845 845 $ cat share-dest1a/.hg/sharedpath; echo
846 846 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
847 847
848 848 The destination should have bookmarks
849 849
850 850 $ hg -R share-dest1a bookmarks
851 851 bookA 2:e5bfe23c0b47
852 852
853 853 The default path should be the remote, not the share
854 854
855 855 $ hg -R share-dest1a config paths.default
856 856 $TESTTMP/source1a
857 857
858 858 Clone with existing share dir should result in pull + share
859 859
860 860 $ hg --config share.pool=share clone source1b share-dest1b
861 861 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
862 862 searching for changes
863 863 adding changesets
864 864 adding manifests
865 865 adding file changes
866 866 added 4 changesets with 4 changes to 1 files (+4 heads)
867 867 adding remote bookmark head1
868 868 adding remote bookmark head2
869 869 new changesets 4a8dc1ab4c13:6bacf4683960
870 870 updating working directory
871 871 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
872 872
873 873 $ ls share
874 874 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
875 875
876 876 $ cat share-dest1b/.hg/sharedpath; echo
877 877 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
878 878
879 879 We only get bookmarks from the remote, not everything in the share
880 880
881 881 $ hg -R share-dest1b bookmarks
882 882 head1 3:4a8dc1ab4c13
883 883 head2 4:99f71071f117
884 884
885 885 Default path should be source, not share.
886 886
887 887 $ hg -R share-dest1b config paths.default
888 888 $TESTTMP/source1b
889 889
890 890 Checked out revision should be head of default branch
891 891
892 892 $ hg -R share-dest1b log -r .
893 893 changeset: 4:99f71071f117
894 894 bookmark: head2
895 895 parent: 0:b5f04eac9d8f
896 896 user: test
897 897 date: Thu Jan 01 00:00:00 1970 +0000
898 898 summary: head2
899 899
900 900
901 901 Clone from unrelated repo should result in new share
902 902
903 903 $ hg --config share.pool=share clone source2 share-dest2
904 904 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
905 905 requesting all changes
906 906 adding changesets
907 907 adding manifests
908 908 adding file changes
909 909 added 2 changesets with 2 changes to 1 files
910 910 new changesets 22aeff664783:63cf6c3dba4a
911 911 searching for changes
912 912 no changes found
913 913 updating working directory
914 914 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
915 915
916 916 $ ls share
917 917 22aeff664783fd44c6d9b435618173c118c3448e
918 918 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
919 919
920 920 remote naming mode works as advertised
921 921
922 922 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
923 923 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
924 924 requesting all changes
925 925 adding changesets
926 926 adding manifests
927 927 adding file changes
928 928 added 3 changesets with 3 changes to 1 files
929 929 new changesets b5f04eac9d8f:e5bfe23c0b47
930 930 searching for changes
931 931 no changes found
932 932 adding remote bookmark bookA
933 933 updating working directory
934 934 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
935 935
936 936 $ ls shareremote
937 937 195bb1fcdb595c14a6c13e0269129ed78f6debde
938 938
939 939 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
940 940 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
941 941 requesting all changes
942 942 adding changesets
943 943 adding manifests
944 944 adding file changes
945 945 added 6 changesets with 6 changes to 1 files (+4 heads)
946 946 new changesets b5f04eac9d8f:6bacf4683960
947 947 searching for changes
948 948 no changes found
949 949 adding remote bookmark head1
950 950 adding remote bookmark head2
951 951 updating working directory
952 952 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
953 953
954 954 $ ls shareremote
955 955 195bb1fcdb595c14a6c13e0269129ed78f6debde
956 956 c0d4f83847ca2a873741feb7048a45085fd47c46
957 957
958 958 request to clone a single revision is respected in sharing mode
959 959
960 960 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
961 961 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
962 962 adding changesets
963 963 adding manifests
964 964 adding file changes
965 965 added 2 changesets with 2 changes to 1 files
966 966 new changesets b5f04eac9d8f:4a8dc1ab4c13
967 967 no changes found
968 968 adding remote bookmark head1
969 969 updating working directory
970 970 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
971 971
972 972 $ hg -R share-1arev log -G
973 973 @ changeset: 1:4a8dc1ab4c13
974 974 | bookmark: head1
975 975 | tag: tip
976 976 | user: test
977 977 | date: Thu Jan 01 00:00:00 1970 +0000
978 978 | summary: head1
979 979 |
980 980 o changeset: 0:b5f04eac9d8f
981 981 user: test
982 982 date: Thu Jan 01 00:00:00 1970 +0000
983 983 summary: initial
984 984
985 985
986 986 making another clone should only pull down requested rev
987 987
988 988 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
989 989 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
990 990 searching for changes
991 991 adding changesets
992 992 adding manifests
993 993 adding file changes
994 994 added 1 changesets with 1 changes to 1 files (+1 heads)
995 995 adding remote bookmark head1
996 996 adding remote bookmark head2
997 997 new changesets 99f71071f117
998 998 updating working directory
999 999 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1000 1000
1001 1001 $ hg -R share-1brev log -G
1002 1002 @ changeset: 2:99f71071f117
1003 1003 | bookmark: head2
1004 1004 | tag: tip
1005 1005 | parent: 0:b5f04eac9d8f
1006 1006 | user: test
1007 1007 | date: Thu Jan 01 00:00:00 1970 +0000
1008 1008 | summary: head2
1009 1009 |
1010 1010 | o changeset: 1:4a8dc1ab4c13
1011 1011 |/ bookmark: head1
1012 1012 | user: test
1013 1013 | date: Thu Jan 01 00:00:00 1970 +0000
1014 1014 | summary: head1
1015 1015 |
1016 1016 o changeset: 0:b5f04eac9d8f
1017 1017 user: test
1018 1018 date: Thu Jan 01 00:00:00 1970 +0000
1019 1019 summary: initial
1020 1020
1021 1021
1022 1022 Request to clone a single branch is respected in sharing mode
1023 1023
1024 1024 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1025 1025 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1026 1026 adding changesets
1027 1027 adding manifests
1028 1028 adding file changes
1029 1029 added 2 changesets with 2 changes to 1 files
1030 1030 new changesets b5f04eac9d8f:5f92a6c1a1b1
1031 1031 no changes found
1032 1032 updating working directory
1033 1033 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1034 1034
1035 1035 $ hg -R share-1bbranch1 log -G
1036 1036 o changeset: 1:5f92a6c1a1b1
1037 1037 | branch: branch1
1038 1038 | tag: tip
1039 1039 | user: test
1040 1040 | date: Thu Jan 01 00:00:00 1970 +0000
1041 1041 | summary: branch1
1042 1042 |
1043 1043 @ changeset: 0:b5f04eac9d8f
1044 1044 user: test
1045 1045 date: Thu Jan 01 00:00:00 1970 +0000
1046 1046 summary: initial
1047 1047
1048 1048
1049 1049 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1050 1050 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1051 1051 searching for changes
1052 1052 adding changesets
1053 1053 adding manifests
1054 1054 adding file changes
1055 1055 added 1 changesets with 1 changes to 1 files (+1 heads)
1056 1056 new changesets 6bacf4683960
1057 1057 updating working directory
1058 1058 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1059 1059
1060 1060 $ hg -R share-1bbranch2 log -G
1061 1061 o changeset: 2:6bacf4683960
1062 1062 | branch: branch2
1063 1063 | tag: tip
1064 1064 | parent: 0:b5f04eac9d8f
1065 1065 | user: test
1066 1066 | date: Thu Jan 01 00:00:00 1970 +0000
1067 1067 | summary: branch2
1068 1068 |
1069 1069 | o changeset: 1:5f92a6c1a1b1
1070 1070 |/ branch: branch1
1071 1071 | user: test
1072 1072 | date: Thu Jan 01 00:00:00 1970 +0000
1073 1073 | summary: branch1
1074 1074 |
1075 1075 @ changeset: 0:b5f04eac9d8f
1076 1076 user: test
1077 1077 date: Thu Jan 01 00:00:00 1970 +0000
1078 1078 summary: initial
1079 1079
1080 1080
1081 1081 -U is respected in share clone mode
1082 1082
1083 1083 $ hg --config share.pool=share clone -U source1a share-1anowc
1084 1084 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1085 1085 searching for changes
1086 1086 no changes found
1087 1087 adding remote bookmark bookA
1088 1088
1089 1089 $ ls share-1anowc
1090 1090
1091 1091 Test that auto sharing doesn't cause failure of "hg clone local remote"
1092 1092
1093 1093 $ cd $TESTTMP
1094 1094 $ hg -R a id -r 0
1095 1095 acb14030fe0a
1096 1096 $ hg id -R remote -r 0
1097 1097 abort: repository remote not found!
1098 1098 [255]
1099 1099 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1100 1100 $ hg -R remote id -r 0
1101 1101 acb14030fe0a
1102 1102
1103 1103 Cloning into pooled storage doesn't race (issue5104)
1104 1104
1105 1105 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1106 1106 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1107 1107 $ wait
1108 1108
1109 1109 $ hg -R share-destrace1 log -r tip
1110 1110 changeset: 2:e5bfe23c0b47
1111 1111 bookmark: bookA
1112 1112 tag: tip
1113 1113 user: test
1114 1114 date: Thu Jan 01 00:00:00 1970 +0000
1115 1115 summary: 1a
1116 1116
1117 1117
1118 1118 $ hg -R share-destrace2 log -r tip
1119 1119 changeset: 2:e5bfe23c0b47
1120 1120 bookmark: bookA
1121 1121 tag: tip
1122 1122 user: test
1123 1123 date: Thu Jan 01 00:00:00 1970 +0000
1124 1124 summary: 1a
1125 1125
One repo should be new, the other should be shared from the pool. We
don't care which is which, so we just make sure we always print the
one containing "new pooled" first, then the one containing "existing
pooled".
1130 1130
1131 1131 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1132 1132 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1133 1133 requesting all changes
1134 1134 adding changesets
1135 1135 adding manifests
1136 1136 adding file changes
1137 1137 added 3 changesets with 3 changes to 1 files
1138 1138 new changesets b5f04eac9d8f:e5bfe23c0b47
1139 1139 searching for changes
1140 1140 no changes found
1141 1141 adding remote bookmark bookA
1142 1142 updating working directory
1143 1143 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1144 1144
1145 1145 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1146 1146 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1147 1147 searching for changes
1148 1148 no changes found
1149 1149 adding remote bookmark bookA
1150 1150 updating working directory
1151 1151 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1152 1152
1153 1153 SEC: check for unsafe ssh url
1154 1154
1155 1155 $ cat >> $HGRCPATH << EOF
1156 1156 > [ui]
1157 1157 > ssh = sh -c "read l; read l; read l"
1158 1158 > EOF
1159 1159
1160 1160 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1161 1161 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1162 1162 [255]
1163 1163 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1164 1164 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1165 1165 [255]
1166 1166 $ hg clone 'ssh://fakehost|touch%20owned/path'
1167 1167 abort: no suitable response from remote hg!
1168 1168 [255]
1169 1169 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1170 1170 abort: no suitable response from remote hg!
1171 1171 [255]
1172 1172
1173 1173 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1174 1174 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1175 1175 [255]
1176 1176
1177 1177 #if windows
1178 1178 $ hg clone "ssh://%26touch%20owned%20/" --debug
1179 1179 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1180 1180 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1181 1181 sending hello command
1182 1182 sending between command
1183 1183 abort: no suitable response from remote hg!
1184 1184 [255]
1185 1185 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1186 1186 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1187 1187 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1188 1188 sending hello command
1189 1189 sending between command
1190 1190 abort: no suitable response from remote hg!
1191 1191 [255]
1192 1192 #else
1193 1193 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1194 1194 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1195 1195 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1196 1196 sending hello command
1197 1197 sending between command
1198 1198 abort: no suitable response from remote hg!
1199 1199 [255]
1200 1200 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1201 1201 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1202 1202 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1203 1203 sending hello command
1204 1204 sending between command
1205 1205 abort: no suitable response from remote hg!
1206 1206 [255]
1207 1207 #endif
1208 1208
1209 1209 $ hg clone "ssh://v-alid.example.com/" --debug
1210 1210 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1211 1211 sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
1212 1212 sending hello command
1213 1213 sending between command
1214 1214 abort: no suitable response from remote hg!
1215 1215 [255]
1216 1216
1217 1217 We should not have created a file named owned - if it exists, the
1218 1218 attack succeeded.
1219 1219 $ if test -f owned; then echo 'you got owned'; fi
1220 1220
1221 1221 Cloning without fsmonitor enabled does not print a warning for small repos
1222 1222
1223 1223 $ hg clone a fsmonitor-default
1224 1224 updating to bookmark @ on branch stable
1225 1225 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1226 1226
1227 1227 Lower the warning threshold to simulate a large repo
1228 1228
1229 1229 $ cat >> $HGRCPATH << EOF
1230 1230 > [fsmonitor]
1231 1231 > warn_update_file_count = 2
1232 1232 > EOF
1233 1233
1234 1234 We should see a warning about no fsmonitor on supported platforms
1235 1235
1236 1236 #if linuxormacos no-fsmonitor
1237 1237 $ hg clone a nofsmonitor
1238 1238 updating to bookmark @ on branch stable
1239 1239 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1240 1240 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1241 1241 #else
1242 1242 $ hg clone a nofsmonitor
1243 1243 updating to bookmark @ on branch stable
1244 1244 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1245 1245 #endif
1246 1246
1247 1247 We should not see warning about fsmonitor when it is enabled
1248 1248
1249 1249 #if fsmonitor
1250 1250 $ hg clone a fsmonitor-enabled
1251 1251 updating to bookmark @ on branch stable
1252 1252 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1253 1253 #endif
1254 1254
1255 1255 We can disable the fsmonitor warning
1256 1256
1257 1257 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1258 1258 updating to bookmark @ on branch stable
1259 1259 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1260 1260
1261 1261 Loaded fsmonitor but disabled in config should still print warning
1262 1262
1263 1263 #if linuxormacos fsmonitor
1264 1264 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1265 1265 updating to bookmark @ on branch stable
1266 1266 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1267 1267 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1268 1268 #endif
1269 1269
1270 1270 Warning not printed if working directory isn't empty
1271 1271
1272 1272 $ hg -q clone a fsmonitor-update
1273 1273 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1274 1274 $ cd fsmonitor-update
1275 1275 $ hg up acb14030fe0a
1276 1276 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1277 1277 (leaving bookmark @)
1278 1278 $ hg up cf0fe1914066
1279 1279 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1280 1280
1281 1281 `hg update` from null revision also prints
1282 1282
1283 1283 $ hg up null
1284 1284 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1285 1285
1286 1286 #if linuxormacos no-fsmonitor
1287 1287 $ hg up cf0fe1914066
1288 1288 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1289 1289 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290 1290 #else
1291 1291 $ hg up cf0fe1914066
1292 1292 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1293 1293 #endif
1294 1294
1295 1295 $ cd ..
1296 1296
General Comments 0
You need to be logged in to leave comments. Login now