hg: don't reuse repo instance after unshare()...
Gregory Szorc
r39642:c5e6c1ba default
@@ -1,1231 +1,1242 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15 import stat
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 nullid,
20 20 )
21 21
22 22 from . import (
23 23 bookmarks,
24 24 bundlerepo,
25 25 cacheutil,
26 26 cmdutil,
27 27 destutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 extensions,
32 32 httppeer,
33 33 localrepo,
34 34 lock,
35 35 logcmdutil,
36 36 logexchange,
37 37 merge as mergemod,
38 38 narrowspec,
39 39 node,
40 40 phases,
41 41 scmutil,
42 42 sshpeer,
43 43 statichttprepo,
44 44 ui as uimod,
45 45 unionrepo,
46 46 url,
47 47 util,
48 48 verify as verifymod,
49 49 vfs as vfsmod,
50 50 )
51 51
52 52 from .utils import (
53 53 stringutil,
54 54 )
55 55
56 56 release = lock.release
57 57
58 58 # shared features
59 59 sharedbookmarks = 'bookmarks'
60 60
61 61 def _local(path):
62 62 path = util.expandpath(util.urllocalpath(path))
63 63 return (os.path.isfile(path) and bundlerepo or localrepo)
64 64
65 65 def addbranchrevs(lrepo, other, branches, revs):
66 66 peer = other.peer() # a courtesy to callers using a localrepo for other
67 67 hashbranch, branches = branches
68 68 if not hashbranch and not branches:
69 69 x = revs or None
70 70 if revs:
71 71 y = revs[0]
72 72 else:
73 73 y = None
74 74 return x, y
75 75 if revs:
76 76 revs = list(revs)
77 77 else:
78 78 revs = []
79 79
80 80 if not peer.capable('branchmap'):
81 81 if branches:
82 82 raise error.Abort(_("remote branch lookup not supported"))
83 83 revs.append(hashbranch)
84 84 return revs, revs[0]
85 85
86 86 with peer.commandexecutor() as e:
87 87 branchmap = e.callcommand('branchmap', {}).result()
88 88
89 89 def primary(branch):
90 90 if branch == '.':
91 91 if not lrepo:
92 92 raise error.Abort(_("dirstate branch not accessible"))
93 93 branch = lrepo.dirstate.branch()
94 94 if branch in branchmap:
95 95 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
96 96 return True
97 97 else:
98 98 return False
99 99
100 100 for branch in branches:
101 101 if not primary(branch):
102 102 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
103 103 if hashbranch:
104 104 if not primary(hashbranch):
105 105 revs.append(hashbranch)
106 106 return revs, revs[0]
107 107
108 108 def parseurl(path, branches=None):
109 109 '''parse url#branch, returning (url, (branch, branches))'''
110 110
111 111 u = util.url(path)
112 112 branch = None
113 113 if u.fragment:
114 114 branch = u.fragment
115 115 u.fragment = None
116 116 return bytes(u), (branch, branches or [])
117 117
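For example, a branch fragment splits off the URL; a minimal sketch of the return value (the URL is a placeholder), in the style of the doctests above:

    url, (branch, branches) = parseurl(b'https://example.org/repo#stable')
    # url == b'https://example.org/repo', branch == b'stable', branches == []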
118 118 schemes = {
119 119 'bundle': bundlerepo,
120 120 'union': unionrepo,
121 121 'file': _local,
122 122 'http': httppeer,
123 123 'https': httppeer,
124 124 'ssh': sshpeer,
125 125 'static-http': statichttprepo,
126 126 }
127 127
128 128 def _peerlookup(path):
129 129 u = util.url(path)
130 130 scheme = u.scheme or 'file'
131 131 thing = schemes.get(scheme) or schemes['file']
132 132 try:
133 133 return thing(path)
134 134 except TypeError:
135 135 # we can't test callable(thing) because 'thing' can be an unloaded
136 136 # module that implements __call__
137 137 if not util.safehasattr(thing, 'instance'):
138 138 raise
139 139 return thing
140 140
141 141 def islocal(repo):
142 142 '''return true if repo (or path pointing to repo) is local'''
143 143 if isinstance(repo, bytes):
144 144 try:
145 145 return _peerlookup(repo).islocal(repo)
146 146 except AttributeError:
147 147 return False
148 148 return repo.local()
149 149
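A rough illustration of how the scheme table above drives islocal() (paths and URLs are placeholders):

    islocal(b'/srv/repos/project')        # 'file' scheme -> localrepo -> True
    islocal(b'https://example.org/repo')  # 'https' scheme -> httppeer -> False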
150 150 def openpath(ui, path):
151 151 '''open path with open if local, url.open if remote'''
152 152 pathurl = util.url(path, parsequery=False, parsefragment=False)
153 153 if pathurl.islocal():
154 154 return util.posixfile(pathurl.localpath(), 'rb')
155 155 else:
156 156 return url.open(ui, path)
157 157
158 158 # a list of (ui, repo) functions called for wire peer initialization
159 159 wirepeersetupfuncs = []
160 160
161 161 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
162 162 intents=None, createopts=None):
163 163 """return a repository object for the specified path"""
164 164 obj = _peerlookup(path).instance(ui, path, create, intents=intents,
165 165 createopts=createopts)
166 166 ui = getattr(obj, "ui", ui)
167 167 if ui.configbool('devel', 'debug.extensions'):
168 168 log = lambda msg, *values: ui.debug('debug.extensions: ',
169 169 msg % values, label='debug.extensions')
170 170 else:
171 171 log = lambda *a, **kw: None
172 172 for f in presetupfuncs or []:
173 173 f(ui, obj)
174 174 log('- executing reposetup hooks\n')
175 175 with util.timedcm('all reposetup') as allreposetupstats:
176 176 for name, module in extensions.extensions(ui):
177 177 log(' - running reposetup for %s\n' % (name,))
178 178 hook = getattr(module, 'reposetup', None)
179 179 if hook:
180 180 with util.timedcm('reposetup %r', name) as stats:
181 181 hook(ui, obj)
182 182 log(' > reposetup for %r took %s\n', name, stats)
183 183 log('> all reposetup took %s\n', allreposetupstats)
184 184 if not obj.local():
185 185 for f in wirepeersetupfuncs:
186 186 f(ui, obj)
187 187 return obj
188 188
189 189 def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
190 190 createopts=None):
191 191 """return a repository object for the specified path"""
192 192 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
193 193 intents=intents, createopts=createopts)
194 194 repo = peer.local()
195 195 if not repo:
196 196 raise error.Abort(_("repository '%s' is not local") %
197 197 (path or peer.url()))
198 198 return repo.filtered('visible')
199 199
200 200 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
201 201 '''return a repository peer for the specified path'''
202 202 rui = remoteui(uiorrepo, opts)
203 203 return _peerorrepo(rui, path, create, intents=intents,
204 204 createopts=createopts).peer()
205 205
206 206 def defaultdest(source):
207 207 '''return default destination of clone if none is given
208 208
209 209 >>> defaultdest(b'foo')
210 210 'foo'
211 211 >>> defaultdest(b'/foo/bar')
212 212 'bar'
213 213 >>> defaultdest(b'/')
214 214 ''
215 215 >>> defaultdest(b'')
216 216 ''
217 217 >>> defaultdest(b'http://example.org/')
218 218 ''
219 219 >>> defaultdest(b'http://example.org/foo/')
220 220 'foo'
221 221 '''
222 222 path = util.url(source).path
223 223 if not path:
224 224 return ''
225 225 return os.path.basename(os.path.normpath(path))
226 226
227 227 def sharedreposource(repo):
228 228 """Returns repository object for source repository of a shared repo.
229 229
230 230 If repo is not a shared repository, returns None.
231 231 """
232 232 if repo.sharedpath == repo.path:
233 233 return None
234 234
235 235 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
236 236 return repo.srcrepo
237 237
238 238	 # the sharedpath always ends in '.hg'; we want the path to the repo
239 239 source = repo.vfs.split(repo.sharedpath)[0]
240 240 srcurl, branches = parseurl(source)
241 241 srcrepo = repository(repo.ui, srcurl)
242 242 repo.srcrepo = srcrepo
243 243 return srcrepo
244 244
245 245 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
246 246 relative=False):
247 247 '''create a shared repository'''
248 248
249 249 if not islocal(source):
250 250 raise error.Abort(_('can only share local repositories'))
251 251
252 252 if not dest:
253 253 dest = defaultdest(source)
254 254 else:
255 255 dest = ui.expandpath(dest)
256 256
257 257 if isinstance(source, bytes):
258 258 origsource = ui.expandpath(source)
259 259 source, branches = parseurl(origsource)
260 260 srcrepo = repository(ui, source)
261 261 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
262 262 else:
263 263 srcrepo = source.local()
264 264 origsource = source = srcrepo.url()
265 265 checkout = None
266 266
267 267 sharedpath = srcrepo.sharedpath # if our source is already sharing
268 268
269 269 destwvfs = vfsmod.vfs(dest, realpath=True)
270 270 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
271 271
272 272 if destvfs.lexists():
273 273 raise error.Abort(_('destination already exists'))
274 274
275 275 if not destwvfs.isdir():
276 276 destwvfs.makedirs()
277 277 destvfs.makedir()
278 278
279 279 requirements = ''
280 280 try:
281 281 requirements = srcrepo.vfs.read('requires')
282 282 except IOError as inst:
283 283 if inst.errno != errno.ENOENT:
284 284 raise
285 285
286 286 if relative:
287 287 try:
288 288 sharedpath = os.path.relpath(sharedpath, destvfs.base)
289 289 requirements += 'relshared\n'
290 290 except (IOError, ValueError) as e:
291 291 # ValueError is raised on Windows if the drive letters differ on
292 292 # each path
293 293 raise error.Abort(_('cannot calculate relative path'),
294 294 hint=stringutil.forcebytestr(e))
295 295 else:
296 296 requirements += 'shared\n'
297 297
298 298 destvfs.write('requires', requirements)
299 299 destvfs.write('sharedpath', sharedpath)
300 300
301 301 r = repository(ui, destwvfs.base)
302 302 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
303 303 _postshareupdate(r, update, checkout=checkout)
304 304 return r
305 305
306 306 def unshare(ui, repo):
307 307 """convert a shared repository to a normal one
308 308
309 309 Copy the store data to the repo and remove the sharedpath data.
310
311 Returns a new repository object representing the unshared repository.
312
313 The passed repository object is not usable after this function is
314 called.
310 315 """
311 316
312 317 destlock = lock = None
313 318 lock = repo.lock()
314 319 try:
315 320 # we use locks here because if we race with commit, we
316 321 # can end up with extra data in the cloned revlogs that's
317 322 # not pointed to by changesets, thus causing verify to
318 323 # fail
319 324
320 325 destlock = copystore(ui, repo, repo.path)
321 326
322 327 sharefile = repo.vfs.join('sharedpath')
323 328 util.rename(sharefile, sharefile + '.old')
324 329
325 330 repo.requirements.discard('shared')
326 331 repo.requirements.discard('relshared')
327 332 repo._writerequirements()
328 333 finally:
329 334 destlock and destlock.release()
330 335 lock and lock.release()
331 336
332 # update store, spath, svfs and sjoin of repo
333 repo.unfiltered().__init__(repo.baseui, repo.root)
337 # Removing share changes some fundamental properties of the repo instance.
338 # So we instantiate a new repo object and operate on it rather than
339 # try to keep the existing repo usable.
340 newrepo = repository(repo.baseui, repo.root, create=False)
334 341
335 342 # TODO: figure out how to access subrepos that exist, but were previously
336 343 # removed from .hgsub
337 c = repo['.']
344 c = newrepo['.']
338 345 subs = c.substate
339 346 for s in sorted(subs):
340 347 c.sub(s).unshare()
341 348
349 localrepo.poisonrepository(repo)
350
351 return newrepo
352
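Since the old instance is poisoned, callers now have to rebind to the returned repository; a minimal sketch of the new calling convention:

    repo = unshare(ui, repo)
    # the previous 'repo' object must not be touched again; all further
    # operations go through the returned instance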
342 353 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
343 354 """Called after a new shared repo is created.
344 355
345 356 The new repo only has a requirements file and pointer to the source.
346 357 This function configures additional shared data.
347 358
348 359 Extensions can wrap this function and write additional entries to
349 360 destrepo/.hg/shared to indicate additional pieces of data to be shared.
350 361 """
351 362 default = defaultpath or sourcerepo.ui.config('paths', 'default')
352 363 if default:
353 364 template = ('[paths]\n'
354 365 'default = %s\n')
355 366 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
356 367
357 368 with destrepo.wlock():
358 369 if bookmarks:
359 370 destrepo.vfs.write('shared', sharedbookmarks + '\n')
360 371
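As the docstring notes, extensions can wrap this hook to share additional data; a hedged sketch using extensions.wrapfunction() (the 'myfeature' entry is hypothetical):

    def wrappedpostshare(orig, sourcerepo, destrepo, **kwargs):
        orig(sourcerepo, destrepo, **kwargs)
        with destrepo.wlock():
            # append a hypothetical extra shared item
            with destrepo.vfs('shared', 'ab') as fp:
                fp.write('myfeature\n')

    extensions.wrapfunction(hg, 'postshare', wrappedpostshare)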
361 372 def _postshareupdate(repo, update, checkout=None):
362 373 """Maybe perform a working directory update after a shared repo is created.
363 374
364 375 ``update`` can be a boolean or a revision to update to.
365 376 """
366 377 if not update:
367 378 return
368 379
369 380 repo.ui.status(_("updating working directory\n"))
370 381 if update is not True:
371 382 checkout = update
372 383 for test in (checkout, 'default', 'tip'):
373 384 if test is None:
374 385 continue
375 386 try:
376 387 uprev = repo.lookup(test)
377 388 break
378 389 except error.RepoLookupError:
379 390 continue
380 391 _update(repo, uprev)
381 392
382 393 def copystore(ui, srcrepo, destpath):
383 394	 '''copy files from the store of srcrepo into destpath
384 395
385 396 returns destlock
386 397 '''
387 398 destlock = None
388 399 try:
389 400 hardlink = None
390 401 topic = _('linking') if hardlink else _('copying')
391 402 with ui.makeprogress(topic) as progress:
392 403 num = 0
393 404 srcpublishing = srcrepo.publishing()
394 405 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
395 406 dstvfs = vfsmod.vfs(destpath)
396 407 for f in srcrepo.store.copylist():
397 408 if srcpublishing and f.endswith('phaseroots'):
398 409 continue
399 410 dstbase = os.path.dirname(f)
400 411 if dstbase and not dstvfs.exists(dstbase):
401 412 dstvfs.mkdir(dstbase)
402 413 if srcvfs.exists(f):
403 414 if f.endswith('data'):
404 415 # 'dstbase' may be empty (e.g. revlog format 0)
405 416 lockfile = os.path.join(dstbase, "lock")
406 417 # lock to avoid premature writing to the target
407 418 destlock = lock.lock(dstvfs, lockfile)
408 419 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
409 420 hardlink, progress)
410 421 num += n
411 422 if hardlink:
412 423 ui.debug("linked %d files\n" % num)
413 424 else:
414 425 ui.debug("copied %d files\n" % num)
415 426 return destlock
416 427 except: # re-raises
417 428 release(destlock)
418 429 raise
419 430
420 431 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
421 432 rev=None, update=True, stream=False):
422 433 """Perform a clone using a shared repo.
423 434
424 435 The store for the repository will be located at <sharepath>/.hg. The
425 436 specified revisions will be cloned or pulled from "source". A shared repo
426 437 will be created at "dest" and a working copy will be created if "update" is
427 438 True.
428 439 """
429 440 revs = None
430 441 if rev:
431 442 if not srcpeer.capable('lookup'):
432 443 raise error.Abort(_("src repository does not support "
433 444 "revision lookup and so doesn't "
434 445 "support clone by revision"))
435 446
436 447 # TODO this is batchable.
437 448 remoterevs = []
438 449 for r in rev:
439 450 with srcpeer.commandexecutor() as e:
440 451 remoterevs.append(e.callcommand('lookup', {
441 452 'key': r,
442 453 }).result())
443 454 revs = remoterevs
444 455
445 456 # Obtain a lock before checking for or cloning the pooled repo otherwise
446 457 # 2 clients may race creating or populating it.
447 458 pooldir = os.path.dirname(sharepath)
448 459 # lock class requires the directory to exist.
449 460 try:
450 461 util.makedir(pooldir, False)
451 462 except OSError as e:
452 463 if e.errno != errno.EEXIST:
453 464 raise
454 465
455 466 poolvfs = vfsmod.vfs(pooldir)
456 467 basename = os.path.basename(sharepath)
457 468
458 469 with lock.lock(poolvfs, '%s.lock' % basename):
459 470 if os.path.exists(sharepath):
460 471 ui.status(_('(sharing from existing pooled repository %s)\n') %
461 472 basename)
462 473 else:
463 474 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
464 475 # Always use pull mode because hardlinks in share mode don't work
465 476 # well. Never update because working copies aren't necessary in
466 477 # share mode.
467 478 clone(ui, peeropts, source, dest=sharepath, pull=True,
468 479 revs=rev, update=False, stream=stream)
469 480
470 481 # Resolve the value to put in [paths] section for the source.
471 482 if islocal(source):
472 483 defaultpath = os.path.abspath(util.urllocalpath(source))
473 484 else:
474 485 defaultpath = source
475 486
476 487 sharerepo = repository(ui, path=sharepath)
477 488 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
478 489 defaultpath=defaultpath)
479 490
480 491 # We need to perform a pull against the dest repo to fetch bookmarks
481 492 # and other non-store data that isn't shared by default. In the case of
482 493 # non-existing shared repo, this means we pull from the remote twice. This
483 494 # is a bit weird. But at the time it was implemented, there wasn't an easy
484 495 # way to pull just non-changegroup data.
485 496 destrepo = repository(ui, path=dest)
486 497 exchange.pull(destrepo, srcpeer, heads=revs)
487 498
488 499 _postshareupdate(destrepo, update)
489 500
490 501 return srcpeer, peer(ui, peeropts, dest)
491 502
492 503 # Recomputing branch cache might be slow on big repos,
493 504 # so just copy it
494 505 def _copycache(srcrepo, dstcachedir, fname):
495 506 """copy a cache from srcrepo to destcachedir (if it exists)"""
496 507 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
497 508 dstbranchcache = os.path.join(dstcachedir, fname)
498 509 if os.path.exists(srcbranchcache):
499 510 if not os.path.exists(dstcachedir):
500 511 os.mkdir(dstcachedir)
501 512 util.copyfile(srcbranchcache, dstbranchcache)
502 513
503 514 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
504 515 update=True, stream=False, branch=None, shareopts=None,
505 516 storeincludepats=None, storeexcludepats=None):
506 517 """Make a copy of an existing repository.
507 518
508 519 Create a copy of an existing repository in a new directory. The
509 520 source and destination are URLs, as passed to the repository
510 521 function. Returns a pair of repository peers, the source and
511 522 newly created destination.
512 523
513 524 The location of the source is added to the new repository's
514 525 .hg/hgrc file, as the default to be used for future pulls and
515 526 pushes.
516 527
517 528 If an exception is raised, the partly cloned/updated destination
518 529 repository will be deleted.
519 530
520 531 Arguments:
521 532
522 533 source: repository object or URL
523 534
524 535 dest: URL of destination repository to create (defaults to base
525 536 name of source repository)
526 537
527 538 pull: always pull from source repository, even in local case or if the
528 539 server prefers streaming
529 540
530 541 stream: stream raw data uncompressed from repository (fast over
531 542 LAN, slow over WAN)
532 543
533 544 revs: revision to clone up to (implies pull=True)
534 545
535 546 update: update working directory after clone completes, if
536 547 destination is local repository (True means update to default rev,
537 548 anything else is treated as a revision)
538 549
539 550 branch: branches to clone
540 551
541 552 shareopts: dict of options to control auto sharing behavior. The "pool" key
542 553 activates auto sharing mode and defines the directory for stores. The
543 554 "mode" key determines how to construct the directory name of the shared
544 555 repository. "identity" means the name is derived from the node of the first
545 556 changeset in the repository. "remote" means the name is derived from the
546 557 remote's path/URL. Defaults to "identity."
547 558
548 559 storeincludepats and storeexcludepats: sets of file patterns to include and
549 560 exclude in the repository copy, respectively. If not defined, all files
550 561 will be included (a "full" clone). Otherwise a "narrow" clone containing
551 562 only the requested files will be performed. If ``storeincludepats`` is not
552 563 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
553 564 ``path:.``. If both are empty sets, no files will be cloned.
554 565 """
555 566
556 567 if isinstance(source, bytes):
557 568 origsource = ui.expandpath(source)
558 569 source, branches = parseurl(origsource, branch)
559 570 srcpeer = peer(ui, peeropts, source)
560 571 else:
561 572 srcpeer = source.peer() # in case we were called with a localrepo
562 573 branches = (None, branch or [])
563 574 origsource = source = srcpeer.url()
564 575 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
565 576
566 577 if dest is None:
567 578 dest = defaultdest(source)
568 579 if dest:
569 580 ui.status(_("destination directory: %s\n") % dest)
570 581 else:
571 582 dest = ui.expandpath(dest)
572 583
573 584 dest = util.urllocalpath(dest)
574 585 source = util.urllocalpath(source)
575 586
576 587 if not dest:
577 588 raise error.Abort(_("empty destination path is not valid"))
578 589
579 590 destvfs = vfsmod.vfs(dest, expandpath=True)
580 591 if destvfs.lexists():
581 592 if not destvfs.isdir():
582 593 raise error.Abort(_("destination '%s' already exists") % dest)
583 594 elif destvfs.listdir():
584 595 raise error.Abort(_("destination '%s' is not empty") % dest)
585 596
586 597 createopts = {}
587 598 narrow = False
588 599
589 600 if storeincludepats is not None:
590 601 narrowspec.validatepatterns(storeincludepats)
591 602 narrow = True
592 603
593 604 if storeexcludepats is not None:
594 605 narrowspec.validatepatterns(storeexcludepats)
595 606 narrow = True
596 607
597 608 if narrow:
598 609 # Include everything by default if only exclusion patterns defined.
599 610 if storeexcludepats and not storeincludepats:
600 611 storeincludepats = {'path:.'}
601 612
602 613 createopts['narrowfiles'] = True
603 614
604 615 shareopts = shareopts or {}
605 616 sharepool = shareopts.get('pool')
606 617 sharenamemode = shareopts.get('mode')
607 618 if sharepool and islocal(dest):
608 619 sharepath = None
609 620 if sharenamemode == 'identity':
610 621 # Resolve the name from the initial changeset in the remote
611 622 # repository. This returns nullid when the remote is empty. It
612 623 # raises RepoLookupError if revision 0 is filtered or otherwise
613 624 # not available. If we fail to resolve, sharing is not enabled.
614 625 try:
615 626 with srcpeer.commandexecutor() as e:
616 627 rootnode = e.callcommand('lookup', {
617 628 'key': '0',
618 629 }).result()
619 630
620 631 if rootnode != node.nullid:
621 632 sharepath = os.path.join(sharepool, node.hex(rootnode))
622 633 else:
623 634 ui.status(_('(not using pooled storage: '
624 635 'remote appears to be empty)\n'))
625 636 except error.RepoLookupError:
626 637 ui.status(_('(not using pooled storage: '
627 638 'unable to resolve identity of remote)\n'))
628 639 elif sharenamemode == 'remote':
629 640 sharepath = os.path.join(
630 641 sharepool, node.hex(hashlib.sha1(source).digest()))
631 642 else:
632 643 raise error.Abort(_('unknown share naming mode: %s') %
633 644 sharenamemode)
634 645
635 646 # TODO this is a somewhat arbitrary restriction.
636 647 if narrow:
637 648 ui.status(_('(pooled storage not supported for narrow clones)\n'))
638 649 sharepath = None
639 650
640 651 if sharepath:
641 652 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
642 653 dest, pull=pull, rev=revs, update=update,
643 654 stream=stream)
644 655
645 656 srclock = destlock = cleandir = None
646 657 srcrepo = srcpeer.local()
647 658 try:
648 659 abspath = origsource
649 660 if islocal(origsource):
650 661 abspath = os.path.abspath(util.urllocalpath(origsource))
651 662
652 663 if islocal(dest):
653 664 cleandir = dest
654 665
655 666 copy = False
656 667 if (srcrepo and srcrepo.cancopy() and islocal(dest)
657 668 and not phases.hassecret(srcrepo)):
658 669 copy = not pull and not revs
659 670
660 671 # TODO this is a somewhat arbitrary restriction.
661 672 if narrow:
662 673 copy = False
663 674
664 675 if copy:
665 676 try:
666 677 # we use a lock here because if we race with commit, we
667 678 # can end up with extra data in the cloned revlogs that's
668 679 # not pointed to by changesets, thus causing verify to
669 680 # fail
670 681 srclock = srcrepo.lock(wait=False)
671 682 except error.LockError:
672 683 copy = False
673 684
674 685 if copy:
675 686 srcrepo.hook('preoutgoing', throw=True, source='clone')
676 687 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
677 688 if not os.path.exists(dest):
678 689 util.makedirs(dest)
679 690 else:
680 691 # only clean up directories we create ourselves
681 692 cleandir = hgdir
682 693 try:
683 694 destpath = hgdir
684 695 util.makedir(destpath, notindexed=True)
685 696 except OSError as inst:
686 697 if inst.errno == errno.EEXIST:
687 698 cleandir = None
688 699 raise error.Abort(_("destination '%s' already exists")
689 700 % dest)
690 701 raise
691 702
692 703 destlock = copystore(ui, srcrepo, destpath)
693 704 # copy bookmarks over
694 705 srcbookmarks = srcrepo.vfs.join('bookmarks')
695 706 dstbookmarks = os.path.join(destpath, 'bookmarks')
696 707 if os.path.exists(srcbookmarks):
697 708 util.copyfile(srcbookmarks, dstbookmarks)
698 709
699 710 dstcachedir = os.path.join(destpath, 'cache')
700 711 for cache in cacheutil.cachetocopy(srcrepo):
701 712 _copycache(srcrepo, dstcachedir, cache)
702 713
703 714 # we need to re-init the repo after manually copying the data
704 715 # into it
705 716 destpeer = peer(srcrepo, peeropts, dest)
706 717 srcrepo.hook('outgoing', source='clone',
707 718 node=node.hex(node.nullid))
708 719 else:
709 720 try:
710 721 # only pass ui when no srcrepo
711 722 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
712 723 createopts=createopts)
713 724 except OSError as inst:
714 725 if inst.errno == errno.EEXIST:
715 726 cleandir = None
716 727 raise error.Abort(_("destination '%s' already exists")
717 728 % dest)
718 729 raise
719 730
720 731 if revs:
721 732 if not srcpeer.capable('lookup'):
722 733 raise error.Abort(_("src repository does not support "
723 734 "revision lookup and so doesn't "
724 735 "support clone by revision"))
725 736
726 737 # TODO this is batchable.
727 738 remoterevs = []
728 739 for rev in revs:
729 740 with srcpeer.commandexecutor() as e:
730 741 remoterevs.append(e.callcommand('lookup', {
731 742 'key': rev,
732 743 }).result())
733 744 revs = remoterevs
734 745
735 746 checkout = revs[0]
736 747 else:
737 748 revs = None
738 749 local = destpeer.local()
739 750 if local:
740 751 if narrow:
741 752 with local.lock():
742 753 local.setnarrowpats(storeincludepats, storeexcludepats)
743 754
744 755 u = util.url(abspath)
745 756 defaulturl = bytes(u)
746 757 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
747 758 if not stream:
748 759 if pull:
749 760 stream = False
750 761 else:
751 762 stream = None
752 763 # internal config: ui.quietbookmarkmove
753 764 overrides = {('ui', 'quietbookmarkmove'): True}
754 765 with local.ui.configoverride(overrides, 'clone'):
755 766 exchange.pull(local, srcpeer, revs,
756 767 streamclonerequested=stream,
757 768 includepats=storeincludepats,
758 769 excludepats=storeexcludepats)
759 770 elif srcrepo:
760 771 # TODO lift restriction once exchange.push() accepts narrow
761 772 # push.
762 773 if narrow:
763 774 raise error.Abort(_('narrow clone not available for '
764 775 'remote destinations'))
765 776
766 777 exchange.push(srcrepo, destpeer, revs=revs,
767 778 bookmarks=srcrepo._bookmarks.keys())
768 779 else:
769 780 raise error.Abort(_("clone from remote to remote not supported")
770 781 )
771 782
772 783 cleandir = None
773 784
774 785 destrepo = destpeer.local()
775 786 if destrepo:
776 787 template = uimod.samplehgrcs['cloned']
777 788 u = util.url(abspath)
778 789 u.passwd = None
779 790 defaulturl = bytes(u)
780 791 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
781 792 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
782 793
783 794 if ui.configbool('experimental', 'remotenames'):
784 795 logexchange.pullremotenames(destrepo, srcpeer)
785 796
786 797 if update:
787 798 if update is not True:
788 799 with srcpeer.commandexecutor() as e:
789 800 checkout = e.callcommand('lookup', {
790 801 'key': update,
791 802 }).result()
792 803
793 804 uprev = None
794 805 status = None
795 806 if checkout is not None:
796 807 # Some extensions (at least hg-git and hg-subversion) have
797 808 # a peer.lookup() implementation that returns a name instead
798 809 # of a nodeid. We work around it here until we've figured
799 810 # out a better solution.
800 811 if len(checkout) == 20 and checkout in destrepo:
801 812 uprev = checkout
802 813 elif scmutil.isrevsymbol(destrepo, checkout):
803 814 uprev = scmutil.revsymbol(destrepo, checkout).node()
804 815 else:
805 816 if update is not True:
806 817 try:
807 818 uprev = destrepo.lookup(update)
808 819 except error.RepoLookupError:
809 820 pass
810 821 if uprev is None:
811 822 try:
812 823 uprev = destrepo._bookmarks['@']
813 824 update = '@'
814 825 bn = destrepo[uprev].branch()
815 826 if bn == 'default':
816 827 status = _("updating to bookmark @\n")
817 828 else:
818 829 status = (_("updating to bookmark @ on branch %s\n")
819 830 % bn)
820 831 except KeyError:
821 832 try:
822 833 uprev = destrepo.branchtip('default')
823 834 except error.RepoLookupError:
824 835 uprev = destrepo.lookup('tip')
825 836 if not status:
826 837 bn = destrepo[uprev].branch()
827 838 status = _("updating to branch %s\n") % bn
828 839 destrepo.ui.status(status)
829 840 _update(destrepo, uprev)
830 841 if update in destrepo._bookmarks:
831 842 bookmarks.activate(destrepo, update)
832 843 finally:
833 844 release(srclock, destlock)
834 845 if cleandir is not None:
835 846 shutil.rmtree(cleandir, True)
836 847 if srcpeer is not None:
837 848 srcpeer.close()
838 849 return srcpeer, destpeer
839 850
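A minimal sketch of a pooled clone per the shareopts description above (the pool path and URL are placeholders):

    srcpeer, destpeer = clone(ui, {}, 'https://example.org/repo',
                              dest='repo-clone',
                              shareopts={'pool': '/srv/hg-pool',
                                         'mode': 'identity'})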
840 851 def _showstats(repo, stats, quietempty=False):
841 852 if quietempty and stats.isempty():
842 853 return
843 854 repo.ui.status(_("%d files updated, %d files merged, "
844 855 "%d files removed, %d files unresolved\n") % (
845 856 stats.updatedcount, stats.mergedcount,
846 857 stats.removedcount, stats.unresolvedcount))
847 858
848 859 def updaterepo(repo, node, overwrite, updatecheck=None):
849 860 """Update the working directory to node.
850 861
851 862	 When overwrite is set, changes are clobbered; otherwise they are merged
852 863
853 864 returns stats (see pydoc mercurial.merge.applyupdates)"""
854 865 return mergemod.update(repo, node, False, overwrite,
855 866 labels=['working copy', 'destination'],
856 867 updatecheck=updatecheck)
857 868
858 869 def update(repo, node, quietempty=False, updatecheck=None):
859 870 """update the working directory to node"""
860 871 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
861 872 _showstats(repo, stats, quietempty)
862 873 if stats.unresolvedcount:
863 874 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
864 875 return stats.unresolvedcount > 0
865 876
866 877 # naming conflict in clone()
867 878 _update = update
868 879
869 880 def clean(repo, node, show_stats=True, quietempty=False):
870 881 """forcibly switch the working directory to node, clobbering changes"""
871 882 stats = updaterepo(repo, node, True)
872 883 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
873 884 if show_stats:
874 885 _showstats(repo, stats, quietempty)
875 886 return stats.unresolvedcount > 0
876 887
877 888 # naming conflict in updatetotally()
878 889 _clean = clean
879 890
880 891 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
881 892 """Update the working directory with extra care for non-file components
882 893
883 894 This takes care of non-file components below:
884 895
885 896 :bookmark: might be advanced or (in)activated
886 897
887 898 This takes arguments below:
888 899
889 900 :checkout: to which revision the working directory is updated
890 901 :brev: a name, which might be a bookmark to be activated after updating
891 902 :clean: whether changes in the working directory can be discarded
892 903 :updatecheck: how to deal with a dirty working directory
893 904
894 905 Valid values for updatecheck are (None => linear):
895 906
896 907 * abort: abort if the working directory is dirty
897 908 * none: don't check (merge working directory changes into destination)
898 909 * linear: check that update is linear before merging working directory
899 910 changes into destination
900 911 * noconflict: check that the update does not result in file merges
901 912
902 913 This returns whether conflict is detected at updating or not.
903 914 """
904 915 if updatecheck is None:
905 916 updatecheck = ui.config('commands', 'update.check')
906 917 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
907 918 # If not configured, or invalid value configured
908 919 updatecheck = 'linear'
909 920 with repo.wlock():
910 921 movemarkfrom = None
911 922 warndest = False
912 923 if checkout is None:
913 924 updata = destutil.destupdate(repo, clean=clean)
914 925 checkout, movemarkfrom, brev = updata
915 926 warndest = True
916 927
917 928 if clean:
918 929 ret = _clean(repo, checkout)
919 930 else:
920 931 if updatecheck == 'abort':
921 932 cmdutil.bailifchanged(repo, merge=False)
922 933 updatecheck = 'none'
923 934 ret = _update(repo, checkout, updatecheck=updatecheck)
924 935
925 936 if not ret and movemarkfrom:
926 937 if movemarkfrom == repo['.'].node():
927 938 pass # no-op update
928 939 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
929 940 b = ui.label(repo._activebookmark, 'bookmarks.active')
930 941 ui.status(_("updating bookmark %s\n") % b)
931 942 else:
932 943 # this can happen with a non-linear update
933 944 b = ui.label(repo._activebookmark, 'bookmarks')
934 945 ui.status(_("(leaving bookmark %s)\n") % b)
935 946 bookmarks.deactivate(repo)
936 947 elif brev in repo._bookmarks:
937 948 if brev != repo._activebookmark:
938 949 b = ui.label(brev, 'bookmarks.active')
939 950 ui.status(_("(activating bookmark %s)\n") % b)
940 951 bookmarks.activate(repo, brev)
941 952 elif brev:
942 953 if repo._activebookmark:
943 954 b = ui.label(repo._activebookmark, 'bookmarks')
944 955 ui.status(_("(leaving bookmark %s)\n") % b)
945 956 bookmarks.deactivate(repo)
946 957
947 958 if warndest:
948 959 destutil.statusotherdests(ui, repo)
949 960
950 961 return ret
951 962
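The default check comes from the commands.update.check config knob read above; for example, in an hgrc:

    [commands]
    update.check = noconflict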
952 963 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
953 964 abort=False):
954 965 """Branch merge with node, resolving changes. Return true if any
955 966 unresolved conflicts."""
956 967 if not abort:
957 968 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
958 969 labels=labels)
959 970 else:
960 971 ms = mergemod.mergestate.read(repo)
961 972 if ms.active():
962 973 # there were conflicts
963 974 node = ms.localctx.hex()
964 975 else:
965 976	 # there were no conflicts, mergestate was not stored
966 977 node = repo['.'].hex()
967 978
968 979 repo.ui.status(_("aborting the merge, updating back to"
969 980 " %s\n") % node[:12])
970 981 stats = mergemod.update(repo, node, branchmerge=False, force=True,
971 982 labels=labels)
972 983
973 984 _showstats(repo, stats)
974 985 if stats.unresolvedcount:
975 986 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
976 987 "or 'hg merge --abort' to abandon\n"))
977 988 elif remind and not abort:
978 989 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
979 990 return stats.unresolvedcount > 0
980 991
981 992 def _incoming(displaychlist, subreporecurse, ui, repo, source,
982 993 opts, buffered=False):
983 994 """
984 995 Helper for incoming / gincoming.
985 996 displaychlist gets called with
986 997 (remoterepo, incomingchangesetlist, displayer) parameters,
987 998 and is supposed to contain only code that can't be unified.
988 999 """
989 1000 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
990 1001 other = peer(repo, opts, source)
991 1002 ui.status(_('comparing with %s\n') % util.hidepassword(source))
992 1003 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
993 1004
994 1005 if revs:
995 1006 revs = [other.lookup(rev) for rev in revs]
996 1007 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
997 1008 revs, opts["bundle"], opts["force"])
998 1009 try:
999 1010 if not chlist:
1000 1011 ui.status(_("no changes found\n"))
1001 1012 return subreporecurse()
1002 1013 ui.pager('incoming')
1003 1014 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
1004 1015 buffered=buffered)
1005 1016 displaychlist(other, chlist, displayer)
1006 1017 displayer.close()
1007 1018 finally:
1008 1019 cleanupfn()
1009 1020 subreporecurse()
1010 1021 return 0 # exit code is zero since we found incoming changes
1011 1022
1012 1023 def incoming(ui, repo, source, opts):
1013 1024 def subreporecurse():
1014 1025 ret = 1
1015 1026 if opts.get('subrepos'):
1016 1027 ctx = repo[None]
1017 1028 for subpath in sorted(ctx.substate):
1018 1029 sub = ctx.sub(subpath)
1019 1030 ret = min(ret, sub.incoming(ui, source, opts))
1020 1031 return ret
1021 1032
1022 1033 def display(other, chlist, displayer):
1023 1034 limit = logcmdutil.getlimit(opts)
1024 1035 if opts.get('newest_first'):
1025 1036 chlist.reverse()
1026 1037 count = 0
1027 1038 for n in chlist:
1028 1039 if limit is not None and count >= limit:
1029 1040 break
1030 1041 parents = [p for p in other.changelog.parents(n) if p != nullid]
1031 1042 if opts.get('no_merges') and len(parents) == 2:
1032 1043 continue
1033 1044 count += 1
1034 1045 displayer.show(other[n])
1035 1046 return _incoming(display, subreporecurse, ui, repo, source, opts)
1036 1047
1037 1048 def _outgoing(ui, repo, dest, opts):
1038 1049 path = ui.paths.getpath(dest, default=('default-push', 'default'))
1039 1050 if not path:
1040 1051 raise error.Abort(_('default repository not configured!'),
1041 1052 hint=_("see 'hg help config.paths'"))
1042 1053 dest = path.pushloc or path.loc
1043 1054 branches = path.branch, opts.get('branch') or []
1044 1055
1045 1056 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
1046 1057 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
1047 1058 if revs:
1048 1059 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1049 1060
1050 1061 other = peer(repo, opts, dest)
1051 1062 outgoing = discovery.findcommonoutgoing(repo, other, revs,
1052 1063 force=opts.get('force'))
1053 1064 o = outgoing.missing
1054 1065 if not o:
1055 1066 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1056 1067 return o, other
1057 1068
1058 1069 def outgoing(ui, repo, dest, opts):
1059 1070 def recurse():
1060 1071 ret = 1
1061 1072 if opts.get('subrepos'):
1062 1073 ctx = repo[None]
1063 1074 for subpath in sorted(ctx.substate):
1064 1075 sub = ctx.sub(subpath)
1065 1076 ret = min(ret, sub.outgoing(ui, dest, opts))
1066 1077 return ret
1067 1078
1068 1079 limit = logcmdutil.getlimit(opts)
1069 1080 o, other = _outgoing(ui, repo, dest, opts)
1070 1081 if not o:
1071 1082 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1072 1083 return recurse()
1073 1084
1074 1085 if opts.get('newest_first'):
1075 1086 o.reverse()
1076 1087 ui.pager('outgoing')
1077 1088 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1078 1089 count = 0
1079 1090 for n in o:
1080 1091 if limit is not None and count >= limit:
1081 1092 break
1082 1093 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1083 1094 if opts.get('no_merges') and len(parents) == 2:
1084 1095 continue
1085 1096 count += 1
1086 1097 displayer.show(repo[n])
1087 1098 displayer.close()
1088 1099 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1089 1100 recurse()
1090 1101 return 0 # exit code is zero since we found outgoing changes
1091 1102
1092 1103 def verify(repo):
1093 1104 """verify the consistency of a repository"""
1094 1105 ret = verifymod.verify(repo)
1095 1106
1096 1107 # Broken subrepo references in hidden csets don't seem worth worrying about,
1097 1108 # since they can't be pushed/pulled, and --hidden can be used if they are a
1098 1109 # concern.
1099 1110
1100 1111 # pathto() is needed for -R case
1101 1112 revs = repo.revs("filelog(%s)",
1102 1113 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1103 1114
1104 1115 if revs:
1105 1116 repo.ui.status(_('checking subrepo links\n'))
1106 1117 for rev in revs:
1107 1118 ctx = repo[rev]
1108 1119 try:
1109 1120 for subpath in ctx.substate:
1110 1121 try:
1111 1122 ret = (ctx.sub(subpath, allowcreate=False).verify()
1112 1123 or ret)
1113 1124 except error.RepoError as e:
1114 1125 repo.ui.warn(('%d: %s\n') % (rev, e))
1115 1126 except Exception:
1116 1127 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1117 1128 node.short(ctx.node()))
1118 1129
1119 1130 return ret
1120 1131
1121 1132 def remoteui(src, opts):
1122 1133 'build a remote ui from ui or repo and opts'
1123 1134 if util.safehasattr(src, 'baseui'): # looks like a repository
1124 1135 dst = src.baseui.copy() # drop repo-specific config
1125 1136 src = src.ui # copy target options from repo
1126 1137 else: # assume it's a global ui object
1127 1138 dst = src.copy() # keep all global options
1128 1139
1129 1140 # copy ssh-specific options
1130 1141 for o in 'ssh', 'remotecmd':
1131 1142 v = opts.get(o) or src.config('ui', o)
1132 1143 if v:
1133 1144 dst.setconfig("ui", o, v, 'copied')
1134 1145
1135 1146 # copy bundle-specific options
1136 1147 r = src.config('bundle', 'mainreporoot')
1137 1148 if r:
1138 1149 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1139 1150
1140 1151 # copy selected local settings to the remote ui
1141 1152 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1142 1153 for key, val in src.configitems(sect):
1143 1154 dst.setconfig(sect, key, val, 'copied')
1144 1155 v = src.config('web', 'cacerts')
1145 1156 if v:
1146 1157 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1147 1158
1148 1159 return dst
1149 1160
1150 1161 # Files of interest
1151 1162	 # Used to check if the repository has changed by looking at mtime and size of
1152 1163 # these files.
1153 1164 foi = [('spath', '00changelog.i'),
1154 1165 ('spath', 'phaseroots'), # ! phase can change content at the same size
1155 1166 ('spath', 'obsstore'),
1156 1167 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1157 1168 ]
1158 1169
1159 1170 class cachedlocalrepo(object):
1160 1171 """Holds a localrepository that can be cached and reused."""
1161 1172
1162 1173 def __init__(self, repo):
1163 1174 """Create a new cached repo from an existing repo.
1164 1175
1165 1176 We assume the passed in repo was recently created. If the
1166 1177 repo has changed between when it was created and when it was
1167 1178 turned into a cache, it may not refresh properly.
1168 1179 """
1169 1180 assert isinstance(repo, localrepo.localrepository)
1170 1181 self._repo = repo
1171 1182 self._state, self.mtime = self._repostate()
1172 1183 self._filtername = repo.filtername
1173 1184
1174 1185 def fetch(self):
1175 1186 """Refresh (if necessary) and return a repository.
1176 1187
1177 1188 If the cached instance is out of date, it will be recreated
1178 1189 automatically and returned.
1179 1190
1180 1191 Returns a tuple of the repo and a boolean indicating whether a new
1181 1192 repo instance was created.
1182 1193 """
1183 1194 # We compare the mtimes and sizes of some well-known files to
1184 1195 # determine if the repo changed. This is not precise, as mtimes
1185 1196 # are susceptible to clock skew and imprecise filesystems and
1186 1197 # file content can change while maintaining the same size.
1187 1198
1188 1199 state, mtime = self._repostate()
1189 1200 if state == self._state:
1190 1201 return self._repo, False
1191 1202
1192 1203 repo = repository(self._repo.baseui, self._repo.url())
1193 1204 if self._filtername:
1194 1205 self._repo = repo.filtered(self._filtername)
1195 1206 else:
1196 1207 self._repo = repo.unfiltered()
1197 1208 self._state = state
1198 1209 self.mtime = mtime
1199 1210
1200 1211 return self._repo, True
1201 1212
1202 1213 def _repostate(self):
1203 1214 state = []
1204 1215 maxmtime = -1
1205 1216 for attr, fname in foi:
1206 1217 prefix = getattr(self._repo, attr)
1207 1218 p = os.path.join(prefix, fname)
1208 1219 try:
1209 1220 st = os.stat(p)
1210 1221 except OSError:
1211 1222 st = os.stat(prefix)
1212 1223 state.append((st[stat.ST_MTIME], st.st_size))
1213 1224 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1214 1225
1215 1226 return tuple(state), maxmtime
1216 1227
1217 1228 def copy(self):
1218 1229 """Obtain a copy of this class instance.
1219 1230
1220 1231 A new localrepository instance is obtained. The new instance should be
1221 1232 completely independent of the original.
1222 1233 """
1223 1234 repo = repository(self._repo.baseui, self._repo.origroot)
1224 1235 if self._filtername:
1225 1236 repo = repo.filtered(self._filtername)
1226 1237 else:
1227 1238 repo = repo.unfiltered()
1228 1239 c = cachedlocalrepo(repo)
1229 1240 c._state = self._state
1230 1241 c.mtime = self.mtime
1231 1242 return c
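A minimal sketch of the intended fetch cycle for a long-lived process, assuming 'repo' is a freshly opened localrepository:

    cached = cachedlocalrepo(repo)
    # later, e.g. once per request:
    r, isnew = cached.fetch()
    if isnew:
        pass  # the old instance was stale; drop anything derived from it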
@@ -1,2505 +1,2530 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 narrowspec,
48 48 obsolete,
49 49 pathutil,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67 from .utils import (
68 68 interfaceutil,
69 69 procutil,
70 70 stringutil,
71 71 )
72 72
73 73 from .revlogutils import (
74 74 constants as revlogconst,
75 75 )
76 76
77 77 release = lockmod.release
78 78 urlerr = util.urlerr
79 79 urlreq = util.urlreq
80 80
81 81 # set of (path, vfs-location) tuples. vfs-location is:
82 82	 # - 'plain' for vfs relative paths
83 83 # - '' for svfs relative paths
84 84 _cachedfiles = set()
85 85
86 86 class _basefilecache(scmutil.filecache):
87 87 """All filecache usage on repo are done for logic that should be unfiltered
88 88 """
89 89 def __get__(self, repo, type=None):
90 90 if repo is None:
91 91 return self
92 92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 93 def __set__(self, repo, value):
94 94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 95 def __delete__(self, repo):
96 96 return super(_basefilecache, self).__delete__(repo.unfiltered())
97 97
98 98 class repofilecache(_basefilecache):
99 99 """filecache for files in .hg but outside of .hg/store"""
100 100 def __init__(self, *paths):
101 101 super(repofilecache, self).__init__(*paths)
102 102 for path in paths:
103 103 _cachedfiles.add((path, 'plain'))
104 104
105 105 def join(self, obj, fname):
106 106 return obj.vfs.join(fname)
107 107
108 108 class storecache(_basefilecache):
109 109 """filecache for files in the store"""
110 110 def __init__(self, *paths):
111 111 super(storecache, self).__init__(*paths)
112 112 for path in paths:
113 113 _cachedfiles.add((path, ''))
114 114
115 115 def join(self, obj, fname):
116 116 return obj.sjoin(fname)
117 117
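These decorators are applied to properties further down in localrepository (outside this truncated hunk); a representative, abridged use:

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)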
118 118 def isfilecached(repo, name):
119 119 """check if a repo has already cached "name" filecache-ed property
120 120
121 121	 This returns a (cachedobj-or-None, iscached) tuple.
122 122 """
123 123 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 124 if not cacheentry:
125 125 return None, False
126 126 return cacheentry.obj, True
127 127
128 128 class unfilteredpropertycache(util.propertycache):
129 129 """propertycache that apply to unfiltered repo only"""
130 130
131 131 def __get__(self, repo, type=None):
132 132 unfi = repo.unfiltered()
133 133 if unfi is repo:
134 134 return super(unfilteredpropertycache, self).__get__(unfi)
135 135 return getattr(unfi, self.name)
136 136
137 137 class filteredpropertycache(util.propertycache):
138 138	 """propertycache that must take filtering into account"""
139 139
140 140 def cachevalue(self, obj, value):
141 141 object.__setattr__(obj, self.name, value)
142 142
143 143
144 144 def hasunfilteredcache(repo, name):
145 145 """check if a repo has an unfilteredpropertycache value for <name>"""
146 146 return name in vars(repo.unfiltered())
147 147
148 148 def unfilteredmethod(orig):
149 149	 """decorate method that always needs to be run on unfiltered version"""
150 150 def wrapper(repo, *args, **kwargs):
151 151 return orig(repo.unfiltered(), *args, **kwargs)
152 152 return wrapper
153 153
154 154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 155 'unbundle'}
156 156 legacycaps = moderncaps.union({'changegroupsubset'})
157 157
158 158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 159 class localcommandexecutor(object):
160 160 def __init__(self, peer):
161 161 self._peer = peer
162 162 self._sent = False
163 163 self._closed = False
164 164
165 165 def __enter__(self):
166 166 return self
167 167
168 168 def __exit__(self, exctype, excvalue, exctb):
169 169 self.close()
170 170
171 171 def callcommand(self, command, args):
172 172 if self._sent:
173 173 raise error.ProgrammingError('callcommand() cannot be used after '
174 174 'sendcommands()')
175 175
176 176 if self._closed:
177 177 raise error.ProgrammingError('callcommand() cannot be used after '
178 178 'close()')
179 179
180 180 # We don't need to support anything fancy. Just call the named
181 181 # method on the peer and return a resolved future.
182 182 fn = getattr(self._peer, pycompat.sysstr(command))
183 183
184 184 f = pycompat.futures.Future()
185 185
186 186 try:
187 187 result = fn(**pycompat.strkwargs(args))
188 188 except Exception:
189 189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 190 else:
191 191 f.set_result(result)
192 192
193 193 return f
194 194
195 195 def sendcommands(self):
196 196 self._sent = True
197 197
198 198 def close(self):
199 199 self._closed = True
200 200
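hg.py above already drives this executor (see addbranchrevs); the calling convention, in brief:

    with peer.commandexecutor() as e:
        f = e.callcommand('branchmap', {})
        branchmap = f.result()  # already resolved for local peers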
201 201 @interfaceutil.implementer(repository.ipeercommands)
202 202 class localpeer(repository.peer):
203 203 '''peer for a local repo; reflects only the most recent API'''
204 204
205 205 def __init__(self, repo, caps=None):
206 206 super(localpeer, self).__init__()
207 207
208 208 if caps is None:
209 209 caps = moderncaps.copy()
210 210 self._repo = repo.filtered('served')
211 211 self.ui = repo.ui
212 212 self._caps = repo._restrictcapabilities(caps)
213 213
214 214 # Begin of _basepeer interface.
215 215
216 216 def url(self):
217 217 return self._repo.url()
218 218
219 219 def local(self):
220 220 return self._repo
221 221
222 222 def peer(self):
223 223 return self
224 224
225 225 def canpush(self):
226 226 return True
227 227
228 228 def close(self):
229 229 self._repo.close()
230 230
231 231 # End of _basepeer interface.
232 232
233 233 # Begin of _basewirecommands interface.
234 234
235 235 def branchmap(self):
236 236 return self._repo.branchmap()
237 237
238 238 def capabilities(self):
239 239 return self._caps
240 240
241 241 def clonebundles(self):
242 242 return self._repo.tryread('clonebundles.manifest')
243 243
244 244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 245 """Used to test argument passing over the wire"""
246 246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 247 pycompat.bytestr(four),
248 248 pycompat.bytestr(five))
249 249
250 250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 251 **kwargs):
252 252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 253 common=common, bundlecaps=bundlecaps,
254 254 **kwargs)[1]
255 255 cb = util.chunkbuffer(chunks)
256 256
257 257 if exchange.bundle2requested(bundlecaps):
258 258 # When requesting a bundle2, getbundle returns a stream to make the
259 259 # wire level function happier. We need to build a proper object
260 260 # from it in local peer.
261 261 return bundle2.getunbundler(self.ui, cb)
262 262 else:
263 263 return changegroup.getunbundler('01', cb, None)
264 264
265 265 def heads(self):
266 266 return self._repo.heads()
267 267
268 268 def known(self, nodes):
269 269 return self._repo.known(nodes)
270 270
271 271 def listkeys(self, namespace):
272 272 return self._repo.listkeys(namespace)
273 273
274 274 def lookup(self, key):
275 275 return self._repo.lookup(key)
276 276
277 277 def pushkey(self, namespace, key, old, new):
278 278 return self._repo.pushkey(namespace, key, old, new)
279 279
280 280 def stream_out(self):
281 281 raise error.Abort(_('cannot perform stream clone against local '
282 282 'peer'))
283 283
284 284 def unbundle(self, bundle, heads, url):
285 285 """apply a bundle on a repo
286 286
287 287 This function handles the repo locking itself."""
288 288 try:
289 289 try:
290 290 bundle = exchange.readbundle(self.ui, bundle, None)
291 291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 292 if util.safehasattr(ret, 'getchunks'):
293 293 # This is a bundle20 object, turn it into an unbundler.
294 294 # This little dance should be dropped eventually when the
295 295 # API is finally improved.
296 296 stream = util.chunkbuffer(ret.getchunks())
297 297 ret = bundle2.getunbundler(self.ui, stream)
298 298 return ret
299 299 except Exception as exc:
300 300 # If the exception contains output salvaged from a bundle2
301 301 # reply, we need to make sure it is printed before continuing
302 302 # to fail. So we build a bundle2 with such output and consume
303 303 # it directly.
304 304 #
305 305 # This is not very elegant but allows a "simple" solution for
306 306 # issue4594
307 307 output = getattr(exc, '_bundle2salvagedoutput', ())
308 308 if output:
309 309 bundler = bundle2.bundle20(self._repo.ui)
310 310 for out in output:
311 311 bundler.addpart(out)
312 312 stream = util.chunkbuffer(bundler.getchunks())
313 313 b = bundle2.getunbundler(self.ui, stream)
314 314 bundle2.processbundle(self._repo, b)
315 315 raise
316 316 except error.PushRaced as exc:
317 317 raise error.ResponseError(_('push failed:'),
318 318 stringutil.forcebytestr(exc))
319 319
320 320 # End of _basewirecommands interface.
321 321
322 322 # Begin of peer interface.
323 323
324 324 def commandexecutor(self):
325 325 return localcommandexecutor(self)
326 326
327 327 # End of peer interface.
328 328
329 329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 330 class locallegacypeer(localpeer):
331 331 '''peer extension which implements legacy methods too; used for tests with
332 332 restricted capabilities'''
333 333
334 334 def __init__(self, repo):
335 335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336 336
337 337 # Begin of baselegacywirecommands interface.
338 338
339 339 def between(self, pairs):
340 340 return self._repo.between(pairs)
341 341
342 342 def branches(self, nodes):
343 343 return self._repo.branches(nodes)
344 344
345 345 def changegroup(self, nodes, source):
346 346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 347 missingheads=self._repo.heads())
348 348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 349
350 350 def changegroupsubset(self, bases, heads, source):
351 351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 352 missingheads=heads)
353 353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354 354
355 355 # End of baselegacywirecommands interface.
356 356
357 357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 358 # clients.
359 359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360 360
361 361 # A repository with the sparserevlog feature will have delta chains that
362 362 # can spread over a larger span. Sparse reading cuts these large spans into
363 363 # pieces, so that each piece isn't too big.
364 364 # Without the sparserevlog capability, reading from the repository could use
365 365 # huge amounts of memory, because the whole span would be read at once,
366 366 # including all the intermediate revisions that aren't pertinent for the chain.
367 367 # This is why once a repository has enabled sparse-read, it becomes required.
368 368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
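# Illustratively, a repository created with this feature enabled carries the
# requirement string in its .hg/requires file alongside the others, for
# example (a sketch, not an exhaustive list):
#
#   revlogv1
#   generaldelta
#   sparserevlog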
369 369
370 370 # Functions receiving (ui, features) that extensions can register to impact
371 371 # the ability to load repositories with custom requirements. Only
372 372 # functions defined in loaded extensions are called.
373 373 #
374 374 # The function receives a set of requirement strings that the repository
375 375 # is capable of opening. Functions will typically add elements to the
376 376 # set to reflect that the extension knows how to handle those requirements.
377 377 featuresetupfuncs = set()
378 378
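# For instance, an extension could advertise support for a custom requirement
# by registering a setup function from its uisetup (names below are
# hypothetical, shown only as a sketch):
#
#   def featuresetup(ui, features):
#       features.add('exp-myextension-storage')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)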
379 379 @interfaceutil.implementer(repository.completelocalrepository)
380 380 class localrepository(object):
381 381
382 382 # obsolete experimental requirements:
383 383 # - manifestv2: An experimental new manifest format that allowed
384 384 # for stem compression of long paths. Experiment ended up not
385 385 # being successful (repository sizes went up due to worse delta
386 386 # chains), and the code was deleted in 4.6.
387 387 supportedformats = {
388 388 'revlogv1',
389 389 'generaldelta',
390 390 'treemanifest',
391 391 REVLOGV2_REQUIREMENT,
392 392 SPARSEREVLOG_REQUIREMENT,
393 393 }
394 394 _basesupported = supportedformats | {
395 395 'store',
396 396 'fncache',
397 397 'shared',
398 398 'relshared',
399 399 'dotencode',
400 400 'exp-sparse',
401 401 'internal-phase'
402 402 }
403 403 openerreqs = {
404 404 'revlogv1',
405 405 'generaldelta',
406 406 'treemanifest',
407 407 }
408 408
409 409 # list of prefixes for files which can be written without 'wlock'
410 410 # Extensions should extend this list when needed
411 411 _wlockfreeprefix = {
412 412 # We might consider requiring 'wlock' for the next
413 413 # two, but pretty much all the existing code assumes
414 414 # wlock is not needed, so we keep them excluded for
415 415 # now.
416 416 'hgrc',
417 417 'requires',
418 418 # XXX cache is a complicated business; someone
419 419 # should investigate this in depth at some point
420 420 'cache/',
421 421 # XXX shouldn't dirstate be covered by the wlock?
422 422 'dirstate',
423 423 # XXX bisect was still a bit too messy at the time
424 424 # this changeset was introduced. Someone should fix
425 425 # the remaining bit and drop this line
426 426 'bisect.state',
427 427 }
428 428
429 429 def __init__(self, baseui, path, intents=None):
430 430 """Create a new local repository instance.
431 431
432 432 Most callers should use ``hg.repository()`` or ``localrepo.instance()``
433 433 for obtaining a new repository object.
434 434 """
435 435
436 436 self.requirements = set()
437 437 self.filtername = None
438 438 # wvfs: rooted at the repository root, used to access the working copy
439 439 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
440 440 # vfs: rooted at .hg, used to access repo files outside of .hg/store
441 441 self.vfs = None
442 442 # svfs: usually rooted at .hg/store, used to access repository history
443 443 # If this is a shared repository, this vfs may point to another
444 444 # repository's .hg/store directory.
445 445 self.svfs = None
446 446 self.root = self.wvfs.base
447 447 self.path = self.wvfs.join(".hg")
448 448 self.origroot = path
449 449 self.baseui = baseui
450 450 self.ui = baseui.copy()
451 451 self.ui.copy = baseui.copy # prevent copying repo configuration
452 452 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
453 453 if (self.ui.configbool('devel', 'all-warnings') or
454 454 self.ui.configbool('devel', 'check-locks')):
455 455 self.vfs.audit = self._getvfsward(self.vfs.audit)
456 456 # A list of callbacks to shape the phases if no data were found.
457 457 # Callbacks are in the form: func(repo, roots) --> processed root.
458 458 # This list is to be filled by extensions during repo setup.
459 459 self._phasedefaults = []
460 460 try:
461 461 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
462 462 self._loadextensions()
463 463 except IOError:
464 464 pass
465 465
466 466 if featuresetupfuncs:
467 467 self.supported = set(self._basesupported) # use private copy
468 468 extmods = set(m.__name__ for n, m
469 469 in extensions.extensions(self.ui))
470 470 for setupfunc in featuresetupfuncs:
471 471 if setupfunc.__module__ in extmods:
472 472 setupfunc(self.ui, self.supported)
473 473 else:
474 474 self.supported = self._basesupported
475 475 color.setup(self.ui)
476 476
477 477 # Add compression engines.
478 478 for name in util.compengines:
479 479 engine = util.compengines[name]
480 480 if engine.revlogheader():
481 481 self.supported.add('exp-compression-%s' % name)
482 482
483 483 if not self.vfs.isdir():
484 484 try:
485 485 self.vfs.stat()
486 486 except OSError as inst:
487 487 if inst.errno != errno.ENOENT:
488 488 raise
489 489 raise error.RepoError(_("repository %s not found") % path)
490 490 else:
491 491 try:
492 492 self.requirements = scmutil.readrequires(
493 493 self.vfs, self.supported)
494 494 except IOError as inst:
495 495 if inst.errno != errno.ENOENT:
496 496 raise
497 497
498 498 cachepath = self.vfs.join('cache')
499 499 self.sharedpath = self.path
500 500 try:
501 501 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
502 502 if 'relshared' in self.requirements:
503 503 sharedpath = self.vfs.join(sharedpath)
504 504 vfs = vfsmod.vfs(sharedpath, realpath=True)
505 505 cachepath = vfs.join('cache')
506 506 s = vfs.base
507 507 if not vfs.exists():
508 508 raise error.RepoError(
509 509 _('.hg/sharedpath points to nonexistent directory %s') % s)
510 510 self.sharedpath = s
511 511 except IOError as inst:
512 512 if inst.errno != errno.ENOENT:
513 513 raise
514 514
515 515 if 'exp-sparse' in self.requirements and not sparse.enabled:
516 516 raise error.RepoError(_('repository is using sparse feature but '
517 517 'sparse is not enabled; enable the '
518 518 '"sparse" extensions to access'))
519 519
520 520 self.store = store.store(
521 521 self.requirements, self.sharedpath,
522 522 lambda base: vfsmod.vfs(base, cacheaudited=True))
523 523 self.spath = self.store.path
524 524 self.svfs = self.store.vfs
525 525 self.sjoin = self.store.join
526 526 self.vfs.createmode = self.store.createmode
527 527 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
528 528 self.cachevfs.createmode = self.store.createmode
529 529 if (self.ui.configbool('devel', 'all-warnings') or
530 530 self.ui.configbool('devel', 'check-locks')):
531 531 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
532 532 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
533 533 else: # standard vfs
534 534 self.svfs.audit = self._getsvfsward(self.svfs.audit)
535 535 self._applyopenerreqs()
536 536
537 537 self._dirstatevalidatewarned = False
538 538
539 539 self._branchcaches = {}
540 540 self._revbranchcache = None
541 541 self._filterpats = {}
542 542 self._datafilters = {}
543 543 self._transref = self._lockref = self._wlockref = None
544 544
545 545 # A cache for various files under .hg/ that tracks file changes,
546 546 # (used by the filecache decorator)
547 547 #
548 548 # Maps a property name to its util.filecacheentry
549 549 self._filecache = {}
550 550
551 551 # hold sets of revision to be filtered
552 552 # should be cleared when something might have changed the filter value:
553 553 # - new changesets,
554 554 # - phase change,
555 555 # - new obsolescence marker,
556 556 # - working directory parent change,
557 557 # - bookmark changes
558 558 self.filteredrevcache = {}
559 559
560 560 # post-dirstate-status hooks
561 561 self._postdsstatus = []
562 562
563 563 # generic mapping between names and nodes
564 564 self.names = namespaces.namespaces()
565 565
566 566 # Key to signature value.
567 567 self._sparsesignaturecache = {}
568 568 # Signature to cached matcher instance.
569 569 self._sparsematchercache = {}
570 570
571 571 def _getvfsward(self, origfunc):
572 572 """build a ward for self.vfs"""
573 573 rref = weakref.ref(self)
574 574 def checkvfs(path, mode=None):
575 575 ret = origfunc(path, mode=mode)
576 576 repo = rref()
577 577 if (repo is None
578 578 or not util.safehasattr(repo, '_wlockref')
579 579 or not util.safehasattr(repo, '_lockref')):
580 580 return
581 581 if mode in (None, 'r', 'rb'):
582 582 return
583 583 if path.startswith(repo.path):
584 584 # truncate name relative to the repository (.hg)
585 585 path = path[len(repo.path) + 1:]
586 586 if path.startswith('cache/'):
587 587 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
588 588 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
589 589 if path.startswith('journal.'):
590 590 # journal is covered by 'lock'
591 591 if repo._currentlock(repo._lockref) is None:
592 592 repo.ui.develwarn('write with no lock: "%s"' % path,
593 593 stacklevel=2, config='check-locks')
594 594 elif repo._currentlock(repo._wlockref) is None:
595 595 # rest of vfs files are covered by 'wlock'
596 596 #
597 597 # exclude special files
598 598 for prefix in self._wlockfreeprefix:
599 599 if path.startswith(prefix):
600 600 return
601 601 repo.ui.develwarn('write with no wlock: "%s"' % path,
602 602 stacklevel=2, config='check-locks')
603 603 return ret
604 604 return checkvfs
605 605
606 606 def _getsvfsward(self, origfunc):
607 607 """build a ward for self.svfs"""
608 608 rref = weakref.ref(self)
609 609 def checksvfs(path, mode=None):
610 610 ret = origfunc(path, mode=mode)
611 611 repo = rref()
612 612 if repo is None or not util.safehasattr(repo, '_lockref'):
613 613 return
614 614 if mode in (None, 'r', 'rb'):
615 615 return
616 616 if path.startswith(repo.sharedpath):
617 617 # truncate name relative to the repository (.hg)
618 618 path = path[len(repo.sharedpath) + 1:]
619 619 if repo._currentlock(repo._lockref) is None:
620 620 repo.ui.develwarn('write with no lock: "%s"' % path,
621 621 stacklevel=3)
622 622 return ret
623 623 return checksvfs
624 624
625 625 def close(self):
626 626 self._writecaches()
627 627
628 628 def _loadextensions(self):
629 629 extensions.loadall(self.ui)
630 630
631 631 def _writecaches(self):
632 632 if self._revbranchcache:
633 633 self._revbranchcache.write()
634 634
635 635 def _restrictcapabilities(self, caps):
636 636 if self.ui.configbool('experimental', 'bundle2-advertise'):
637 637 caps = set(caps)
638 638 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
639 639 role='client'))
640 640 caps.add('bundle2=' + urlreq.quote(capsblob))
641 641 return caps
642 642
643 643 def _applyopenerreqs(self):
644 644 self.svfs.options = dict((r, 1) for r in self.requirements
645 645 if r in self.openerreqs)
646 646 # experimental config: format.chunkcachesize
647 647 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
648 648 if chunkcachesize is not None:
649 649 self.svfs.options['chunkcachesize'] = chunkcachesize
650 650 # experimental config: format.manifestcachesize
651 651 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
652 652 if manifestcachesize is not None:
653 653 self.svfs.options['manifestcachesize'] = manifestcachesize
654 654 deltabothparents = self.ui.configbool('storage',
655 655 'revlog.optimize-delta-parent-choice')
656 656 self.svfs.options['deltabothparents'] = deltabothparents
657 657 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
658 658 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
659 659 if 0 <= chainspan:
660 660 self.svfs.options['maxdeltachainspan'] = chainspan
661 661 mmapindexthreshold = self.ui.configbytes('experimental',
662 662 'mmapindexthreshold')
663 663 if mmapindexthreshold is not None:
664 664 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
665 665 withsparseread = self.ui.configbool('experimental', 'sparse-read')
666 666 srdensitythres = float(self.ui.config('experimental',
667 667 'sparse-read.density-threshold'))
668 668 srmingapsize = self.ui.configbytes('experimental',
669 669 'sparse-read.min-gap-size')
670 670 self.svfs.options['with-sparse-read'] = withsparseread
671 671 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
672 672 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
673 673 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
674 674 self.svfs.options['sparse-revlog'] = sparserevlog
675 675 if sparserevlog:
676 676 self.svfs.options['generaldelta'] = True
677 677 maxchainlen = None
678 678 if sparserevlog:
679 679 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
680 680 # experimental config: format.maxchainlen
681 681 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
682 682 if maxchainlen is not None:
683 683 self.svfs.options['maxchainlen'] = maxchainlen
684 684
685 685 for r in self.requirements:
686 686 if r.startswith('exp-compression-'):
687 687 self.svfs.options['compengine'] = r[len('exp-compression-'):]
688 688
689 689 # TODO move "revlogv2" to openerreqs once finalized.
690 690 if REVLOGV2_REQUIREMENT in self.requirements:
691 691 self.svfs.options['revlogv2'] = True
692 692
693 693 def _writerequirements(self):
694 694 scmutil.writerequires(self.vfs, self.requirements)
695 695
696 696 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
697 697 # self -> auditor -> self._checknested -> self
698 698
699 699 @property
700 700 def auditor(self):
701 701 # This is only used by context.workingctx.match in order to
702 702 # detect files in subrepos.
703 703 return pathutil.pathauditor(self.root, callback=self._checknested)
704 704
705 705 @property
706 706 def nofsauditor(self):
707 707 # This is only used by context.basectx.match in order to detect
708 708 # files in subrepos.
709 709 return pathutil.pathauditor(self.root, callback=self._checknested,
710 710 realfs=False, cached=True)
711 711
712 712 def _checknested(self, path):
713 713 """Determine if path is a legal nested repository."""
714 714 if not path.startswith(self.root):
715 715 return False
716 716 subpath = path[len(self.root) + 1:]
717 717 normsubpath = util.pconvert(subpath)
718 718
719 719 # XXX: Checking against the current working copy is wrong in
720 720 # the sense that it can reject things like
721 721 #
722 722 # $ hg cat -r 10 sub/x.txt
723 723 #
724 724 # if sub/ is no longer a subrepository in the working copy
725 725 # parent revision.
726 726 #
727 727 # However, it can of course also allow things that would have
728 728 # been rejected before, such as the above cat command if sub/
729 729 # is a subrepository now, but was a normal directory before.
730 730 # The old path auditor would have rejected by mistake since it
731 731 # panics when it sees sub/.hg/.
732 732 #
733 733 # All in all, checking against the working copy seems sensible
734 734 # since we want to prevent access to nested repositories on
735 735 # the filesystem *now*.
736 736 ctx = self[None]
737 737 parts = util.splitpath(subpath)
738 738 while parts:
739 739 prefix = '/'.join(parts)
740 740 if prefix in ctx.substate:
741 741 if prefix == normsubpath:
742 742 return True
743 743 else:
744 744 sub = ctx.sub(prefix)
745 745 return sub.checknested(subpath[len(prefix) + 1:])
746 746 else:
747 747 parts.pop()
748 748 return False
749 749
750 750 def peer(self):
751 751 return localpeer(self) # not cached to avoid reference cycle
752 752
753 753 def unfiltered(self):
754 754 """Return unfiltered version of the repository
755 755
756 756 Intended to be overwritten by filtered repo."""
757 757 return self
758 758
759 759 def filtered(self, name, visibilityexceptions=None):
760 760 """Return a filtered version of a repository"""
761 761 cls = repoview.newtype(self.unfiltered().__class__)
762 762 return cls(self, name, visibilityexceptions)
763 763
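# A sketch of how the two views above are typically used (illustrative):
#
#   visible = repo.filtered('visible')   # hides hidden (e.g. obsolete) revs
#   unfi = repo.unfiltered()             # raw, unfiltered view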
764 764 @repofilecache('bookmarks', 'bookmarks.current')
765 765 def _bookmarks(self):
766 766 return bookmarks.bmstore(self)
767 767
768 768 @property
769 769 def _activebookmark(self):
770 770 return self._bookmarks.active
771 771
772 772 # _phasesets depend on changelog. What we need is to call
773 773 # _phasecache.invalidate() if '00changelog.i' was changed, but it
774 774 # can't be easily expressed in the filecache mechanism.
775 775 @storecache('phaseroots', '00changelog.i')
776 776 def _phasecache(self):
777 777 return phases.phasecache(self, self._phasedefaults)
778 778
779 779 @storecache('obsstore')
780 780 def obsstore(self):
781 781 return obsolete.makestore(self.ui, self)
782 782
783 783 @storecache('00changelog.i')
784 784 def changelog(self):
785 785 return changelog.changelog(self.svfs,
786 786 trypending=txnutil.mayhavepending(self.root))
787 787
788 788 def _constructmanifest(self):
789 789 # This is a temporary function while we migrate from manifest to
790 790 # manifestlog. It allows bundlerepo and unionrepo to intercept the
791 791 # manifest creation.
792 792 return manifest.manifestrevlog(self.svfs)
793 793
794 794 @storecache('00manifest.i')
795 795 def manifestlog(self):
796 796 return manifest.manifestlog(self.svfs, self)
797 797
798 798 @repofilecache('dirstate')
799 799 def dirstate(self):
800 800 return self._makedirstate()
801 801
802 802 def _makedirstate(self):
803 803 """Extension point for wrapping the dirstate per-repo."""
804 804 sparsematchfn = lambda: sparse.matcher(self)
805 805
806 806 return dirstate.dirstate(self.vfs, self.ui, self.root,
807 807 self._dirstatevalidate, sparsematchfn)
808 808
809 809 def _dirstatevalidate(self, node):
810 810 try:
811 811 self.changelog.rev(node)
812 812 return node
813 813 except error.LookupError:
814 814 if not self._dirstatevalidatewarned:
815 815 self._dirstatevalidatewarned = True
816 816 self.ui.warn(_("warning: ignoring unknown"
817 817 " working parent %s!\n") % short(node))
818 818 return nullid
819 819
820 820 @storecache(narrowspec.FILENAME)
821 821 def narrowpats(self):
822 822 """matcher patterns for this repository's narrowspec
823 823
824 824 A tuple of (includes, excludes).
825 825 """
826 826 source = self
827 827 if self.shared():
828 828 from . import hg
829 829 source = hg.sharedreposource(self)
830 830 return narrowspec.load(source)
831 831
832 832 @storecache(narrowspec.FILENAME)
833 833 def _narrowmatch(self):
834 834 if repository.NARROW_REQUIREMENT not in self.requirements:
835 835 return matchmod.always(self.root, '')
836 836 include, exclude = self.narrowpats
837 837 return narrowspec.match(self.root, include=include, exclude=exclude)
838 838
839 839 # TODO(martinvonz): make this property-like instead?
840 840 def narrowmatch(self):
841 841 return self._narrowmatch
842 842
843 843 def setnarrowpats(self, newincludes, newexcludes):
844 844 narrowspec.save(self, newincludes, newexcludes)
845 845 self.invalidate(clearfilecache=True)
846 846
847 847 def __getitem__(self, changeid):
848 848 if changeid is None:
849 849 return context.workingctx(self)
850 850 if isinstance(changeid, context.basectx):
851 851 return changeid
852 852 if isinstance(changeid, slice):
853 853 # wdirrev isn't contiguous so the slice shouldn't include it
854 854 return [context.changectx(self, i)
855 855 for i in pycompat.xrange(*changeid.indices(len(self)))
856 856 if i not in self.changelog.filteredrevs]
857 857 try:
858 858 return context.changectx(self, changeid)
859 859 except error.WdirUnsupported:
860 860 return context.workingctx(self)
861 861
862 862 def __contains__(self, changeid):
863 863 """True if the given changeid exists
864 864
865 865 error.AmbiguousPrefixLookupError is raised if an ambiguous node
866 866 prefix is specified.
867 867 """
868 868 try:
869 869 self[changeid]
870 870 return True
871 871 except error.RepoLookupError:
872 872 return False
873 873
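# Illustrative uses of the lookup protocol above (a sketch):
#
#   ctx = repo['tip']         # changectx via __getitem__
#   ctx = repo[None]          # workingctx
#   if somenode in repo:      # membership test via __contains__; may raise
#       ...                   # AmbiguousPrefixLookupError for short prefixes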
874 874 def __nonzero__(self):
875 875 return True
876 876
877 877 __bool__ = __nonzero__
878 878
879 879 def __len__(self):
880 880 # no need to pay the cost of repoview.changelog
881 881 unfi = self.unfiltered()
882 882 return len(unfi.changelog)
883 883
884 884 def __iter__(self):
885 885 return iter(self.changelog)
886 886
887 887 def revs(self, expr, *args):
888 888 '''Find revisions matching a revset.
889 889
890 890 The revset is specified as a string ``expr`` that may contain
891 891 %-formatting to escape certain types. See ``revsetlang.formatspec``.
892 892
893 893 Revset aliases from the configuration are not expanded. To expand
894 894 user aliases, consider calling ``scmutil.revrange()`` or
895 895 ``repo.anyrevs([expr], user=True)``.
896 896
897 897 Returns a revset.abstractsmartset, which is a list-like interface
898 898 that contains integer revisions.
899 899 '''
900 900 expr = revsetlang.formatspec(expr, *args)
901 901 m = revset.match(None, expr)
902 902 return m(self)
903 903
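# For example (illustrative), the %-formatting above escapes user input:
#
#   revs = repo.revs('heads(branch(%s))', branchname)
#   for rev in revs:
#       ...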
904 904 def set(self, expr, *args):
905 905 '''Find revisions matching a revset and emit changectx instances.
906 906
907 907 This is a convenience wrapper around ``revs()`` that iterates the
908 908 result and is a generator of changectx instances.
909 909
910 910 Revset aliases from the configuration are not expanded. To expand
911 911 user aliases, consider calling ``scmutil.revrange()``.
912 912 '''
913 913 for r in self.revs(expr, *args):
914 914 yield self[r]
915 915
916 916 def anyrevs(self, specs, user=False, localalias=None):
917 917 '''Find revisions matching one of the given revsets.
918 918
919 919 Revset aliases from the configuration are not expanded by default. To
920 920 expand user aliases, specify ``user=True``. To provide some local
921 921 definitions overriding user aliases, set ``localalias`` to
922 922 ``{name: definitionstring}``.
923 923 '''
924 924 if user:
925 925 m = revset.matchany(self.ui, specs,
926 926 lookup=revset.lookupfn(self),
927 927 localalias=localalias)
928 928 else:
929 929 m = revset.matchany(None, specs, localalias=localalias)
930 930 return m(self)
931 931
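# A sketch of anyrevs() with user aliases plus a local override
# ('mybookmarks' is a hypothetical alias name):
#
#   revs = repo.anyrevs(['draft() and mybookmarks()'], user=True,
#                       localalias={'mybookmarks': 'bookmark()'})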
932 932 def url(self):
933 933 return 'file:' + self.root
934 934
935 935 def hook(self, name, throw=False, **args):
936 936 """Call a hook, passing this repo instance.
937 937
938 938 This a convenience method to aid invoking hooks. Extensions likely
939 939 won't call this unless they have registered a custom hook or are
940 940 replacing code that is expected to call a hook.
941 941 """
942 942 return hook.hook(self.ui, self, name, throw, **args)
943 943
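# Illustrative invocation of the helper above (hook name and keyword
# arguments are examples only):
#
#   repo.hook('pretxncommit', throw=True, node=hexnode, parent1=hexp1)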
944 944 @filteredpropertycache
945 945 def _tagscache(self):
946 946 '''Returns a tagscache object that contains various tags related
947 947 caches.'''
948 948
949 949 # This simplifies its cache management by having one decorated
950 950 # function (this one) and the rest simply fetch things from it.
951 951 class tagscache(object):
952 952 def __init__(self):
953 953 # These two define the set of tags for this repository. tags
954 954 # maps tag name to node; tagtypes maps tag name to 'global' or
955 955 # 'local'. (Global tags are defined by .hgtags across all
956 956 # heads, and local tags are defined in .hg/localtags.)
957 957 # They constitute the in-memory cache of tags.
958 958 self.tags = self.tagtypes = None
959 959
960 960 self.nodetagscache = self.tagslist = None
961 961
962 962 cache = tagscache()
963 963 cache.tags, cache.tagtypes = self._findtags()
964 964
965 965 return cache
966 966
967 967 def tags(self):
968 968 '''return a mapping of tag to node'''
969 969 t = {}
970 970 if self.changelog.filteredrevs:
971 971 tags, tt = self._findtags()
972 972 else:
973 973 tags = self._tagscache.tags
974 974 for k, v in tags.iteritems():
975 975 try:
976 976 # ignore tags to unknown nodes
977 977 self.changelog.rev(v)
978 978 t[k] = v
979 979 except (error.LookupError, ValueError):
980 980 pass
981 981 return t
982 982
983 983 def _findtags(self):
984 984 '''Do the hard work of finding tags. Return a pair of dicts
985 985 (tags, tagtypes) where tags maps tag name to node, and tagtypes
986 986 maps tag name to a string like \'global\' or \'local\'.
987 987 Subclasses or extensions are free to add their own tags, but
988 988 should be aware that the returned dicts will be retained for the
989 989 duration of the localrepo object.'''
990 990
991 991 # XXX what tagtype should subclasses/extensions use? Currently
992 992 # mq and bookmarks add tags, but do not set the tagtype at all.
993 993 # Should each extension invent its own tag type? Should there
994 994 # be one tagtype for all such "virtual" tags? Or is the status
995 995 # quo fine?
996 996
997 997
998 998 # map tag name to (node, hist)
999 999 alltags = tagsmod.findglobaltags(self.ui, self)
1000 1000 # map tag name to tag type
1001 1001 tagtypes = dict((tag, 'global') for tag in alltags)
1002 1002
1003 1003 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1004 1004
1005 1005 # Build the return dicts. Have to re-encode tag names because
1006 1006 # the tags module always uses UTF-8 (in order not to lose info
1007 1007 # writing to the cache), but the rest of Mercurial wants them in
1008 1008 # local encoding.
1009 1009 tags = {}
1010 1010 for (name, (node, hist)) in alltags.iteritems():
1011 1011 if node != nullid:
1012 1012 tags[encoding.tolocal(name)] = node
1013 1013 tags['tip'] = self.changelog.tip()
1014 1014 tagtypes = dict([(encoding.tolocal(name), value)
1015 1015 for (name, value) in tagtypes.iteritems()])
1016 1016 return (tags, tagtypes)
1017 1017
1018 1018 def tagtype(self, tagname):
1019 1019 '''
1020 1020 return the type of the given tag. result can be:
1021 1021
1022 1022 'local' : a local tag
1023 1023 'global' : a global tag
1024 1024 None : tag does not exist
1025 1025 '''
1026 1026
1027 1027 return self._tagscache.tagtypes.get(tagname)
1028 1028
1029 1029 def tagslist(self):
1030 1030 '''return a list of tags ordered by revision'''
1031 1031 if not self._tagscache.tagslist:
1032 1032 l = []
1033 1033 for t, n in self.tags().iteritems():
1034 1034 l.append((self.changelog.rev(n), t, n))
1035 1035 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1036 1036
1037 1037 return self._tagscache.tagslist
1038 1038
1039 1039 def nodetags(self, node):
1040 1040 '''return the tags associated with a node'''
1041 1041 if not self._tagscache.nodetagscache:
1042 1042 nodetagscache = {}
1043 1043 for t, n in self._tagscache.tags.iteritems():
1044 1044 nodetagscache.setdefault(n, []).append(t)
1045 1045 for tags in nodetagscache.itervalues():
1046 1046 tags.sort()
1047 1047 self._tagscache.nodetagscache = nodetagscache
1048 1048 return self._tagscache.nodetagscache.get(node, [])
1049 1049
1050 1050 def nodebookmarks(self, node):
1051 1051 """return the list of bookmarks pointing to the specified node"""
1052 1052 return self._bookmarks.names(node)
1053 1053
1054 1054 def branchmap(self):
1055 1055 '''returns a dictionary {branch: [branchheads]} with branchheads
1056 1056 ordered by increasing revision number'''
1057 1057 branchmap.updatecache(self)
1058 1058 return self._branchcaches[self.filtername]
1059 1059
1060 1060 @unfilteredmethod
1061 1061 def revbranchcache(self):
1062 1062 if not self._revbranchcache:
1063 1063 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1064 1064 return self._revbranchcache
1065 1065
1066 1066 def branchtip(self, branch, ignoremissing=False):
1067 1067 '''return the tip node for a given branch
1068 1068
1069 1069 If ignoremissing is True, then this method will not raise an error.
1070 1070 This is helpful for callers that only expect None for a missing branch
1071 1071 (e.g. namespace).
1072 1072
1073 1073 '''
1074 1074 try:
1075 1075 return self.branchmap().branchtip(branch)
1076 1076 except KeyError:
1077 1077 if not ignoremissing:
1078 1078 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1079 1079 else:
1080 1080 pass
1081 1081
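# e.g. (illustrative): with ignoremissing=True a missing branch yields
# None instead of raising:
#
#   tip = repo.branchtip('default', ignoremissing=True)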
1082 1082 def lookup(self, key):
1083 1083 return scmutil.revsymbol(self, key).node()
1084 1084
1085 1085 def lookupbranch(self, key):
1086 1086 if key in self.branchmap():
1087 1087 return key
1088 1088
1089 1089 return scmutil.revsymbol(self, key).branch()
1090 1090
1091 1091 def known(self, nodes):
1092 1092 cl = self.changelog
1093 1093 nm = cl.nodemap
1094 1094 filtered = cl.filteredrevs
1095 1095 result = []
1096 1096 for n in nodes:
1097 1097 r = nm.get(n)
1098 1098 resp = not (r is None or r in filtered)
1099 1099 result.append(resp)
1100 1100 return result
1101 1101
1102 1102 def local(self):
1103 1103 return self
1104 1104
1105 1105 def publishing(self):
1106 1106 # it's safe (and desirable) to trust the publish flag unconditionally
1107 1107 # so that we don't finalize changes shared between users via ssh or nfs
1108 1108 return self.ui.configbool('phases', 'publish', untrusted=True)
1109 1109
1110 1110 def cancopy(self):
1111 1111 # so statichttprepo's override of local() works
1112 1112 if not self.local():
1113 1113 return False
1114 1114 if not self.publishing():
1115 1115 return True
1116 1116 # if publishing we can't copy if there is filtered content
1117 1117 return not self.filtered('visible').changelog.filteredrevs
1118 1118
1119 1119 def shared(self):
1120 1120 '''the type of shared repository (None if not shared)'''
1121 1121 if self.sharedpath != self.path:
1122 1122 return 'store'
1123 1123 return None
1124 1124
1125 1125 def wjoin(self, f, *insidef):
1126 1126 return self.vfs.reljoin(self.root, f, *insidef)
1127 1127
1128 1128 def file(self, f):
1129 1129 if f[0] == '/':
1130 1130 f = f[1:]
1131 1131 return filelog.filelog(self.svfs, f)
1132 1132
1133 1133 def setparents(self, p1, p2=nullid):
1134 1134 with self.dirstate.parentchange():
1135 1135 copies = self.dirstate.setparents(p1, p2)
1136 1136 pctx = self[p1]
1137 1137 if copies:
1138 1138 # Adjust copy records; the dirstate cannot do it, as it
1139 1139 # requires access to the parents' manifests. Preserve them
1140 1140 # only for entries added to the first parent.
1141 1141 for f in copies:
1142 1142 if f not in pctx and copies[f] in pctx:
1143 1143 self.dirstate.copy(copies[f], f)
1144 1144 if p2 == nullid:
1145 1145 for f, s in sorted(self.dirstate.copies().items()):
1146 1146 if f not in pctx and s not in pctx:
1147 1147 self.dirstate.copy(None, f)
1148 1148
1149 1149 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1150 1150 """changeid can be a changeset revision, node, or tag.
1151 1151 fileid can be a file revision or node."""
1152 1152 return context.filectx(self, path, changeid, fileid,
1153 1153 changectx=changectx)
1154 1154
1155 1155 def getcwd(self):
1156 1156 return self.dirstate.getcwd()
1157 1157
1158 1158 def pathto(self, f, cwd=None):
1159 1159 return self.dirstate.pathto(f, cwd)
1160 1160
1161 1161 def _loadfilter(self, filter):
1162 1162 if filter not in self._filterpats:
1163 1163 l = []
1164 1164 for pat, cmd in self.ui.configitems(filter):
1165 1165 if cmd == '!':
1166 1166 continue
1167 1167 mf = matchmod.match(self.root, '', [pat])
1168 1168 fn = None
1169 1169 params = cmd
1170 1170 for name, filterfn in self._datafilters.iteritems():
1171 1171 if cmd.startswith(name):
1172 1172 fn = filterfn
1173 1173 params = cmd[len(name):].lstrip()
1174 1174 break
1175 1175 if not fn:
1176 1176 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1177 1177 # Wrap old filters not supporting keyword arguments
1178 1178 if not pycompat.getargspec(fn)[2]:
1179 1179 oldfn = fn
1180 1180 fn = lambda s, c, **kwargs: oldfn(s, c)
1181 1181 l.append((mf, fn, params))
1182 1182 self._filterpats[filter] = l
1183 1183 return self._filterpats[filter]
1184 1184
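# The configuration read above comes from the [encode]/[decode] hgrc
# sections; a minimal sketch of such a section (patterns and commands
# are examples only):
#
#   [encode]
#   *.txt = pipe: dos2unix
#   **.jpg = !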
1185 1185 def _filter(self, filterpats, filename, data):
1186 1186 for mf, fn, cmd in filterpats:
1187 1187 if mf(filename):
1188 1188 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1189 1189 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1190 1190 break
1191 1191
1192 1192 return data
1193 1193
1194 1194 @unfilteredpropertycache
1195 1195 def _encodefilterpats(self):
1196 1196 return self._loadfilter('encode')
1197 1197
1198 1198 @unfilteredpropertycache
1199 1199 def _decodefilterpats(self):
1200 1200 return self._loadfilter('decode')
1201 1201
1202 1202 def adddatafilter(self, name, filter):
1203 1203 self._datafilters[name] = filter
1204 1204
1205 1205 def wread(self, filename):
1206 1206 if self.wvfs.islink(filename):
1207 1207 data = self.wvfs.readlink(filename)
1208 1208 else:
1209 1209 data = self.wvfs.read(filename)
1210 1210 return self._filter(self._encodefilterpats, filename, data)
1211 1211
1212 1212 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1213 1213 """write ``data`` into ``filename`` in the working directory
1214 1214
1215 1215 This returns the length of the written (maybe decoded) data.
1216 1216 """
1217 1217 data = self._filter(self._decodefilterpats, filename, data)
1218 1218 if 'l' in flags:
1219 1219 self.wvfs.symlink(data, filename)
1220 1220 else:
1221 1221 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1222 1222 **kwargs)
1223 1223 if 'x' in flags:
1224 1224 self.wvfs.setflags(filename, False, True)
1225 1225 else:
1226 1226 self.wvfs.setflags(filename, False, False)
1227 1227 return len(data)
1228 1228
1229 1229 def wwritedata(self, filename, data):
1230 1230 return self._filter(self._decodefilterpats, filename, data)
1231 1231
1232 1232 def currenttransaction(self):
1233 1233 """return the current transaction or None if non exists"""
1234 1234 if self._transref:
1235 1235 tr = self._transref()
1236 1236 else:
1237 1237 tr = None
1238 1238
1239 1239 if tr and tr.running():
1240 1240 return tr
1241 1241 return None
1242 1242
1243 1243 def transaction(self, desc, report=None):
1244 1244 if (self.ui.configbool('devel', 'all-warnings')
1245 1245 or self.ui.configbool('devel', 'check-locks')):
1246 1246 if self._currentlock(self._lockref) is None:
1247 1247 raise error.ProgrammingError('transaction requires locking')
1248 1248 tr = self.currenttransaction()
1249 1249 if tr is not None:
1250 1250 return tr.nest(name=desc)
1251 1251
1252 1252 # abort here if the journal already exists
1253 1253 if self.svfs.exists("journal"):
1254 1254 raise error.RepoError(
1255 1255 _("abandoned transaction found"),
1256 1256 hint=_("run 'hg recover' to clean up transaction"))
1257 1257
1258 1258 idbase = "%.40f#%f" % (random.random(), time.time())
1259 1259 ha = hex(hashlib.sha1(idbase).digest())
1260 1260 txnid = 'TXN:' + ha
1261 1261 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1262 1262
1263 1263 self._writejournal(desc)
1264 1264 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1265 1265 if report:
1266 1266 rp = report
1267 1267 else:
1268 1268 rp = self.ui.warn
1269 1269 vfsmap = {'plain': self.vfs} # root of .hg/
1270 1270 # we must avoid a cyclic reference between repo and transaction.
1271 1271 reporef = weakref.ref(self)
1272 1272 # Code to track tag movement
1273 1273 #
1274 1274 # Since tags are all handled as file content, it is actually quite hard
1275 1275 # to track these movements from a code perspective. So we fall back to
1276 1276 # tracking at the repository level. One could envision tracking changes
1277 1277 # to the '.hgtags' file through changegroup application, but that fails
1278 1278 # to cope with cases where a transaction exposes new heads without a
1279 1279 # changegroup being involved (eg: phase movement).
1280 1280 #
1281 1281 # For now, we gate the feature behind a flag since it likely comes
1282 1282 # with performance impacts. The current code runs more often than needed
1283 1283 # and does not use caches as much as it could. The current focus is on
1284 1284 # the behavior of the feature so we disable it by default. The flag
1285 1285 # will be removed when we are happy with the performance impact.
1286 1286 #
1287 1287 # Once this feature is no longer experimental move the following
1288 1288 # documentation to the appropriate help section:
1289 1289 #
1290 1290 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1291 1291 # tags (new or changed or deleted tags). In addition the details of
1292 1292 # these changes are made available in a file at:
1293 1293 # ``REPOROOT/.hg/changes/tags.changes``.
1294 1294 # Make sure you check for HG_TAG_MOVED before reading that file, as it
1295 1295 # might exist from a previous transaction even if no tags were touched
1296 1296 # in this one. Changes are recorded in a line-based format::
1297 1297 #
1298 1298 # <action> <hex-node> <tag-name>\n
1299 1299 #
1300 1300 # Actions are defined as follows:
1301 1301 # "-R": tag is removed,
1302 1302 # "+A": tag is added,
1303 1303 # "-M": tag is moved (old value),
1304 1304 # "+M": tag is moved (new value),
1305 1305 tracktags = lambda x: None
1306 1306 # experimental config: experimental.hook-track-tags
1307 1307 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1308 1308 if desc != 'strip' and shouldtracktags:
1309 1309 oldheads = self.changelog.headrevs()
1310 1310 def tracktags(tr2):
1311 1311 repo = reporef()
1312 1312 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1313 1313 newheads = repo.changelog.headrevs()
1314 1314 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1315 1315 # note: we compare lists here; as we do it only once,
1316 1316 # building a set would not be cheaper
1317 1317 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1318 1318 if changes:
1319 1319 tr2.hookargs['tag_moved'] = '1'
1320 1320 with repo.vfs('changes/tags.changes', 'w',
1321 1321 atomictemp=True) as changesfile:
1322 1322 # note: we do not register the file with the transaction
1323 1323 # because we need it to still exist once the transaction
1324 1324 # is closed (for txnclose hooks)
1325 1325 tagsmod.writediff(changesfile, changes)
1326 1326 def validate(tr2):
1327 1327 """will run pre-closing hooks"""
1328 1328 # XXX the transaction API is a bit lacking here so we take a hacky
1329 1329 # path for now
1330 1330 #
1331 1331 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1332 1332 # dict is copied before these run. In addition we need the data
1333 1333 # available to in-memory hooks too.
1334 1334 #
1335 1335 # Moreover, we also need to make sure this runs before txnclose
1336 1336 # hooks and there is no "pending" mechanism that would execute
1337 1337 # logic only if hooks are about to run.
1338 1338 #
1339 1339 # Fixing this limitation of the transaction is also needed to track
1340 1340 # other families of changes (bookmarks, phases, obsolescence).
1341 1341 #
1342 1342 # This will have to be fixed before we remove the experimental
1343 1343 # gating.
1344 1344 tracktags(tr2)
1345 1345 repo = reporef()
1346 1346 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1347 1347 scmutil.enforcesinglehead(repo, tr2, desc)
1348 1348 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1349 1349 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1350 1350 args = tr.hookargs.copy()
1351 1351 args.update(bookmarks.preparehookargs(name, old, new))
1352 1352 repo.hook('pretxnclose-bookmark', throw=True,
1353 1353 txnname=desc,
1354 1354 **pycompat.strkwargs(args))
1355 1355 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1356 1356 cl = repo.unfiltered().changelog
1357 1357 for rev, (old, new) in tr.changes['phases'].items():
1358 1358 args = tr.hookargs.copy()
1359 1359 node = hex(cl.node(rev))
1360 1360 args.update(phases.preparehookargs(node, old, new))
1361 1361 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1362 1362 **pycompat.strkwargs(args))
1363 1363
1364 1364 repo.hook('pretxnclose', throw=True,
1365 1365 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1366 1366 def releasefn(tr, success):
1367 1367 repo = reporef()
1368 1368 if success:
1369 1369 # this should be explicitly invoked here, because
1370 1370 # in-memory changes aren't written out at closing
1371 1371 # transaction, if tr.addfilegenerator (via
1372 1372 # dirstate.write or so) isn't invoked while
1373 1373 # transaction running
1374 1374 repo.dirstate.write(None)
1375 1375 else:
1376 1376 # discard all changes (including ones already written
1377 1377 # out) in this transaction
1378 1378 narrowspec.restorebackup(self, 'journal.narrowspec')
1379 1379 repo.dirstate.restorebackup(None, 'journal.dirstate')
1380 1380
1381 1381 repo.invalidate(clearfilecache=True)
1382 1382
1383 1383 tr = transaction.transaction(rp, self.svfs, vfsmap,
1384 1384 "journal",
1385 1385 "undo",
1386 1386 aftertrans(renames),
1387 1387 self.store.createmode,
1388 1388 validator=validate,
1389 1389 releasefn=releasefn,
1390 1390 checkambigfiles=_cachedfiles,
1391 1391 name=desc)
1392 1392 tr.changes['origrepolen'] = len(self)
1393 1393 tr.changes['obsmarkers'] = set()
1394 1394 tr.changes['phases'] = {}
1395 1395 tr.changes['bookmarks'] = {}
1396 1396
1397 1397 tr.hookargs['txnid'] = txnid
1398 1398 # note: writing the fncache only during finalize means that the file is
1399 1399 # outdated when running hooks. As fncache is used for streaming clones,
1400 1400 # this is not expected to break anything that happens during the hooks.
1401 1401 tr.addfinalize('flush-fncache', self.store.write)
1402 1402 def txnclosehook(tr2):
1403 1403 """To be run if transaction is successful, will schedule a hook run
1404 1404 """
1405 1405 # Don't reference tr2 in hook() so we don't hold a reference.
1406 1406 # This reduces memory consumption when there are multiple
1407 1407 # transactions per lock. This can likely go away if issue5045
1408 1408 # fixes the function accumulation.
1409 1409 hookargs = tr2.hookargs
1410 1410
1411 1411 def hookfunc():
1412 1412 repo = reporef()
1413 1413 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1414 1414 bmchanges = sorted(tr.changes['bookmarks'].items())
1415 1415 for name, (old, new) in bmchanges:
1416 1416 args = tr.hookargs.copy()
1417 1417 args.update(bookmarks.preparehookargs(name, old, new))
1418 1418 repo.hook('txnclose-bookmark', throw=False,
1419 1419 txnname=desc, **pycompat.strkwargs(args))
1420 1420
1421 1421 if hook.hashook(repo.ui, 'txnclose-phase'):
1422 1422 cl = repo.unfiltered().changelog
1423 1423 phasemv = sorted(tr.changes['phases'].items())
1424 1424 for rev, (old, new) in phasemv:
1425 1425 args = tr.hookargs.copy()
1426 1426 node = hex(cl.node(rev))
1427 1427 args.update(phases.preparehookargs(node, old, new))
1428 1428 repo.hook('txnclose-phase', throw=False, txnname=desc,
1429 1429 **pycompat.strkwargs(args))
1430 1430
1431 1431 repo.hook('txnclose', throw=False, txnname=desc,
1432 1432 **pycompat.strkwargs(hookargs))
1433 1433 reporef()._afterlock(hookfunc)
1434 1434 tr.addfinalize('txnclose-hook', txnclosehook)
1435 1435 # Include a leading "-" to make it happen before the transaction summary
1436 1436 # reports registered via scmutil.registersummarycallback() whose names
1437 1437 # are 00-txnreport etc. That way, the caches will be warm when the
1438 1438 # callbacks run.
1439 1439 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1440 1440 def txnaborthook(tr2):
1441 1441 """To be run if transaction is aborted
1442 1442 """
1443 1443 reporef().hook('txnabort', throw=False, txnname=desc,
1444 1444 **pycompat.strkwargs(tr2.hookargs))
1445 1445 tr.addabort('txnabort-hook', txnaborthook)
1446 1446 # avoid eager cache invalidation; in-memory data should be identical
1447 1447 # to stored data if the transaction has no error.
1448 1448 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1449 1449 self._transref = weakref.ref(tr)
1450 1450 scmutil.registersummarycallback(self, tr, desc)
1451 1451 return tr
1452 1452
1453 1453 def _journalfiles(self):
1454 1454 return ((self.svfs, 'journal'),
1455 1455 (self.vfs, 'journal.dirstate'),
1456 1456 (self.vfs, 'journal.branch'),
1457 1457 (self.vfs, 'journal.desc'),
1458 1458 (self.vfs, 'journal.bookmarks'),
1459 1459 (self.svfs, 'journal.phaseroots'))
1460 1460
1461 1461 def undofiles(self):
1462 1462 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1463 1463
1464 1464 @unfilteredmethod
1465 1465 def _writejournal(self, desc):
1466 1466 self.dirstate.savebackup(None, 'journal.dirstate')
1467 1467 narrowspec.savebackup(self, 'journal.narrowspec')
1468 1468 self.vfs.write("journal.branch",
1469 1469 encoding.fromlocal(self.dirstate.branch()))
1470 1470 self.vfs.write("journal.desc",
1471 1471 "%d\n%s\n" % (len(self), desc))
1472 1472 self.vfs.write("journal.bookmarks",
1473 1473 self.vfs.tryread("bookmarks"))
1474 1474 self.svfs.write("journal.phaseroots",
1475 1475 self.svfs.tryread("phaseroots"))
1476 1476
1477 1477 def recover(self):
1478 1478 with self.lock():
1479 1479 if self.svfs.exists("journal"):
1480 1480 self.ui.status(_("rolling back interrupted transaction\n"))
1481 1481 vfsmap = {'': self.svfs,
1482 1482 'plain': self.vfs,}
1483 1483 transaction.rollback(self.svfs, vfsmap, "journal",
1484 1484 self.ui.warn,
1485 1485 checkambigfiles=_cachedfiles)
1486 1486 self.invalidate()
1487 1487 return True
1488 1488 else:
1489 1489 self.ui.warn(_("no interrupted transaction available\n"))
1490 1490 return False
1491 1491
1492 1492 def rollback(self, dryrun=False, force=False):
1493 1493 wlock = lock = dsguard = None
1494 1494 try:
1495 1495 wlock = self.wlock()
1496 1496 lock = self.lock()
1497 1497 if self.svfs.exists("undo"):
1498 1498 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1499 1499
1500 1500 return self._rollback(dryrun, force, dsguard)
1501 1501 else:
1502 1502 self.ui.warn(_("no rollback information available\n"))
1503 1503 return 1
1504 1504 finally:
1505 1505 release(dsguard, lock, wlock)
1506 1506
1507 1507 @unfilteredmethod # Until we get smarter cache management
1508 1508 def _rollback(self, dryrun, force, dsguard):
1509 1509 ui = self.ui
1510 1510 try:
1511 1511 args = self.vfs.read('undo.desc').splitlines()
1512 1512 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1513 1513 if len(args) >= 3:
1514 1514 detail = args[2]
1515 1515 oldtip = oldlen - 1
1516 1516
1517 1517 if detail and ui.verbose:
1518 1518 msg = (_('repository tip rolled back to revision %d'
1519 1519 ' (undo %s: %s)\n')
1520 1520 % (oldtip, desc, detail))
1521 1521 else:
1522 1522 msg = (_('repository tip rolled back to revision %d'
1523 1523 ' (undo %s)\n')
1524 1524 % (oldtip, desc))
1525 1525 except IOError:
1526 1526 msg = _('rolling back unknown transaction\n')
1527 1527 desc = None
1528 1528
1529 1529 if not force and self['.'] != self['tip'] and desc == 'commit':
1530 1530 raise error.Abort(
1531 1531 _('rollback of last commit while not checked out '
1532 1532 'may lose data'), hint=_('use -f to force'))
1533 1533
1534 1534 ui.status(msg)
1535 1535 if dryrun:
1536 1536 return 0
1537 1537
1538 1538 parents = self.dirstate.parents()
1539 1539 self.destroying()
1540 1540 vfsmap = {'plain': self.vfs, '': self.svfs}
1541 1541 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1542 1542 checkambigfiles=_cachedfiles)
1543 1543 if self.vfs.exists('undo.bookmarks'):
1544 1544 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1545 1545 if self.svfs.exists('undo.phaseroots'):
1546 1546 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1547 1547 self.invalidate()
1548 1548
1549 1549 parentgone = (parents[0] not in self.changelog.nodemap or
1550 1550 parents[1] not in self.changelog.nodemap)
1551 1551 if parentgone:
1552 1552 # prevent dirstateguard from overwriting the already restored one
1553 1553 dsguard.close()
1554 1554
1555 1555 narrowspec.restorebackup(self, 'undo.narrowspec')
1556 1556 self.dirstate.restorebackup(None, 'undo.dirstate')
1557 1557 try:
1558 1558 branch = self.vfs.read('undo.branch')
1559 1559 self.dirstate.setbranch(encoding.tolocal(branch))
1560 1560 except IOError:
1561 1561 ui.warn(_('named branch could not be reset: '
1562 1562 'current branch is still \'%s\'\n')
1563 1563 % self.dirstate.branch())
1564 1564
1565 1565 parents = tuple([p.rev() for p in self[None].parents()])
1566 1566 if len(parents) > 1:
1567 1567 ui.status(_('working directory now based on '
1568 1568 'revisions %d and %d\n') % parents)
1569 1569 else:
1570 1570 ui.status(_('working directory now based on '
1571 1571 'revision %d\n') % parents)
1572 1572 mergemod.mergestate.clean(self, self['.'].node())
1573 1573
1574 1574 # TODO: if we know which new heads may result from this rollback, pass
1575 1575 # them to destroy(), which will prevent the branchhead cache from being
1576 1576 # invalidated.
1577 1577 self.destroyed()
1578 1578 return 0
1579 1579
1580 1580 def _buildcacheupdater(self, newtransaction):
1581 1581 """called during transaction to build the callback updating cache
1582 1582
1583 1583 Lives on the repository to help extensions that might want to augment
1584 1584 this logic. For this purpose, the created transaction is passed to the
1585 1585 method.
1586 1586 """
1587 1587 # we must avoid a cyclic reference between repo and transaction.
1588 1588 reporef = weakref.ref(self)
1589 1589 def updater(tr):
1590 1590 repo = reporef()
1591 1591 repo.updatecaches(tr)
1592 1592 return updater
1593 1593
1594 1594 @unfilteredmethod
1595 1595 def updatecaches(self, tr=None, full=False):
1596 1596 """warm appropriate caches
1597 1597
1598 1598 If this function is called after a transaction closed, the transaction
1599 1599 will be available in the 'tr' argument. This can be used to selectively
1600 1600 update caches relevant to the changes in that transaction.
1601 1601
1602 1602 If 'full' is set, make sure all caches the function knows about have
1603 1603 up-to-date data, even the ones usually loaded more lazily.
1604 1604 """
1605 1605 if tr is not None and tr.hookargs.get('source') == 'strip':
1606 1606 # During strip, many caches are invalid but
1607 1607 # later call to `destroyed` will refresh them.
1608 1608 return
1609 1609
1610 1610 if tr is None or tr.changes['origrepolen'] < len(self):
1611 1611 # updating the unfiltered branchmap should refresh all the others,
1612 1612 self.ui.debug('updating the branch cache\n')
1613 1613 branchmap.updatecache(self.filtered('served'))
1614 1614
1615 1615 if full:
1616 1616 rbc = self.revbranchcache()
1617 1617 for r in self.changelog:
1618 1618 rbc.branchinfo(r)
1619 1619 rbc.write()
1620 1620
1621 1621 # ensure the working copy parents are in the manifestfulltextcache
1622 1622 for ctx in self['.'].parents():
1623 1623 ctx.manifest() # accessing the manifest is enough
1624 1624
1625 1625 def invalidatecaches(self):
1626 1626
1627 1627 if '_tagscache' in vars(self):
1628 1628 # can't use delattr on proxy
1629 1629 del self.__dict__['_tagscache']
1630 1630
1631 1631 self.unfiltered()._branchcaches.clear()
1632 1632 self.invalidatevolatilesets()
1633 1633 self._sparsesignaturecache.clear()
1634 1634
1635 1635 def invalidatevolatilesets(self):
1636 1636 self.filteredrevcache.clear()
1637 1637 obsolete.clearobscaches(self)
1638 1638
1639 1639 def invalidatedirstate(self):
1640 1640 '''Invalidates the dirstate, causing the next call to dirstate
1641 1641 to check if it was modified since the last time it was read,
1642 1642 rereading it if it has.
1643 1643
1644 1644 This differs from dirstate.invalidate() in that it doesn't always
1645 1645 reread the dirstate. Use dirstate.invalidate() if you want to
1646 1646 explicitly read the dirstate again (i.e. restoring it to a previous
1647 1647 known good state).'''
1648 1648 if hasunfilteredcache(self, 'dirstate'):
1649 1649 for k in self.dirstate._filecache:
1650 1650 try:
1651 1651 delattr(self.dirstate, k)
1652 1652 except AttributeError:
1653 1653 pass
1654 1654 delattr(self.unfiltered(), 'dirstate')
1655 1655
1656 1656 def invalidate(self, clearfilecache=False):
1657 1657 '''Invalidates both store and non-store parts other than dirstate
1658 1658
1659 1659 If a transaction is running, invalidation of store is omitted,
1660 1660 because discarding in-memory changes might cause inconsistency
1661 1661 (e.g. an incomplete fncache causes unintentional failure, but
1662 1662 a redundant one doesn't).
1663 1663 '''
1664 1664 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1665 1665 for k in list(self._filecache.keys()):
1666 1666 # dirstate is invalidated separately in invalidatedirstate()
1667 1667 if k == 'dirstate':
1668 1668 continue
1669 1669 if (k == 'changelog' and
1670 1670 self.currenttransaction() and
1671 1671 self.changelog._delayed):
1672 1672 # The changelog object may store unwritten revisions. We don't
1673 1673 # want to lose them.
1674 1674 # TODO: Solve the problem instead of working around it.
1675 1675 continue
1676 1676
1677 1677 if clearfilecache:
1678 1678 del self._filecache[k]
1679 1679 try:
1680 1680 delattr(unfiltered, k)
1681 1681 except AttributeError:
1682 1682 pass
1683 1683 self.invalidatecaches()
1684 1684 if not self.currenttransaction():
1685 1685 # TODO: Changing contents of store outside transaction
1686 1686 # causes inconsistency. We should make in-memory store
1687 1687 # changes detectable, and abort if changed.
1688 1688 self.store.invalidatecaches()
1689 1689
1690 1690 def invalidateall(self):
1691 1691 '''Fully invalidates both store and non-store parts, causing the
1692 1692 subsequent operation to reread any outside changes.'''
1693 1693 # extension should hook this to invalidate its caches
1694 1694 self.invalidate()
1695 1695 self.invalidatedirstate()
1696 1696
1697 1697 @unfilteredmethod
1698 1698 def _refreshfilecachestats(self, tr):
1699 1699 """Reload stats of cached files so that they are flagged as valid"""
1700 1700 for k, ce in self._filecache.items():
1701 1701 k = pycompat.sysstr(k)
1702 1702 if k == r'dirstate' or k not in self.__dict__:
1703 1703 continue
1704 1704 ce.refresh()
1705 1705
1706 1706 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1707 1707 inheritchecker=None, parentenvvar=None):
1708 1708 parentlock = None
1709 1709 # the contents of parentenvvar are used by the underlying lock to
1710 1710 # determine whether it can be inherited
1711 1711 if parentenvvar is not None:
1712 1712 parentlock = encoding.environ.get(parentenvvar)
1713 1713
1714 1714 timeout = 0
1715 1715 warntimeout = 0
1716 1716 if wait:
1717 1717 timeout = self.ui.configint("ui", "timeout")
1718 1718 warntimeout = self.ui.configint("ui", "timeout.warn")
1719 1719 # internal config: ui.signal-safe-lock
1720 1720 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1721 1721
1722 1722 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1723 1723 releasefn=releasefn,
1724 1724 acquirefn=acquirefn, desc=desc,
1725 1725 inheritchecker=inheritchecker,
1726 1726 parentlock=parentlock,
1727 1727 signalsafe=signalsafe)
1728 1728 return l
1729 1729
1730 1730 def _afterlock(self, callback):
1731 1731 """add a callback to be run when the repository is fully unlocked
1732 1732
1733 1733 The callback will be executed when the outermost lock is released
1734 1734 (with wlock being higher level than 'lock')."""
1735 1735 for ref in (self._wlockref, self._lockref):
1736 1736 l = ref and ref()
1737 1737 if l and l.held:
1738 1738 l.postrelease.append(callback)
1739 1739 break
1740 1740 else: # no lock has been found.
1741 1741 callback()
1742 1742
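# A sketch of deferring work until the repo is fully unlocked
# ('notifyfunc' and 'mynotify' are hypothetical):
#
#   def notifyfunc():
#       repo.hook('mynotify', throw=False)
#   repo._afterlock(notifyfunc)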
1743 1743 def lock(self, wait=True):
1744 1744 '''Lock the repository store (.hg/store) and return a weak reference
1745 1745 to the lock. Use this before modifying the store (e.g. committing or
1746 1746 stripping). If you are opening a transaction, get a lock as well.
1747 1747
1748 1748 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1749 1749 'wlock' first to avoid a dead-lock hazard.'''
1750 1750 l = self._currentlock(self._lockref)
1751 1751 if l is not None:
1752 1752 l.lock()
1753 1753 return l
1754 1754
1755 1755 l = self._lock(self.svfs, "lock", wait, None,
1756 1756 self.invalidate, _('repository %s') % self.origroot)
1757 1757 self._lockref = weakref.ref(l)
1758 1758 return l
1759 1759
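# Illustrative ordering when both locks and a transaction are needed
# (a sketch, not a prescription; 'example' is a placeholder name):
#
#   with repo.wlock():            # always take wlock first
#       with repo.lock():
#           with repo.transaction('example') as tr:
#               ...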
1760 1760 def _wlockchecktransaction(self):
1761 1761 if self.currenttransaction() is not None:
1762 1762 raise error.LockInheritanceContractViolation(
1763 1763 'wlock cannot be inherited in the middle of a transaction')
1764 1764
1765 1765 def wlock(self, wait=True):
1766 1766 '''Lock the non-store parts of the repository (everything under
1767 1767 .hg except .hg/store) and return a weak reference to the lock.
1768 1768
1769 1769 Use this before modifying files in .hg.
1770 1770
1771 1771 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1772 1772 'wlock' first to avoid a dead-lock hazard.'''
1773 1773 l = self._wlockref and self._wlockref()
1774 1774 if l is not None and l.held:
1775 1775 l.lock()
1776 1776 return l
1777 1777
1778 1778 # We do not need to check for non-waiting lock acquisition. Such
1779 1779 # acquisition would not cause a dead-lock as it would just fail.
1780 1780 if wait and (self.ui.configbool('devel', 'all-warnings')
1781 1781 or self.ui.configbool('devel', 'check-locks')):
1782 1782 if self._currentlock(self._lockref) is not None:
1783 1783 self.ui.develwarn('"wlock" acquired after "lock"')
1784 1784
1785 1785 def unlock():
1786 1786 if self.dirstate.pendingparentchange():
1787 1787 self.dirstate.invalidate()
1788 1788 else:
1789 1789 self.dirstate.write(None)
1790 1790
1791 1791 self._filecache['dirstate'].refresh()
1792 1792
1793 1793 l = self._lock(self.vfs, "wlock", wait, unlock,
1794 1794 self.invalidatedirstate, _('working directory of %s') %
1795 1795 self.origroot,
1796 1796 inheritchecker=self._wlockchecktransaction,
1797 1797 parentenvvar='HG_WLOCK_LOCKER')
1798 1798 self._wlockref = weakref.ref(l)
1799 1799 return l
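
# Lock-ordering sketch (assumed caller code, mirroring commit()
# below): take 'wlock' before 'lock' and release in reverse order,
# per the deadlock warning in the docstrings above.
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         # ... modify the store and working directory state ...
#     finally:
#         lockmod.release(lock, wlock)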
1800 1800
1801 1801 def _currentlock(self, lockref):
1802 1802 """Returns the lock if it's held, or None if it's not."""
1803 1803 if lockref is None:
1804 1804 return None
1805 1805 l = lockref()
1806 1806 if l is None or not l.held:
1807 1807 return None
1808 1808 return l
1809 1809
1810 1810 def currentwlock(self):
1811 1811 """Returns the wlock if it's held, or None if it's not."""
1812 1812 return self._currentlock(self._wlockref)
1813 1813
1814 1814 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1815 1815 """
1816 1816 commit an individual file as part of a larger transaction
1817 1817 """
1818 1818
1819 1819 fname = fctx.path()
1820 1820 fparent1 = manifest1.get(fname, nullid)
1821 1821 fparent2 = manifest2.get(fname, nullid)
1822 1822 if isinstance(fctx, context.filectx):
1823 1823 node = fctx.filenode()
1824 1824 if node in [fparent1, fparent2]:
1825 1825 self.ui.debug('reusing %s filelog entry\n' % fname)
1826 1826 if manifest1.flags(fname) != fctx.flags():
1827 1827 changelist.append(fname)
1828 1828 return node
1829 1829
1830 1830 flog = self.file(fname)
1831 1831 meta = {}
1832 1832 copy = fctx.renamed()
1833 1833 if copy and copy[0] != fname:
1834 1834 # Mark the new revision of this file as a copy of another
1835 1835 # file. This copy data will effectively act as a parent
1836 1836 # of this new revision. If this is a merge, the first
1837 1837 # parent will be the nullid (meaning "look up the copy data")
1838 1838 # and the second one will be the other parent. For example:
1839 1839 #
1840 1840 # 0 --- 1 --- 3 rev1 changes file foo
1841 1841 # \ / rev2 renames foo to bar and changes it
1842 1842 # \- 2 -/ rev3 should have bar with all changes and
1843 1843 # should record that bar descends from
1844 1844 # bar in rev2 and foo in rev1
1845 1845 #
1846 1846 # this allows this merge to succeed:
1847 1847 #
1848 1848 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1849 1849 # \ / merging rev3 and rev4 should use bar@rev2
1850 1850 # \- 2 --- 4 as the merge base
1851 1851 #
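# An illustrative sketch of the metadata recorded below (the path
# and node are hypothetical): the rename source and its filelog
# node travel with the new file revision, while fparent1 becomes
# nullid so readers know to consult the copy data.
#
#     meta = {'copy': 'foo',                 # source of the rename
#             'copyrev': hex(foo_filenode)}  # filelog node in p1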
1852 1852
1853 1853 cfname = copy[0]
1854 1854 crev = manifest1.get(cfname)
1855 1855 newfparent = fparent2
1856 1856
1857 1857 if manifest2: # branch merge
1858 1858 if fparent2 == nullid or crev is None: # copied on remote side
1859 1859 if cfname in manifest2:
1860 1860 crev = manifest2[cfname]
1861 1861 newfparent = fparent1
1862 1862
1863 1863 # Here, we used to search backwards through history to try to find
1864 1864 # where the file copy came from if the source of a copy was not in
1865 1865 # the parent directory. However, this doesn't actually make sense to
1866 1866 # do (what does a copy from something not in your working copy even
1867 1867 # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
1868 1868 # the user that copy information was dropped, so if they didn't
1869 1869 # expect this outcome it can be fixed, but this is the correct
1870 1870 # behavior in this circumstance.
1871 1871
1872 1872 if crev:
1873 1873 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1874 1874 meta["copy"] = cfname
1875 1875 meta["copyrev"] = hex(crev)
1876 1876 fparent1, fparent2 = nullid, newfparent
1877 1877 else:
1878 1878 self.ui.warn(_("warning: can't find ancestor for '%s' "
1879 1879 "copied from '%s'!\n") % (fname, cfname))
1880 1880
1881 1881 elif fparent1 == nullid:
1882 1882 fparent1, fparent2 = fparent2, nullid
1883 1883 elif fparent2 != nullid:
1884 1884 # is one parent an ancestor of the other?
1885 1885 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1886 1886 if fparent1 in fparentancestors:
1887 1887 fparent1, fparent2 = fparent2, nullid
1888 1888 elif fparent2 in fparentancestors:
1889 1889 fparent2 = nullid
1890 1890
1891 1891 # is the file changed?
1892 1892 text = fctx.data()
1893 1893 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1894 1894 changelist.append(fname)
1895 1895 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1896 1896 # are just the flags changed during merge?
1897 1897 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1898 1898 changelist.append(fname)
1899 1899
1900 1900 return fparent1
1901 1901
1902 1902 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1903 1903 """check for commit arguments that aren't committable"""
1904 1904 if match.isexact() or match.prefix():
1905 1905 matched = set(status.modified + status.added + status.removed)
1906 1906
1907 1907 for f in match.files():
1908 1908 f = self.dirstate.normalize(f)
1909 1909 if f == '.' or f in matched or f in wctx.substate:
1910 1910 continue
1911 1911 if f in status.deleted:
1912 1912 fail(f, _('file not found!'))
1913 1913 if f in vdirs: # visited directory
1914 1914 d = f + '/'
1915 1915 for mf in matched:
1916 1916 if mf.startswith(d):
1917 1917 break
1918 1918 else:
1919 1919 fail(f, _("no match under directory!"))
1920 1920 elif f not in self.dirstate:
1921 1921 fail(f, _("file not tracked!"))
1922 1922
1923 1923 @unfilteredmethod
1924 1924 def commit(self, text="", user=None, date=None, match=None, force=False,
1925 1925 editor=False, extra=None):
1926 1926 """Add a new revision to current repository.
1927 1927
1928 1928 Revision information is gathered from the working directory,
1929 1929 match can be used to filter the committed files. If editor is
1930 1930 supplied, it is called to get a commit message.
1931 1931 """
1932 1932 if extra is None:
1933 1933 extra = {}
1934 1934
1935 1935 def fail(f, msg):
1936 1936 raise error.Abort('%s: %s' % (f, msg))
1937 1937
1938 1938 if not match:
1939 1939 match = matchmod.always(self.root, '')
1940 1940
1941 1941 if not force:
1942 1942 vdirs = []
1943 1943 match.explicitdir = vdirs.append
1944 1944 match.bad = fail
1945 1945
1946 1946 wlock = lock = tr = None
1947 1947 try:
1948 1948 wlock = self.wlock()
1949 1949 lock = self.lock() # for recent changelog (see issue4368)
1950 1950
1951 1951 wctx = self[None]
1952 1952 merge = len(wctx.parents()) > 1
1953 1953
1954 1954 if not force and merge and not match.always():
1955 1955 raise error.Abort(_('cannot partially commit a merge '
1956 1956 '(do not specify files or patterns)'))
1957 1957
1958 1958 status = self.status(match=match, clean=force)
1959 1959 if force:
1960 1960 status.modified.extend(status.clean) # mq may commit clean files
1961 1961
1962 1962 # check subrepos
1963 1963 subs, commitsubs, newstate = subrepoutil.precommit(
1964 1964 self.ui, wctx, status, match, force=force)
1965 1965
1966 1966 # make sure all explicit patterns are matched
1967 1967 if not force:
1968 1968 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1969 1969
1970 1970 cctx = context.workingcommitctx(self, status,
1971 1971 text, user, date, extra)
1972 1972
1973 1973 # internal config: ui.allowemptycommit
1974 1974 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1975 1975 or extra.get('close') or merge or cctx.files()
1976 1976 or self.ui.configbool('ui', 'allowemptycommit'))
1977 1977 if not allowemptycommit:
1978 1978 return None
1979 1979
1980 1980 if merge and cctx.deleted():
1981 1981 raise error.Abort(_("cannot commit merge with missing files"))
1982 1982
1983 1983 ms = mergemod.mergestate.read(self)
1984 1984 mergeutil.checkunresolved(ms)
1985 1985
1986 1986 if editor:
1987 1987 cctx._text = editor(self, cctx, subs)
1988 1988 edited = (text != cctx._text)
1989 1989
1990 1990 # Save commit message in case this transaction gets rolled back
1991 1991 # (e.g. by a pretxncommit hook). Leave the content alone on
1992 1992 # the assumption that the user will use the same editor again.
1993 1993 msgfn = self.savecommitmessage(cctx._text)
1994 1994
1995 1995 # commit subs and write new state
1996 1996 if subs:
1997 1997 for s in sorted(commitsubs):
1998 1998 sub = wctx.sub(s)
1999 1999 self.ui.status(_('committing subrepository %s\n') %
2000 2000 subrepoutil.subrelpath(sub))
2001 2001 sr = sub.commit(cctx._text, user, date)
2002 2002 newstate[s] = (newstate[s][0], sr)
2003 2003 subrepoutil.writestate(self, newstate)
2004 2004
2005 2005 p1, p2 = self.dirstate.parents()
2006 2006 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2007 2007 try:
2008 2008 self.hook("precommit", throw=True, parent1=hookp1,
2009 2009 parent2=hookp2)
2010 2010 tr = self.transaction('commit')
2011 2011 ret = self.commitctx(cctx, True)
2012 2012 except: # re-raises
2013 2013 if edited:
2014 2014 self.ui.write(
2015 2015 _('note: commit message saved in %s\n') % msgfn)
2016 2016 raise
2017 2017 # update bookmarks, dirstate and mergestate
2018 2018 bookmarks.update(self, [p1, p2], ret)
2019 2019 cctx.markcommitted(ret)
2020 2020 ms.reset()
2021 2021 tr.close()
2022 2022
2023 2023 finally:
2024 2024 lockmod.release(tr, lock, wlock)
2025 2025
2026 2026 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2027 2027 # hack for commands that use a temporary commit (e.g. histedit):
2028 2028 # the temporary commit may have been stripped before the hook runs
2029 2029 if self.changelog.hasnode(ret):
2030 2030 self.hook("commit", node=node, parent1=parent1,
2031 2031 parent2=parent2)
2032 2032 self._afterlock(commithook)
2033 2033 return ret
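
# Usage sketch (hypothetical caller): commit whatever is modified in
# the working directory; None is returned when there is nothing to
# commit and ui.allowemptycommit is not set.
#
#     node = repo.commit(text='fix the frobnicator',
#                        user='alice <alice@example.org>')
#     if node is None:
#         repo.ui.status('nothing changed\n')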
2034 2034
2035 2035 @unfilteredmethod
2036 2036 def commitctx(self, ctx, error=False):
2037 2037 """Add a new revision to current repository.
2038 2038 Revision information is passed via the context argument.
2039 2039
2040 2040 ctx.files() should list all files involved in this commit, i.e.
2041 2041 modified/added/removed files. On merge, it may be a superset of the
2042 2042 files recorded in the committed changeset, since file nodes derived
2043 2043 directly from p1 or p2 are excluded from the committed ctx.files().
2044 2044 """
2045 2045
2046 2046 tr = None
2047 2047 p1, p2 = ctx.p1(), ctx.p2()
2048 2048 user = ctx.user()
2049 2049
2050 2050 lock = self.lock()
2051 2051 try:
2052 2052 tr = self.transaction("commit")
2053 2053 trp = weakref.proxy(tr)
2054 2054
2055 2055 if ctx.manifestnode():
2056 2056 # reuse an existing manifest revision
2057 2057 self.ui.debug('reusing known manifest\n')
2058 2058 mn = ctx.manifestnode()
2059 2059 files = ctx.files()
2060 2060 elif ctx.files():
2061 2061 m1ctx = p1.manifestctx()
2062 2062 m2ctx = p2.manifestctx()
2063 2063 mctx = m1ctx.copy()
2064 2064
2065 2065 m = mctx.read()
2066 2066 m1 = m1ctx.read()
2067 2067 m2 = m2ctx.read()
2068 2068
2069 2069 # check in files
2070 2070 added = []
2071 2071 changed = []
2072 2072 removed = list(ctx.removed())
2073 2073 linkrev = len(self)
2074 2074 self.ui.note(_("committing files:\n"))
2075 2075 for f in sorted(ctx.modified() + ctx.added()):
2076 2076 self.ui.note(f + "\n")
2077 2077 try:
2078 2078 fctx = ctx[f]
2079 2079 if fctx is None:
2080 2080 removed.append(f)
2081 2081 else:
2082 2082 added.append(f)
2083 2083 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2084 2084 trp, changed)
2085 2085 m.setflag(f, fctx.flags())
2086 2086 except OSError as inst:
2087 2087 self.ui.warn(_("trouble committing %s!\n") % f)
2088 2088 raise
2089 2089 except IOError as inst:
2090 2090 errcode = getattr(inst, 'errno', errno.ENOENT)
2091 2091 if error or errcode and errcode != errno.ENOENT:
2092 2092 self.ui.warn(_("trouble committing %s!\n") % f)
2093 2093 raise
2094 2094
2095 2095 # update manifest
2096 2096 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2097 2097 drop = [f for f in removed if f in m]
2098 2098 for f in drop:
2099 2099 del m[f]
2100 2100 files = changed + removed
2101 2101 md = None
2102 2102 if not files:
2103 2103 # if no "files" actually changed in terms of the changelog,
2104 2104 # try hard to detect an unmodified manifest entry so that the
2105 2105 # exact same commit can be reproduced later by convert.
2106 2106 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2107 2107 if not files and md:
2108 2108 self.ui.debug('not reusing manifest (no file change in '
2109 2109 'changelog, but manifest differs)\n')
2110 2110 if files or md:
2111 2111 self.ui.note(_("committing manifest\n"))
2112 2112 mn = mctx.write(trp, linkrev,
2113 2113 p1.manifestnode(), p2.manifestnode(),
2114 2114 added, drop)
2115 2115 else:
2116 2116 self.ui.debug('reusing manifest from p1 (listed files '
2117 2117 'actually unchanged)\n')
2118 2118 mn = p1.manifestnode()
2119 2119 else:
2120 2120 self.ui.debug('reusing manifest from p1 (no file change)\n')
2121 2121 mn = p1.manifestnode()
2122 2122 files = []
2123 2123
2124 2124 # update changelog
2125 2125 self.ui.note(_("committing changelog\n"))
2126 2126 self.changelog.delayupdate(tr)
2127 2127 n = self.changelog.add(mn, files, ctx.description(),
2128 2128 trp, p1.node(), p2.node(),
2129 2129 user, ctx.date(), ctx.extra().copy())
2130 2130 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2131 2131 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2132 2132 parent2=xp2)
2133 2133 # set the new commit to its proper phase
2134 2134 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2135 2135 if targetphase:
2136 2136 # retracting the phase boundary does not alter parent changesets.
2137 2137 # if a parent has a higher phase, the resulting phase will
2138 2138 # be compliant anyway
2139 2139 #
2140 2140 # if the minimal phase was 0, we don't need to retract anything
2141 2141 phases.registernew(self, tr, targetphase, [n])
2142 2142 tr.close()
2143 2143 return n
2144 2144 finally:
2145 2145 if tr:
2146 2146 tr.release()
2147 2147 lock.release()
2148 2148
2149 2149 @unfilteredmethod
2150 2150 def destroying(self):
2151 2151 '''Inform the repository that nodes are about to be destroyed.
2152 2152 Intended for use by strip and rollback, so there's a common
2153 2153 place for anything that has to be done before destroying history.
2154 2154
2155 2155 This is mostly useful for saving state that is in memory and waiting
2156 2156 to be flushed when the current lock is released. Because a call to
2157 2157 destroyed is imminent, the repo will be invalidated, causing those
2158 2158 changes to either stay in memory (waiting for the next unlock) or
2159 2159 vanish completely.
2160 2160 '''
2161 2161 # When using the same lock to commit and strip, the phasecache is left
2162 2162 # dirty after committing. Then when we strip, the repo is invalidated,
2163 2163 # causing those changes to disappear.
2164 2164 if '_phasecache' in vars(self):
2165 2165 self._phasecache.write()
2166 2166
2167 2167 @unfilteredmethod
2168 2168 def destroyed(self):
2169 2169 '''Inform the repository that nodes have been destroyed.
2170 2170 Intended for use by strip and rollback, so there's a common
2171 2171 place for anything that has to be done after destroying history.
2172 2172 '''
2173 2173 # When one tries to:
2174 2174 # 1) destroy nodes thus calling this method (e.g. strip)
2175 2175 # 2) use phasecache somewhere (e.g. commit)
2176 2176 #
2177 2177 # then 2) will fail because the phasecache contains nodes that were
2178 2178 # removed. We can either remove phasecache from the filecache,
2179 2179 # causing it to reload next time it is accessed, or simply filter
2180 2180 # the removed nodes now and write the updated cache.
2181 2181 self._phasecache.filterunknown(self)
2182 2182 self._phasecache.write()
2183 2183
2184 2184 # refresh all repository caches
2185 2185 self.updatecaches()
2186 2186
2187 2187 # Ensure the persistent tag cache is updated. Doing it now
2188 2188 # means that the tag cache only has to worry about destroyed
2189 2189 # heads immediately after a strip/rollback. That in turn
2190 2190 # guarantees that "cachetip == currenttip" (comparing both rev
2191 2191 # and node) always means no nodes have been added or destroyed.
2192 2192
2193 2193 # XXX this is suboptimal when qrefresh'ing: we strip the current
2194 2194 # head, refresh the tag cache, then immediately add a new head.
2195 2195 # But I think doing it this way is necessary for the "instant
2196 2196 # tag cache retrieval" case to work.
2197 2197 self.invalidate()
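
# Call-order sketch for a destructive operation (hypothetical; this
# is the contract strip and rollback follow): destroying() before
# history is removed, destroyed() after, all under the store lock.
#
#     with repo.lock():
#         repo.destroying()
#         # ... remove revisions, e.g. via repair.strip ...
#         repo.destroyed()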
2198 2198
2199 2199 def status(self, node1='.', node2=None, match=None,
2200 2200 ignored=False, clean=False, unknown=False,
2201 2201 listsubrepos=False):
2202 2202 '''a convenience method that calls node1.status(node2)'''
2203 2203 return self[node1].status(node2, match, ignored, clean, unknown,
2204 2204 listsubrepos)
2205 2205
2206 2206 def addpostdsstatus(self, ps):
2207 2207 """Add a callback to run within the wlock, at the point at which status
2208 2208 fixups happen.
2209 2209
2210 2210 On status completion, callback(wctx, status) will be called with the
2211 2211 wlock held, unless the dirstate has changed from underneath or the wlock
2212 2212 couldn't be grabbed.
2213 2213
2214 2214 Callbacks should not capture and use a cached copy of the dirstate --
2215 2215 it might change in the meanwhile. Instead, they should access the
2216 2216 dirstate via wctx.repo().dirstate.
2217 2217
2218 2218 This list is emptied out after each status run -- extensions should
2219 2219 make sure to add to this list each time dirstate.status is called.
2220 2220 Extensions should also make sure they don't call this for statuses
2221 2221 that don't involve the dirstate.
2222 2222 """
2223 2223
2224 2224 # The list is located here for uniqueness reasons -- it is actually
2225 2225 # managed by the workingctx, but that isn't unique per-repo.
2226 2226 self._postdsstatus.append(ps)
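
# Registration sketch (hypothetical callback): per the docstring
# above, the callback receives (wctx, status) with the wlock held
# and must re-read the dirstate through wctx.repo().
#
#     def fixup(wctx, status):
#         for f in status.modified:
#             pass  # e.g. normalize timestamps or content
#     repo.addpostdsstatus(fixup)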
2227 2227
2228 2228 def postdsstatus(self):
2229 2229 """Used by workingctx to get the list of post-dirstate-status hooks."""
2230 2230 return self._postdsstatus
2231 2231
2232 2232 def clearpostdsstatus(self):
2233 2233 """Used by workingctx to clear post-dirstate-status hooks."""
2234 2234 del self._postdsstatus[:]
2235 2235
2236 2236 def heads(self, start=None):
2237 2237 if start is None:
2238 2238 cl = self.changelog
2239 2239 headrevs = reversed(cl.headrevs())
2240 2240 return [cl.node(rev) for rev in headrevs]
2241 2241
2242 2242 heads = self.changelog.heads(start)
2243 2243 # sort the output in rev descending order
2244 2244 return sorted(heads, key=self.changelog.rev, reverse=True)
2245 2245
2246 2246 def branchheads(self, branch=None, start=None, closed=False):
2247 2247 '''return a (possibly filtered) list of heads for the given branch
2248 2248
2249 2249 Heads are returned in topological order, from newest to oldest.
2250 2250 If branch is None, use the dirstate branch.
2251 2251 If start is not None, return only heads reachable from start.
2252 2252 If closed is True, return heads that are marked as closed as well.
2253 2253 '''
2254 2254 if branch is None:
2255 2255 branch = self[None].branch()
2256 2256 branches = self.branchmap()
2257 2257 if branch not in branches:
2258 2258 return []
2259 2259 # the cache returns heads ordered lowest to highest
2260 2260 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2261 2261 if start is not None:
2262 2262 # filter out the heads that cannot be reached from startrev
2263 2263 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2264 2264 bheads = [h for h in bheads if h in fbheads]
2265 2265 return bheads
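
# Usage sketch, relying only on the behavior the docstring above
# describes ('stable' is a hypothetical branch name):
#
#     repo.branchheads()                       # heads of the dirstate branch
#     repo.branchheads('stable', closed=True)  # include closed heads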
2266 2266
2267 2267 def branches(self, nodes):
2268 2268 if not nodes:
2269 2269 nodes = [self.changelog.tip()]
2270 2270 b = []
2271 2271 for n in nodes:
2272 2272 t = n
2273 2273 while True:
2274 2274 p = self.changelog.parents(n)
2275 2275 if p[1] != nullid or p[0] == nullid:
2276 2276 b.append((t, n, p[0], p[1]))
2277 2277 break
2278 2278 n = p[0]
2279 2279 return b
2280 2280
2281 2281 def between(self, pairs):
2282 2282 r = []
2283 2283
2284 2284 for top, bottom in pairs:
2285 2285 n, l, i = top, [], 0
2286 2286 f = 1
2287 2287
2288 2288 while n != bottom and n != nullid:
2289 2289 p = self.changelog.parents(n)[0]
2290 2290 if i == f:
2291 2291 l.append(n)
2292 2292 f = f * 2
2293 2293 n = p
2294 2294 i += 1
2295 2295
2296 2296 r.append(l)
2297 2297
2298 2298 return r
2299 2299
2300 2300 def checkpush(self, pushop):
2301 2301 """Extensions can override this function if additional checks have
2302 2302 to be performed before pushing, or call it if they override push
2303 2303 command.
2304 2304 """
2305 2305
2306 2306 @unfilteredpropertycache
2307 2307 def prepushoutgoinghooks(self):
2308 2308 """Return util.hooks consists of a pushop with repo, remote, outgoing
2309 2309 methods, which are called before pushing changesets.
2310 2310 """
2311 2311 return util.hooks()
2312 2312
2313 2313 def pushkey(self, namespace, key, old, new):
2314 2314 try:
2315 2315 tr = self.currenttransaction()
2316 2316 hookargs = {}
2317 2317 if tr is not None:
2318 2318 hookargs.update(tr.hookargs)
2319 2319 hookargs = pycompat.strkwargs(hookargs)
2320 2320 hookargs[r'namespace'] = namespace
2321 2321 hookargs[r'key'] = key
2322 2322 hookargs[r'old'] = old
2323 2323 hookargs[r'new'] = new
2324 2324 self.hook('prepushkey', throw=True, **hookargs)
2325 2325 except error.HookAbort as exc:
2326 2326 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2327 2327 if exc.hint:
2328 2328 self.ui.write_err(_("(%s)\n") % exc.hint)
2329 2329 return False
2330 2330 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2331 2331 ret = pushkey.push(self, namespace, key, old, new)
2332 2332 def runhook():
2333 2333 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2334 2334 ret=ret)
2335 2335 self._afterlock(runhook)
2336 2336 return ret
2337 2337
2338 2338 def listkeys(self, namespace):
2339 2339 self.hook('prelistkeys', throw=True, namespace=namespace)
2340 2340 self.ui.debug('listing keys for "%s"\n' % namespace)
2341 2341 values = pushkey.list(self, namespace)
2342 2342 self.hook('listkeys', namespace=namespace, values=values)
2343 2343 return values
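
# Usage sketch for the pushkey protocol methods ('bookmarks' is one
# of the standard namespaces; the name and nodes are hypothetical):
#
#     marks = repo.listkeys('bookmarks')      # {name: hex node}
#     ok = repo.pushkey('bookmarks', 'foo', oldhexnode, newhexnode)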
2344 2344
2345 2345 def debugwireargs(self, one, two, three=None, four=None, five=None):
2346 2346 '''used to test argument passing over the wire'''
2347 2347 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2348 2348 pycompat.bytestr(four),
2349 2349 pycompat.bytestr(five))
2350 2350
2351 2351 def savecommitmessage(self, text):
2352 2352 fp = self.vfs('last-message.txt', 'wb')
2353 2353 try:
2354 2354 fp.write(text)
2355 2355 finally:
2356 2356 fp.close()
2357 2357 return self.pathto(fp.name[len(self.root) + 1:])
2358 2358
2359 2359 # used to avoid circular references so destructors work
2360 2360 def aftertrans(files):
2361 2361 renamefiles = [tuple(t) for t in files]
2362 2362 def a():
2363 2363 for vfs, src, dest in renamefiles:
2364 2364 # if src and dest refer to the same file, vfs.rename is a no-op,
2365 2365 # leaving both src and dest on disk. delete dest to make sure
2366 2366 # the rename couldn't be such a no-op.
2367 2367 vfs.tryunlink(dest)
2368 2368 try:
2369 2369 vfs.rename(src, dest)
2370 2370 except OSError: # journal file does not yet exist
2371 2371 pass
2372 2372 return a
2373 2373
2374 2374 def undoname(fn):
2375 2375 base, name = os.path.split(fn)
2376 2376 assert name.startswith('journal')
2377 2377 return os.path.join(base, name.replace('journal', 'undo', 1))
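
# Pairing sketch (mirroring how the transaction machinery uses these
# helpers; 'journalfiles' is a hypothetical list of (vfs, name)
# pairs): each journal file is renamed to its undo counterpart,
# e.g. 'journal.bookmarks' -> 'undo.bookmarks', once the
# transaction ends.
#
#     renames = [(vfs, name, undoname(name))
#                for vfs, name in journalfiles]
#     after = aftertrans(renames)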
2378 2378
2379 2379 def instance(ui, path, create, intents=None, createopts=None):
2380 2380 localpath = util.urllocalpath(path)
2381 2381 if create:
2382 2382 createrepository(ui, localpath, createopts=createopts)
2383 2383
2384 2384 return localrepository(ui, localpath, intents=intents)
2385 2385
2386 2386 def islocal(path):
2387 2387 return True
2388 2388
2389 2389 def newreporequirements(ui, createopts=None):
2390 2390 """Determine the set of requirements for a new local repository.
2391 2391
2392 2392 Extensions can wrap this function to specify custom requirements for
2393 2393 new repositories.
2394 2394 """
2395 2395 createopts = createopts or {}
2396 2396
2397 2397 requirements = {'revlogv1'}
2398 2398 if ui.configbool('format', 'usestore'):
2399 2399 requirements.add('store')
2400 2400 if ui.configbool('format', 'usefncache'):
2401 2401 requirements.add('fncache')
2402 2402 if ui.configbool('format', 'dotencode'):
2403 2403 requirements.add('dotencode')
2404 2404
2405 2405 compengine = ui.config('experimental', 'format.compression')
2406 2406 if compengine not in util.compengines:
2407 2407 raise error.Abort(_('compression engine %s defined by '
2408 2408 'experimental.format.compression not available') %
2409 2409 compengine,
2410 2410 hint=_('run "hg debuginstall" to list available '
2411 2411 'compression engines'))
2412 2412
2413 2413 # zlib is the historical default and doesn't need an explicit requirement.
2414 2414 if compengine != 'zlib':
2415 2415 requirements.add('exp-compression-%s' % compengine)
2416 2416
2417 2417 if scmutil.gdinitconfig(ui):
2418 2418 requirements.add('generaldelta')
2419 2419 if ui.configbool('experimental', 'treemanifest'):
2420 2420 requirements.add('treemanifest')
2421 2421 # experimental config: format.sparse-revlog
2422 2422 if ui.configbool('format', 'sparse-revlog'):
2423 2423 requirements.add(SPARSEREVLOG_REQUIREMENT)
2424 2424
2425 2425 revlogv2 = ui.config('experimental', 'revlogv2')
2426 2426 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2427 2427 requirements.remove('revlogv1')
2428 2428 # generaldelta is implied by revlogv2.
2429 2429 requirements.discard('generaldelta')
2430 2430 requirements.add(REVLOGV2_REQUIREMENT)
2431 2431 # experimental config: format.internal-phase
2432 2432 if ui.configbool('format', 'internal-phase'):
2433 2433 requirements.add('internal-phase')
2434 2434
2435 2435 if createopts.get('narrowfiles'):
2436 2436 requirements.add(repository.NARROW_REQUIREMENT)
2437 2437
2438 2438 return requirements
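
# Extension sketch (assumes this module is imported as 'localrepo'
# and 'exp-myfeature' is a hypothetical requirement): wrap the
# function, as the docstring above suggests, to add a requirement.
#
#     def wrapped(orig, ui, createopts=None):
#         requirements = orig(ui, createopts=createopts)
#         requirements.add('exp-myfeature')
#         return requirements
#     extensions.wrapfunction(localrepo, 'newreporequirements', wrapped)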
2439 2439
2440 2440 def filterknowncreateopts(ui, createopts):
2441 2441 """Filters a dict of repo creation options against options that are known.
2442 2442
2443 2443 Receives a dict of repo creation options and returns a dict of those
2444 2444 options that we don't know how to handle.
2445 2445
2446 2446 This function is called as part of repository creation. If the
2447 2447 returned dict contains any items, repository creation will not
2448 2448 be allowed, as it means there was a request to create a repository
2449 2449 with options not recognized by loaded code.
2450 2450
2451 2451 Extensions can wrap this function to filter out creation options
2452 2452 they know how to handle.
2453 2453 """
2454 2454 known = {'narrowfiles'}
2455 2455
2456 2456 return {k: v for k, v in createopts.items() if k not in known}
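
# Extension sketch (hypothetical 'myfeature' option): wrap the
# function to declare a creation option as handled, so repository
# creation is not rejected.
#
#     def wrapped(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop('myfeature', None)
#         return unknown
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts', wrapped)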
2457 2457
2458 2458 def createrepository(ui, path, createopts=None):
2459 2459 """Create a new repository in a vfs.
2460 2460
2461 2461 ``path`` path to the new repo's working directory.
2462 2462 ``createopts`` options for the new repository.
2463 2463 """
2464 2464 createopts = createopts or {}
2465 2465
2466 2466 unknownopts = filterknowncreateopts(ui, createopts)
2467 2467
2468 2468 if not isinstance(unknownopts, dict):
2469 2469 raise error.ProgrammingError('filterknowncreateopts() did not return '
2470 2470 'a dict')
2471 2471
2472 2472 if unknownopts:
2473 2473 raise error.Abort(_('unable to create repository because of unknown '
2474 2474 'creation option: %s') %
2475 2475 ', '.join(sorted(unknownopts)),
2476 2476 hint=_('is a required extension not loaded?'))
2477 2477
2478 2478 requirements = newreporequirements(ui, createopts=createopts)
2479 2479
2480 2480 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2481 2481 if not wdirvfs.exists():
2482 2482 wdirvfs.makedirs()
2483 2483
2484 2484 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2485 2485 if hgvfs.exists():
2486 2486 raise error.RepoError(_('repository %s already exists') % path)
2487 2487
2488 2488 hgvfs.makedir(notindexed=True)
2489 2489
2490 2490 if b'store' in requirements:
2491 2491 hgvfs.mkdir(b'store')
2492 2492
2493 2493 # We create an invalid changelog outside the store so very old
2494 2494 # Mercurial versions (which didn't know about the requirements
2495 2495 # file) encounter an error on reading the changelog. This
2496 2496 # effectively locks out old clients and prevents them from
2497 2497 # mucking with a repo in an unknown format.
2498 2498 #
2499 2499 # The revlog header has version 2, which won't be recognized by
2500 2500 # such old clients.
2501 2501 hgvfs.append(b'00changelog.i',
2502 2502 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2503 2503 b'layout')
2504 2504
2505 2505 scmutil.writerequires(hgvfs, requirements)
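
# Usage sketch (hypothetical path): create the on-disk structure and
# then open it; instance() above performs both steps when create=True.
#
#     repo = instance(ui, '/path/to/repo', create=True)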
2506
2507 def poisonrepository(repo):
2508 """Poison a repository instance so it can no longer be used."""
2509 # Perform any cleanup on the instance.
2510 repo.close()
2511
2512 # Our strategy is to replace the type of the object with one that
2513 # has all attribute lookups result in error.
2514 #
2515 # But we have to allow the close() method because some constructors
2516 # of repos call close() on repo references.
2517 class poisonedrepository(object):
2518 def __getattribute__(self, item):
2519 if item == r'close':
2520 return object.__getattribute__(self, item)
2521
2522 raise error.ProgrammingError('repo instances should not be used '
2523 'after unshare')
2524
2525 def close(self):
2526 pass
2527
2528 # We may have a repoview, which intercepts __setattr__. So be sure
2529 # we operate at the lowest level possible.
2530 object.__setattr__(repo, r'__class__', poisonedrepository)
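
# Behavior sketch: after poisoning, close() remains a harmless no-op
# while any other attribute access raises ProgrammingError.
#
#     poisonrepository(repo)
#     repo.close()   # allowed
#     repo.root      # raises error.ProgrammingError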