##// END OF EJS Templates
localrepo: remove clone method by hoisting into hg.py...
Augie Fackler -
r27165:70884715 default
parent child Browse files
Show More
@@ -1,897 +1,906
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14
15 15 from .i18n import _
16 16 from .node import nullid
17 17
18 18 from . import (
19 19 bookmarks,
20 20 bundlerepo,
21 21 cmdutil,
22 22 discovery,
23 23 error,
24 24 exchange,
25 25 extensions,
26 26 httppeer,
27 27 localrepo,
28 28 lock,
29 29 merge as mergemod,
30 30 node,
31 31 phases,
32 32 repoview,
33 33 scmutil,
34 34 sshpeer,
35 35 statichttprepo,
36 36 ui as uimod,
37 37 unionrepo,
38 38 url,
39 39 util,
40 40 verify as verifymod,
41 41 )
42 42
43 43 release = lock.release
44 44
def _local(path):
    """Return the repo module able to open a local path.

    A plain file is assumed to be a bundle; anything else (a directory)
    is treated as a regular local repository.
    """
    expanded = util.expandpath(util.urllocalpath(path))
    return bundlerepo if os.path.isfile(expanded) else localrepo
48 48
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from a URL fragment into revisions.

    lrepo: local repository used to resolve the '.' branch (may be None)
    other: repo or peer whose branchmap is consulted
    branches: (hashbranch, branchlist) tuple as produced by parseurl()
    revs: pre-existing revision list (may be None)

    Returns (revs, checkout): the combined revision list and the revision
    to check out (or None).
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # No branch component in the URL: pass revs through unchanged,
        # picking the first one (if any) as the checkout target.
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        # Old servers cannot map branch names to heads; the fragment can
        # only be treated as a plain revision identifier.
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        # Expand a branch name into its heads (newest first) onto revs.
        # Returns False when the branch is unknown so the caller can
        # fall back (for hashbranch) or abort (for explicit branches).
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        # The fragment may name either a branch or a bare revision; if it
        # is not a known branch, assume it is a revision.
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
91 91
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    if u.fragment:
        # the '#branch' fragment names a branch; strip it from the URL
        branch = u.fragment
        u.fragment = None
    else:
        branch = None
    return str(u), (branch, branches or [])
101 101
# Map URL schemes to the handler able to open a repo/peer for that scheme.
# Values are either modules exposing an instance() function or callables
# returning such a module (see _peerlookup for the dispatch logic).
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
111 111
def _peerlookup(path):
    """Return the module (or module-like object) handling the repo at path."""
    scheme = util.url(path).scheme or 'file'
    handler = schemes.get(scheme) or schemes['file']
    try:
        # 'file' maps to a callable that chooses bundlerepo vs localrepo
        return handler(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(handler, 'instance'):
            raise
        return handler
124 124
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, str):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal(): treat the path as remote
        return False
133 133
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path)
    # posixfile gives consistent binary-mode semantics across platforms
    return util.posixfile(pathurl.localpath(), 'rb')
141 141
142 142 # a list of (ui, repo) functions called for wire peer initialization
143 143 wirepeersetupfuncs = []
144 144
def _peerorrepo(ui, path, create=False):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    # prefer the instance's own ui (carries repo-level config) when present
    ui = getattr(obj, "ui", ui)
    for _name, module in extensions.extensions(ui):
        reposetup = getattr(module, 'reposetup', None)
        if reposetup:
            reposetup(ui, obj)
    if not obj.local():
        # wire peers get extra setup from registered hook functions
        for setupfunc in wirepeersetupfuncs:
            setupfunc(ui, obj)
    return obj
157 157
def repository(ui, path='', create=False):
    """Return a local repository object for path, filtered to 'visible'.

    Aborts if the path refers to a repository that is not local.
    """
    obj = _peerorrepo(ui, path, create)
    localrepo = obj.local()
    if not localrepo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or obj.url()))
    return localrepo.filtered('visible')
166 166
def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    # build a ui suitable for remote interaction, then wrap as a peer
    return _peerorrepo(remoteui(uiorrepo, opts), path, create).peer()
171 171
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    urlpath = util.url(source).path
    if not urlpath:
        return ''
    # normpath strips trailing slashes so 'foo/' yields 'foo'
    return os.path.basename(os.path.normpath(urlpath))
192 192
def share(ui, source, dest=None, update=True, bookmarks=True):
    '''create a shared repository

    source: local repository (object or path/URL) to share from
    dest: destination path (defaults to basename of source)
    update: update the new working directory; a non-True value names the
        revision to check out
    bookmarks: if True, mark bookmarks as shared data as well
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # source is already a repo/peer object
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = scmutil.vfs(dest, realpath=True)
    destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    # copy the source's requirements, tolerating a missing 'requires' file
    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    # the 'shared' requirement plus 'sharedpath' is what makes this a share
    requirements += 'shared\n'
    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)

    # propagate the source's default path so pull/push work from the share
    default = srcrepo.ui.config('paths', 'default')
    if default:
        fp = r.vfs("hgrc", "w", text=True)
        fp.write("[paths]\n")
        fp.write("default = %s\n" % default)
        fp.close()

    if update:
        r.ui.status(_("updating working directory\n"))
        if update is not True:
            checkout = update
        # try the requested checkout first, then sensible fallbacks
        for test in (checkout, 'default', 'tip'):
            if test is None:
                continue
            try:
                uprev = r.lookup(test)
                break
            except error.RepoLookupError:
                continue
        _update(r, uprev)

    if bookmarks:
        # record that bookmarks are shared (read from the source repo)
        fp = r.vfs('shared', 'w')
        fp.write('bookmarks\n')
        fp.close()
264 264
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    ui: ui for progress and debug output
    srcrepo: repository whose store files are copied
    destpath: filesystem path of the destination store

    returns destlock (a held lock on the destination, or None); the caller
    is responsible for releasing it
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        # remember the last progress topic so it can be closed out below
        closetopic = [None]
        def prog(topic, pos):
            if pos is None:
                closetopic[0] = topic
            else:
                # offset by files already copied in previous iterations
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo has no draft/secret phase data worth copying
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        # release any acquired lock before propagating the error
        release(destlock)
        raise
310 310
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns (srcpeer, destpeer), like clone().
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                               "revision lookup and so doesn't "
                               "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    basename = os.path.basename(sharepath)

    if os.path.exists(sharepath):
        ui.status(_('(sharing from existing pooled repository %s)\n') %
                  basename)
    else:
        ui.status(_('(sharing from new pooled repository %s)\n') % basename)
        # Always use pull mode because hardlinks in share mode don't work well.
        # Never update because working copies aren't necessary in share mode.
        clone(ui, peeropts, source, dest=sharepath, pull=True,
              rev=rev, update=False, stream=stream)

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=update, bookmarks=False)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    return srcpeer, peer(ui, peeropts, dest)
352 352
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = scmutil.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    # auto-share mode: clone into a pooled store and share from it
    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(sharepool, util.sha1(source).hexdigest())
        else:
            raise error.Abort('unknown share naming mode: %s' % sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=rev, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        # cleandir is what gets rmtree'd if the clone fails part-way
        if islocal(dest):
            cleandir = dest

        # a hardlink/copy clone is only possible for local, secret-free
        # sources when neither --pull nor --rev was requested
        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            # Recomputing branch cache might be slow on big repos,
            # so just copy it
            def copybranchcache(fname):
                srcbranchcache = srcrepo.join('cache/%s' % fname)
                dstbranchcache = os.path.join(dstcachedir, fname)
                if os.path.exists(srcbranchcache):
                    if not os.path.exists(dstcachedir):
                        os.mkdir(dstcachedir)
                    util.copyfile(srcbranchcache, dstbranchcache)

            dstcachedir = os.path.join(destpath, 'cache')
            # In local clones we're copying all nodes, not just served
            # ones. Therefore copy all branch caches over.
            copybranchcache('branch2')
            for cachename in repoview.filtertable:
                copybranchcache('branch2-%s' % cachename)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                       "revision lookup and so doesn't "
                                       "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            local = destpeer.local()
            if local:
                if not stream:
                    if pull:
                        stream = False
                    else:
                        # None lets the server decide whether to stream
                        stream = None
                # internal config: ui.quietbookmarkmove
                quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
                try:
                    local.ui.setconfig(
                        'ui', 'quietbookmarkmove', True, 'clone')
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
                finally:
                    local.ui.restoreconfig(quiet)
            elif srcrepo:
                # remote destination: push from the local source instead
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        # clone succeeded; don't remove the destination on exit
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            # record the (password-stripped) source as the default path
            template = uimod.samplehgrcs['cloned']
            fp = destrepo.vfs("hgrc", "w", text=True)
            u = util.url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp.write(template % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    # no explicit target: prefer the '@' bookmark, then
                    # the default branch tip, then tip
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                       % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            # best-effort removal of a partial clone (ignore_errors=True)
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
620 629
def _showstats(repo, stats):
    # stats is the (updated, merged, removed, unresolved) tuple returned
    # by merge.update()
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)
624 633
def updaterepo(repo, node, overwrite):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    labels = ['working copy', 'destination']
    # branchmerge=False: this is a linear update; force=overwrite
    return mergemod.update(repo, node, False, overwrite, None, labels=labels)
633 642
def update(repo, node):
    """update the working directory to node, merging linear changes"""
    stats = updaterepo(repo, node, False)
    _showstats(repo, stats)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
641 650
642 651 # naming conflict in clone()
643 652 _update = update
644 653
def clean(repo, node, show_stats=True):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    # a forced update invalidates any in-progress graft state
    util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
    if show_stats:
        _showstats(repo, stats)
    # stats[3] counts unresolved files
    return stats[3] > 0
652 661
def merge(repo, node, force=None, remind=True):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, False)
    _showstats(repo, stats)
    unresolved = stats[3] > 0
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved
664 673
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    subreporecurse().
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # getremotechanges may return a bundle-backed repo for 'other';
    # cleanupfn removes any temporary bundle file afterwards
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()

        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
694 703
def incoming(ui, repo, source, opts):
    """Show changesets in 'source' not present in repo; return exit code."""
    def subreporecurse():
        # recurse into subrepos when --subrepos was given; exit code 0
        # wins if any subrepo has incoming changes
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                ret = min(ret, ctx.sub(subpath).incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        skipmerges = opts.get('no_merges')
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
719 728
def _outgoing(ui, repo, dest, opts):
    """Find changesets missing from dest; returns (missing, otherpeer)."""
    path = ui.expandpath(dest or 'default-push', dest or 'default')
    path, branches = parseurl(path, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(path))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, path)
    outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
                                            force=opts.get('force'))
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
735 744
def outgoing(ui, repo, dest, opts):
    """Show changesets in repo not present in 'dest'; return exit code."""
    def recurse():
        # recurse into subrepos when --subrepos was given; exit code 0
        # wins if any subrepo has outgoing changes
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                ret = min(ret, ctx.sub(subpath).outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    skipmerges = opts.get('no_merges')
    shown = 0
    for n in o:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
768 777
def verify(repo):
    """verify the consistency of a repository

    Also verifies any subrepositories referenced from .hgsubstate in
    visible changesets. Returns the combined exit status (non-zero on
    any failure).
    """
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    # any subrepo failure taints the overall result
                    ret = ctx.sub(subpath).verify() or ret
            except Exception:
                # deliberately broad: a corrupt .hgsubstate entry should
                # be reported, not abort the whole verify run
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
793 802
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'):
        # src looks like a repository: start from its base (global) config
        # and copy target options from the repo-level ui below
        dst = src.baseui.copy()
        src = src.ui
    else:
        # src is a plain ui object: keep all global options
        dst = src.copy()

    # ssh-specific options: explicit command-line values win over config
    for key in ('ssh', 'remotecmd'):
        value = opts.get(key) or src.config('ui', key)
        if value:
            dst.setconfig("ui", key, value, 'copied')

    # bundle-specific options
    root = src.config('bundle', 'mainreporoot')
    if root:
        dst.setconfig('bundle', 'mainreporoot', root, 'copied')

    # copy selected local settings to the remote ui
    for section in ('auth', 'hostfingerprints', 'http_proxy'):
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts == '!':
        # '!' disables certificate verification; pass through verbatim
        dst.setconfig('web', 'cacerts', cacerts, 'copied')
    elif cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
824 833
825 834 # Files of interest
826 835 # Used to check if the repository has changed looking at mtime and size of
827 836 # these files.
828 837 foi = [('spath', '00changelog.i'),
829 838 ('spath', 'phaseroots'), # ! phase can change content at the same size
830 839 ('spath', 'obsstore'),
831 840 ('path', 'bookmarks'), # ! bookmark can change content at the same size
832 841 ]
833 842
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files-of-interest plus the newest mtime seen
        self._state, self.mtime = self._repostate()

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        self._repo = repository(self._repo.baseui, self._repo.url())
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size), ...) for the files of interest plus the
        most recent mtime observed, as a cheap change fingerprint."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist (e.g. no bookmarks yet); stat the
                # containing directory so we still get some signal
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,1938 +1,1922
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, wdirrev, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset, cmdutil
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """A filecache descriptor that always operates on the unfiltered repo.

    All filecache usage on a repo is for logic that should see the
    unfiltered repository, so every access is redirected there.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve names relative to the store (.hg/store), not .hg/ itself
        return obj.sjoin(fname)
40 40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # Accessed through a filtered view: read the value that is
            # cached on (or computed for) the unfiltered repository.
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
49 49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # write straight into the instance dict so later lookups bypass
        # the descriptor entirely
        object.__setattr__(obj, self.name, value)
55 55
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60 60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version

    The wrapped method is invoked with ``repo.unfiltered()`` substituted
    for the repo argument, so the body never sees a filtered repoview.
    """
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    # Preserve introspection metadata (help(), profiling, tracebacks);
    # the plain closure would otherwise report itself as 'wrapper'.
    wrapper.__name__ = orig.__name__
    wrapper.__doc__ = orig.__doc__
    return wrapper
66 66
# Capabilities advertised by a modern local peer.
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
# Legacy peers additionally understand changegroupsubset.
legacycaps = moderncaps | set(['changegroupsubset'])
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # operate on the 'served' filtered view of the repository
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        # let the repo (and extensions) trim the advertised capabilities
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        # delegate to the repo so pending caches get written
        self._repo.close()

    def _capabilities(self):
        # capabilities computed once at construction time
        return self._caps

    def local(self):
        # a local peer exposes its backing repository directly
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        # resolve a symbolic identifier to a node
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        # which of `nodes` do we know about (and may serve)?
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # another client raced us to push; report it as a response error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        # apply an already-read changegroup to the repository
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise only the legacy capability set
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        # legacy wire-protocol command; delegate to the repository
        return self._repo.branches(nodes)

    def between(self, pairs):
        # legacy wire-protocol command; delegate to the repository
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        # build a (bundle1-era) changegroup rooted at basenodes
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
class localrepository(object):
    """A repository backed by the local filesystem (the ``.hg`` layout)."""

    # Requirements that describe the store format; also advertised to
    # peers via localpeer.supportedformats.
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # Everything this class knows how to open, store-format or not.
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # Requirements forwarded to the revlog opener via svfs.options
    # (see _applyopenerreqs).
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # Name of the repoview filter in effect; None means unfiltered.
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        """Return the requirements every newly created repo starts with."""
        return ['revlogv1']
208 208
    def __init__(self, baseui, path=None, create=False):
        """Open (or create) the repository rooted at ``path``.

        baseui - ui object to copy configuration from
        path - working directory root (``.hg`` lives inside it)
        create - when True, initialize a new repository; raises RepoError
        if one already exists there. When False, raises RepoError if no
        repository is found.
        """
        self.requirements = set()
        # vfs rooted at the working directory
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        # vfs rooted at .hg/
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing or unreadable .hg/hgrc is not an error
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run callbacks registered by enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                # initialize a fresh repository, recording the requirements
                # implied by the current configuration
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if scmutil.gdinitconfig(self.ui):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # existing repo: read and validate its requirements file
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        # honor a share: .hg/sharedpath redirects the store elsewhere
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
327 327
    def close(self):
        """Release the repository, flushing any pending caches."""
        self._writecaches()

    def _writecaches(self):
        # persist the rev-branch cache if it was instantiated
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        """Return the capability set to advertise, adding bundle2 blob."""
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            # embed the url-quoted bundle2 capabilities blob
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        """Translate requirements and config into store-opener options."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        """Write the current requirement set to .hg/requires."""
        scmutil.writerequires(self.vfs, self.requirements)
365 365
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        ``path`` is an absolute filesystem path; returns False when it
        is outside this repo's root or not a registered subrepository.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # the path itself is a registered subrepo
                    return True
                else:
                    # nested inside a subrepo: let that subrepo decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk upwards one path component at a time
                parts.pop()
        return False
403 403
    def peer(self):
        """Return a localpeer view of this repo."""
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
420 420
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # mapping of bookmark name -> node, refreshed when .hg/bookmarks
        # changes on disk
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        # the currently active bookmark (or None), tracked via
        # .hg/bookmarks.current
        return bookmarks.readactive(self)

    def bookmarkheads(self, bookmark):
        """Return the nodes of all divergent variants of ``bookmark``.

        Variants share the name before the '@' suffix (e.g. foo, foo@1).
        """
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads
436 436
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # cache of phase information for this repository
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        """The obsolescence-marker store for this repository."""
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist but the feature is off: warn, don't fail
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        """The changelog, reading pending data when HG_PENDING matches."""
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        """The manifest log."""
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        """Return the manifest log for subdirectory ``dir``."""
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        """The working directory state, validated against the changelog."""
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        """Map an unknown dirstate parent to nullid, warning only once."""
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
493 493
    def __getitem__(self, changeid):
        """Return the changectx for ``changeid``.

        ``None`` and the working-directory revision yield a workingctx;
        a slice yields a list of changectx, skipping filtered revisions.
        """
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        """True if ``changeid`` resolves to a changeset in this repo."""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        """Number of revisions in the changelog."""
        return len(self.changelog)

    def __iter__(self):
        """Iterate over revision numbers."""
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Return a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
540 540
    def url(self):
        """Return the 'file:' URL of this repository's root."""
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
552 552
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        """Record tag(s) for ``node``, either locally or in .hgtags.

        Worker behind tag(); performs no dirty-.hgtags safety checks.
        Returns the node of the tagging commit, or None for local tags.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines, making sure the file ends with a newline
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                        name in self._tagscache.tagtypes):
                    # record the previous value so the tag history is kept
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to clobber uncommitted .hgtags changes
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
656 656
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by tagslist()/nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # filtered view: the cached tags may reference hidden nodes,
            # so recompute instead of trusting _tagscache
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # build the reverse (node -> tag names) mapping lazily
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """Return the sorted list of bookmarks pointing at ``node``."""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)
767 767
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        """Return (creating if needed) the rev -> branch name cache."""
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        """Resolve a changeset identifier to its node."""
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        """Return the branch ``key`` names, looking at ``remote`` if given.

        When ``key`` is itself a branch name it is returned unchanged;
        otherwise it is resolved to a changeset and that changeset's
        branch is returned.
        """
        repo = remote or self
        if key in repo.branchmap():
            return key

        # only use the remote's revisions when it is a local repo
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        """Return a list of booleans: which of ``nodes`` are known here.

        Secret changesets are reported as unknown.
        """
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
816 816
    def local(self):
        """Return self: this repository is local (see statichttprepo)."""
        return self

    def publishing(self):
        """True when this repo publishes changesets on push/pull."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        """True when a hardlink/copy clone of this repo is safe."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        """Join ``f`` (and optional components) below .hg/."""
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        """Join ``f`` (and optional components) below the working dir."""
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        """Return the filelog for tracked file ``f``."""
        if f[0] == '/':
            # normalize away a leading slash
            f = f[1:]
        return filelog.filelog(self.svfs, f)
850 850
    def changectx(self, changeid):
        """Return the changectx for ``changeid`` (same as repo[changeid])."""
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, preserving copy records."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                # drop copy records whose endpoints are not in p1
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        """Return the current working directory (dirstate-relative)."""
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        """Return ``f`` expressed relative to ``cwd``."""
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        """Open file ``f`` in the working directory."""
        return self.wvfs(f, mode)

    def _link(self, f):
        """True if working-directory file ``f`` is a symlink."""
        return self.wvfs.islink(f)
891 891
    def _loadfilter(self, filter):
        """Build (and cache) the filter pipeline for config section *filter*.

        Returns a list of (matcher, filterfn, params) triples read from
        ui.configitems(filter).
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                # '!' explicitly disables a filter for this pattern
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name uses
                # that in-process filter; the remainder becomes its params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise fall back to running an external command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
915 915
916 916 def _filter(self, filterpats, filename, data):
917 917 for mf, fn, cmd in filterpats:
918 918 if mf(filename):
919 919 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
920 920 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
921 921 break
922 922
923 923 return data
924 924
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filter pipeline loaded from the 'encode' config section; cached
        # via unfilteredpropertycache
        return self._loadfilter('encode')
928 928
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filter pipeline loaded from the 'decode' config section; cached
        # via unfilteredpropertycache
        return self._loadfilter('decode')
932 932
933 933 def adddatafilter(self, name, filter):
934 934 self._datafilters[name] = filter
935 935
936 936 def wread(self, filename):
937 937 if self._link(filename):
938 938 data = self.wvfs.readlink(filename)
939 939 else:
940 940 data = self.wvfs.read(filename)
941 941 return self._filter(self._encodefilterpats, filename, data)
942 942
943 943 def wwrite(self, filename, data, flags):
944 944 """write ``data`` into ``filename`` in the working directory
945 945
946 946 This returns length of written (maybe decoded) data.
947 947 """
948 948 data = self._filter(self._decodefilterpats, filename, data)
949 949 if 'l' in flags:
950 950 self.wvfs.symlink(data, filename)
951 951 else:
952 952 self.wvfs.write(filename, data)
953 953 if 'x' in flags:
954 954 self.wvfs.setflags(filename, False, True)
955 955 return len(data)
956 956
957 957 def wwritedata(self, filename, data):
958 958 return self._filter(self._decodefilterpats, filename, data)
959 959
960 960 def currenttransaction(self):
961 961 """return the current transaction or None if non exists"""
962 962 if self._transref:
963 963 tr = self._transref()
964 964 else:
965 965 tr = None
966 966
967 967 if tr and tr.running():
968 968 return tr
969 969 return None
970 970
    def transaction(self, desc, report=None):
        """Open a new transaction named *desc*, or nest into a running one.

        *report*, when given, replaces ui.warn as the reporter function
        handed to the transaction. Returns the transaction object.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            # a transaction is already running; callers get a nested scope
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1059 1059
1060 1060 def _journalfiles(self):
1061 1061 return ((self.svfs, 'journal'),
1062 1062 (self.vfs, 'journal.dirstate'),
1063 1063 (self.vfs, 'journal.branch'),
1064 1064 (self.vfs, 'journal.desc'),
1065 1065 (self.vfs, 'journal.bookmarks'),
1066 1066 (self.svfs, 'journal.phaseroots'))
1067 1067
1068 1068 def undofiles(self):
1069 1069 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1070 1070
1071 1071 def _writejournal(self, desc):
1072 1072 self.vfs.write("journal.dirstate",
1073 1073 self.vfs.tryread("dirstate"))
1074 1074 self.vfs.write("journal.branch",
1075 1075 encoding.fromlocal(self.dirstate.branch()))
1076 1076 self.vfs.write("journal.desc",
1077 1077 "%d\n%s\n" % (len(self), desc))
1078 1078 self.vfs.write("journal.bookmarks",
1079 1079 self.vfs.tryread("bookmarks"))
1080 1080 self.svfs.write("journal.phaseroots",
1081 1081 self.svfs.tryread("phaseroots"))
1082 1082
    def recover(self):
        """Roll back an interrupted transaction.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                # map journal file prefixes to the vfs they live in
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
1099 1099
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction recorded in 'undo' (see _rollback).

        Returns 1 when no rollback information is available.
        """
        # acquire wlock before lock, per the documented ordering
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                # guard the dirstate so a failed rollback cannot leave a
                # partially-restored dirstate behind
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1114 1114
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Perform the actual rollback of the 'undo' files.

        Returns 0 on success (including dry runs); raises error.Abort when
        rolling back the last commit could lose working-directory data.
        """
        ui = self.ui
        try:
            # undo.desc: "<old repo length>\n<desc>[\n<detail>]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # did the rollback remove a dirstate parent from history?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1186 1186
1187 1187 def invalidatecaches(self):
1188 1188
1189 1189 if '_tagscache' in vars(self):
1190 1190 # can't use delattr on proxy
1191 1191 del self.__dict__['_tagscache']
1192 1192
1193 1193 self.unfiltered()._branchcaches.clear()
1194 1194 self.invalidatevolatilesets()
1195 1195
    def invalidatevolatilesets(self):
        # drop caches that depend on volatile state (filtered revisions,
        # obsolescence markers)
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1199 1199
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # first drop whatever the dirstate itself has cached ...
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            # ... then drop the cached dirstate from the unfiltered repo
            delattr(self.unfiltered(), 'dirstate')
1216 1216
    def invalidate(self, clearfilecache=False):
        '''Drop cached store data so the next access rereads from disk.

        When *clearfilecache* is true the filecache entries themselves are
        removed, not just the cached attribute values.'''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                # the cached value was never materialized on the instance
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()
1232 1232
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1239 1239
1240 1240 def _refreshfilecachestats(self, tr):
1241 1241 """Reload stats of cached files so that they are flagged as valid"""
1242 1242 for k, ce in self._filecache.items():
1243 1243 if k == 'dirstate' or k not in self.__dict__:
1244 1244 continue
1245 1245 ce.refresh()
1246 1246
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire *lockname* on *vfs* and return the lock object.

        When the lock is held elsewhere: raise error.LockHeld if *wait* is
        false, otherwise retry with a timeout (ui.timeout, default 600s).
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            # first attempt with timeout 0: fail fast if already held
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1271 1271
1272 1272 def _afterlock(self, callback):
1273 1273 """add a callback to be run when the repository is fully unlocked
1274 1274
1275 1275 The callback will be executed when the outermost lock is released
1276 1276 (with wlock being higher level than 'lock')."""
1277 1277 for ref in (self._wlockref, self._lockref):
1278 1278 l = ref and ref()
1279 1279 if l and l.held:
1280 1280 l.postrelease.append(callback)
1281 1281 break
1282 1282 else: # no lock have been found.
1283 1283 callback()
1284 1284
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # the store lock is already held: re-enter it and reuse
            l.lock()
            return l

        # self.invalidate is the acquire callback: stale caches are dropped
        # whenever the lock is (re)acquired
        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1301 1301
1302 1302 def _wlockchecktransaction(self):
1303 1303 if self.currenttransaction() is not None:
1304 1304 raise error.LockInheritanceContractViolation(
1305 1305 'wlock cannot be inherited in the middle of a transaction')
1306 1306
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # already held: re-enter the existing lock and reuse it
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # release callback: flush (or discard, mid-parent-change) the
            # dirstate and refresh its filecache stat entry
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1343 1343
1344 1344 def _currentlock(self, lockref):
1345 1345 """Returns the lock if it's held, or None if it's not."""
1346 1346 if lockref is None:
1347 1347 return None
1348 1348 l = lockref()
1349 1349 if l is None or not l.held:
1350 1350 return None
1351 1351 return l
1352 1352
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)
1356 1356
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """Commit an individual file as part of a larger transaction.

        Returns the filelog node for the file's new (or reused) revision;
        files actually changed are appended to *changelist*.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # an existing filelog revision can be reused as-is when it
            # already is one of the parents
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1442 1442
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            # refuse to commit over an unresolved or driver-resolved merge
            unresolved, driverresolved = False, False
            ms = mergemod.mergestate.read(self)
            for f in status.modified:
                if f in ms:
                    if ms[f] == 'u':
                        unresolved = True
                    elif ms[f] == 'd':
                        driverresolved = True

            if unresolved:
                raise error.Abort(_('unresolved merge conflicts '
                                   '(see "hg help resolve")'))
            if driverresolved or ms.mdstate() != 's':
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            lock = self.lock()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1632 1632
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the new changelog node.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        # a missing file (ENOENT) is tolerated unless the
                        # caller asked for strict error reporting
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1718 1718
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()
1736 1736
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1770 1770
1771 1771 def walk(self, match, node=None):
1772 1772 '''
1773 1773 walk recursively through the directory tree or a given
1774 1774 changeset, finding all files matched by the match
1775 1775 function
1776 1776 '''
1777 1777 return self[node].walk(match)
1778 1778
1779 1779 def status(self, node1='.', node2=None, match=None,
1780 1780 ignored=False, clean=False, unknown=False,
1781 1781 listsubrepos=False):
1782 1782 '''a convenience method that calls node1.status(node2)'''
1783 1783 return self[node1].status(node2, match, ignored, clean, unknown,
1784 1784 listsubrepos)
1785 1785
1786 1786 def heads(self, start=None):
1787 1787 heads = self.changelog.heads(start)
1788 1788 # sort the output in rev descending order
1789 1789 return sorted(heads, key=self.changelog.rev, reverse=True)
1790 1790
1791 1791 def branchheads(self, branch=None, start=None, closed=False):
1792 1792 '''return a (possibly filtered) list of heads for the given branch
1793 1793
1794 1794 Heads are returned in topological order, from newest to oldest.
1795 1795 If branch is None, use the dirstate branch.
1796 1796 If start is not None, return only heads reachable from start.
1797 1797 If closed is True, return heads that are marked as closed as well.
1798 1798 '''
1799 1799 if branch is None:
1800 1800 branch = self[None].branch()
1801 1801 branches = self.branchmap()
1802 1802 if branch not in branches:
1803 1803 return []
1804 1804 # the cache returns heads ordered lowest to highest
1805 1805 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1806 1806 if start is not None:
1807 1807 # filter out the heads that cannot be reached from startrev
1808 1808 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1809 1809 bheads = [h for h in bheads if h in fbheads]
1810 1810 return bheads
1811 1811
1812 1812 def branches(self, nodes):
1813 1813 if not nodes:
1814 1814 nodes = [self.changelog.tip()]
1815 1815 b = []
1816 1816 for n in nodes:
1817 1817 t = n
1818 1818 while True:
1819 1819 p = self.changelog.parents(n)
1820 1820 if p[1] != nullid or p[0] == nullid:
1821 1821 b.append((t, n, p[0], p[1]))
1822 1822 break
1823 1823 n = p[0]
1824 1824 return b
1825 1825
1826 1826 def between(self, pairs):
1827 1827 r = []
1828 1828
1829 1829 for top, bottom in pairs:
1830 1830 n, l, i = top, [], 0
1831 1831 f = 1
1832 1832
1833 1833 while n != bottom and n != nullid:
1834 1834 p = self.changelog.parents(n)[0]
1835 1835 if i == f:
1836 1836 l.append(n)
1837 1837 f = f * 2
1838 1838 n = p
1839 1839 i += 1
1840 1840
1841 1841 r.append(l)
1842 1842
1843 1843 return r
1844 1844
1845 1845 def checkpush(self, pushop):
1846 1846 """Extensions can override this function if additional checks have
1847 1847 to be performed before pushing, or call it if they override push
1848 1848 command.
1849 1849 """
1850 1850 pass
1851 1851
1852 1852 @unfilteredpropertycache
1853 1853 def prepushoutgoinghooks(self):
1854 1854 """Return util.hooks consists of "(repo, remote, outgoing)"
1855 1855 functions, which are called before pushing changesets.
1856 1856 """
1857 1857 return util.hooks()
1858 1858
1859 def clone(self, remote, heads=[], stream=None):
1860 '''clone remote repository.
1861
1862 keyword arguments:
1863 heads: list of revs to clone (forces use of pull)
1864 stream: use streaming clone if possible'''
1865 # internal config: ui.quietbookmarkmove
1866 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1867 try:
1868 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1869 pullop = exchange.pull(self, remote, heads,
1870 streamclonerequested=stream)
1871 return pullop.cgresult
1872 finally:
1873 self.ui.restoreconfig(quiet)
1874
    def pushkey(self, namespace, key, old, new):
        # Run the 'prepushkey' hook first; it may veto the update.  When a
        # transaction is open, its hook arguments (e.g. the transaction id)
        # are forwarded so hooks can correlate this pushkey with it.
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            # A vetoing hook is reported to the user; the pushkey simply
            # fails (returns False) instead of propagating the abort.
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        # The post-update 'pushkey' hook is deferred until the lock is
        # released (via _afterlock) rather than fired immediately.
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
1898 1882
1899 1883 def listkeys(self, namespace):
1900 1884 self.hook('prelistkeys', throw=True, namespace=namespace)
1901 1885 self.ui.debug('listing keys for "%s"\n' % namespace)
1902 1886 values = pushkey.list(self, namespace)
1903 1887 self.hook('listkeys', namespace=namespace, values=values)
1904 1888 return values
1905 1889
1906 1890 def debugwireargs(self, one, two, three=None, four=None, five=None):
1907 1891 '''used to test argument passing over the wire'''
1908 1892 return "%s %s %s %s %s" % (one, two, three, four, five)
1909 1893
1910 1894 def savecommitmessage(self, text):
1911 1895 fp = self.vfs('last-message.txt', 'wb')
1912 1896 try:
1913 1897 fp.write(text)
1914 1898 finally:
1915 1899 fp.close()
1916 1900 return self.pathto(fp.name[len(self.root) + 1:])
1917 1901
# used to avoid circular references so destructors work
def aftertrans(files):
    '''Return a callback that performs the queued (vfs, src, dest)
    renames; entries whose journal file does not yet exist are skipped.
    '''
    pending = [tuple(entry) for entry in files]
    def finish():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return finish
1928 1912
def undoname(fn):
    '''Map a journal file path to its undo counterpart, e.g.
    ``journal.dirstate`` -> ``undo.dirstate``; only the first
    occurrence of "journal" in the basename is replaced.'''
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1933 1917
def instance(ui, path, create):
    # Factory used by the generic repository opener: build (or create)
    # a localrepository at *path*, converting file:// URLs to plain
    # filesystem paths first.
    return localrepository(ui, util.urllocalpath(path), create)
1936 1920
def islocal(path):
    '''This repository type always lives on the local filesystem.'''
    return True
General Comments 0
You need to be logged in to leave comments. Login now