commit: mark internal-only option
Matt Mackall
r25840:25897d77 default
@@ -1,1947 +1,1948
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, wdirrev, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on a repo is done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that applies to the unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering into account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate a method that always needs to be run on the unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
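These helpers are the pattern for code that must ignore repoview filtering. A minimal sketch of how an extension might use them, assuming this module is imported as mercurial.localrepo; expensive_scan is a hypothetical placeholder:

    from mercurial import localrepo

    class myrepo(localrepo.localrepository):
        @localrepo.unfilteredpropertycache
        def _myscancache(self):
            # computed once on the unfiltered repo, shared by all filtered views
            return expensive_scan(self)  # expensive_scan: hypothetical helper

        @localrepo.unfilteredmethod
        def myscan(self):
            # 'self' is always the unfiltered repository here
            return self._myscancache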
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire-level function happier. We need to build a proper object
116 116 # from it in the local peer.
117 117 cg = bundle2.getunbundler(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle to a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 try:
129 129 cg = exchange.readbundle(self.ui, cg, None)
130 130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 131 if util.safehasattr(ret, 'getchunks'):
132 132 # This is a bundle20 object, turn it into an unbundler.
133 133 # This little dance should be dropped eventually when the
134 134 # API is finally improved.
135 135 stream = util.chunkbuffer(ret.getchunks())
136 136 ret = bundle2.getunbundler(self.ui, stream)
137 137 return ret
138 138 except Exception as exc:
139 139 # If the exception contains output salvaged from a bundle2
140 140 # reply, we need to make sure it is printed before continuing
141 141 # to fail. So we build a bundle2 with such output and consume
142 142 # it directly.
143 143 #
144 144 # This is not very elegant but allows a "simple" solution for
145 145 # issue4594
146 146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 147 if output:
148 148 bundler = bundle2.bundle20(self._repo.ui)
149 149 for out in output:
150 150 bundler.addpart(out)
151 151 stream = util.chunkbuffer(bundler.getchunks())
152 152 b = bundle2.getunbundler(self.ui, stream)
153 153 bundle2.processbundle(self._repo, b)
154 154 raise
155 155 except error.PushRaced as exc:
156 156 raise error.ResponseError(_('push failed:'), str(exc))
157 157
158 158 def lock(self):
159 159 return self._repo.lock()
160 160
161 161 def addchangegroup(self, cg, source, url):
162 162 return changegroup.addchangegroup(self._repo, cg, source, url)
163 163
164 164 def pushkey(self, namespace, key, old, new):
165 165 return self._repo.pushkey(namespace, key, old, new)
166 166
167 167 def listkeys(self, namespace):
168 168 return self._repo.listkeys(namespace)
169 169
170 170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 171 '''used to test argument passing over the wire'''
172 172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
174 174 class locallegacypeer(localpeer):
175 175 '''peer extension which implements legacy methods too; used for tests with
176 176 restricted capabilities'''
177 177
178 178 def __init__(self, repo):
179 179 localpeer.__init__(self, repo, caps=legacycaps)
180 180
181 181 def branches(self, nodes):
182 182 return self._repo.branches(nodes)
183 183
184 184 def between(self, pairs):
185 185 return self._repo.between(pairs)
186 186
187 187 def changegroup(self, basenodes, source):
188 188 return changegroup.changegroup(self._repo, basenodes, source)
189 189
190 190 def changegroupsubset(self, bases, heads, source):
191 191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 196 'manifestv2'))
197 197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 198 'dotencode'))
199 199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 200 filtername = None
201 201
202 202 # a list of (ui, featureset) functions.
203 203 # only functions defined in module of enabled extensions are invoked
204 204 featuresetupfuncs = set()
205 205
206 206 def _baserequirements(self, create):
207 207 return ['revlogv1']
208 208
209 209 def __init__(self, baseui, path=None, create=False):
210 210 self.requirements = set()
211 211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 212 self.wopener = self.wvfs
213 213 self.root = self.wvfs.base
214 214 self.path = self.wvfs.join(".hg")
215 215 self.origroot = path
216 216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 217 self.vfs = scmutil.vfs(self.path)
218 218 self.opener = self.vfs
219 219 self.baseui = baseui
220 220 self.ui = baseui.copy()
221 221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 222 # A list of callbacks to shape the phase if no data were found.
223 223 # Callbacks are in the form: func(repo, roots) --> processed root.
224 224 # This list is to be filled by extensions during repo setup.
225 225 self._phasedefaults = []
226 226 try:
227 227 self.ui.readconfig(self.join("hgrc"), self.root)
228 228 extensions.loadall(self.ui)
229 229 except IOError:
230 230 pass
231 231
232 232 if self.featuresetupfuncs:
233 233 self.supported = set(self._basesupported) # use private copy
234 234 extmods = set(m.__name__ for n, m
235 235 in extensions.extensions(self.ui))
236 236 for setupfunc in self.featuresetupfuncs:
237 237 if setupfunc.__module__ in extmods:
238 238 setupfunc(self.ui, self.supported)
239 239 else:
240 240 self.supported = self._basesupported
241 241
242 242 if not self.vfs.isdir():
243 243 if create:
244 244 if not self.wvfs.exists():
245 245 self.wvfs.makedirs()
246 246 self.vfs.makedir(notindexed=True)
247 247 self.requirements.update(self._baserequirements(create))
248 248 if self.ui.configbool('format', 'usestore', True):
249 249 self.vfs.mkdir("store")
250 250 self.requirements.add("store")
251 251 if self.ui.configbool('format', 'usefncache', True):
252 252 self.requirements.add("fncache")
253 253 if self.ui.configbool('format', 'dotencode', True):
254 254 self.requirements.add('dotencode')
255 255 # create an invalid changelog
256 256 self.vfs.append(
257 257 "00changelog.i",
258 258 '\0\0\0\2' # represents revlogv2
259 259 ' dummy changelog to prevent using the old repo layout'
260 260 )
261 261 # experimental config: format.generaldelta
262 262 if self.ui.configbool('format', 'generaldelta', False):
263 263 self.requirements.add("generaldelta")
264 264 if self.ui.configbool('experimental', 'treemanifest', False):
265 265 self.requirements.add("treemanifest")
266 266 if self.ui.configbool('experimental', 'manifestv2', False):
267 267 self.requirements.add("manifestv2")
268 268 else:
269 269 raise error.RepoError(_("repository %s not found") % path)
270 270 elif create:
271 271 raise error.RepoError(_("repository %s already exists") % path)
272 272 else:
273 273 try:
274 274 self.requirements = scmutil.readrequires(
275 275 self.vfs, self.supported)
276 276 except IOError as inst:
277 277 if inst.errno != errno.ENOENT:
278 278 raise
279 279
280 280 self.sharedpath = self.path
281 281 try:
282 282 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
283 283 realpath=True)
284 284 s = vfs.base
285 285 if not vfs.exists():
286 286 raise error.RepoError(
287 287 _('.hg/sharedpath points to nonexistent directory %s') % s)
288 288 self.sharedpath = s
289 289 except IOError as inst:
290 290 if inst.errno != errno.ENOENT:
291 291 raise
292 292
293 293 self.store = store.store(
294 294 self.requirements, self.sharedpath, scmutil.vfs)
295 295 self.spath = self.store.path
296 296 self.svfs = self.store.vfs
297 297 self.sjoin = self.store.join
298 298 self.vfs.createmode = self.store.createmode
299 299 self._applyopenerreqs()
300 300 if create:
301 301 self._writerequirements()
302 302
303 303
304 304 self._branchcaches = {}
305 305 self._revbranchcache = None
306 306 self.filterpats = {}
307 307 self._datafilters = {}
308 308 self._transref = self._lockref = self._wlockref = None
309 309
310 310 # A cache for various files under .hg/ that tracks file changes,
311 311 # (used by the filecache decorator)
312 312 #
313 313 # Maps a property name to its util.filecacheentry
314 314 self._filecache = {}
315 315
316 316 # holds sets of revisions to be filtered
317 317 # should be cleared when something might have changed the filter value:
318 318 # - new changesets,
319 319 # - phase change,
320 320 # - new obsolescence marker,
321 321 # - working directory parent change,
322 322 # - bookmark changes
323 323 self.filteredrevcache = {}
324 324
325 325 # generic mapping between names and nodes
326 326 self.names = namespaces.namespaces()
327 327
328 328 def close(self):
329 329 self._writecaches()
330 330
331 331 def _writecaches(self):
332 332 if self._revbranchcache:
333 333 self._revbranchcache.write()
334 334
335 335 def _restrictcapabilities(self, caps):
336 336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 337 caps = set(caps)
338 338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 339 caps.add('bundle2=' + urllib.quote(capsblob))
340 340 return caps
341 341
342 342 def _applyopenerreqs(self):
343 343 self.svfs.options = dict((r, 1) for r in self.requirements
344 344 if r in self.openerreqs)
345 345 # experimental config: format.chunkcachesize
346 346 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
347 347 if chunkcachesize is not None:
348 348 self.svfs.options['chunkcachesize'] = chunkcachesize
349 349 # experimental config: format.maxchainlen
350 350 maxchainlen = self.ui.configint('format', 'maxchainlen')
351 351 if maxchainlen is not None:
352 352 self.svfs.options['maxchainlen'] = maxchainlen
353 353 # experimental config: format.manifestcachesize
354 354 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
355 355 if manifestcachesize is not None:
356 356 self.svfs.options['manifestcachesize'] = manifestcachesize
357 357
358 358 def _writerequirements(self):
359 359 scmutil.writerequires(self.vfs, self.requirements)
360 360
361 361 def _checknested(self, path):
362 362 """Determine if path is a legal nested repository."""
363 363 if not path.startswith(self.root):
364 364 return False
365 365 subpath = path[len(self.root) + 1:]
366 366 normsubpath = util.pconvert(subpath)
367 367
368 368 # XXX: Checking against the current working copy is wrong in
369 369 # the sense that it can reject things like
370 370 #
371 371 # $ hg cat -r 10 sub/x.txt
372 372 #
373 373 # if sub/ is no longer a subrepository in the working copy
374 374 # parent revision.
375 375 #
376 376 # However, it can of course also allow things that would have
377 377 # been rejected before, such as the above cat command if sub/
378 378 # is a subrepository now, but was a normal directory before.
379 379 # The old path auditor would have rejected by mistake since it
380 380 # panics when it sees sub/.hg/.
381 381 #
382 382 # All in all, checking against the working copy seems sensible
383 383 # since we want to prevent access to nested repositories on
384 384 # the filesystem *now*.
385 385 ctx = self[None]
386 386 parts = util.splitpath(subpath)
387 387 while parts:
388 388 prefix = '/'.join(parts)
389 389 if prefix in ctx.substate:
390 390 if prefix == normsubpath:
391 391 return True
392 392 else:
393 393 sub = ctx.sub(prefix)
394 394 return sub.checknested(subpath[len(prefix) + 1:])
395 395 else:
396 396 parts.pop()
397 397 return False
398 398
399 399 def peer(self):
400 400 return localpeer(self) # not cached to avoid reference cycle
401 401
402 402 def unfiltered(self):
403 403 """Return unfiltered version of the repository
404 404
405 405 Intended to be overwritten by filtered repo."""
406 406 return self
407 407
408 408 def filtered(self, name):
409 409 """Return a filtered version of a repository"""
410 410 # build a new class with the mixin and the current class
411 411 # (possibly subclass of the repo)
412 412 class proxycls(repoview.repoview, self.unfiltered().__class__):
413 413 pass
414 414 return proxycls(self, name)
415 415
416 416 @repofilecache('bookmarks')
417 417 def _bookmarks(self):
418 418 return bookmarks.bmstore(self)
419 419
420 420 @repofilecache('bookmarks.current')
421 421 def _activebookmark(self):
422 422 return bookmarks.readactive(self)
423 423
424 424 def bookmarkheads(self, bookmark):
425 425 name = bookmark.split('@', 1)[0]
426 426 heads = []
427 427 for mark, n in self._bookmarks.iteritems():
428 428 if mark.split('@', 1)[0] == name:
429 429 heads.append(n)
430 430 return heads
431 431
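Reading bookmark state through the caches above; a minimal sketch against an opened repo (note that _bookmarks and _activebookmark are internal attributes):

    marks = repo._bookmarks                # bmstore: bookmark name -> node
    active = repo._activebookmark          # active bookmark name, or None
    heads = repo.bookmarkheads('feature')  # nodes for 'feature' and 'feature@...'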
432 432 @storecache('phaseroots')
433 433 def _phasecache(self):
434 434 return phases.phasecache(self, self._phasedefaults)
435 435
436 436 @storecache('obsstore')
437 437 def obsstore(self):
438 438 # read default format for new obsstore.
439 439 # developer config: format.obsstore-version
440 440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
441 441 # rely on obsstore class default when possible.
442 442 kwargs = {}
443 443 if defaultformat is not None:
444 444 kwargs['defaultformat'] = defaultformat
445 445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
446 446 store = obsolete.obsstore(self.svfs, readonly=readonly,
447 447 **kwargs)
448 448 if store and readonly:
449 449 self.ui.warn(
450 450 _('obsolete feature not enabled but %i markers found!\n')
451 451 % len(list(store)))
452 452 return store
453 453
454 454 @storecache('00changelog.i')
455 455 def changelog(self):
456 456 c = changelog.changelog(self.svfs)
457 457 if 'HG_PENDING' in os.environ:
458 458 p = os.environ['HG_PENDING']
459 459 if p.startswith(self.root):
460 460 c.readpending('00changelog.i.a')
461 461 return c
462 462
463 463 @storecache('00manifest.i')
464 464 def manifest(self):
465 465 return manifest.manifest(self.svfs)
466 466
467 467 def dirlog(self, dir):
468 468 return self.manifest.dirlog(dir)
469 469
470 470 @repofilecache('dirstate')
471 471 def dirstate(self):
472 472 warned = [0]
473 473 def validate(node):
474 474 try:
475 475 self.changelog.rev(node)
476 476 return node
477 477 except error.LookupError:
478 478 if not warned[0]:
479 479 warned[0] = True
480 480 self.ui.warn(_("warning: ignoring unknown"
481 481 " working parent %s!\n") % short(node))
482 482 return nullid
483 483
484 484 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
485 485
486 486 def __getitem__(self, changeid):
487 487 if changeid is None or changeid == wdirrev:
488 488 return context.workingctx(self)
489 489 if isinstance(changeid, slice):
490 490 return [context.changectx(self, i)
491 491 for i in xrange(*changeid.indices(len(self)))
492 492 if i not in self.changelog.filteredrevs]
493 493 return context.changectx(self, changeid)
494 494
495 495 def __contains__(self, changeid):
496 496 try:
497 497 self[changeid]
498 498 return True
499 499 except error.RepoLookupError:
500 500 return False
501 501
502 502 def __nonzero__(self):
503 503 return True
504 504
505 505 def __len__(self):
506 506 return len(self.changelog)
507 507
508 508 def __iter__(self):
509 509 return iter(self.changelog)
510 510
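Taken together, these methods let a repository act as a read-only mapping of changesets; a minimal sketch, assuming an opened repo and a known node:

    ctx = repo['tip']      # changectx from a symbolic name
    ctx = repo[0]          # ... or from a revision number
    wctx = repo[None]      # workingctx for the working directory
    if somenode in repo:   # __contains__ turns lookup errors into False
        ctx = repo[somenode]
    numrevs = len(repo)    # length of the changelog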
511 511 def revs(self, expr, *args):
512 512 '''Return a list of revisions matching the given revset'''
513 513 expr = revset.formatspec(expr, *args)
514 514 m = revset.match(None, expr)
515 515 return m(self)
516 516
517 517 def set(self, expr, *args):
518 518 '''
519 519 Yield a context for each matching revision, after doing arg
520 520 replacement via revset.formatspec
521 521 '''
522 522 for r in self.revs(expr, *args):
523 523 yield self[r]
524 524
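Both helpers substitute arguments through revset.formatspec, so callers never hand-quote values; a sketch, assuming base and name are in scope:

    for rev in repo.revs('%d:: and not merge()', base):
        pass                                  # rev is an integer revision
    for ctx in repo.set('branch(%s) and head()', name):
        repo.ui.write('%s\n' % ctx.hex())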
525 525 def url(self):
526 526 return 'file:' + self.root
527 527
528 528 def hook(self, name, throw=False, **args):
529 529 """Call a hook, passing this repo instance.
530 530
531 531 This is a convenience method to aid invoking hooks. Extensions likely
532 532 won't call this unless they have registered a custom hook or are
533 533 replacing code that is expected to call a hook.
534 534 """
535 535 return hook.hook(self.ui, self, name, throw, **args)
536 536
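A sketch of firing a custom hook through this helper; the hook name is illustrative and would be wired up in the user's [hooks] configuration (hex is imported from node at the top of this file, somenode is assumed in scope):

    # keyword arguments reach shell hooks as HG_* environment variables
    repo.hook('myext-sync', throw=False, node=hex(somenode), source='myext')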
537 537 @unfilteredmethod
538 538 def _tag(self, names, node, message, local, user, date, extra={},
539 539 editor=False):
540 540 if isinstance(names, str):
541 541 names = (names,)
542 542
543 543 branches = self.branchmap()
544 544 for name in names:
545 545 self.hook('pretag', throw=True, node=hex(node), tag=name,
546 546 local=local)
547 547 if name in branches:
548 548 self.ui.warn(_("warning: tag %s conflicts with existing"
549 549 " branch name\n") % name)
550 550
551 551 def writetags(fp, names, munge, prevtags):
552 552 fp.seek(0, 2)
553 553 if prevtags and prevtags[-1] != '\n':
554 554 fp.write('\n')
555 555 for name in names:
556 556 if munge:
557 557 m = munge(name)
558 558 else:
559 559 m = name
560 560
561 561 if (self._tagscache.tagtypes and
562 562 name in self._tagscache.tagtypes):
563 563 old = self.tags().get(name, nullid)
564 564 fp.write('%s %s\n' % (hex(old), m))
565 565 fp.write('%s %s\n' % (hex(node), m))
566 566 fp.close()
567 567
568 568 prevtags = ''
569 569 if local:
570 570 try:
571 571 fp = self.vfs('localtags', 'r+')
572 572 except IOError:
573 573 fp = self.vfs('localtags', 'a')
574 574 else:
575 575 prevtags = fp.read()
576 576
577 577 # local tags are stored in the current charset
578 578 writetags(fp, names, None, prevtags)
579 579 for name in names:
580 580 self.hook('tag', node=hex(node), tag=name, local=local)
581 581 return
582 582
583 583 try:
584 584 fp = self.wfile('.hgtags', 'rb+')
585 585 except IOError as e:
586 586 if e.errno != errno.ENOENT:
587 587 raise
588 588 fp = self.wfile('.hgtags', 'ab')
589 589 else:
590 590 prevtags = fp.read()
591 591
592 592 # committed tags are stored in UTF-8
593 593 writetags(fp, names, encoding.fromlocal, prevtags)
594 594
595 595 fp.close()
596 596
597 597 self.invalidatecaches()
598 598
599 599 if '.hgtags' not in self.dirstate:
600 600 self[None].add(['.hgtags'])
601 601
602 602 m = matchmod.exact(self.root, '', ['.hgtags'])
603 603 tagnode = self.commit(message, user, date, extra=extra, match=m,
604 604 editor=editor)
605 605
606 606 for name in names:
607 607 self.hook('tag', node=hex(node), tag=name, local=local)
608 608
609 609 return tagnode
610 610
611 611 def tag(self, names, node, message, local, user, date, editor=False):
612 612 '''tag a revision with one or more symbolic names.
613 613
614 614 names is a list of strings or, when adding a single tag, names may be a
615 615 string.
616 616
617 617 if local is True, the tags are stored in a per-repository file.
618 618 otherwise, they are stored in the .hgtags file, and a new
619 619 changeset is committed with the change.
620 620
621 621 keyword arguments:
622 622
623 623 local: whether to store tags in non-version-controlled file
624 624 (default False)
625 625
626 626 message: commit message to use if committing
627 627
628 628 user: name of user to use if committing
629 629
630 630 date: date tuple to use if committing'''
631 631
632 632 if not local:
633 633 m = matchmod.exact(self.root, '', ['.hgtags'])
634 634 if any(self.status(match=m, unknown=True, ignored=True)):
635 635 raise util.Abort(_('working copy of .hgtags is changed'),
636 636 hint=_('please commit .hgtags manually'))
637 637
638 638 self.tags() # instantiate the cache
639 639 self._tag(names, node, message, local, user, date, editor=editor)
640 640
641 641 @filteredpropertycache
642 642 def _tagscache(self):
643 643 '''Returns a tagscache object that contains various tag-related
644 644 caches.'''
645 645
646 646 # This simplifies its cache management by having one decorated
647 647 # function (this one) and the rest simply fetch things from it.
648 648 class tagscache(object):
649 649 def __init__(self):
650 650 # These two define the set of tags for this repository. tags
651 651 # maps tag name to node; tagtypes maps tag name to 'global' or
652 652 # 'local'. (Global tags are defined by .hgtags across all
653 653 # heads, and local tags are defined in .hg/localtags.)
654 654 # They constitute the in-memory cache of tags.
655 655 self.tags = self.tagtypes = None
656 656
657 657 self.nodetagscache = self.tagslist = None
658 658
659 659 cache = tagscache()
660 660 cache.tags, cache.tagtypes = self._findtags()
661 661
662 662 return cache
663 663
664 664 def tags(self):
665 665 '''return a mapping of tag to node'''
666 666 t = {}
667 667 if self.changelog.filteredrevs:
668 668 tags, tt = self._findtags()
669 669 else:
670 670 tags = self._tagscache.tags
671 671 for k, v in tags.iteritems():
672 672 try:
673 673 # ignore tags to unknown nodes
674 674 self.changelog.rev(v)
675 675 t[k] = v
676 676 except (error.LookupError, ValueError):
677 677 pass
678 678 return t
679 679
680 680 def _findtags(self):
681 681 '''Do the hard work of finding tags. Return a pair of dicts
682 682 (tags, tagtypes) where tags maps tag name to node, and tagtypes
683 683 maps tag name to a string like \'global\' or \'local\'.
684 684 Subclasses or extensions are free to add their own tags, but
685 685 should be aware that the returned dicts will be retained for the
686 686 duration of the localrepo object.'''
687 687
688 688 # XXX what tagtype should subclasses/extensions use? Currently
689 689 # mq and bookmarks add tags, but do not set the tagtype at all.
690 690 # Should each extension invent its own tag type? Should there
691 691 # be one tagtype for all such "virtual" tags? Or is the status
692 692 # quo fine?
693 693
694 694 alltags = {} # map tag name to (node, hist)
695 695 tagtypes = {}
696 696
697 697 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
698 698 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
699 699
700 700 # Build the return dicts. Have to re-encode tag names because
701 701 # the tags module always uses UTF-8 (in order not to lose info
702 702 # writing to the cache), but the rest of Mercurial wants them in
703 703 # local encoding.
704 704 tags = {}
705 705 for (name, (node, hist)) in alltags.iteritems():
706 706 if node != nullid:
707 707 tags[encoding.tolocal(name)] = node
708 708 tags['tip'] = self.changelog.tip()
709 709 tagtypes = dict([(encoding.tolocal(name), value)
710 710 for (name, value) in tagtypes.iteritems()])
711 711 return (tags, tagtypes)
712 712
713 713 def tagtype(self, tagname):
714 714 '''
715 715 return the type of the given tag. result can be:
716 716
717 717 'local' : a local tag
718 718 'global' : a global tag
719 719 None : tag does not exist
720 720 '''
721 721
722 722 return self._tagscache.tagtypes.get(tagname)
723 723
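How the tag accessors combine; a minimal sketch (short comes from the node module imported at the top of this file):

    tags = repo.tags()               # tag name -> node, honoring filtering
    node = tags.get('v1.0')
    kind = repo.tagtype('v1.0')      # 'global', 'local', or None
    for name, n in repo.tagslist():  # ordered by revision
        repo.ui.write('%s %s\n' % (name, short(n)))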
724 724 def tagslist(self):
725 725 '''return a list of tags ordered by revision'''
726 726 if not self._tagscache.tagslist:
727 727 l = []
728 728 for t, n in self.tags().iteritems():
729 729 l.append((self.changelog.rev(n), t, n))
730 730 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
731 731
732 732 return self._tagscache.tagslist
733 733
734 734 def nodetags(self, node):
735 735 '''return the tags associated with a node'''
736 736 if not self._tagscache.nodetagscache:
737 737 nodetagscache = {}
738 738 for t, n in self._tagscache.tags.iteritems():
739 739 nodetagscache.setdefault(n, []).append(t)
740 740 for tags in nodetagscache.itervalues():
741 741 tags.sort()
742 742 self._tagscache.nodetagscache = nodetagscache
743 743 return self._tagscache.nodetagscache.get(node, [])
744 744
745 745 def nodebookmarks(self, node):
746 746 marks = []
747 747 for bookmark, n in self._bookmarks.iteritems():
748 748 if n == node:
749 749 marks.append(bookmark)
750 750 return sorted(marks)
751 751
752 752 def branchmap(self):
753 753 '''returns a dictionary {branch: [branchheads]} with branchheads
754 754 ordered by increasing revision number'''
755 755 branchmap.updatecache(self)
756 756 return self._branchcaches[self.filtername]
757 757
758 758 @unfilteredmethod
759 759 def revbranchcache(self):
760 760 if not self._revbranchcache:
761 761 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
762 762 return self._revbranchcache
763 763
764 764 def branchtip(self, branch, ignoremissing=False):
765 765 '''return the tip node for a given branch
766 766
767 767 If ignoremissing is True, then this method will not raise an error.
768 768 This is helpful for callers that only expect None for a missing branch
769 769 (e.g. namespace).
770 770
771 771 '''
772 772 try:
773 773 return self.branchmap().branchtip(branch)
774 774 except KeyError:
775 775 if not ignoremissing:
776 776 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
777 777 else:
778 778 pass
779 779
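Reading branch information through the cache above; a minimal sketch:

    bm = repo.branchmap()             # branch name -> list of head nodes
    heads = bm['default']
    tip = repo.branchtip('default')   # raises RepoLookupError if unknown
    gone = repo.branchtip('gone', ignoremissing=True)  # None instead of raising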
780 780 def lookup(self, key):
781 781 return self[key].node()
782 782
783 783 def lookupbranch(self, key, remote=None):
784 784 repo = remote or self
785 785 if key in repo.branchmap():
786 786 return key
787 787
788 788 repo = (remote and remote.local()) and remote or self
789 789 return repo[key].branch()
790 790
791 791 def known(self, nodes):
792 792 nm = self.changelog.nodemap
793 793 pc = self._phasecache
794 794 result = []
795 795 for n in nodes:
796 796 r = nm.get(n)
797 797 resp = not (r is None or pc.phase(self, r) >= phases.secret)
798 798 result.append(resp)
799 799 return result
800 800
801 801 def local(self):
802 802 return self
803 803
804 804 def publishing(self):
805 805 # it's safe (and desirable) to trust the publish flag unconditionally
806 806 # so that we don't finalize changes shared between users via ssh or nfs
807 807 return self.ui.configbool('phases', 'publish', True, untrusted=True)
808 808
809 809 def cancopy(self):
810 810 # so statichttprepo's override of local() works
811 811 if not self.local():
812 812 return False
813 813 if not self.publishing():
814 814 return True
815 815 # if publishing we can't copy if there is filtered content
816 816 return not self.filtered('visible').changelog.filteredrevs
817 817
818 818 def shared(self):
819 819 '''the type of shared repository (None if not shared)'''
820 820 if self.sharedpath != self.path:
821 821 return 'store'
822 822 return None
823 823
824 824 def join(self, f, *insidef):
825 825 return self.vfs.join(os.path.join(f, *insidef))
826 826
827 827 def wjoin(self, f, *insidef):
828 828 return self.vfs.reljoin(self.root, f, *insidef)
829 829
830 830 def file(self, f):
831 831 if f[0] == '/':
832 832 f = f[1:]
833 833 return filelog.filelog(self.svfs, f)
834 834
835 835 def changectx(self, changeid):
836 836 return self[changeid]
837 837
838 838 def parents(self, changeid=None):
839 839 '''get list of changectxs for parents of changeid'''
840 840 return self[changeid].parents()
841 841
842 842 def setparents(self, p1, p2=nullid):
843 843 self.dirstate.beginparentchange()
844 844 copies = self.dirstate.setparents(p1, p2)
845 845 pctx = self[p1]
846 846 if copies:
847 847 # Adjust copy records; the dirstate cannot do it, as it
848 848 # requires access to the parents' manifests. Preserve them
849 849 # only for entries added to the first parent.
850 850 for f in copies:
851 851 if f not in pctx and copies[f] in pctx:
852 852 self.dirstate.copy(copies[f], f)
853 853 if p2 == nullid:
854 854 for f, s in sorted(self.dirstate.copies().items()):
855 855 if f not in pctx and s not in pctx:
856 856 self.dirstate.copy(None, f)
857 857 self.dirstate.endparentchange()
858 858
859 859 def filectx(self, path, changeid=None, fileid=None):
860 860 """changeid can be a changeset revision, node, or tag.
861 861 fileid can be a file revision or node."""
862 862 return context.filectx(self, path, changeid, fileid)
863 863
864 864 def getcwd(self):
865 865 return self.dirstate.getcwd()
866 866
867 867 def pathto(self, f, cwd=None):
868 868 return self.dirstate.pathto(f, cwd)
869 869
870 870 def wfile(self, f, mode='r'):
871 871 return self.wvfs(f, mode)
872 872
873 873 def _link(self, f):
874 874 return self.wvfs.islink(f)
875 875
876 876 def _loadfilter(self, filter):
877 877 if filter not in self.filterpats:
878 878 l = []
879 879 for pat, cmd in self.ui.configitems(filter):
880 880 if cmd == '!':
881 881 continue
882 882 mf = matchmod.match(self.root, '', [pat])
883 883 fn = None
884 884 params = cmd
885 885 for name, filterfn in self._datafilters.iteritems():
886 886 if cmd.startswith(name):
887 887 fn = filterfn
888 888 params = cmd[len(name):].lstrip()
889 889 break
890 890 if not fn:
891 891 fn = lambda s, c, **kwargs: util.filter(s, c)
892 892 # Wrap old filters not supporting keyword arguments
893 893 if not inspect.getargspec(fn)[2]:
894 894 oldfn = fn
895 895 fn = lambda s, c, **kwargs: oldfn(s, c)
896 896 l.append((mf, fn, params))
897 897 self.filterpats[filter] = l
898 898 return self.filterpats[filter]
899 899
900 900 def _filter(self, filterpats, filename, data):
901 901 for mf, fn, cmd in filterpats:
902 902 if mf(filename):
903 903 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
904 904 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
905 905 break
906 906
907 907 return data
908 908
909 909 @unfilteredpropertycache
910 910 def _encodefilterpats(self):
911 911 return self._loadfilter('encode')
912 912
913 913 @unfilteredpropertycache
914 914 def _decodefilterpats(self):
915 915 return self._loadfilter('decode')
916 916
917 917 def adddatafilter(self, name, filter):
918 918 self._datafilters[name] = filter
919 919
920 920 def wread(self, filename):
921 921 if self._link(filename):
922 922 data = self.wvfs.readlink(filename)
923 923 else:
924 924 data = self.wvfs.read(filename)
925 925 return self._filter(self._encodefilterpats, filename, data)
926 926
927 927 def wwrite(self, filename, data, flags):
928 928 """write ``data`` into ``filename`` in the working directory
929 929
930 930 This returns the length of the written (possibly decoded) data.
931 931 """
932 932 data = self._filter(self._decodefilterpats, filename, data)
933 933 if 'l' in flags:
934 934 self.wvfs.symlink(data, filename)
935 935 else:
936 936 self.wvfs.write(filename, data)
937 937 if 'x' in flags:
938 938 self.wvfs.setflags(filename, False, True)
939 939 return len(data)
940 940
941 941 def wwritedata(self, filename, data):
942 942 return self._filter(self._decodefilterpats, filename, data)
943 943
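A sketch of plugging into this encode/decode machinery: an extension registers a named filter with adddatafilter(), and [encode]/[decode] hgrc patterns dispatch to it by command prefix. The filter name and pattern here are illustrative:

    def upperfilter(s, params, **kwargs):
        # kwargs carries ui, repo and filename, per _filter() above
        return s.upper()

    repo.adddatafilter('upper:', upperfilter)
    # with '[encode]' / '**.txt = upper:' in hgrc, wread() now
    # upper-cases data read from *.txt files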
944 944 def currenttransaction(self):
945 945 """return the current transaction, or None if none exists"""
946 946 if self._transref:
947 947 tr = self._transref()
948 948 else:
949 949 tr = None
950 950
951 951 if tr and tr.running():
952 952 return tr
953 953 return None
954 954
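The expected open/close discipline for the transaction returned below, mirroring what commitctx() does later in this file; a sketch (the caller must already hold the store lock):

    lock = repo.lock()
    try:
        tr = repo.transaction('my-operation')
        try:
            # ... write to the store through tr ...
            tr.close()     # commit: runs finalizers and the txnclose hook
        finally:
            tr.release()   # aborts the transaction unless close() ran
    finally:
        lock.release()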
955 955 def transaction(self, desc, report=None):
956 956 if (self.ui.configbool('devel', 'all-warnings')
957 957 or self.ui.configbool('devel', 'check-locks')):
958 958 l = self._lockref and self._lockref()
959 959 if l is None or not l.held:
960 960 self.ui.develwarn('transaction with no lock')
961 961 tr = self.currenttransaction()
962 962 if tr is not None:
963 963 return tr.nest()
964 964
965 965 # abort here if the journal already exists
966 966 if self.svfs.exists("journal"):
967 967 raise error.RepoError(
968 968 _("abandoned transaction found"),
969 969 hint=_("run 'hg recover' to clean up transaction"))
970 970
971 971 idbase = "%.40f#%f" % (random.random(), time.time())
972 972 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
973 973 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
974 974
975 975 self._writejournal(desc)
976 976 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
977 977 if report:
978 978 rp = report
979 979 else:
980 980 rp = self.ui.warn
981 981 vfsmap = {'plain': self.vfs} # root of .hg/
982 982 # we must avoid cyclic reference between repo and transaction.
983 983 reporef = weakref.ref(self)
984 984 def validate(tr):
985 985 """will run pre-closing hooks"""
986 986 pending = lambda: tr.writepending() and self.root or ""
987 987 reporef().hook('pretxnclose', throw=True, pending=pending,
988 988 txnname=desc, **tr.hookargs)
989 989
990 990 tr = transaction.transaction(rp, self.svfs, vfsmap,
991 991 "journal",
992 992 "undo",
993 993 aftertrans(renames),
994 994 self.store.createmode,
995 995 validator=validate)
996 996
997 997 tr.hookargs['txnid'] = txnid
998 998 # note: writing the fncache only during finalize means that the file is
999 999 # outdated when running hooks. As fncache is used for streaming clone,
1000 1000 # this is not expected to break anything that happens during the hooks.
1001 1001 tr.addfinalize('flush-fncache', self.store.write)
1002 1002 def txnclosehook(tr2):
1003 1003 """To be run if the transaction is successful; schedules a hook run
1004 1004 """
1005 1005 def hook():
1006 1006 reporef().hook('txnclose', throw=False, txnname=desc,
1007 1007 **tr2.hookargs)
1008 1008 reporef()._afterlock(hook)
1009 1009 tr.addfinalize('txnclose-hook', txnclosehook)
1010 1010 def txnaborthook(tr2):
1011 1011 """To be run if the transaction is aborted
1012 1012 """
1013 1013 reporef().hook('txnabort', throw=False, txnname=desc,
1014 1014 **tr2.hookargs)
1015 1015 tr.addabort('txnabort-hook', txnaborthook)
1016 1016 self._transref = weakref.ref(tr)
1017 1017 return tr
1018 1018
1019 1019 def _journalfiles(self):
1020 1020 return ((self.svfs, 'journal'),
1021 1021 (self.vfs, 'journal.dirstate'),
1022 1022 (self.vfs, 'journal.branch'),
1023 1023 (self.vfs, 'journal.desc'),
1024 1024 (self.vfs, 'journal.bookmarks'),
1025 1025 (self.svfs, 'journal.phaseroots'))
1026 1026
1027 1027 def undofiles(self):
1028 1028 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1029 1029
1030 1030 def _writejournal(self, desc):
1031 1031 self.vfs.write("journal.dirstate",
1032 1032 self.vfs.tryread("dirstate"))
1033 1033 self.vfs.write("journal.branch",
1034 1034 encoding.fromlocal(self.dirstate.branch()))
1035 1035 self.vfs.write("journal.desc",
1036 1036 "%d\n%s\n" % (len(self), desc))
1037 1037 self.vfs.write("journal.bookmarks",
1038 1038 self.vfs.tryread("bookmarks"))
1039 1039 self.svfs.write("journal.phaseroots",
1040 1040 self.svfs.tryread("phaseroots"))
1041 1041
1042 1042 def recover(self):
1043 1043 lock = self.lock()
1044 1044 try:
1045 1045 if self.svfs.exists("journal"):
1046 1046 self.ui.status(_("rolling back interrupted transaction\n"))
1047 1047 vfsmap = {'': self.svfs,
1048 1048 'plain': self.vfs,}
1049 1049 transaction.rollback(self.svfs, vfsmap, "journal",
1050 1050 self.ui.warn)
1051 1051 self.invalidate()
1052 1052 return True
1053 1053 else:
1054 1054 self.ui.warn(_("no interrupted transaction available\n"))
1055 1055 return False
1056 1056 finally:
1057 1057 lock.release()
1058 1058
1059 1059 def rollback(self, dryrun=False, force=False):
1060 1060 wlock = lock = None
1061 1061 try:
1062 1062 wlock = self.wlock()
1063 1063 lock = self.lock()
1064 1064 if self.svfs.exists("undo"):
1065 1065 return self._rollback(dryrun, force)
1066 1066 else:
1067 1067 self.ui.warn(_("no rollback information available\n"))
1068 1068 return 1
1069 1069 finally:
1070 1070 release(lock, wlock)
1071 1071
1072 1072 @unfilteredmethod # Until we get smarter cache management
1073 1073 def _rollback(self, dryrun, force):
1074 1074 ui = self.ui
1075 1075 try:
1076 1076 args = self.vfs.read('undo.desc').splitlines()
1077 1077 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1078 1078 if len(args) >= 3:
1079 1079 detail = args[2]
1080 1080 oldtip = oldlen - 1
1081 1081
1082 1082 if detail and ui.verbose:
1083 1083 msg = (_('repository tip rolled back to revision %s'
1084 1084 ' (undo %s: %s)\n')
1085 1085 % (oldtip, desc, detail))
1086 1086 else:
1087 1087 msg = (_('repository tip rolled back to revision %s'
1088 1088 ' (undo %s)\n')
1089 1089 % (oldtip, desc))
1090 1090 except IOError:
1091 1091 msg = _('rolling back unknown transaction\n')
1092 1092 desc = None
1093 1093
1094 1094 if not force and self['.'] != self['tip'] and desc == 'commit':
1095 1095 raise util.Abort(
1096 1096 _('rollback of last commit while not checked out '
1097 1097 'may lose data'), hint=_('use -f to force'))
1098 1098
1099 1099 ui.status(msg)
1100 1100 if dryrun:
1101 1101 return 0
1102 1102
1103 1103 parents = self.dirstate.parents()
1104 1104 self.destroying()
1105 1105 vfsmap = {'plain': self.vfs, '': self.svfs}
1106 1106 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1107 1107 if self.vfs.exists('undo.bookmarks'):
1108 1108 self.vfs.rename('undo.bookmarks', 'bookmarks')
1109 1109 if self.svfs.exists('undo.phaseroots'):
1110 1110 self.svfs.rename('undo.phaseroots', 'phaseroots')
1111 1111 self.invalidate()
1112 1112
1113 1113 parentgone = (parents[0] not in self.changelog.nodemap or
1114 1114 parents[1] not in self.changelog.nodemap)
1115 1115 if parentgone:
1116 1116 self.vfs.rename('undo.dirstate', 'dirstate')
1117 1117 try:
1118 1118 branch = self.vfs.read('undo.branch')
1119 1119 self.dirstate.setbranch(encoding.tolocal(branch))
1120 1120 except IOError:
1121 1121 ui.warn(_('named branch could not be reset: '
1122 1122 'current branch is still \'%s\'\n')
1123 1123 % self.dirstate.branch())
1124 1124
1125 1125 self.dirstate.invalidate()
1126 1126 parents = tuple([p.rev() for p in self.parents()])
1127 1127 if len(parents) > 1:
1128 1128 ui.status(_('working directory now based on '
1129 1129 'revisions %d and %d\n') % parents)
1130 1130 else:
1131 1131 ui.status(_('working directory now based on '
1132 1132 'revision %d\n') % parents)
1133 1133 ms = mergemod.mergestate(self)
1134 1134 ms.reset(self['.'].node())
1135 1135
1136 1136 # TODO: if we know which new heads may result from this rollback, pass
1137 1137 # them to destroy(), which will prevent the branchhead cache from being
1138 1138 # invalidated.
1139 1139 self.destroyed()
1140 1140 return 0
1141 1141
1142 1142 def invalidatecaches(self):
1143 1143
1144 1144 if '_tagscache' in vars(self):
1145 1145 # can't use delattr on proxy
1146 1146 del self.__dict__['_tagscache']
1147 1147
1148 1148 self.unfiltered()._branchcaches.clear()
1149 1149 self.invalidatevolatilesets()
1150 1150
1151 1151 def invalidatevolatilesets(self):
1152 1152 self.filteredrevcache.clear()
1153 1153 obsolete.clearobscaches(self)
1154 1154
1155 1155 def invalidatedirstate(self):
1156 1156 '''Invalidates the dirstate, causing the next call to dirstate
1157 1157 to check if it was modified since the last time it was read,
1158 1158 rereading it if it has been.
1159 1159
1160 1160 This is different from dirstate.invalidate() in that it doesn't always
1161 1161 reread the dirstate. Use dirstate.invalidate() if you want to
1162 1162 explicitly read the dirstate again (i.e. restoring it to a previous
1163 1163 known good state).'''
1164 1164 if hasunfilteredcache(self, 'dirstate'):
1165 1165 for k in self.dirstate._filecache:
1166 1166 try:
1167 1167 delattr(self.dirstate, k)
1168 1168 except AttributeError:
1169 1169 pass
1170 1170 delattr(self.unfiltered(), 'dirstate')
1171 1171
1172 1172 def invalidate(self):
1173 1173 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1174 1174 for k in self._filecache:
1175 1175 # dirstate is invalidated separately in invalidatedirstate()
1176 1176 if k == 'dirstate':
1177 1177 continue
1178 1178
1179 1179 try:
1180 1180 delattr(unfiltered, k)
1181 1181 except AttributeError:
1182 1182 pass
1183 1183 self.invalidatecaches()
1184 1184 self.store.invalidatecaches()
1185 1185
1186 1186 def invalidateall(self):
1187 1187 '''Fully invalidates both store and non-store parts, causing the
1188 1188 subsequent operation to reread any outside changes.'''
1189 1189 # extension should hook this to invalidate its caches
1190 1190 self.invalidate()
1191 1191 self.invalidatedirstate()
1192 1192
1193 1193 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1194 1194 try:
1195 1195 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1196 1196 except error.LockHeld as inst:
1197 1197 if not wait:
1198 1198 raise
1199 1199 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1200 1200 (desc, inst.locker))
1201 1201 # default to 600 seconds timeout
1202 1202 l = lockmod.lock(vfs, lockname,
1203 1203 int(self.ui.config("ui", "timeout", "600")),
1204 1204 releasefn, desc=desc)
1205 1205 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1206 1206 if acquirefn:
1207 1207 acquirefn()
1208 1208 return l
1209 1209
1210 1210 def _afterlock(self, callback):
1211 1211 """add a callback to be run when the repository is fully unlocked
1212 1212
1213 1213 The callback will be executed when the outermost lock is released
1214 1214 (with wlock being higher level than 'lock')."""
1215 1215 for ref in (self._wlockref, self._lockref):
1216 1216 l = ref and ref()
1217 1217 if l and l.held:
1218 1218 l.postrelease.append(callback)
1219 1219 break
1220 1220 else: # no lock has been found.
1221 1221 callback()
1222 1222
1223 1223 def lock(self, wait=True):
1224 1224 '''Lock the repository store (.hg/store) and return a weak reference
1225 1225 to the lock. Use this before modifying the store (e.g. committing or
1226 1226 stripping). If you are opening a transaction, get a lock as well.
1227 1227
1228 1228 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1229 1229 'wlock' first to avoid a deadlock hazard.'''
1230 1230 l = self._lockref and self._lockref()
1231 1231 if l is not None and l.held:
1232 1232 l.lock()
1233 1233 return l
1234 1234
1235 1235 def unlock():
1236 1236 for k, ce in self._filecache.items():
1237 1237 if k == 'dirstate' or k not in self.__dict__:
1238 1238 continue
1239 1239 ce.refresh()
1240 1240
1241 1241 l = self._lock(self.svfs, "lock", wait, unlock,
1242 1242 self.invalidate, _('repository %s') % self.origroot)
1243 1243 self._lockref = weakref.ref(l)
1244 1244 return l
1245 1245
1246 1246 def wlock(self, wait=True):
1247 1247 '''Lock the non-store parts of the repository (everything under
1248 1248 .hg except .hg/store) and return a weak reference to the lock.
1249 1249
1250 1250 Use this before modifying files in .hg.
1251 1251
1252 1252 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1253 1253 'wlock' first to avoid a deadlock hazard.'''
1254 1254 l = self._wlockref and self._wlockref()
1255 1255 if l is not None and l.held:
1256 1256 l.lock()
1257 1257 return l
1258 1258
1259 1259 # We do not need to check for non-waiting lock acquisition. Such
1260 1260 # an acquisition would not cause a deadlock, as it would just fail.
1261 1261 if wait and (self.ui.configbool('devel', 'all-warnings')
1262 1262 or self.ui.configbool('devel', 'check-locks')):
1263 1263 l = self._lockref and self._lockref()
1264 1264 if l is not None and l.held:
1265 1265 self.ui.develwarn('"wlock" acquired after "lock"')
1266 1266
1267 1267 def unlock():
1268 1268 if self.dirstate.pendingparentchange():
1269 1269 self.dirstate.invalidate()
1270 1270 else:
1271 1271 self.dirstate.write()
1272 1272
1273 1273 self._filecache['dirstate'].refresh()
1274 1274
1275 1275 l = self._lock(self.vfs, "wlock", wait, unlock,
1276 1276 self.invalidatedirstate, _('working directory of %s') %
1277 1277 self.origroot)
1278 1278 self._wlockref = weakref.ref(l)
1279 1279 return l
1280 1280
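The ordering rule from the two docstrings above, spelled out; release() is the helper imported from lock at the top of this file:

    wlock = lock = None
    try:
        wlock = repo.wlock()   # working-copy lock first ...
        lock = repo.lock()     # ... then the store lock, avoiding deadlock
        # ... modify the working directory and the store ...
    finally:
        release(lock, wlock)   # released in reverse acquisition order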
1281 1281 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1282 1282 """
1283 1283 commit an individual file as part of a larger transaction
1284 1284 """
1285 1285
1286 1286 fname = fctx.path()
1287 1287 fparent1 = manifest1.get(fname, nullid)
1288 1288 fparent2 = manifest2.get(fname, nullid)
1289 1289 if isinstance(fctx, context.filectx):
1290 1290 node = fctx.filenode()
1291 1291 if node in [fparent1, fparent2]:
1292 1292 self.ui.debug('reusing %s filelog entry\n' % fname)
1293 1293 return node
1294 1294
1295 1295 flog = self.file(fname)
1296 1296 meta = {}
1297 1297 copy = fctx.renamed()
1298 1298 if copy and copy[0] != fname:
1299 1299 # Mark the new revision of this file as a copy of another
1300 1300 # file. This copy data will effectively act as a parent
1301 1301 # of this new revision. If this is a merge, the first
1302 1302 # parent will be the nullid (meaning "look up the copy data")
1303 1303 # and the second one will be the other parent. For example:
1304 1304 #
1305 1305 # 0 --- 1 --- 3 rev1 changes file foo
1306 1306 # \ / rev2 renames foo to bar and changes it
1307 1307 # \- 2 -/ rev3 should have bar with all changes and
1308 1308 # should record that bar descends from
1309 1309 # bar in rev2 and foo in rev1
1310 1310 #
1311 1311 # this allows this merge to succeed:
1312 1312 #
1313 1313 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1314 1314 # \ / merging rev3 and rev4 should use bar@rev2
1315 1315 # \- 2 --- 4 as the merge base
1316 1316 #
1317 1317
1318 1318 cfname = copy[0]
1319 1319 crev = manifest1.get(cfname)
1320 1320 newfparent = fparent2
1321 1321
1322 1322 if manifest2: # branch merge
1323 1323 if fparent2 == nullid or crev is None: # copied on remote side
1324 1324 if cfname in manifest2:
1325 1325 crev = manifest2[cfname]
1326 1326 newfparent = fparent1
1327 1327
1328 1328 # Here, we used to search backwards through history to try to find
1329 1329 # where the file copy came from if the source of a copy was not in
1330 1330 # the parent directory. However, this doesn't actually make sense to
1331 1331 # do (what does a copy from something not in your working copy even
1332 1332 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1333 1333 # the user that copy information was dropped, so if they didn't
1334 1334 # expect this outcome it can be fixed, but this is the correct
1335 1335 # behavior in this circumstance.
1336 1336
1337 1337 if crev:
1338 1338 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1339 1339 meta["copy"] = cfname
1340 1340 meta["copyrev"] = hex(crev)
1341 1341 fparent1, fparent2 = nullid, newfparent
1342 1342 else:
1343 1343 self.ui.warn(_("warning: can't find ancestor for '%s' "
1344 1344 "copied from '%s'!\n") % (fname, cfname))
1345 1345
1346 1346 elif fparent1 == nullid:
1347 1347 fparent1, fparent2 = fparent2, nullid
1348 1348 elif fparent2 != nullid:
1349 1349 # is one parent an ancestor of the other?
1350 1350 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1351 1351 if fparent1 in fparentancestors:
1352 1352 fparent1, fparent2 = fparent2, nullid
1353 1353 elif fparent2 in fparentancestors:
1354 1354 fparent2 = nullid
1355 1355
1356 1356 # is the file changed?
1357 1357 text = fctx.data()
1358 1358 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1359 1359 changelist.append(fname)
1360 1360 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1361 1361 # are just the flags changed during merge?
1362 1362 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1363 1363 changelist.append(fname)
1364 1364
1365 1365 return fparent1
1366 1366
1367 1367 @unfilteredmethod
1368 1368 def commit(self, text="", user=None, date=None, match=None, force=False,
1369 1369 editor=False, extra={}):
1370 1370 """Add a new revision to the current repository.
1371 1371
1372 1372 Revision information is gathered from the working directory;
1373 1373 match can be used to filter the committed files. If editor is
1374 1374 supplied, it is called to get a commit message.
1375 1375 """
1376 1376
1377 1377 def fail(f, msg):
1378 1378 raise util.Abort('%s: %s' % (f, msg))
1379 1379
1380 1380 if not match:
1381 1381 match = matchmod.always(self.root, '')
1382 1382
1383 1383 if not force:
1384 1384 vdirs = []
1385 1385 match.explicitdir = vdirs.append
1386 1386 match.bad = fail
1387 1387
1388 1388 wlock = self.wlock()
1389 1389 try:
1390 1390 wctx = self[None]
1391 1391 merge = len(wctx.parents()) > 1
1392 1392
1393 1393 if not force and merge and match.ispartial():
1394 1394 raise util.Abort(_('cannot partially commit a merge '
1395 1395 '(do not specify files or patterns)'))
1396 1396
1397 1397 status = self.status(match=match, clean=force)
1398 1398 if force:
1399 1399 status.modified.extend(status.clean) # mq may commit clean files
1400 1400
1401 1401 # check subrepos
1402 1402 subs = []
1403 1403 commitsubs = set()
1404 1404 newstate = wctx.substate.copy()
1405 1405 # only manage subrepos and .hgsubstate if .hgsub is present
1406 1406 if '.hgsub' in wctx:
1407 1407 # we'll decide whether to track this ourselves, thanks
1408 1408 for c in status.modified, status.added, status.removed:
1409 1409 if '.hgsubstate' in c:
1410 1410 c.remove('.hgsubstate')
1411 1411
1412 1412 # compare current state to last committed state
1413 1413 # build new substate based on last committed state
1414 1414 oldstate = wctx.p1().substate
1415 1415 for s in sorted(newstate.keys()):
1416 1416 if not match(s):
1417 1417 # ignore working copy, use old state if present
1418 1418 if s in oldstate:
1419 1419 newstate[s] = oldstate[s]
1420 1420 continue
1421 1421 if not force:
1422 1422 raise util.Abort(
1423 1423 _("commit with new subrepo %s excluded") % s)
1424 1424 dirtyreason = wctx.sub(s).dirtyreason(True)
1425 1425 if dirtyreason:
1426 1426 if not self.ui.configbool('ui', 'commitsubrepos'):
1427 1427 raise util.Abort(dirtyreason,
1428 1428 hint=_("use --subrepos for recursive commit"))
1429 1429 subs.append(s)
1430 1430 commitsubs.add(s)
1431 1431 else:
1432 1432 bs = wctx.sub(s).basestate()
1433 1433 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1434 1434 if oldstate.get(s, (None, None, None))[1] != bs:
1435 1435 subs.append(s)
1436 1436
1437 1437 # check for removed subrepos
1438 1438 for p in wctx.parents():
1439 1439 r = [s for s in p.substate if s not in newstate]
1440 1440 subs += [s for s in r if match(s)]
1441 1441 if subs:
1442 1442 if (not match('.hgsub') and
1443 1443 '.hgsub' in (wctx.modified() + wctx.added())):
1444 1444 raise util.Abort(
1445 1445 _("can't commit subrepos without .hgsub"))
1446 1446 status.modified.insert(0, '.hgsubstate')
1447 1447
1448 1448 elif '.hgsub' in status.removed:
1449 1449 # clean up .hgsubstate when .hgsub is removed
1450 1450 if ('.hgsubstate' in wctx and
1451 1451 '.hgsubstate' not in (status.modified + status.added +
1452 1452 status.removed)):
1453 1453 status.removed.insert(0, '.hgsubstate')
1454 1454
1455 1455 # make sure all explicit patterns are matched
1456 1456 if not force and (match.isexact() or match.prefix()):
1457 1457 matched = set(status.modified + status.added + status.removed)
1458 1458
1459 1459 for f in match.files():
1460 1460 f = self.dirstate.normalize(f)
1461 1461 if f == '.' or f in matched or f in wctx.substate:
1462 1462 continue
1463 1463 if f in status.deleted:
1464 1464 fail(f, _('file not found!'))
1465 1465 if f in vdirs: # visited directory
1466 1466 d = f + '/'
1467 1467 for mf in matched:
1468 1468 if mf.startswith(d):
1469 1469 break
1470 1470 else:
1471 1471 fail(f, _("no match under directory!"))
1472 1472 elif f not in self.dirstate:
1473 1473 fail(f, _("file not tracked!"))
1474 1474
1475 1475 cctx = context.workingcommitctx(self, status,
1476 1476 text, user, date, extra)
1477 1477
1478 # internal config: ui.allowemptycommit
1478 1479 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1479 1480 or extra.get('close') or merge or cctx.files()
1480 1481 or self.ui.configbool('ui', 'allowemptycommit'))
1481 1482 if not allowemptycommit:
1482 1483 return None
1483 1484
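This is the option the commit message refers to: ui.allowemptycommit is marked internal-only, meaning it is meant to be set from code (tests, extensions, in-process callers) rather than documented for user hgrc files. A sketch of such a caller:

    # not intended for end-user configuration files
    repo.ui.setconfig('ui', 'allowemptycommit', True, 'myext')
    node = repo.commit(text='checkpoint with no file changes')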
1484 1485 if merge and cctx.deleted():
1485 1486 raise util.Abort(_("cannot commit merge with missing files"))
1486 1487
1487 1488 ms = mergemod.mergestate(self)
1488 1489 for f in status.modified:
1489 1490 if f in ms and ms[f] == 'u':
1490 1491 raise util.Abort(_('unresolved merge conflicts '
1491 1492 '(see "hg help resolve")'))
1492 1493
1493 1494 if editor:
1494 1495 cctx._text = editor(self, cctx, subs)
1495 1496 edited = (text != cctx._text)
1496 1497
1497 1498 # Save commit message in case this transaction gets rolled back
1498 1499 # (e.g. by a pretxncommit hook). Leave the content alone on
1499 1500 # the assumption that the user will use the same editor again.
1500 1501 msgfn = self.savecommitmessage(cctx._text)
1501 1502
1502 1503 # commit subs and write new state
1503 1504 if subs:
1504 1505 for s in sorted(commitsubs):
1505 1506 sub = wctx.sub(s)
1506 1507 self.ui.status(_('committing subrepository %s\n') %
1507 1508 subrepo.subrelpath(sub))
1508 1509 sr = sub.commit(cctx._text, user, date)
1509 1510 newstate[s] = (newstate[s][0], sr)
1510 1511 subrepo.writestate(self, newstate)
1511 1512
1512 1513 p1, p2 = self.dirstate.parents()
1513 1514 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1514 1515 try:
1515 1516 self.hook("precommit", throw=True, parent1=hookp1,
1516 1517 parent2=hookp2)
1517 1518 ret = self.commitctx(cctx, True)
1518 1519 except: # re-raises
1519 1520 if edited:
1520 1521 self.ui.write(
1521 1522 _('note: commit message saved in %s\n') % msgfn)
1522 1523 raise
1523 1524
1524 1525 # update bookmarks, dirstate and mergestate
1525 1526 bookmarks.update(self, [p1, p2], ret)
1526 1527 cctx.markcommitted(ret)
1527 1528 ms.reset()
1528 1529 finally:
1529 1530 wlock.release()
1530 1531
1531 1532 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1532 1533 # hack for commands that use a temporary commit (e.g. histedit):
1533 1534 # the temporary commit may have been stripped before the hook runs
1534 1535 if self.changelog.hasnode(ret):
1535 1536 self.hook("commit", node=node, parent1=parent1,
1536 1537 parent2=parent2)
1537 1538 self._afterlock(commithook)
1538 1539 return ret
1539 1540
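# Example (a minimal sketch, not part of the original file): the
# internal-only ui.allowemptycommit option noted above lets tools create
# a commit that touches no files. The repository path is hypothetical.
#
#   from mercurial import ui as uimod, hg
#   u = uimod.ui()
#   u.setconfig('ui', 'allowemptycommit', True, 'example')
#   repo = hg.repository(u, '/path/to/repo')
#   node = repo.commit(text='marker commit with no file changes')
#   # without the override, commit() returns None for an empty commit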
1540 1541 @unfilteredmethod
1541 1542 def commitctx(self, ctx, error=False):
1542 1543 """Add a new revision to current repository.
1543 1544 Revision information is passed via the context argument.
1544 1545 """
1545 1546
1546 1547 tr = None
1547 1548 p1, p2 = ctx.p1(), ctx.p2()
1548 1549 user = ctx.user()
1549 1550
1550 1551 lock = self.lock()
1551 1552 try:
1552 1553 tr = self.transaction("commit")
1553 1554 trp = weakref.proxy(tr)
1554 1555
1555 1556 if ctx.files():
1556 1557 m1 = p1.manifest()
1557 1558 m2 = p2.manifest()
1558 1559 m = m1.copy()
1559 1560
1560 1561 # check in files
1561 1562 added = []
1562 1563 changed = []
1563 1564 removed = list(ctx.removed())
1564 1565 linkrev = len(self)
1565 1566 self.ui.note(_("committing files:\n"))
1566 1567 for f in sorted(ctx.modified() + ctx.added()):
1567 1568 self.ui.note(f + "\n")
1568 1569 try:
1569 1570 fctx = ctx[f]
1570 1571 if fctx is None:
1571 1572 removed.append(f)
1572 1573 else:
1573 1574 added.append(f)
1574 1575 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1575 1576 trp, changed)
1576 1577 m.setflag(f, fctx.flags())
1577 1578 except OSError as inst:
1578 1579 self.ui.warn(_("trouble committing %s!\n") % f)
1579 1580 raise
1580 1581 except IOError as inst:
1581 1582 errcode = getattr(inst, 'errno', errno.ENOENT)
1582 1583 if error or errcode and errcode != errno.ENOENT:
1583 1584 self.ui.warn(_("trouble committing %s!\n") % f)
1584 1585 raise
1585 1586
1586 1587 # update manifest
1587 1588 self.ui.note(_("committing manifest\n"))
1588 1589 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1589 1590 drop = [f for f in removed if f in m]
1590 1591 for f in drop:
1591 1592 del m[f]
1592 1593 mn = self.manifest.add(m, trp, linkrev,
1593 1594 p1.manifestnode(), p2.manifestnode(),
1594 1595 added, drop)
1595 1596 files = changed + removed
1596 1597 else:
1597 1598 mn = p1.manifestnode()
1598 1599 files = []
1599 1600
1600 1601 # update changelog
1601 1602 self.ui.note(_("committing changelog\n"))
1602 1603 self.changelog.delayupdate(tr)
1603 1604 n = self.changelog.add(mn, files, ctx.description(),
1604 1605 trp, p1.node(), p2.node(),
1605 1606 user, ctx.date(), ctx.extra().copy())
1606 1607 p = lambda: tr.writepending() and self.root or ""
1607 1608 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1608 1609 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1609 1610 parent2=xp2, pending=p)
1610 1611 # set the new commit in its proper phase
1611 1612 targetphase = subrepo.newcommitphase(self.ui, ctx)
1612 1613 if targetphase:
1613 1614 # retracting the boundary does not alter parent changesets:
1614 1615 # if a parent has a higher phase, the resulting phase will
1615 1616 # be compliant anyway
1616 1617 #
1617 1618 # if the minimal phase was 0 we don't need to retract anything
1618 1619 phases.retractboundary(self, tr, targetphase, [n])
1619 1620 tr.close()
1620 1621 branchmap.updatecache(self.filtered('served'))
1621 1622 return n
1622 1623 finally:
1623 1624 if tr:
1624 1625 tr.release()
1625 1626 lock.release()
1626 1627
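# Example (illustrative sketch only): commitctx() also serves commits
# built entirely in memory with context.memctx; the file name and
# contents below are made up.
#
#   from mercurial import context
#   def getfile(repo, mctx, path):
#       return context.memfilectx(repo, path, 'new contents\n')
#   mctx = context.memctx(repo, (repo['.'].node(), None),
#                         'example message', ['a.txt'], getfile,
#                         user='someone@example.com')
#   node = repo.commitctx(mctx)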
1627 1628 @unfilteredmethod
1628 1629 def destroying(self):
1629 1630 '''Inform the repository that nodes are about to be destroyed.
1630 1631 Intended for use by strip and rollback, so there's a common
1631 1632 place for anything that has to be done before destroying history.
1632 1633
1633 1634 This is mostly useful for saving state that is in memory and waiting
1634 1635 to be flushed when the current lock is released. Because a call to
1635 1636 destroyed is imminent, the repo will be invalidated, causing those
1636 1637 changes to stay in memory (waiting for the next unlock), or vanish
1637 1638 completely.
1638 1639 '''
1639 1640 # When using the same lock to commit and strip, the phasecache is left
1640 1641 # dirty after committing. Then when we strip, the repo is invalidated,
1641 1642 # causing those changes to disappear.
1642 1643 if '_phasecache' in vars(self):
1643 1644 self._phasecache.write()
1644 1645
1645 1646 @unfilteredmethod
1646 1647 def destroyed(self):
1647 1648 '''Inform the repository that nodes have been destroyed.
1648 1649 Intended for use by strip and rollback, so there's a common
1649 1650 place for anything that has to be done after destroying history.
1650 1651 '''
1651 1652 # When one tries to:
1652 1653 # 1) destroy nodes thus calling this method (e.g. strip)
1653 1654 # 2) use phasecache somewhere (e.g. commit)
1654 1655 #
1655 1656 # then 2) will fail because the phasecache contains nodes that were
1656 1657 # removed. We can either remove phasecache from the filecache,
1657 1658 # causing it to reload next time it is accessed, or simply filter
1658 1659 # the removed nodes now and write the updated cache.
1659 1660 self._phasecache.filterunknown(self)
1660 1661 self._phasecache.write()
1661 1662
1662 1663 # update the 'served' branch cache to help read-only server processes
1663 1664 # Thanks to branchcache collaboration, this is done from the nearest
1664 1665 # filtered subset and is expected to be fast.
1665 1666 branchmap.updatecache(self.filtered('served'))
1666 1667
1667 1668 # Ensure the persistent tag cache is updated. Doing it now
1668 1669 # means that the tag cache only has to worry about destroyed
1669 1670 # heads immediately after a strip/rollback. That in turn
1670 1671 # guarantees that "cachetip == currenttip" (comparing both rev
1671 1672 # and node) always means no nodes have been added or destroyed.
1672 1673
1673 1674 # XXX this is suboptimal when qrefresh'ing: we strip the current
1674 1675 # head, refresh the tag cache, then immediately add a new head.
1675 1676 # But I think doing it this way is necessary for the "instant
1676 1677 # tag cache retrieval" case to work.
1677 1678 self.invalidate()
1678 1679
1679 1680 def walk(self, match, node=None):
1680 1681 '''
1681 1682 walk recursively through the directory tree or a given
1682 1683 changeset, finding all files matched by the match
1683 1684 function
1684 1685 '''
1685 1686 return self[node].walk(match)
1686 1687
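# Example (sketch, assuming an existing repo object): walk the tip
# revision for Python files using a matcher.
#
#   from mercurial import match as matchmod
#   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
#   for f in repo.walk(m, node='tip'):
#       repo.ui.write(f + '\n')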
1687 1688 def status(self, node1='.', node2=None, match=None,
1688 1689 ignored=False, clean=False, unknown=False,
1689 1690 listsubrepos=False):
1690 1691 '''a convenience method that calls node1.status(node2)'''
1691 1692 return self[node1].status(node2, match, ignored, clean, unknown,
1692 1693 listsubrepos)
1693 1694
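# Example (sketch): with the defaults, this compares the working
# directory against its first parent; the result object exposes
# modified/added/removed/deleted lists (and more when requested).
#
#   st = repo.status(unknown=True)
#   for f in st.modified:
#       repo.ui.write('M %s\n' % f)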
1694 1695 def heads(self, start=None):
1695 1696 heads = self.changelog.heads(start)
1696 1697 # sort the output in rev descending order
1697 1698 return sorted(heads, key=self.changelog.rev, reverse=True)
1698 1699
1699 1700 def branchheads(self, branch=None, start=None, closed=False):
1700 1701 '''return a (possibly filtered) list of heads for the given branch
1701 1702
1702 1703 Heads are returned in topological order, from newest to oldest.
1703 1704 If branch is None, use the dirstate branch.
1704 1705 If start is not None, return only heads reachable from start.
1705 1706 If closed is True, return heads that are marked as closed as well.
1706 1707 '''
1707 1708 if branch is None:
1708 1709 branch = self[None].branch()
1709 1710 branches = self.branchmap()
1710 1711 if branch not in branches:
1711 1712 return []
1712 1713 # the cache returns heads ordered lowest to highest
1713 1714 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1714 1715 if start is not None:
1715 1716 # filter out the heads that cannot be reached from startrev
1716 1717 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1717 1718 bheads = [h for h in bheads if h in fbheads]
1718 1719 return bheads
1719 1720
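# Example (sketch): list all heads of the 'default' branch, including
# closed ones, newest first, as short hashes.
#
#   from mercurial.node import short
#   for h in repo.branchheads('default', closed=True):
#       repo.ui.write(short(h) + '\n')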
1720 1721 def branches(self, nodes):
1721 1722 if not nodes:
1722 1723 nodes = [self.changelog.tip()]
1723 1724 b = []
1724 1725 for n in nodes:
1725 1726 t = n
1726 1727 while True:
1727 1728 p = self.changelog.parents(n)
1728 1729 if p[1] != nullid or p[0] == nullid:
1729 1730 b.append((t, n, p[0], p[1]))
1730 1731 break
1731 1732 n = p[0]
1732 1733 return b
1733 1734
1734 1735 def between(self, pairs):
1735 1736 r = []
1736 1737
1737 1738 for top, bottom in pairs:
1738 1739 n, l, i = top, [], 0
1739 1740 f = 1
1740 1741
1741 1742 while n != bottom and n != nullid:
1742 1743 p = self.changelog.parents(n)[0]
1743 1744 if i == f:
1744 1745 l.append(n)
1745 1746 f = f * 2
1746 1747 n = p
1747 1748 i += 1
1748 1749
1749 1750 r.append(l)
1750 1751
1751 1752 return r
1752 1753
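# Worked example (derived from the loop above): for each (top, bottom)
# pair, nodes are sampled at exponentially growing first-parent
# distances 1, 2, 4, 8, ... below top, which the legacy discovery
# protocol bisects over. 'tip~10' below assumes a linear history.
#
#   from mercurial import scmutil
#   top = repo['tip'].node()
#   bottom = scmutil.revsingle(repo, 'tip~10').node()
#   (samples,) = repo.between([(top, bottom)])
#   # samples are the nodes 1, 2, 4 and 8 steps below tip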
1753 1754 def checkpush(self, pushop):
1754 1755 """Extensions can override this function if additional checks have
1755 1756 to be performed before pushing, or call it if they override the push
1756 1757 command.
1757 1758 """
1758 1759 pass
1759 1760
1760 1761 @unfilteredpropertycache
1761 1762 def prepushoutgoinghooks(self):
1762 1763 """Return util.hooks consists of "(repo, remote, outgoing)"
1763 1764 functions, which are called before pushing changesets.
1764 1765 """
1765 1766 return util.hooks()
1766 1767
1767 1768 def stream_in(self, remote, remotereqs):
1768 1769 # Save remote branchmap. We will use it later
1769 1770 # to speed up branchcache creation
1770 1771 rbranchmap = None
1771 1772 if remote.capable("branchmap"):
1772 1773 rbranchmap = remote.branchmap()
1773 1774
1774 1775 fp = remote.stream_out()
1775 1776 l = fp.readline()
1776 1777 try:
1777 1778 resp = int(l)
1778 1779 except ValueError:
1779 1780 raise error.ResponseError(
1780 1781 _('unexpected response from remote server:'), l)
1781 1782 if resp == 1:
1782 1783 raise util.Abort(_('operation forbidden by server'))
1783 1784 elif resp == 2:
1784 1785 raise util.Abort(_('locking the remote repository failed'))
1785 1786 elif resp != 0:
1786 1787 raise util.Abort(_('the server sent an unknown error code'))
1787 1788
1788 1789 self.applystreamclone(remotereqs, rbranchmap, fp)
1789 1790 return len(self.heads()) + 1
1790 1791
1791 1792 def applystreamclone(self, remotereqs, remotebranchmap, fp):
1792 1793 """Apply stream clone data to this repository.
1793 1794
1794 1795 "remotereqs" is a set of requirements to handle the incoming data.
1795 1796 "remotebranchmap" is the result of a branchmap lookup on the remote. It
1796 1797 can be None.
1797 1798 "fp" is a file object containing the raw stream data, suitable for
1798 1799 feeding into exchange.consumestreamclone.
1799 1800 """
1800 1801 lock = self.lock()
1801 1802 try:
1802 1803 exchange.consumestreamclone(self, fp)
1803 1804
1804 1805 # new requirements = old non-format requirements +
1805 1806 # new format-related requirements
1806 1807 # coming from the streamed-in repository
1807 1808 self.requirements = remotereqs | (
1808 1809 self.requirements - self.supportedformats)
1809 1810 self._applyopenerreqs()
1810 1811 self._writerequirements()
1811 1812
1812 1813 if remotebranchmap:
1813 1814 rbheads = []
1814 1815 closed = []
1815 1816 for bheads in remotebranchmap.itervalues():
1816 1817 rbheads.extend(bheads)
1817 1818 for h in bheads:
1818 1819 r = self.changelog.rev(h)
1819 1820 b, c = self.changelog.branchinfo(r)
1820 1821 if c:
1821 1822 closed.append(h)
1822 1823
1823 1824 if rbheads:
1824 1825 rtiprev = max((int(self.changelog.rev(node))
1825 1826 for node in rbheads))
1826 1827 cache = branchmap.branchcache(remotebranchmap,
1827 1828 self[rtiprev].node(),
1828 1829 rtiprev,
1829 1830 closednodes=closed)
1830 1831 # Try to stick it as low as possible:
1831 1832 # filters above 'served' are unlikely to be fetched from a clone
1832 1833 for candidate in ('base', 'immutable', 'served'):
1833 1834 rview = self.filtered(candidate)
1834 1835 if cache.validfor(rview):
1835 1836 self._branchcaches[candidate] = cache
1836 1837 cache.write(rview)
1837 1838 break
1838 1839 self.invalidate()
1839 1840 finally:
1840 1841 lock.release()
1841 1842
1842 1843 def clone(self, remote, heads=[], stream=None):
1843 1844 '''clone remote repository.
1844 1845
1845 1846 keyword arguments:
1846 1847 heads: list of revs to clone (forces use of pull)
1847 1848 stream: use streaming clone if possible'''
1848 1849
1849 1850 # now, all clients that can request uncompressed clones can
1850 1851 # read repo formats supported by all servers that can serve
1851 1852 # them.
1852 1853
1853 1854 # if revlog format changes, client will have to check version
1854 1855 # and format flags on "stream" capability, and use
1855 1856 # uncompressed only if compatible.
1856 1857
1857 1858 if stream is None:
1858 1859 # if the server explicitly prefers to stream (for fast LANs)
1859 1860 stream = remote.capable('stream-preferred')
1860 1861
1861 1862 if stream and not heads:
1862 1863 # 'stream' means remote revlog format is revlogv1 only
1863 1864 if remote.capable('stream'):
1864 1865 self.stream_in(remote, set(('revlogv1',)))
1865 1866 else:
1866 1867 # otherwise, 'streamreqs' contains the remote revlog format
1867 1868 streamreqs = remote.capable('streamreqs')
1868 1869 if streamreqs:
1869 1870 streamreqs = set(streamreqs.split(','))
1870 1871 # if we support it, stream in and adjust our requirements
1871 1872 if not streamreqs - self.supportedformats:
1872 1873 self.stream_in(remote, streamreqs)
1873 1874
1874 1875 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1875 1876 try:
1876 1877 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1877 1878 ret = exchange.pull(self, remote, heads).cgresult
1878 1879 finally:
1879 1880 self.ui.restoreconfig(quiet)
1880 1881 return ret
1881 1882
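# Example (sketch; the URL is a placeholder): clone from a remote peer,
# preferring a streaming clone when the server supports it.
#
#   from mercurial import hg
#   other = hg.peer(repo.ui, {}, 'http://hg.example.com/repo')
#   cgresult = repo.clone(other, stream=True)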
1882 1883 def pushkey(self, namespace, key, old, new):
1883 1884 try:
1884 1885 tr = self.currenttransaction()
1885 1886 hookargs = {}
1886 1887 if tr is not None:
1887 1888 hookargs.update(tr.hookargs)
1888 1889 pending = lambda: tr.writepending() and self.root or ""
1889 1890 hookargs['pending'] = pending
1890 1891 hookargs['namespace'] = namespace
1891 1892 hookargs['key'] = key
1892 1893 hookargs['old'] = old
1893 1894 hookargs['new'] = new
1894 1895 self.hook('prepushkey', throw=True, **hookargs)
1895 1896 except error.HookAbort as exc:
1896 1897 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1897 1898 if exc.hint:
1898 1899 self.ui.write_err(_("(%s)\n") % exc.hint)
1899 1900 return False
1900 1901 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1901 1902 ret = pushkey.push(self, namespace, key, old, new)
1902 1903 def runhook():
1903 1904 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1904 1905 ret=ret)
1905 1906 self._afterlock(runhook)
1906 1907 return ret
1907 1908
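# Example (sketch): bookmarks are one pushkey namespace; a bookmark is
# created by pushing a new hex node with an empty old value.
#
#   ok = repo.pushkey('bookmarks', 'feature-x', '', repo['.'].hex())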
1908 1909 def listkeys(self, namespace):
1909 1910 self.hook('prelistkeys', throw=True, namespace=namespace)
1910 1911 self.ui.debug('listing keys for "%s"\n' % namespace)
1911 1912 values = pushkey.list(self, namespace)
1912 1913 self.hook('listkeys', namespace=namespace, values=values)
1913 1914 return values
1914 1915
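# Example (sketch): enumerate the available namespaces, then fetch the
# bookmark mapping of name -> hex node.
#
#   repo.listkeys('namespaces')
#   marks = repo.listkeys('bookmarks')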
1915 1916 def debugwireargs(self, one, two, three=None, four=None, five=None):
1916 1917 '''used to test argument passing over the wire'''
1917 1918 return "%s %s %s %s %s" % (one, two, three, four, five)
1918 1919
1919 1920 def savecommitmessage(self, text):
1920 1921 fp = self.vfs('last-message.txt', 'wb')
1921 1922 try:
1922 1923 fp.write(text)
1923 1924 finally:
1924 1925 fp.close()
1925 1926 return self.pathto(fp.name[len(self.root) + 1:])
1926 1927
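# Example (reading off the code above): the message is written to
# .hg/last-message.txt and the repo-relative path is returned, e.g. for
# use in "note: commit message saved in ..." hints.
#
#   msgfn = repo.savecommitmessage('WIP: draft message\n')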
1927 1928 # used to avoid circular references so destructors work
1928 1929 def aftertrans(files):
1929 1930 renamefiles = [tuple(t) for t in files]
1930 1931 def a():
1931 1932 for vfs, src, dest in renamefiles:
1932 1933 try:
1933 1934 vfs.rename(src, dest)
1934 1935 except OSError: # journal file does not yet exist
1935 1936 pass
1936 1937 return a
1937 1938
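# Example (sketch): the returned closure is handed to a transaction as a
# post-close callback; it renames journal files to their undo names
# without keeping a reference to the repo alive.
#
#   renames = [(repo.vfs, 'journal.dirstate', 'undo.dirstate')]
#   callback = aftertrans(renames)
#   callback()  # performs vfs.rename(src, dest) for each tuple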
1938 1939 def undoname(fn):
1939 1940 base, name = os.path.split(fn)
1940 1941 assert name.startswith('journal')
1941 1942 return os.path.join(base, name.replace('journal', 'undo', 1))
1942 1943
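# Example (follows directly from the code above):
#
#   undoname('/repo/.hg/journal.dirstate') == '/repo/.hg/undo.dirstate'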
1943 1944 def instance(ui, path, create):
1944 1945 return localrepository(ui, util.urllocalpath(path), create)
1945 1946
1946 1947 def islocal(path):
1947 1948 return True