localrepo: add ignoremissing parameter to branchtip...
Sean Farley
r23775:885c0290 default
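This changeset lets callers ask branchtip() about a branch that may not
exist without catching RepoLookupError: with ignoremissing=True the
method simply returns None for an unknown branch. A minimal sketch of
the call-site difference (assuming `repo` is an already-opened
localrepository; the branch name is hypothetical):

    from mercurial import error

    # Before this change, an unknown branch always raised.
    try:
        node = repo.branchtip('no-such-branch')
    except error.RepoLookupError:
        node = None

    # After this change, callers that treat a missing branch as
    # "no node" can opt out of the exception; branchtip() falls
    # through its except clause and returns None.
    node = repo.branchtip('no-such-branch', ignoremissing=True)
    assert node is None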
@@ -1,1843 +1,1852 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 format='HG10', **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG2Y' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.unbundle20(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 cg = exchange.readbundle(self.ui, cg, None)
129 129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 130 if util.safehasattr(ret, 'getchunks'):
131 131 # This is a bundle20 object, turn it into an unbundler.
132 132 # This little dance should be dropped eventually when the API
133 133 # is finally improved.
134 134 stream = util.chunkbuffer(ret.getchunks())
135 135 ret = bundle2.unbundle20(self.ui, stream)
136 136 return ret
137 137 except error.PushRaced, exc:
138 138 raise error.ResponseError(_('push failed:'), str(exc))
139 139
140 140 def lock(self):
141 141 return self._repo.lock()
142 142
143 143 def addchangegroup(self, cg, source, url):
144 144 return changegroup.addchangegroup(self._repo, cg, source, url)
145 145
146 146 def pushkey(self, namespace, key, old, new):
147 147 return self._repo.pushkey(namespace, key, old, new)
148 148
149 149 def listkeys(self, namespace):
150 150 return self._repo.listkeys(namespace)
151 151
152 152 def debugwireargs(self, one, two, three=None, four=None, five=None):
153 153 '''used to test argument passing over the wire'''
154 154 return "%s %s %s %s %s" % (one, two, three, four, five)
155 155
156 156 class locallegacypeer(localpeer):
157 157 '''peer extension which implements legacy methods too; used for tests with
158 158 restricted capabilities'''
159 159
160 160 def __init__(self, repo):
161 161 localpeer.__init__(self, repo, caps=legacycaps)
162 162
163 163 def branches(self, nodes):
164 164 return self._repo.branches(nodes)
165 165
166 166 def between(self, pairs):
167 167 return self._repo.between(pairs)
168 168
169 169 def changegroup(self, basenodes, source):
170 170 return changegroup.changegroup(self._repo, basenodes, source)
171 171
172 172 def changegroupsubset(self, bases, heads, source):
173 173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
174 174
175 175 class localrepository(object):
176 176
177 177 supportedformats = set(('revlogv1', 'generaldelta'))
178 178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
179 179 'dotencode'))
180 180 openerreqs = set(('revlogv1', 'generaldelta'))
181 181 requirements = ['revlogv1']
182 182 filtername = None
183 183
184 184 # a list of (ui, featureset) functions.
185 185 # only functions defined in modules of enabled extensions are invoked
186 186 featuresetupfuncs = set()
187 187
188 188 def _baserequirements(self, create):
189 189 return self.requirements[:]
190 190
191 191 def __init__(self, baseui, path=None, create=False):
192 192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 193 self.wopener = self.wvfs
194 194 self.root = self.wvfs.base
195 195 self.path = self.wvfs.join(".hg")
196 196 self.origroot = path
197 197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 198 self.vfs = scmutil.vfs(self.path)
199 199 self.opener = self.vfs
200 200 self.baseui = baseui
201 201 self.ui = baseui.copy()
202 202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 203 # A list of callbacks to shape the phase if no data were found.
204 204 # Callbacks are in the form: func(repo, roots) --> processed root.
205 205 # This list is to be filled by extensions during repo setup
206 206 self._phasedefaults = []
207 207 try:
208 208 self.ui.readconfig(self.join("hgrc"), self.root)
209 209 extensions.loadall(self.ui)
210 210 except IOError:
211 211 pass
212 212
213 213 if self.featuresetupfuncs:
214 214 self.supported = set(self._basesupported) # use private copy
215 215 extmods = set(m.__name__ for n, m
216 216 in extensions.extensions(self.ui))
217 217 for setupfunc in self.featuresetupfuncs:
218 218 if setupfunc.__module__ in extmods:
219 219 setupfunc(self.ui, self.supported)
220 220 else:
221 221 self.supported = self._basesupported
222 222
223 223 if not self.vfs.isdir():
224 224 if create:
225 225 if not self.wvfs.exists():
226 226 self.wvfs.makedirs()
227 227 self.vfs.makedir(notindexed=True)
228 228 requirements = self._baserequirements(create)
229 229 if self.ui.configbool('format', 'usestore', True):
230 230 self.vfs.mkdir("store")
231 231 requirements.append("store")
232 232 if self.ui.configbool('format', 'usefncache', True):
233 233 requirements.append("fncache")
234 234 if self.ui.configbool('format', 'dotencode', True):
235 235 requirements.append('dotencode')
236 236 # create an invalid changelog
237 237 self.vfs.append(
238 238 "00changelog.i",
239 239 '\0\0\0\2' # represents revlogv2
240 240 ' dummy changelog to prevent using the old repo layout'
241 241 )
242 242 if self.ui.configbool('format', 'generaldelta', False):
243 243 requirements.append("generaldelta")
244 244 requirements = set(requirements)
245 245 else:
246 246 raise error.RepoError(_("repository %s not found") % path)
247 247 elif create:
248 248 raise error.RepoError(_("repository %s already exists") % path)
249 249 else:
250 250 try:
251 251 requirements = scmutil.readrequires(self.vfs, self.supported)
252 252 except IOError, inst:
253 253 if inst.errno != errno.ENOENT:
254 254 raise
255 255 requirements = set()
256 256
257 257 self.sharedpath = self.path
258 258 try:
259 259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 260 realpath=True)
261 261 s = vfs.base
262 262 if not vfs.exists():
263 263 raise error.RepoError(
264 264 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 265 self.sharedpath = s
266 266 except IOError, inst:
267 267 if inst.errno != errno.ENOENT:
268 268 raise
269 269
270 270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 271 self.spath = self.store.path
272 272 self.svfs = self.store.vfs
273 273 self.sopener = self.svfs
274 274 self.sjoin = self.store.join
275 275 self.vfs.createmode = self.store.createmode
276 276 self._applyrequirements(requirements)
277 277 if create:
278 278 self._writerequirements()
279 279
280 280
281 281 self._branchcaches = {}
282 282 self.filterpats = {}
283 283 self._datafilters = {}
284 284 self._transref = self._lockref = self._wlockref = None
285 285
286 286 # A cache for various files under .hg/ that tracks file changes
287 287 # (used by the filecache decorator)
288 288 #
289 289 # Maps a property name to its util.filecacheentry
290 290 self._filecache = {}
291 291
292 292 # hold sets of revisions to be filtered
293 293 # should be cleared when something might have changed the filter value:
294 294 # - new changesets,
295 295 # - phase change,
296 296 # - new obsolescence marker,
297 297 # - working directory parent change,
298 298 # - bookmark changes
299 299 self.filteredrevcache = {}
300 300
301 301 # generic mapping between names and nodes
302 302 self.names = namespaces.namespaces()
303 303
304 304 def close(self):
305 305 pass
306 306
307 307 def _restrictcapabilities(self, caps):
308 308 # bundle2 is not ready for prime time, drop it unless explicitly
309 309 # required by the tests (or some brave tester)
310 310 if self.ui.configbool('experimental', 'bundle2-exp', False):
311 311 caps = set(caps)
312 312 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
313 313 caps.add('bundle2-exp=' + urllib.quote(capsblob))
314 314 return caps
315 315
316 316 def _applyrequirements(self, requirements):
317 317 self.requirements = requirements
318 318 self.sopener.options = dict((r, 1) for r in requirements
319 319 if r in self.openerreqs)
320 320 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
321 321 if chunkcachesize is not None:
322 322 self.sopener.options['chunkcachesize'] = chunkcachesize
323 323 maxchainlen = self.ui.configint('format', 'maxchainlen')
324 324 if maxchainlen is not None:
325 325 self.sopener.options['maxchainlen'] = maxchainlen
326 326
327 327 def _writerequirements(self):
328 328 reqfile = self.opener("requires", "w")
329 329 for r in sorted(self.requirements):
330 330 reqfile.write("%s\n" % r)
331 331 reqfile.close()
332 332
333 333 def _checknested(self, path):
334 334 """Determine if path is a legal nested repository."""
335 335 if not path.startswith(self.root):
336 336 return False
337 337 subpath = path[len(self.root) + 1:]
338 338 normsubpath = util.pconvert(subpath)
339 339
340 340 # XXX: Checking against the current working copy is wrong in
341 341 # the sense that it can reject things like
342 342 #
343 343 # $ hg cat -r 10 sub/x.txt
344 344 #
345 345 # if sub/ is no longer a subrepository in the working copy
346 346 # parent revision.
347 347 #
348 348 # However, it can of course also allow things that would have
349 349 # been rejected before, such as the above cat command if sub/
350 350 # is a subrepository now, but was a normal directory before.
351 351 # The old path auditor would have rejected by mistake since it
352 352 # panics when it sees sub/.hg/.
353 353 #
354 354 # All in all, checking against the working copy seems sensible
355 355 # since we want to prevent access to nested repositories on
356 356 # the filesystem *now*.
357 357 ctx = self[None]
358 358 parts = util.splitpath(subpath)
359 359 while parts:
360 360 prefix = '/'.join(parts)
361 361 if prefix in ctx.substate:
362 362 if prefix == normsubpath:
363 363 return True
364 364 else:
365 365 sub = ctx.sub(prefix)
366 366 return sub.checknested(subpath[len(prefix) + 1:])
367 367 else:
368 368 parts.pop()
369 369 return False
370 370
371 371 def peer(self):
372 372 return localpeer(self) # not cached to avoid reference cycle
373 373
374 374 def unfiltered(self):
375 375 """Return unfiltered version of the repository
376 376
377 377 Intended to be overwritten by filtered repo."""
378 378 return self
379 379
380 380 def filtered(self, name):
381 381 """Return a filtered version of a repository"""
382 382 # build a new class with the mixin and the current class
383 383 # (possibly subclass of the repo)
384 384 class proxycls(repoview.repoview, self.unfiltered().__class__):
385 385 pass
386 386 return proxycls(self, name)
387 387
388 388 @repofilecache('bookmarks')
389 389 def _bookmarks(self):
390 390 return bookmarks.bmstore(self)
391 391
392 392 @repofilecache('bookmarks.current')
393 393 def _bookmarkcurrent(self):
394 394 return bookmarks.readcurrent(self)
395 395
396 396 def bookmarkheads(self, bookmark):
397 397 name = bookmark.split('@', 1)[0]
398 398 heads = []
399 399 for mark, n in self._bookmarks.iteritems():
400 400 if mark.split('@', 1)[0] == name:
401 401 heads.append(n)
402 402 return heads
403 403
404 404 @storecache('phaseroots')
405 405 def _phasecache(self):
406 406 return phases.phasecache(self, self._phasedefaults)
407 407
408 408 @storecache('obsstore')
409 409 def obsstore(self):
410 410 # read default format for new obsstore.
411 411 defaultformat = self.ui.configint('format', 'obsstore-version', None)
412 412 # rely on obsstore class default when possible.
413 413 kwargs = {}
414 414 if defaultformat is not None:
415 415 kwargs['defaultformat'] = defaultformat
416 416 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
417 417 store = obsolete.obsstore(self.sopener, readonly=readonly,
418 418 **kwargs)
419 419 if store and readonly:
420 420 # message is rare enough to not be translated
421 421 msg = 'obsolete feature not enabled but %i markers found!\n'
422 422 self.ui.warn(msg % len(list(store)))
423 423 return store
424 424
425 425 @storecache('00changelog.i')
426 426 def changelog(self):
427 427 c = changelog.changelog(self.sopener)
428 428 if 'HG_PENDING' in os.environ:
429 429 p = os.environ['HG_PENDING']
430 430 if p.startswith(self.root):
431 431 c.readpending('00changelog.i.a')
432 432 return c
433 433
434 434 @storecache('00manifest.i')
435 435 def manifest(self):
436 436 return manifest.manifest(self.sopener)
437 437
438 438 @repofilecache('dirstate')
439 439 def dirstate(self):
440 440 warned = [0]
441 441 def validate(node):
442 442 try:
443 443 self.changelog.rev(node)
444 444 return node
445 445 except error.LookupError:
446 446 if not warned[0]:
447 447 warned[0] = True
448 448 self.ui.warn(_("warning: ignoring unknown"
449 449 " working parent %s!\n") % short(node))
450 450 return nullid
451 451
452 452 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
453 453
454 454 def __getitem__(self, changeid):
455 455 if changeid is None:
456 456 return context.workingctx(self)
457 457 if isinstance(changeid, slice):
458 458 return [context.changectx(self, i)
459 459 for i in xrange(*changeid.indices(len(self)))
460 460 if i not in self.changelog.filteredrevs]
461 461 return context.changectx(self, changeid)
462 462
463 463 def __contains__(self, changeid):
464 464 try:
465 465 return bool(self.lookup(changeid))
466 466 except error.RepoLookupError:
467 467 return False
468 468
469 469 def __nonzero__(self):
470 470 return True
471 471
472 472 def __len__(self):
473 473 return len(self.changelog)
474 474
475 475 def __iter__(self):
476 476 return iter(self.changelog)
477 477
478 478 def revs(self, expr, *args):
479 479 '''Return a list of revisions matching the given revset'''
480 480 expr = revset.formatspec(expr, *args)
481 481 m = revset.match(None, expr)
482 482 return m(self, revset.spanset(self))
483 483
484 484 def set(self, expr, *args):
485 485 '''
486 486 Yield a context for each matching revision, after doing arg
487 487 replacement via revset.formatspec
488 488 '''
489 489 for r in self.revs(expr, *args):
490 490 yield self[r]
491 491
492 492 def url(self):
493 493 return 'file:' + self.root
494 494
495 495 def hook(self, name, throw=False, **args):
496 496 """Call a hook, passing this repo instance.
497 497
498 498 This is a convenience method to aid invoking hooks. Extensions likely
499 499 won't call this unless they have registered a custom hook or are
500 500 replacing code that is expected to call a hook.
501 501 """
502 502 return hook.hook(self.ui, self, name, throw, **args)
503 503
504 504 @unfilteredmethod
505 505 def _tag(self, names, node, message, local, user, date, extra={},
506 506 editor=False):
507 507 if isinstance(names, str):
508 508 names = (names,)
509 509
510 510 branches = self.branchmap()
511 511 for name in names:
512 512 self.hook('pretag', throw=True, node=hex(node), tag=name,
513 513 local=local)
514 514 if name in branches:
515 515 self.ui.warn(_("warning: tag %s conflicts with existing"
516 516 " branch name\n") % name)
517 517
518 518 def writetags(fp, names, munge, prevtags):
519 519 fp.seek(0, 2)
520 520 if prevtags and prevtags[-1] != '\n':
521 521 fp.write('\n')
522 522 for name in names:
523 523 m = munge and munge(name) or name
524 524 if (self._tagscache.tagtypes and
525 525 name in self._tagscache.tagtypes):
526 526 old = self.tags().get(name, nullid)
527 527 fp.write('%s %s\n' % (hex(old), m))
528 528 fp.write('%s %s\n' % (hex(node), m))
529 529 fp.close()
530 530
531 531 prevtags = ''
532 532 if local:
533 533 try:
534 534 fp = self.opener('localtags', 'r+')
535 535 except IOError:
536 536 fp = self.opener('localtags', 'a')
537 537 else:
538 538 prevtags = fp.read()
539 539
540 540 # local tags are stored in the current charset
541 541 writetags(fp, names, None, prevtags)
542 542 for name in names:
543 543 self.hook('tag', node=hex(node), tag=name, local=local)
544 544 return
545 545
546 546 try:
547 547 fp = self.wfile('.hgtags', 'rb+')
548 548 except IOError, e:
549 549 if e.errno != errno.ENOENT:
550 550 raise
551 551 fp = self.wfile('.hgtags', 'ab')
552 552 else:
553 553 prevtags = fp.read()
554 554
555 555 # committed tags are stored in UTF-8
556 556 writetags(fp, names, encoding.fromlocal, prevtags)
557 557
558 558 fp.close()
559 559
560 560 self.invalidatecaches()
561 561
562 562 if '.hgtags' not in self.dirstate:
563 563 self[None].add(['.hgtags'])
564 564
565 565 m = matchmod.exact(self.root, '', ['.hgtags'])
566 566 tagnode = self.commit(message, user, date, extra=extra, match=m,
567 567 editor=editor)
568 568
569 569 for name in names:
570 570 self.hook('tag', node=hex(node), tag=name, local=local)
571 571
572 572 return tagnode
573 573
574 574 def tag(self, names, node, message, local, user, date, editor=False):
575 575 '''tag a revision with one or more symbolic names.
576 576
577 577 names is a list of strings or, when adding a single tag, names may be a
578 578 string.
579 579
580 580 if local is True, the tags are stored in a per-repository file.
581 581 otherwise, they are stored in the .hgtags file, and a new
582 582 changeset is committed with the change.
583 583
584 584 keyword arguments:
585 585
586 586 local: whether to store tags in non-version-controlled file
587 587 (default False)
588 588
589 589 message: commit message to use if committing
590 590
591 591 user: name of user to use if committing
592 592
593 593 date: date tuple to use if committing'''
594 594
595 595 if not local:
596 596 m = matchmod.exact(self.root, '', ['.hgtags'])
597 597 if util.any(self.status(match=m, unknown=True, ignored=True)):
598 598 raise util.Abort(_('working copy of .hgtags is changed'),
599 599 hint=_('please commit .hgtags manually'))
600 600
601 601 self.tags() # instantiate the cache
602 602 self._tag(names, node, message, local, user, date, editor=editor)
603 603
604 604 @filteredpropertycache
605 605 def _tagscache(self):
606 606 '''Returns a tagscache object that contains various tag-related
607 607 caches.'''
608 608
609 609 # This simplifies its cache management by having one decorated
610 610 # function (this one) and the rest simply fetch things from it.
611 611 class tagscache(object):
612 612 def __init__(self):
613 613 # These two define the set of tags for this repository. tags
614 614 # maps tag name to node; tagtypes maps tag name to 'global' or
615 615 # 'local'. (Global tags are defined by .hgtags across all
616 616 # heads, and local tags are defined in .hg/localtags.)
617 617 # They constitute the in-memory cache of tags.
618 618 self.tags = self.tagtypes = None
619 619
620 620 self.nodetagscache = self.tagslist = None
621 621
622 622 cache = tagscache()
623 623 cache.tags, cache.tagtypes = self._findtags()
624 624
625 625 return cache
626 626
627 627 def tags(self):
628 628 '''return a mapping of tag to node'''
629 629 t = {}
630 630 if self.changelog.filteredrevs:
631 631 tags, tt = self._findtags()
632 632 else:
633 633 tags = self._tagscache.tags
634 634 for k, v in tags.iteritems():
635 635 try:
636 636 # ignore tags to unknown nodes
637 637 self.changelog.rev(v)
638 638 t[k] = v
639 639 except (error.LookupError, ValueError):
640 640 pass
641 641 return t
642 642
643 643 def _findtags(self):
644 644 '''Do the hard work of finding tags. Return a pair of dicts
645 645 (tags, tagtypes) where tags maps tag name to node, and tagtypes
646 646 maps tag name to a string like \'global\' or \'local\'.
647 647 Subclasses or extensions are free to add their own tags, but
648 648 should be aware that the returned dicts will be retained for the
649 649 duration of the localrepo object.'''
650 650
651 651 # XXX what tagtype should subclasses/extensions use? Currently
652 652 # mq and bookmarks add tags, but do not set the tagtype at all.
653 653 # Should each extension invent its own tag type? Should there
654 654 # be one tagtype for all such "virtual" tags? Or is the status
655 655 # quo fine?
656 656
657 657 alltags = {} # map tag name to (node, hist)
658 658 tagtypes = {}
659 659
660 660 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
661 661 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
662 662
663 663 # Build the return dicts. Have to re-encode tag names because
664 664 # the tags module always uses UTF-8 (in order not to lose info
665 665 # writing to the cache), but the rest of Mercurial wants them in
666 666 # local encoding.
667 667 tags = {}
668 668 for (name, (node, hist)) in alltags.iteritems():
669 669 if node != nullid:
670 670 tags[encoding.tolocal(name)] = node
671 671 tags['tip'] = self.changelog.tip()
672 672 tagtypes = dict([(encoding.tolocal(name), value)
673 673 for (name, value) in tagtypes.iteritems()])
674 674 return (tags, tagtypes)
675 675
676 676 def tagtype(self, tagname):
677 677 '''
678 678 return the type of the given tag. result can be:
679 679
680 680 'local' : a local tag
681 681 'global' : a global tag
682 682 None : tag does not exist
683 683 '''
684 684
685 685 return self._tagscache.tagtypes.get(tagname)
686 686
687 687 def tagslist(self):
688 688 '''return a list of tags ordered by revision'''
689 689 if not self._tagscache.tagslist:
690 690 l = []
691 691 for t, n in self.tags().iteritems():
692 692 l.append((self.changelog.rev(n), t, n))
693 693 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
694 694
695 695 return self._tagscache.tagslist
696 696
697 697 def nodetags(self, node):
698 698 '''return the tags associated with a node'''
699 699 if not self._tagscache.nodetagscache:
700 700 nodetagscache = {}
701 701 for t, n in self._tagscache.tags.iteritems():
702 702 nodetagscache.setdefault(n, []).append(t)
703 703 for tags in nodetagscache.itervalues():
704 704 tags.sort()
705 705 self._tagscache.nodetagscache = nodetagscache
706 706 return self._tagscache.nodetagscache.get(node, [])
707 707
708 708 def nodebookmarks(self, node):
709 709 marks = []
710 710 for bookmark, n in self._bookmarks.iteritems():
711 711 if n == node:
712 712 marks.append(bookmark)
713 713 return sorted(marks)
714 714
715 715 def branchmap(self):
716 716 '''returns a dictionary {branch: [branchheads]} with branchheads
717 717 ordered by increasing revision number'''
718 718 branchmap.updatecache(self)
719 719 return self._branchcaches[self.filtername]
720 720
721 def branchtip(self, branch):
722 '''return the tip node for a given branch'''
721 def branchtip(self, branch, ignoremissing=False):
722 '''return the tip node for a given branch
723
724 If ignoremissing is True, then this method returns None for a missing
725 branch instead of raising an error. This is helpful for callers that
726 only expect None for a missing branch (e.g. namespaces).
727
728 '''
723 729 try:
724 730 return self.branchmap().branchtip(branch)
725 731 except KeyError:
726 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
732 if not ignoremissing:
733 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
734 else:
735 pass
727 736
728 737 def lookup(self, key):
729 738 return self[key].node()
730 739
731 740 def lookupbranch(self, key, remote=None):
732 741 repo = remote or self
733 742 if key in repo.branchmap():
734 743 return key
735 744
736 745 repo = (remote and remote.local()) and remote or self
737 746 return repo[key].branch()
738 747
739 748 def known(self, nodes):
740 749 nm = self.changelog.nodemap
741 750 pc = self._phasecache
742 751 result = []
743 752 for n in nodes:
744 753 r = nm.get(n)
745 754 resp = not (r is None or pc.phase(self, r) >= phases.secret)
746 755 result.append(resp)
747 756 return result
748 757
749 758 def local(self):
750 759 return self
751 760
752 761 def cancopy(self):
753 762 # so statichttprepo's override of local() works
754 763 if not self.local():
755 764 return False
756 765 if not self.ui.configbool('phases', 'publish', True):
757 766 return True
758 767 # if publishing we can't copy if there is filtered content
759 768 return not self.filtered('visible').changelog.filteredrevs
760 769
761 770 def shared(self):
762 771 '''the type of shared repository (None if not shared)'''
763 772 if self.sharedpath != self.path:
764 773 return 'store'
765 774 return None
766 775
767 776 def join(self, f, *insidef):
768 777 return self.vfs.join(os.path.join(f, *insidef))
769 778
770 779 def wjoin(self, f, *insidef):
771 780 return self.vfs.reljoin(self.root, f, *insidef)
772 781
773 782 def file(self, f):
774 783 if f[0] == '/':
775 784 f = f[1:]
776 785 return filelog.filelog(self.sopener, f)
777 786
778 787 def changectx(self, changeid):
779 788 return self[changeid]
780 789
781 790 def parents(self, changeid=None):
782 791 '''get list of changectxs for parents of changeid'''
783 792 return self[changeid].parents()
784 793
785 794 def setparents(self, p1, p2=nullid):
786 795 self.dirstate.beginparentchange()
787 796 copies = self.dirstate.setparents(p1, p2)
788 797 pctx = self[p1]
789 798 if copies:
790 799 # Adjust copy records, the dirstate cannot do it, it
791 800 # requires access to parents manifests. Preserve them
792 801 # only for entries added to first parent.
793 802 for f in copies:
794 803 if f not in pctx and copies[f] in pctx:
795 804 self.dirstate.copy(copies[f], f)
796 805 if p2 == nullid:
797 806 for f, s in sorted(self.dirstate.copies().items()):
798 807 if f not in pctx and s not in pctx:
799 808 self.dirstate.copy(None, f)
800 809 self.dirstate.endparentchange()
801 810
802 811 def filectx(self, path, changeid=None, fileid=None):
803 812 """changeid can be a changeset revision, node, or tag.
804 813 fileid can be a file revision or node."""
805 814 return context.filectx(self, path, changeid, fileid)
806 815
807 816 def getcwd(self):
808 817 return self.dirstate.getcwd()
809 818
810 819 def pathto(self, f, cwd=None):
811 820 return self.dirstate.pathto(f, cwd)
812 821
813 822 def wfile(self, f, mode='r'):
814 823 return self.wopener(f, mode)
815 824
816 825 def _link(self, f):
817 826 return self.wvfs.islink(f)
818 827
819 828 def _loadfilter(self, filter):
820 829 if filter not in self.filterpats:
821 830 l = []
822 831 for pat, cmd in self.ui.configitems(filter):
823 832 if cmd == '!':
824 833 continue
825 834 mf = matchmod.match(self.root, '', [pat])
826 835 fn = None
827 836 params = cmd
828 837 for name, filterfn in self._datafilters.iteritems():
829 838 if cmd.startswith(name):
830 839 fn = filterfn
831 840 params = cmd[len(name):].lstrip()
832 841 break
833 842 if not fn:
834 843 fn = lambda s, c, **kwargs: util.filter(s, c)
835 844 # Wrap old filters not supporting keyword arguments
836 845 if not inspect.getargspec(fn)[2]:
837 846 oldfn = fn
838 847 fn = lambda s, c, **kwargs: oldfn(s, c)
839 848 l.append((mf, fn, params))
840 849 self.filterpats[filter] = l
841 850 return self.filterpats[filter]
842 851
843 852 def _filter(self, filterpats, filename, data):
844 853 for mf, fn, cmd in filterpats:
845 854 if mf(filename):
846 855 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
847 856 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
848 857 break
849 858
850 859 return data
851 860
852 861 @unfilteredpropertycache
853 862 def _encodefilterpats(self):
854 863 return self._loadfilter('encode')
855 864
856 865 @unfilteredpropertycache
857 866 def _decodefilterpats(self):
858 867 return self._loadfilter('decode')
859 868
860 869 def adddatafilter(self, name, filter):
861 870 self._datafilters[name] = filter
862 871
863 872 def wread(self, filename):
864 873 if self._link(filename):
865 874 data = self.wvfs.readlink(filename)
866 875 else:
867 876 data = self.wopener.read(filename)
868 877 return self._filter(self._encodefilterpats, filename, data)
869 878
870 879 def wwrite(self, filename, data, flags):
871 880 data = self._filter(self._decodefilterpats, filename, data)
872 881 if 'l' in flags:
873 882 self.wopener.symlink(data, filename)
874 883 else:
875 884 self.wopener.write(filename, data)
876 885 if 'x' in flags:
877 886 self.wvfs.setflags(filename, False, True)
878 887
879 888 def wwritedata(self, filename, data):
880 889 return self._filter(self._decodefilterpats, filename, data)
881 890
882 891 def currenttransaction(self):
883 892 """return the current transaction or None if non exists"""
884 893 tr = self._transref and self._transref() or None
885 894 if tr and tr.running():
886 895 return tr
887 896 return None
888 897
889 898 def transaction(self, desc, report=None):
890 899 tr = self.currenttransaction()
891 900 if tr is not None:
892 901 return tr.nest()
893 902
894 903 # abort here if the journal already exists
895 904 if self.svfs.exists("journal"):
896 905 raise error.RepoError(
897 906 _("abandoned transaction found"),
898 907 hint=_("run 'hg recover' to clean up transaction"))
899 908
900 909 self._writejournal(desc)
901 910 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
902 911 rp = report and report or self.ui.warn
903 912 vfsmap = {'plain': self.opener} # root of .hg/
904 913 tr = transaction.transaction(rp, self.sopener, vfsmap,
905 914 "journal",
906 915 aftertrans(renames),
907 916 self.store.createmode)
908 917 # note: writing the fncache only during finalize means that the file is
909 918 # outdated when running hooks. As fncache is used for streaming clone,
910 919 # this is not expected to break anything that happens during the hooks.
911 920 tr.addfinalize('flush-fncache', self.store.write)
912 921 self._transref = weakref.ref(tr)
913 922 return tr
914 923
915 924 def _journalfiles(self):
916 925 return ((self.svfs, 'journal'),
917 926 (self.vfs, 'journal.dirstate'),
918 927 (self.vfs, 'journal.branch'),
919 928 (self.vfs, 'journal.desc'),
920 929 (self.vfs, 'journal.bookmarks'),
921 930 (self.svfs, 'journal.phaseroots'))
922 931
923 932 def undofiles(self):
924 933 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
925 934
926 935 def _writejournal(self, desc):
927 936 self.opener.write("journal.dirstate",
928 937 self.opener.tryread("dirstate"))
929 938 self.opener.write("journal.branch",
930 939 encoding.fromlocal(self.dirstate.branch()))
931 940 self.opener.write("journal.desc",
932 941 "%d\n%s\n" % (len(self), desc))
933 942 self.opener.write("journal.bookmarks",
934 943 self.opener.tryread("bookmarks"))
935 944 self.sopener.write("journal.phaseroots",
936 945 self.sopener.tryread("phaseroots"))
937 946
938 947 def recover(self):
939 948 lock = self.lock()
940 949 try:
941 950 if self.svfs.exists("journal"):
942 951 self.ui.status(_("rolling back interrupted transaction\n"))
943 952 vfsmap = {'': self.sopener,
944 953 'plain': self.opener,}
945 954 transaction.rollback(self.sopener, vfsmap, "journal",
946 955 self.ui.warn)
947 956 self.invalidate()
948 957 return True
949 958 else:
950 959 self.ui.warn(_("no interrupted transaction available\n"))
951 960 return False
952 961 finally:
953 962 lock.release()
954 963
955 964 def rollback(self, dryrun=False, force=False):
956 965 wlock = lock = None
957 966 try:
958 967 wlock = self.wlock()
959 968 lock = self.lock()
960 969 if self.svfs.exists("undo"):
961 970 return self._rollback(dryrun, force)
962 971 else:
963 972 self.ui.warn(_("no rollback information available\n"))
964 973 return 1
965 974 finally:
966 975 release(lock, wlock)
967 976
968 977 @unfilteredmethod # Until we get smarter cache management
969 978 def _rollback(self, dryrun, force):
970 979 ui = self.ui
971 980 try:
972 981 args = self.opener.read('undo.desc').splitlines()
973 982 (oldlen, desc, detail) = (int(args[0]), args[1], None)
974 983 if len(args) >= 3:
975 984 detail = args[2]
976 985 oldtip = oldlen - 1
977 986
978 987 if detail and ui.verbose:
979 988 msg = (_('repository tip rolled back to revision %s'
980 989 ' (undo %s: %s)\n')
981 990 % (oldtip, desc, detail))
982 991 else:
983 992 msg = (_('repository tip rolled back to revision %s'
984 993 ' (undo %s)\n')
985 994 % (oldtip, desc))
986 995 except IOError:
987 996 msg = _('rolling back unknown transaction\n')
988 997 desc = None
989 998
990 999 if not force and self['.'] != self['tip'] and desc == 'commit':
991 1000 raise util.Abort(
992 1001 _('rollback of last commit while not checked out '
993 1002 'may lose data'), hint=_('use -f to force'))
994 1003
995 1004 ui.status(msg)
996 1005 if dryrun:
997 1006 return 0
998 1007
999 1008 parents = self.dirstate.parents()
1000 1009 self.destroying()
1001 1010 vfsmap = {'plain': self.opener}
1002 1011 transaction.rollback(self.sopener, vfsmap, 'undo', ui.warn)
1003 1012 if self.vfs.exists('undo.bookmarks'):
1004 1013 self.vfs.rename('undo.bookmarks', 'bookmarks')
1005 1014 if self.svfs.exists('undo.phaseroots'):
1006 1015 self.svfs.rename('undo.phaseroots', 'phaseroots')
1007 1016 self.invalidate()
1008 1017
1009 1018 parentgone = (parents[0] not in self.changelog.nodemap or
1010 1019 parents[1] not in self.changelog.nodemap)
1011 1020 if parentgone:
1012 1021 self.vfs.rename('undo.dirstate', 'dirstate')
1013 1022 try:
1014 1023 branch = self.opener.read('undo.branch')
1015 1024 self.dirstate.setbranch(encoding.tolocal(branch))
1016 1025 except IOError:
1017 1026 ui.warn(_('named branch could not be reset: '
1018 1027 'current branch is still \'%s\'\n')
1019 1028 % self.dirstate.branch())
1020 1029
1021 1030 self.dirstate.invalidate()
1022 1031 parents = tuple([p.rev() for p in self.parents()])
1023 1032 if len(parents) > 1:
1024 1033 ui.status(_('working directory now based on '
1025 1034 'revisions %d and %d\n') % parents)
1026 1035 else:
1027 1036 ui.status(_('working directory now based on '
1028 1037 'revision %d\n') % parents)
1029 1038 # TODO: if we know which new heads may result from this rollback, pass
1030 1039 # them to destroy(), which will prevent the branchhead cache from being
1031 1040 # invalidated.
1032 1041 self.destroyed()
1033 1042 return 0
1034 1043
1035 1044 def invalidatecaches(self):
1036 1045
1037 1046 if '_tagscache' in vars(self):
1038 1047 # can't use delattr on proxy
1039 1048 del self.__dict__['_tagscache']
1040 1049
1041 1050 self.unfiltered()._branchcaches.clear()
1042 1051 self.invalidatevolatilesets()
1043 1052
1044 1053 def invalidatevolatilesets(self):
1045 1054 self.filteredrevcache.clear()
1046 1055 obsolete.clearobscaches(self)
1047 1056
1048 1057 def invalidatedirstate(self):
1049 1058 '''Invalidates the dirstate, causing the next call to dirstate
1050 1059 to check if it was modified since the last time it was read,
1051 1060 rereading it if it has.
1052 1061
1053 1062 This is different from dirstate.invalidate() in that it doesn't
1054 1063 always reread the dirstate. Use dirstate.invalidate() if you want to
1055 1064 explicitly read the dirstate again (i.e. restoring it to a previous
1056 1065 known good state).'''
1057 1066 if hasunfilteredcache(self, 'dirstate'):
1058 1067 for k in self.dirstate._filecache:
1059 1068 try:
1060 1069 delattr(self.dirstate, k)
1061 1070 except AttributeError:
1062 1071 pass
1063 1072 delattr(self.unfiltered(), 'dirstate')
1064 1073
1065 1074 def invalidate(self):
1066 1075 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1067 1076 for k in self._filecache:
1068 1077 # dirstate is invalidated separately in invalidatedirstate()
1069 1078 if k == 'dirstate':
1070 1079 continue
1071 1080
1072 1081 try:
1073 1082 delattr(unfiltered, k)
1074 1083 except AttributeError:
1075 1084 pass
1076 1085 self.invalidatecaches()
1077 1086 self.store.invalidatecaches()
1078 1087
1079 1088 def invalidateall(self):
1080 1089 '''Fully invalidates both store and non-store parts, causing the
1081 1090 subsequent operation to reread any outside changes.'''
1082 1091 # extension should hook this to invalidate its caches
1083 1092 self.invalidate()
1084 1093 self.invalidatedirstate()
1085 1094
1086 1095 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1087 1096 try:
1088 1097 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1089 1098 except error.LockHeld, inst:
1090 1099 if not wait:
1091 1100 raise
1092 1101 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1093 1102 (desc, inst.locker))
1094 1103 # default to 600 seconds timeout
1095 1104 l = lockmod.lock(vfs, lockname,
1096 1105 int(self.ui.config("ui", "timeout", "600")),
1097 1106 releasefn, desc=desc)
1098 1107 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1099 1108 if acquirefn:
1100 1109 acquirefn()
1101 1110 return l
1102 1111
1103 1112 def _afterlock(self, callback):
1104 1113 """add a callback to the current repository lock.
1105 1114
1106 1115 The callback will be executed on lock release."""
1107 1116 l = self._lockref and self._lockref()
1108 1117 if l:
1109 1118 l.postrelease.append(callback)
1110 1119 else:
1111 1120 callback()
1112 1121
1113 1122 def lock(self, wait=True):
1114 1123 '''Lock the repository store (.hg/store) and return a weak reference
1115 1124 to the lock. Use this before modifying the store (e.g. committing or
1116 1125 stripping). If you are opening a transaction, get a lock as well.'''
1117 1126 l = self._lockref and self._lockref()
1118 1127 if l is not None and l.held:
1119 1128 l.lock()
1120 1129 return l
1121 1130
1122 1131 def unlock():
1123 1132 for k, ce in self._filecache.items():
1124 1133 if k == 'dirstate' or k not in self.__dict__:
1125 1134 continue
1126 1135 ce.refresh()
1127 1136
1128 1137 l = self._lock(self.svfs, "lock", wait, unlock,
1129 1138 self.invalidate, _('repository %s') % self.origroot)
1130 1139 self._lockref = weakref.ref(l)
1131 1140 return l
1132 1141
1133 1142 def wlock(self, wait=True):
1134 1143 '''Lock the non-store parts of the repository (everything under
1135 1144 .hg except .hg/store) and return a weak reference to the lock.
1136 1145 Use this before modifying files in .hg.'''
1137 1146 l = self._wlockref and self._wlockref()
1138 1147 if l is not None and l.held:
1139 1148 l.lock()
1140 1149 return l
1141 1150
1142 1151 def unlock():
1143 1152 if self.dirstate.pendingparentchange():
1144 1153 self.dirstate.invalidate()
1145 1154 else:
1146 1155 self.dirstate.write()
1147 1156
1148 1157 self._filecache['dirstate'].refresh()
1149 1158
1150 1159 l = self._lock(self.vfs, "wlock", wait, unlock,
1151 1160 self.invalidatedirstate, _('working directory of %s') %
1152 1161 self.origroot)
1153 1162 self._wlockref = weakref.ref(l)
1154 1163 return l
1155 1164
1156 1165 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1157 1166 """
1158 1167 commit an individual file as part of a larger transaction
1159 1168 """
1160 1169
1161 1170 fname = fctx.path()
1162 1171 text = fctx.data()
1163 1172 flog = self.file(fname)
1164 1173 fparent1 = manifest1.get(fname, nullid)
1165 1174 fparent2 = manifest2.get(fname, nullid)
1166 1175
1167 1176 meta = {}
1168 1177 copy = fctx.renamed()
1169 1178 if copy and copy[0] != fname:
1170 1179 # Mark the new revision of this file as a copy of another
1171 1180 # file. This copy data will effectively act as a parent
1172 1181 # of this new revision. If this is a merge, the first
1173 1182 # parent will be the nullid (meaning "look up the copy data")
1174 1183 # and the second one will be the other parent. For example:
1175 1184 #
1176 1185 # 0 --- 1 --- 3 rev1 changes file foo
1177 1186 # \ / rev2 renames foo to bar and changes it
1178 1187 # \- 2 -/ rev3 should have bar with all changes and
1179 1188 # should record that bar descends from
1180 1189 # bar in rev2 and foo in rev1
1181 1190 #
1182 1191 # this allows this merge to succeed:
1183 1192 #
1184 1193 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1185 1194 # \ / merging rev3 and rev4 should use bar@rev2
1186 1195 # \- 2 --- 4 as the merge base
1187 1196 #
1188 1197
1189 1198 cfname = copy[0]
1190 1199 crev = manifest1.get(cfname)
1191 1200 newfparent = fparent2
1192 1201
1193 1202 if manifest2: # branch merge
1194 1203 if fparent2 == nullid or crev is None: # copied on remote side
1195 1204 if cfname in manifest2:
1196 1205 crev = manifest2[cfname]
1197 1206 newfparent = fparent1
1198 1207
1199 1208 # find source in nearest ancestor if we've lost track
1200 1209 if not crev:
1201 1210 self.ui.debug(" %s: searching for copy revision for %s\n" %
1202 1211 (fname, cfname))
1203 1212 for ancestor in self[None].ancestors():
1204 1213 if cfname in ancestor:
1205 1214 crev = ancestor[cfname].filenode()
1206 1215 break
1207 1216
1208 1217 if crev:
1209 1218 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1210 1219 meta["copy"] = cfname
1211 1220 meta["copyrev"] = hex(crev)
1212 1221 fparent1, fparent2 = nullid, newfparent
1213 1222 else:
1214 1223 self.ui.warn(_("warning: can't find ancestor for '%s' "
1215 1224 "copied from '%s'!\n") % (fname, cfname))
1216 1225
1217 1226 elif fparent1 == nullid:
1218 1227 fparent1, fparent2 = fparent2, nullid
1219 1228 elif fparent2 != nullid:
1220 1229 # is one parent an ancestor of the other?
1221 1230 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1222 1231 if fparent1 in fparentancestors:
1223 1232 fparent1, fparent2 = fparent2, nullid
1224 1233 elif fparent2 in fparentancestors:
1225 1234 fparent2 = nullid
1226 1235
1227 1236 # is the file changed?
1228 1237 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1229 1238 changelist.append(fname)
1230 1239 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1231 1240 # are just the flags changed during merge?
1232 1241 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1233 1242 changelist.append(fname)
1234 1243
1235 1244 return fparent1
1236 1245
1237 1246 @unfilteredmethod
1238 1247 def commit(self, text="", user=None, date=None, match=None, force=False,
1239 1248 editor=False, extra={}):
1240 1249 """Add a new revision to current repository.
1241 1250
1242 1251 Revision information is gathered from the working directory,
1243 1252 match can be used to filter the committed files. If editor is
1244 1253 supplied, it is called to get a commit message.
1245 1254 """
1246 1255
1247 1256 def fail(f, msg):
1248 1257 raise util.Abort('%s: %s' % (f, msg))
1249 1258
1250 1259 if not match:
1251 1260 match = matchmod.always(self.root, '')
1252 1261
1253 1262 if not force:
1254 1263 vdirs = []
1255 1264 match.explicitdir = vdirs.append
1256 1265 match.bad = fail
1257 1266
1258 1267 wlock = self.wlock()
1259 1268 try:
1260 1269 wctx = self[None]
1261 1270 merge = len(wctx.parents()) > 1
1262 1271
1263 1272 if (not force and merge and match and
1264 1273 (match.files() or match.anypats())):
1265 1274 raise util.Abort(_('cannot partially commit a merge '
1266 1275 '(do not specify files or patterns)'))
1267 1276
1268 1277 status = self.status(match=match, clean=force)
1269 1278 if force:
1270 1279 status.modified.extend(status.clean) # mq may commit clean files
1271 1280
1272 1281 # check subrepos
1273 1282 subs = []
1274 1283 commitsubs = set()
1275 1284 newstate = wctx.substate.copy()
1276 1285 # only manage subrepos and .hgsubstate if .hgsub is present
1277 1286 if '.hgsub' in wctx:
1278 1287 # we'll decide whether to track this ourselves, thanks
1279 1288 for c in status.modified, status.added, status.removed:
1280 1289 if '.hgsubstate' in c:
1281 1290 c.remove('.hgsubstate')
1282 1291
1283 1292 # compare current state to last committed state
1284 1293 # build new substate based on last committed state
1285 1294 oldstate = wctx.p1().substate
1286 1295 for s in sorted(newstate.keys()):
1287 1296 if not match(s):
1288 1297 # ignore working copy, use old state if present
1289 1298 if s in oldstate:
1290 1299 newstate[s] = oldstate[s]
1291 1300 continue
1292 1301 if not force:
1293 1302 raise util.Abort(
1294 1303 _("commit with new subrepo %s excluded") % s)
1295 1304 if wctx.sub(s).dirty(True):
1296 1305 if not self.ui.configbool('ui', 'commitsubrepos'):
1297 1306 raise util.Abort(
1298 1307 _("uncommitted changes in subrepo %s") % s,
1299 1308 hint=_("use --subrepos for recursive commit"))
1300 1309 subs.append(s)
1301 1310 commitsubs.add(s)
1302 1311 else:
1303 1312 bs = wctx.sub(s).basestate()
1304 1313 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1305 1314 if oldstate.get(s, (None, None, None))[1] != bs:
1306 1315 subs.append(s)
1307 1316
1308 1317 # check for removed subrepos
1309 1318 for p in wctx.parents():
1310 1319 r = [s for s in p.substate if s not in newstate]
1311 1320 subs += [s for s in r if match(s)]
1312 1321 if subs:
1313 1322 if (not match('.hgsub') and
1314 1323 '.hgsub' in (wctx.modified() + wctx.added())):
1315 1324 raise util.Abort(
1316 1325 _("can't commit subrepos without .hgsub"))
1317 1326 status.modified.insert(0, '.hgsubstate')
1318 1327
1319 1328 elif '.hgsub' in status.removed:
1320 1329 # clean up .hgsubstate when .hgsub is removed
1321 1330 if ('.hgsubstate' in wctx and
1322 1331 '.hgsubstate' not in (status.modified + status.added +
1323 1332 status.removed)):
1324 1333 status.removed.insert(0, '.hgsubstate')
1325 1334
1326 1335 # make sure all explicit patterns are matched
1327 1336 if not force and match.files():
1328 1337 matched = set(status.modified + status.added + status.removed)
1329 1338
1330 1339 for f in match.files():
1331 1340 f = self.dirstate.normalize(f)
1332 1341 if f == '.' or f in matched or f in wctx.substate:
1333 1342 continue
1334 1343 if f in status.deleted:
1335 1344 fail(f, _('file not found!'))
1336 1345 if f in vdirs: # visited directory
1337 1346 d = f + '/'
1338 1347 for mf in matched:
1339 1348 if mf.startswith(d):
1340 1349 break
1341 1350 else:
1342 1351 fail(f, _("no match under directory!"))
1343 1352 elif f not in self.dirstate:
1344 1353 fail(f, _("file not tracked!"))
1345 1354
1346 1355 cctx = context.workingcommitctx(self, status,
1347 1356 text, user, date, extra)
1348 1357
1349 1358 if (not force and not extra.get("close") and not merge
1350 1359 and not cctx.files()
1351 1360 and wctx.branch() == wctx.p1().branch()):
1352 1361 return None
1353 1362
1354 1363 if merge and cctx.deleted():
1355 1364 raise util.Abort(_("cannot commit merge with missing files"))
1356 1365
1357 1366 ms = mergemod.mergestate(self)
1358 1367 for f in status.modified:
1359 1368 if f in ms and ms[f] == 'u':
1360 1369 raise util.Abort(_("unresolved merge conflicts "
1361 1370 "(see hg help resolve)"))
1362 1371
1363 1372 if editor:
1364 1373 cctx._text = editor(self, cctx, subs)
1365 1374 edited = (text != cctx._text)
1366 1375
1367 1376 # Save commit message in case this transaction gets rolled back
1368 1377 # (e.g. by a pretxncommit hook). Leave the content alone on
1369 1378 # the assumption that the user will use the same editor again.
1370 1379 msgfn = self.savecommitmessage(cctx._text)
1371 1380
1372 1381 # commit subs and write new state
1373 1382 if subs:
1374 1383 for s in sorted(commitsubs):
1375 1384 sub = wctx.sub(s)
1376 1385 self.ui.status(_('committing subrepository %s\n') %
1377 1386 subrepo.subrelpath(sub))
1378 1387 sr = sub.commit(cctx._text, user, date)
1379 1388 newstate[s] = (newstate[s][0], sr)
1380 1389 subrepo.writestate(self, newstate)
1381 1390
1382 1391 p1, p2 = self.dirstate.parents()
1383 1392 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1384 1393 try:
1385 1394 self.hook("precommit", throw=True, parent1=hookp1,
1386 1395 parent2=hookp2)
1387 1396 ret = self.commitctx(cctx, True)
1388 1397 except: # re-raises
1389 1398 if edited:
1390 1399 self.ui.write(
1391 1400 _('note: commit message saved in %s\n') % msgfn)
1392 1401 raise
1393 1402
1394 1403 # update bookmarks, dirstate and mergestate
1395 1404 bookmarks.update(self, [p1, p2], ret)
1396 1405 cctx.markcommitted(ret)
1397 1406 ms.reset()
1398 1407 finally:
1399 1408 wlock.release()
1400 1409
1401 1410 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1402 1411 # hack for commands that use a temporary commit (e.g. histedit):
1403 1412 # the temporary commit may already have been stripped when the hook runs
1404 1413 if node in self:
1405 1414 self.hook("commit", node=node, parent1=parent1,
1406 1415 parent2=parent2)
1407 1416 self._afterlock(commithook)
1408 1417 return ret
1409 1418
1410 1419 @unfilteredmethod
1411 1420 def commitctx(self, ctx, error=False):
1412 1421 """Add a new revision to current repository.
1413 1422 Revision information is passed via the context argument.
1414 1423 """
1415 1424
1416 1425 tr = None
1417 1426 p1, p2 = ctx.p1(), ctx.p2()
1418 1427 user = ctx.user()
1419 1428
1420 1429 lock = self.lock()
1421 1430 try:
1422 1431 tr = self.transaction("commit")
1423 1432 trp = weakref.proxy(tr)
1424 1433
1425 1434 if ctx.files():
1426 1435 m1 = p1.manifest()
1427 1436 m2 = p2.manifest()
1428 1437 m = m1.copy()
1429 1438
1430 1439 # check in files
1431 1440 added = []
1432 1441 changed = []
1433 1442 removed = list(ctx.removed())
1434 1443 linkrev = len(self)
1435 1444 self.ui.note(_("committing files:\n"))
1436 1445 for f in sorted(ctx.modified() + ctx.added()):
1437 1446 self.ui.note(f + "\n")
1438 1447 try:
1439 1448 fctx = ctx[f]
1440 1449 if fctx is None:
1441 1450 removed.append(f)
1442 1451 else:
1443 1452 added.append(f)
1444 1453 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1445 1454 trp, changed)
1446 1455 m.setflag(f, fctx.flags())
1447 1456 except OSError, inst:
1448 1457 self.ui.warn(_("trouble committing %s!\n") % f)
1449 1458 raise
1450 1459 except IOError, inst:
1451 1460 errcode = getattr(inst, 'errno', errno.ENOENT)
1452 1461 if error or errcode and errcode != errno.ENOENT:
1453 1462 self.ui.warn(_("trouble committing %s!\n") % f)
1454 1463 raise
1455 1464
1456 1465 # update manifest
1457 1466 self.ui.note(_("committing manifest\n"))
1458 1467 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1459 1468 drop = [f for f in removed if f in m]
1460 1469 for f in drop:
1461 1470 del m[f]
1462 1471 mn = self.manifest.add(m, trp, linkrev,
1463 1472 p1.manifestnode(), p2.manifestnode(),
1464 1473 added, drop)
1465 1474 files = changed + removed
1466 1475 else:
1467 1476 mn = p1.manifestnode()
1468 1477 files = []
1469 1478
1470 1479 # update changelog
1471 1480 self.ui.note(_("committing changelog\n"))
1472 1481 self.changelog.delayupdate(tr)
1473 1482 n = self.changelog.add(mn, files, ctx.description(),
1474 1483 trp, p1.node(), p2.node(),
1475 1484 user, ctx.date(), ctx.extra().copy())
1476 1485 p = lambda: tr.writepending() and self.root or ""
1477 1486 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1478 1487 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1479 1488 parent2=xp2, pending=p)
1480 1489 # set the new commit in its proper phase
1481 1490 targetphase = subrepo.newcommitphase(self.ui, ctx)
1482 1491 if targetphase:
1483 1492 # retracting the boundary does not alter parent changesets.
1484 1493 # if a parent has a higher phase, the resulting phase will
1485 1494 # be compliant anyway
1486 1495 #
1487 1496 # if minimal phase was 0 we don't need to retract anything
1488 1497 phases.retractboundary(self, tr, targetphase, [n])
1489 1498 tr.close()
1490 1499 branchmap.updatecache(self.filtered('served'))
1491 1500 return n
1492 1501 finally:
1493 1502 if tr:
1494 1503 tr.release()
1495 1504 lock.release()
1496 1505
1497 1506 @unfilteredmethod
1498 1507 def destroying(self):
1499 1508 '''Inform the repository that nodes are about to be destroyed.
1500 1509 Intended for use by strip and rollback, so there's a common
1501 1510 place for anything that has to be done before destroying history.
1502 1511
1503 1512 This is mostly useful for saving state that is in memory and waiting
1504 1513 to be flushed when the current lock is released. Because a call to
1505 1514 destroyed is imminent, the repo will be invalidated causing those
1506 1515 changes to stay in memory (waiting for the next unlock), or vanish
1507 1516 completely.
1508 1517 '''
1509 1518 # When using the same lock to commit and strip, the phasecache is left
1510 1519 # dirty after committing. Then when we strip, the repo is invalidated,
1511 1520 # causing those changes to disappear.
1512 1521 if '_phasecache' in vars(self):
1513 1522 self._phasecache.write()
1514 1523
1515 1524 @unfilteredmethod
1516 1525 def destroyed(self):
1517 1526 '''Inform the repository that nodes have been destroyed.
1518 1527 Intended for use by strip and rollback, so there's a common
1519 1528 place for anything that has to be done after destroying history.
1520 1529 '''
1521 1530 # When one tries to:
1522 1531 # 1) destroy nodes thus calling this method (e.g. strip)
1523 1532 # 2) use phasecache somewhere (e.g. commit)
1524 1533 #
1525 1534 # then 2) will fail because the phasecache contains nodes that were
1526 1535 # removed. We can either remove phasecache from the filecache,
1527 1536 # causing it to reload next time it is accessed, or simply filter
1528 1537 # the removed nodes now and write the updated cache.
1529 1538 self._phasecache.filterunknown(self)
1530 1539 self._phasecache.write()
1531 1540
1532 1541 # update the 'served' branch cache to help read-only server processes
1533 1542 # Thanks to branchcache collaboration this is done from the nearest
1534 1543 # filtered subset and it is expected to be fast.
1535 1544 branchmap.updatecache(self.filtered('served'))
1536 1545
1537 1546 # Ensure the persistent tag cache is updated. Doing it now
1538 1547 # means that the tag cache only has to worry about destroyed
1539 1548 # heads immediately after a strip/rollback. That in turn
1540 1549 # guarantees that "cachetip == currenttip" (comparing both rev
1541 1550 # and node) always means no nodes have been added or destroyed.
1542 1551
1543 1552 # XXX this is suboptimal when qrefresh'ing: we strip the current
1544 1553 # head, refresh the tag cache, then immediately add a new head.
1545 1554 # But I think doing it this way is necessary for the "instant
1546 1555 # tag cache retrieval" case to work.
1547 1556 self.invalidate()
1548 1557
1549 1558 def walk(self, match, node=None):
1550 1559 '''
1551 1560 walk recursively through the directory tree or a given
1552 1561 changeset, finding all files matched by the match
1553 1562 function
1554 1563 '''
1555 1564 return self[node].walk(match)
1556 1565
1557 1566 def status(self, node1='.', node2=None, match=None,
1558 1567 ignored=False, clean=False, unknown=False,
1559 1568 listsubrepos=False):
1560 1569 '''a convenience method that calls node1.status(node2)'''
1561 1570 return self[node1].status(node2, match, ignored, clean, unknown,
1562 1571 listsubrepos)
1563 1572
1564 1573 def heads(self, start=None):
1565 1574 heads = self.changelog.heads(start)
1566 1575 # sort the output in rev descending order
1567 1576 return sorted(heads, key=self.changelog.rev, reverse=True)
1568 1577
1569 1578 def branchheads(self, branch=None, start=None, closed=False):
1570 1579 '''return a (possibly filtered) list of heads for the given branch
1571 1580
1572 1581 Heads are returned in topological order, from newest to oldest.
1573 1582 If branch is None, use the dirstate branch.
1574 1583 If start is not None, return only heads reachable from start.
1575 1584 If closed is True, return heads that are marked as closed as well.
1576 1585 '''
1577 1586 if branch is None:
1578 1587 branch = self[None].branch()
1579 1588 branches = self.branchmap()
1580 1589 if branch not in branches:
1581 1590 return []
1582 1591 # the cache returns heads ordered lowest to highest
1583 1592 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1584 1593 if start is not None:
1585 1594 # filter out the heads that cannot be reached from startrev
1586 1595 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1587 1596 bheads = [h for h in bheads if h in fbheads]
1588 1597 return bheads
1589 1598
1590 1599 def branches(self, nodes):
1591 1600 if not nodes:
1592 1601 nodes = [self.changelog.tip()]
1593 1602 b = []
1594 1603 for n in nodes:
1595 1604 t = n
1596 1605 while True:
1597 1606 p = self.changelog.parents(n)
1598 1607 if p[1] != nullid or p[0] == nullid:
1599 1608 b.append((t, n, p[0], p[1]))
1600 1609 break
1601 1610 n = p[0]
1602 1611 return b
1603 1612
1604 1613 def between(self, pairs):
1605 1614 r = []
1606 1615
1607 1616 for top, bottom in pairs:
1608 1617 n, l, i = top, [], 0
1609 1618 f = 1
1610 1619
1611 1620 while n != bottom and n != nullid:
1612 1621 p = self.changelog.parents(n)[0]
1613 1622 if i == f:
1614 1623 l.append(n)
1615 1624 f = f * 2
1616 1625 n = p
1617 1626 i += 1
1618 1627
1619 1628 r.append(l)
1620 1629
1621 1630 return r
1622 1631
1623 1632 def checkpush(self, pushop):
1624 1633 """Extensions can override this function if additional checks have
1625 1634 to be performed before pushing, or call it if they override push
1626 1635 command.
1627 1636 """
1628 1637 pass
1629 1638
1630 1639 @unfilteredpropertycache
1631 1640 def prepushoutgoinghooks(self):
1632 1641 """Return util.hooks consists of "(repo, remote, outgoing)"
1633 1642 functions, which are called before pushing changesets.
1634 1643 """
1635 1644 return util.hooks()
1636 1645
1637 1646 def stream_in(self, remote, requirements):
1638 1647 lock = self.lock()
1639 1648 try:
1640 1649 # Save remote branchmap. We will use it later
1641 1650 # to speed up branchcache creation
1642 1651 rbranchmap = None
1643 1652 if remote.capable("branchmap"):
1644 1653 rbranchmap = remote.branchmap()
1645 1654
1646 1655 fp = remote.stream_out()
1647 1656 l = fp.readline()
1648 1657 try:
1649 1658 resp = int(l)
1650 1659 except ValueError:
1651 1660 raise error.ResponseError(
1652 1661 _('unexpected response from remote server:'), l)
1653 1662 if resp == 1:
1654 1663 raise util.Abort(_('operation forbidden by server'))
1655 1664 elif resp == 2:
1656 1665 raise util.Abort(_('locking the remote repository failed'))
1657 1666 elif resp != 0:
1658 1667 raise util.Abort(_('the server sent an unknown error code'))
1659 1668 self.ui.status(_('streaming all changes\n'))
1660 1669 l = fp.readline()
1661 1670 try:
1662 1671 total_files, total_bytes = map(int, l.split(' ', 1))
1663 1672 except (ValueError, TypeError):
1664 1673 raise error.ResponseError(
1665 1674 _('unexpected response from remote server:'), l)
1666 1675 self.ui.status(_('%d files to transfer, %s of data\n') %
1667 1676 (total_files, util.bytecount(total_bytes)))
1668 1677 handled_bytes = 0
1669 1678 self.ui.progress(_('clone'), 0, total=total_bytes)
1670 1679 start = time.time()
1671 1680
1672 1681 tr = self.transaction(_('clone'))
1673 1682 try:
1674 1683 for i in xrange(total_files):
1675 1684 # XXX doesn't support '\n' or '\r' in filenames
1676 1685 l = fp.readline()
1677 1686 try:
1678 1687 name, size = l.split('\0', 1)
1679 1688 size = int(size)
1680 1689 except (ValueError, TypeError):
1681 1690 raise error.ResponseError(
1682 1691 _('unexpected response from remote server:'), l)
1683 1692 if self.ui.debugflag:
1684 1693 self.ui.debug('adding %s (%s)\n' %
1685 1694 (name, util.bytecount(size)))
1686 1695 # for backwards compat, name was partially encoded
1687 1696 ofp = self.sopener(store.decodedir(name), 'w')
1688 1697 for chunk in util.filechunkiter(fp, limit=size):
1689 1698 handled_bytes += len(chunk)
1690 1699 self.ui.progress(_('clone'), handled_bytes,
1691 1700 total=total_bytes)
1692 1701 ofp.write(chunk)
1693 1702 ofp.close()
1694 1703 tr.close()
1695 1704 finally:
1696 1705 tr.release()
1697 1706
1698 1707 # Writing straight to files circumvented the in-memory caches
1699 1708 self.invalidate()
1700 1709
1701 1710 elapsed = time.time() - start
1702 1711 if elapsed <= 0:
1703 1712 elapsed = 0.001
1704 1713 self.ui.progress(_('clone'), None)
1705 1714 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1706 1715 (util.bytecount(total_bytes), elapsed,
1707 1716 util.bytecount(total_bytes / elapsed)))
1708 1717
1709 1718 # new requirements = old non-format requirements +
1710 1719 # new format-related requirements
1711 1720 # from the streamed-in repository
1712 1721 requirements.update(set(self.requirements) - self.supportedformats)
1713 1722 self._applyrequirements(requirements)
1714 1723 self._writerequirements()
1715 1724
1716 1725 if rbranchmap:
1717 1726 rbheads = []
1718 1727 closed = []
1719 1728 for bheads in rbranchmap.itervalues():
1720 1729 rbheads.extend(bheads)
1721 1730 for h in bheads:
1722 1731 r = self.changelog.rev(h)
1723 1732 b, c = self.changelog.branchinfo(r)
1724 1733 if c:
1725 1734 closed.append(h)
1726 1735
1727 1736 if rbheads:
1728 1737 rtiprev = max((int(self.changelog.rev(node))
1729 1738 for node in rbheads))
1730 1739 cache = branchmap.branchcache(rbranchmap,
1731 1740 self[rtiprev].node(),
1732 1741 rtiprev,
1733 1742 closednodes=closed)
1734 1743 # Try to stick it as low as possible
1735 1744 # filters above 'served' are unlikely to be fetched from a clone
1736 1745 for candidate in ('base', 'immutable', 'served'):
1737 1746 rview = self.filtered(candidate)
1738 1747 if cache.validfor(rview):
1739 1748 self._branchcaches[candidate] = cache
1740 1749 cache.write(rview)
1741 1750 break
1742 1751 self.invalidate()
1743 1752 return len(self.heads()) + 1
1744 1753 finally:
1745 1754 lock.release()
1746 1755
1747 1756 def clone(self, remote, heads=[], stream=None):
1748 1757 '''clone remote repository.
1749 1758
1750 1759 keyword arguments:
1751 1760 heads: list of revs to clone (forces use of pull)
1752 1761 stream: use streaming clone if possible'''
1753 1762
1754 1763 # now, all clients that can request uncompressed clones can
1755 1764 # read repo formats supported by all servers that can serve
1756 1765 # them.
1757 1766
1758 1767 # if revlog format changes, client will have to check version
1759 1768 # and format flags on "stream" capability, and use
1760 1769 # uncompressed only if compatible.
1761 1770
1762 1771 if stream is None:
1763 1772 # if the server explicitly prefers to stream (for fast LANs)
1764 1773 stream = remote.capable('stream-preferred')
1765 1774
1766 1775 if stream and not heads:
1767 1776 # 'stream' means remote revlog format is revlogv1 only
1768 1777 if remote.capable('stream'):
1769 1778 self.stream_in(remote, set(('revlogv1',)))
1770 1779 else:
1771 1780 # otherwise, 'streamreqs' contains the remote revlog format
1772 1781 streamreqs = remote.capable('streamreqs')
1773 1782 if streamreqs:
1774 1783 streamreqs = set(streamreqs.split(','))
1775 1784 # if we support it, stream in and adjust our requirements
1776 1785 if not streamreqs - self.supportedformats:
1777 1786 self.stream_in(remote, streamreqs)
1778 1787
1779 1788 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1780 1789 try:
1781 1790 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1782 1791 ret = exchange.pull(self, remote, heads).cgresult
1783 1792 finally:
1784 1793 self.ui.restoreconfig(quiet)
1785 1794 return ret
1786 1795
1787 1796 def pushkey(self, namespace, key, old, new):
1788 1797 try:
1789 1798 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1790 1799 old=old, new=new)
1791 1800 except error.HookAbort, exc:
1792 1801 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1793 1802 if exc.hint:
1794 1803 self.ui.write_err(_("(%s)\n") % exc.hint)
1795 1804 return False
1796 1805 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1797 1806 ret = pushkey.push(self, namespace, key, old, new)
1798 1807 def runhook():
1799 1808 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1800 1809 ret=ret)
1801 1810 self._afterlock(runhook)
1802 1811 return ret
1803 1812
1804 1813 def listkeys(self, namespace):
1805 1814 self.hook('prelistkeys', throw=True, namespace=namespace)
1806 1815 self.ui.debug('listing keys for "%s"\n' % namespace)
1807 1816 values = pushkey.list(self, namespace)
1808 1817 self.hook('listkeys', namespace=namespace, values=values)
1809 1818 return values
1810 1819
1811 1820 def debugwireargs(self, one, two, three=None, four=None, five=None):
1812 1821 '''used to test argument passing over the wire'''
1813 1822 return "%s %s %s %s %s" % (one, two, three, four, five)
1814 1823
1815 1824 def savecommitmessage(self, text):
1816 1825 fp = self.opener('last-message.txt', 'wb')
1817 1826 try:
1818 1827 fp.write(text)
1819 1828 finally:
1820 1829 fp.close()
1821 1830 return self.pathto(fp.name[len(self.root) + 1:])
1822 1831
1823 1832 # used to avoid circular references so destructors work
1824 1833 def aftertrans(files):
1825 1834 renamefiles = [tuple(t) for t in files]
1826 1835 def a():
1827 1836 for vfs, src, dest in renamefiles:
1828 1837 try:
1829 1838 vfs.rename(src, dest)
1830 1839 except OSError: # journal file does not yet exist
1831 1840 pass
1832 1841 return a
1833 1842
1834 1843 def undoname(fn):
1835 1844 base, name = os.path.split(fn)
1836 1845 assert name.startswith('journal')
1837 1846 return os.path.join(base, name.replace('journal', 'undo', 1))
1838 1847
1839 1848 def instance(ui, path, create):
1840 1849 return localrepository(ui, util.urllocalpath(path), create)
1841 1850
1842 1851 def islocal(path):
1843 1852 return True
@@ -1,149 +1,149 b''
1 1 from i18n import _
2 2 from mercurial import util
3 3 import templatekw
4 4
5 5 def tolist(val):
6 6 """
7 7 a convenience function that returns [val], or an empty list if val is None
8 8 """
9 9 if val is None:
10 10 return []
11 11 else:
12 12 return [val]
13 13
14 14 class namespaces(object):
15 15 """provides an interface to register and operate on multiple namespaces. See
16 16 the namespace class below for details on the namespace object.
17 17
18 18 """
19 19
20 20 _names_version = 0
21 21
22 22 def __init__(self):
23 23 self._names = util.sortdict()
24 24
25 25 # shorten the class name for less indentation
26 26 ns = namespace
27 27
28 28 # we need current mercurial named objects (bookmarks, tags, and
29 29 # branches) to be initialized somewhere, so that place is here
30 30 n = ns("bookmarks", "bookmark",
31 31 lambda repo: repo._bookmarks.keys(),
32 32 lambda repo, name: tolist(repo._bookmarks.get(name)),
33 33 lambda repo, name: repo.nodebookmarks(name))
34 34 self.addnamespace(n)
35 35
36 36 n = ns("tags", "tag",
37 37 lambda repo: [t for t, n in repo.tagslist()],
38 38 lambda repo, name: tolist(repo._tagscache.tags.get(name)),
39 39 lambda repo, name: repo.nodetags(name))
40 40 self.addnamespace(n)
41 41
42 42 n = ns("branches", "branch",
43 43 lambda repo: repo.branchmap().keys(),
44 lambda repo, name: tolist(repo.branchtip(name)),
44 lambda repo, name: tolist(repo.branchtip(name, True)),
45 45 lambda repo, node: [repo[node].branch()])
46 46 self.addnamespace(n)
47 47
48 48 def __getitem__(self, namespace):
49 49 """returns the namespace object"""
50 50 return self._names[namespace]
51 51
52 52 def __iter__(self):
53 53 return self._names.__iter__()
54 54
55 55 def iteritems(self):
56 56 return self._names.iteritems()
57 57
58 58 def addnamespace(self, namespace, order=None):
59 59 """register a namespace
60 60
61 61 namespace: the namespace object to be registered
62 62 order: optional argument to specify the order of namespaces
63 63 (e.g. 'branches' should be listed before 'bookmarks')
64 64
65 65 """
66 66 if order is not None:
67 67 self._names.insert(order, namespace.name, namespace)
68 68 else:
69 69 self._names[namespace.name] = namespace
70 70
71 71 # we only generate a template keyword if one does not already exist
72 72 if namespace.name not in templatekw.keywords:
73 73 def generatekw(**args):
74 74 return templatekw.shownames(namespace.name, **args)
75 75
76 76 templatekw.keywords[namespace.name] = generatekw
77 77
78 78 def singlenode(self, repo, name):
79 79 """
80 80 Return the 'best' node for the given name. Best means the node from the
81 81 first nonempty list returned by a name-to-nodes mapping function in the
82 82 defined precedence order; among several candidates, the highest revision wins.
83 83
84 84 Raises a KeyError if there is no such node.
85 85 """
86 86 for ns, v in self._names.iteritems():
87 87 n = v.namemap(repo, name)
88 88 if n:
89 89 # return max revision number
90 90 if len(n) > 1:
91 91 cl = repo.changelog
92 92 maxrev = max(cl.rev(node) for node in n)
93 93 return cl.node(maxrev)
94 94 return n[0]
95 95 raise KeyError(_('no such name: %s') % name)
96 96
97 97 class namespace(object):
98 98 """provides an interface to a namespace
99 99
100 100 Namespaces are basically a generic many-to-many mapping between some
101 101 (namespaced) names and nodes. The goal here is to control the pollution of
102 102 jamming things into tags or bookmarks (in extension-land) and to simplify
103 103 internal bits of mercurial: log output, tab completion, etc.
104 104
105 105 More precisely, we define a mapping of names to nodes, and a mapping from
106 106 nodes to names. Each mapping returns a list.
107 107
108 108 Furthermore, each name mapping will be passed a name to look up which might
109 109 not be in its domain. In this case, each method should return an empty list
110 110 and not raise an error.
111 111
112 112 This namespace object will define the properties we need:
113 113 'name': the namespace (plural form)
114 114 'templatename': name to use for templating (usually the singular form
115 115 of the plural namespace name)
116 116 'listnames': list of all names in the namespace (usually the keys of a
117 117 dictionary)
118 118 'namemap': function that takes a name and returns a list of nodes
119 119 'nodemap': function that takes a node and returns a list of names
120 120
121 121 """
122 122
123 123 def __init__(self, name, templatename, listnames, namemap, nodemap):
124 124 """create a namespace
125 125
126 126 name: the namespace to be registered (in plural form)
127 127 templatename: the name to use for templating
128 128 listnames: function to list all names in the namespace
129 129 namemap: function that takes a name and returns a list of nodes
130 130 nodemap: function that takes a node and returns a list of names
131 131
132 132 """
133 133 self.name = name
134 134 self.templatename = templatename
135 135 self.listnames = listnames
136 136 self.namemap = namemap
137 137 self.nodemap = nodemap
138 138
139 139 def names(self, repo, node):
140 140 """method that returns a (sorted) list of names in a namespace that
141 141 match a given node"""
142 142 return sorted(self.nodemap(repo, node))
143 143
144 144 def nodes(self, repo, name):
145 145 """method that returns a list of nodes in a namespace that
146 146 match a given name.
147 147
148 148 """
149 149 return sorted(self.namemap(repo, name))