localrepo: improve docstring for revset methods...
Gregory Szorc
r27071:dfb31eeb default
@@ -1,1930 +1,1938 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, wdirrev, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset, cmdutil
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.getunbundler(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 try:
129 129 cg = exchange.readbundle(self.ui, cg, None)
130 130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 131 if util.safehasattr(ret, 'getchunks'):
132 132 # This is a bundle20 object, turn it into an unbundler.
133 133 # This little dance should be dropped eventually when the
134 134 # API is finally improved.
135 135 stream = util.chunkbuffer(ret.getchunks())
136 136 ret = bundle2.getunbundler(self.ui, stream)
137 137 return ret
138 138 except Exception as exc:
139 139 # If the exception contains output salvaged from a bundle2
140 140 # reply, we need to make sure it is printed before continuing
141 141 # to fail. So we build a bundle2 with such output and consume
142 142 # it directly.
143 143 #
144 144 # This is not very elegant but allows a "simple" solution for
145 145 # issue4594
146 146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 147 if output:
148 148 bundler = bundle2.bundle20(self._repo.ui)
149 149 for out in output:
150 150 bundler.addpart(out)
151 151 stream = util.chunkbuffer(bundler.getchunks())
152 152 b = bundle2.getunbundler(self.ui, stream)
153 153 bundle2.processbundle(self._repo, b)
154 154 raise
155 155 except error.PushRaced as exc:
156 156 raise error.ResponseError(_('push failed:'), str(exc))
157 157
158 158 def lock(self):
159 159 return self._repo.lock()
160 160
161 161 def addchangegroup(self, cg, source, url):
162 162 return cg.apply(self._repo, source, url)
163 163
164 164 def pushkey(self, namespace, key, old, new):
165 165 return self._repo.pushkey(namespace, key, old, new)
166 166
167 167 def listkeys(self, namespace):
168 168 return self._repo.listkeys(namespace)
169 169
170 170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 171 '''used to test argument passing over the wire'''
172 172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
174 174 class locallegacypeer(localpeer):
175 175 '''peer extension which implements legacy methods too; used for tests with
176 176 restricted capabilities'''
177 177
178 178 def __init__(self, repo):
179 179 localpeer.__init__(self, repo, caps=legacycaps)
180 180
181 181 def branches(self, nodes):
182 182 return self._repo.branches(nodes)
183 183
184 184 def between(self, pairs):
185 185 return self._repo.between(pairs)
186 186
187 187 def changegroup(self, basenodes, source):
188 188 return changegroup.changegroup(self._repo, basenodes, source)
189 189
190 190 def changegroupsubset(self, bases, heads, source):
191 191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 196 'manifestv2'))
197 197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 198 'dotencode'))
199 199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 200 filtername = None
201 201
202 202 # a list of (ui, featureset) functions.
203 203 # only functions defined in modules of enabled extensions are invoked
204 204 featuresetupfuncs = set()
205 205
206 206 def _baserequirements(self, create):
207 207 return ['revlogv1']
208 208
209 209 def __init__(self, baseui, path=None, create=False):
210 210 self.requirements = set()
211 211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 212 self.wopener = self.wvfs
213 213 self.root = self.wvfs.base
214 214 self.path = self.wvfs.join(".hg")
215 215 self.origroot = path
216 216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 217 self.vfs = scmutil.vfs(self.path)
218 218 self.opener = self.vfs
219 219 self.baseui = baseui
220 220 self.ui = baseui.copy()
221 221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 222 # A list of callbacks to shape the phase if no data were found.
223 223 # Callbacks are in the form: func(repo, roots) --> processed root.
224 224 # This list is to be filled by extensions during repo setup
225 225 self._phasedefaults = []
226 226 try:
227 227 self.ui.readconfig(self.join("hgrc"), self.root)
228 228 extensions.loadall(self.ui)
229 229 except IOError:
230 230 pass
231 231
232 232 if self.featuresetupfuncs:
233 233 self.supported = set(self._basesupported) # use private copy
234 234 extmods = set(m.__name__ for n, m
235 235 in extensions.extensions(self.ui))
236 236 for setupfunc in self.featuresetupfuncs:
237 237 if setupfunc.__module__ in extmods:
238 238 setupfunc(self.ui, self.supported)
239 239 else:
240 240 self.supported = self._basesupported
241 241
242 242 if not self.vfs.isdir():
243 243 if create:
244 244 if not self.wvfs.exists():
245 245 self.wvfs.makedirs()
246 246 self.vfs.makedir(notindexed=True)
247 247 self.requirements.update(self._baserequirements(create))
248 248 if self.ui.configbool('format', 'usestore', True):
249 249 self.vfs.mkdir("store")
250 250 self.requirements.add("store")
251 251 if self.ui.configbool('format', 'usefncache', True):
252 252 self.requirements.add("fncache")
253 253 if self.ui.configbool('format', 'dotencode', True):
254 254 self.requirements.add('dotencode')
255 255 # create an invalid changelog
256 256 self.vfs.append(
257 257 "00changelog.i",
258 258 '\0\0\0\2' # represents revlogv2
259 259 ' dummy changelog to prevent using the old repo layout'
260 260 )
261 261 if scmutil.gdinitconfig(self.ui):
262 262 self.requirements.add("generaldelta")
263 263 if self.ui.configbool('experimental', 'treemanifest', False):
264 264 self.requirements.add("treemanifest")
265 265 if self.ui.configbool('experimental', 'manifestv2', False):
266 266 self.requirements.add("manifestv2")
267 267 else:
268 268 raise error.RepoError(_("repository %s not found") % path)
269 269 elif create:
270 270 raise error.RepoError(_("repository %s already exists") % path)
271 271 else:
272 272 try:
273 273 self.requirements = scmutil.readrequires(
274 274 self.vfs, self.supported)
275 275 except IOError as inst:
276 276 if inst.errno != errno.ENOENT:
277 277 raise
278 278
279 279 self.sharedpath = self.path
280 280 try:
281 281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
282 282 realpath=True)
283 283 s = vfs.base
284 284 if not vfs.exists():
285 285 raise error.RepoError(
286 286 _('.hg/sharedpath points to nonexistent directory %s') % s)
287 287 self.sharedpath = s
288 288 except IOError as inst:
289 289 if inst.errno != errno.ENOENT:
290 290 raise
291 291
292 292 self.store = store.store(
293 293 self.requirements, self.sharedpath, scmutil.vfs)
294 294 self.spath = self.store.path
295 295 self.svfs = self.store.vfs
296 296 self.sjoin = self.store.join
297 297 self.vfs.createmode = self.store.createmode
298 298 self._applyopenerreqs()
299 299 if create:
300 300 self._writerequirements()
301 301
302 302 self._dirstatevalidatewarned = False
303 303
304 304 self._branchcaches = {}
305 305 self._revbranchcache = None
306 306 self.filterpats = {}
307 307 self._datafilters = {}
308 308 self._transref = self._lockref = self._wlockref = None
309 309
310 310 # A cache for various files under .hg/ that tracks file changes,
311 311 # (used by the filecache decorator)
312 312 #
313 313 # Maps a property name to its util.filecacheentry
314 314 self._filecache = {}
315 315
316 316 # holds sets of revisions to be filtered
317 317 # should be cleared when something might have changed the filter value:
318 318 # - new changesets,
319 319 # - phase change,
320 320 # - new obsolescence marker,
321 321 # - working directory parent change,
322 322 # - bookmark changes
323 323 self.filteredrevcache = {}
324 324
325 325 # generic mapping between names and nodes
326 326 self.names = namespaces.namespaces()
327 327
328 328 def close(self):
329 329 self._writecaches()
330 330
331 331 def _writecaches(self):
332 332 if self._revbranchcache:
333 333 self._revbranchcache.write()
334 334
335 335 def _restrictcapabilities(self, caps):
336 336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 337 caps = set(caps)
338 338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 339 caps.add('bundle2=' + urllib.quote(capsblob))
340 340 return caps
341 341
342 342 def _applyopenerreqs(self):
343 343 self.svfs.options = dict((r, 1) for r in self.requirements
344 344 if r in self.openerreqs)
345 345 # experimental config: format.chunkcachesize
346 346 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
347 347 if chunkcachesize is not None:
348 348 self.svfs.options['chunkcachesize'] = chunkcachesize
349 349 # experimental config: format.maxchainlen
350 350 maxchainlen = self.ui.configint('format', 'maxchainlen')
351 351 if maxchainlen is not None:
352 352 self.svfs.options['maxchainlen'] = maxchainlen
353 353 # experimental config: format.manifestcachesize
354 354 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
355 355 if manifestcachesize is not None:
356 356 self.svfs.options['manifestcachesize'] = manifestcachesize
357 357 # experimental config: format.aggressivemergedeltas
358 358 aggressivemergedeltas = self.ui.configbool('format',
359 359 'aggressivemergedeltas', False)
360 360 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
361 361 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
362 362
363 363 def _writerequirements(self):
364 364 scmutil.writerequires(self.vfs, self.requirements)
365 365
366 366 def _checknested(self, path):
367 367 """Determine if path is a legal nested repository."""
368 368 if not path.startswith(self.root):
369 369 return False
370 370 subpath = path[len(self.root) + 1:]
371 371 normsubpath = util.pconvert(subpath)
372 372
373 373 # XXX: Checking against the current working copy is wrong in
374 374 # the sense that it can reject things like
375 375 #
376 376 # $ hg cat -r 10 sub/x.txt
377 377 #
378 378 # if sub/ is no longer a subrepository in the working copy
379 379 # parent revision.
380 380 #
381 381 # However, it can of course also allow things that would have
382 382 # been rejected before, such as the above cat command if sub/
383 383 # is a subrepository now, but was a normal directory before.
384 384 # The old path auditor would have rejected by mistake since it
385 385 # panics when it sees sub/.hg/.
386 386 #
387 387 # All in all, checking against the working copy seems sensible
388 388 # since we want to prevent access to nested repositories on
389 389 # the filesystem *now*.
390 390 ctx = self[None]
391 391 parts = util.splitpath(subpath)
392 392 while parts:
393 393 prefix = '/'.join(parts)
394 394 if prefix in ctx.substate:
395 395 if prefix == normsubpath:
396 396 return True
397 397 else:
398 398 sub = ctx.sub(prefix)
399 399 return sub.checknested(subpath[len(prefix) + 1:])
400 400 else:
401 401 parts.pop()
402 402 return False
403 403
404 404 def peer(self):
405 405 return localpeer(self) # not cached to avoid reference cycle
406 406
407 407 def unfiltered(self):
408 408 """Return unfiltered version of the repository
409 409
410 410 Intended to be overwritten by filtered repo."""
411 411 return self
412 412
413 413 def filtered(self, name):
414 414 """Return a filtered version of a repository"""
415 415 # build a new class with the mixin and the current class
416 416 # (possibly subclass of the repo)
417 417 class proxycls(repoview.repoview, self.unfiltered().__class__):
418 418 pass
419 419 return proxycls(self, name)
420 420
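# Editorial sketch (not part of this changeset): how callers obtain
# repository views. 'served' is the same filter name used by localpeer
# above; 'visible' is the default view that hides hidden changesets.
#
#   served = repo.filtered('served')   # view suitable for serving peers
#   unfi = repo.unfiltered()           # raw repo with no revision filtering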
421 421 @repofilecache('bookmarks')
422 422 def _bookmarks(self):
423 423 return bookmarks.bmstore(self)
424 424
425 425 @repofilecache('bookmarks.current')
426 426 def _activebookmark(self):
427 427 return bookmarks.readactive(self)
428 428
429 429 def bookmarkheads(self, bookmark):
430 430 name = bookmark.split('@', 1)[0]
431 431 heads = []
432 432 for mark, n in self._bookmarks.iteritems():
433 433 if mark.split('@', 1)[0] == name:
434 434 heads.append(n)
435 435 return heads
436 436
437 437 # _phaserevs and _phasesets depend on the changelog. What we need is to
438 438 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
439 439 # can't be easily expressed in the filecache mechanism.
440 440 @storecache('phaseroots', '00changelog.i')
441 441 def _phasecache(self):
442 442 return phases.phasecache(self, self._phasedefaults)
443 443
444 444 @storecache('obsstore')
445 445 def obsstore(self):
446 446 # read default format for new obsstore.
447 447 # developer config: format.obsstore-version
448 448 defaultformat = self.ui.configint('format', 'obsstore-version', None)
449 449 # rely on obsstore class default when possible.
450 450 kwargs = {}
451 451 if defaultformat is not None:
452 452 kwargs['defaultformat'] = defaultformat
453 453 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
454 454 store = obsolete.obsstore(self.svfs, readonly=readonly,
455 455 **kwargs)
456 456 if store and readonly:
457 457 self.ui.warn(
458 458 _('obsolete feature not enabled but %i markers found!\n')
459 459 % len(list(store)))
460 460 return store
461 461
462 462 @storecache('00changelog.i')
463 463 def changelog(self):
464 464 c = changelog.changelog(self.svfs)
465 465 if 'HG_PENDING' in os.environ:
466 466 p = os.environ['HG_PENDING']
467 467 if p.startswith(self.root):
468 468 c.readpending('00changelog.i.a')
469 469 return c
470 470
471 471 @storecache('00manifest.i')
472 472 def manifest(self):
473 473 return manifest.manifest(self.svfs)
474 474
475 475 def dirlog(self, dir):
476 476 return self.manifest.dirlog(dir)
477 477
478 478 @repofilecache('dirstate')
479 479 def dirstate(self):
480 480 return dirstate.dirstate(self.vfs, self.ui, self.root,
481 481 self._dirstatevalidate)
482 482
483 483 def _dirstatevalidate(self, node):
484 484 try:
485 485 self.changelog.rev(node)
486 486 return node
487 487 except error.LookupError:
488 488 if not self._dirstatevalidatewarned:
489 489 self._dirstatevalidatewarned = True
490 490 self.ui.warn(_("warning: ignoring unknown"
491 491 " working parent %s!\n") % short(node))
492 492 return nullid
493 493
494 494 def __getitem__(self, changeid):
495 495 if changeid is None or changeid == wdirrev:
496 496 return context.workingctx(self)
497 497 if isinstance(changeid, slice):
498 498 return [context.changectx(self, i)
499 499 for i in xrange(*changeid.indices(len(self)))
500 500 if i not in self.changelog.filteredrevs]
501 501 return context.changectx(self, changeid)
502 502
503 503 def __contains__(self, changeid):
504 504 try:
505 505 self[changeid]
506 506 return True
507 507 except error.RepoLookupError:
508 508 return False
509 509
510 510 def __nonzero__(self):
511 511 return True
512 512
513 513 def __len__(self):
514 514 return len(self.changelog)
515 515
516 516 def __iter__(self):
517 517 return iter(self.changelog)
518 518
519 519 def revs(self, expr, *args):
520 '''Return a list of revisions matching the given revset'''
520 '''Find revisions matching a revset.
521
522 The revset is specified as a string ``expr`` that may contain
523 %-formatting to escape certain types. See ``revset.formatspec``.
524
525 Return a revset.abstractsmartset, which is a list-like interface
526 that contains integer revisions.
527 '''
521 528 expr = revset.formatspec(expr, *args)
522 529 m = revset.match(None, expr)
523 530 return m(self)
524 531
525 532 def set(self, expr, *args):
526 '''
527 Yield a context for each matching revision, after doing arg
528 replacement via revset.formatspec
533 '''Find revisions matching a revset and emit changectx instances.
534
535 This is a convenience wrapper around ``revs()`` that iterates the
536 result and is a generator of changectx instances.
529 537 '''
530 538 for r in self.revs(expr, *args):
531 539 yield self[r]
532 540
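# Usage sketch for the two revset methods above (the expressions are
# illustrative; %d and %ld are formatspec escapes for an int and a list
# of ints respectively):
#
#   for rev in repo.revs('%ld and head()', somerevs):
#       ...                            # rev is an integer revision
#   for ctx in repo.set('parents(%d)', rev):
#       ...                            # ctx is a changectx instance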
533 541 def url(self):
534 542 return 'file:' + self.root
535 543
536 544 def hook(self, name, throw=False, **args):
537 545 """Call a hook, passing this repo instance.
538 546
539 547 This is a convenience method to aid invoking hooks. Extensions likely
540 548 won't call this unless they have registered a custom hook or are
541 549 replacing code that is expected to call a hook.
542 550 """
543 551 return hook.hook(self.ui, self, name, throw, **args)
544 552
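# Usage sketch (the hook name is hypothetical): an extension that
# registered a 'myext-sync' hook could fire it via this method:
#
#   repo.hook('myext-sync', throw=False, source='pull')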
545 553 @unfilteredmethod
546 554 def _tag(self, names, node, message, local, user, date, extra=None,
547 555 editor=False):
548 556 if isinstance(names, str):
549 557 names = (names,)
550 558
551 559 branches = self.branchmap()
552 560 for name in names:
553 561 self.hook('pretag', throw=True, node=hex(node), tag=name,
554 562 local=local)
555 563 if name in branches:
556 564 self.ui.warn(_("warning: tag %s conflicts with existing"
557 565 " branch name\n") % name)
558 566
559 567 def writetags(fp, names, munge, prevtags):
560 568 fp.seek(0, 2)
561 569 if prevtags and prevtags[-1] != '\n':
562 570 fp.write('\n')
563 571 for name in names:
564 572 if munge:
565 573 m = munge(name)
566 574 else:
567 575 m = name
568 576
569 577 if (self._tagscache.tagtypes and
570 578 name in self._tagscache.tagtypes):
571 579 old = self.tags().get(name, nullid)
572 580 fp.write('%s %s\n' % (hex(old), m))
573 581 fp.write('%s %s\n' % (hex(node), m))
574 582 fp.close()
575 583
576 584 prevtags = ''
577 585 if local:
578 586 try:
579 587 fp = self.vfs('localtags', 'r+')
580 588 except IOError:
581 589 fp = self.vfs('localtags', 'a')
582 590 else:
583 591 prevtags = fp.read()
584 592
585 593 # local tags are stored in the current charset
586 594 writetags(fp, names, None, prevtags)
587 595 for name in names:
588 596 self.hook('tag', node=hex(node), tag=name, local=local)
589 597 return
590 598
591 599 try:
592 600 fp = self.wfile('.hgtags', 'rb+')
593 601 except IOError as e:
594 602 if e.errno != errno.ENOENT:
595 603 raise
596 604 fp = self.wfile('.hgtags', 'ab')
597 605 else:
598 606 prevtags = fp.read()
599 607
600 608 # committed tags are stored in UTF-8
601 609 writetags(fp, names, encoding.fromlocal, prevtags)
602 610
603 611 fp.close()
604 612
605 613 self.invalidatecaches()
606 614
607 615 if '.hgtags' not in self.dirstate:
608 616 self[None].add(['.hgtags'])
609 617
610 618 m = matchmod.exact(self.root, '', ['.hgtags'])
611 619 tagnode = self.commit(message, user, date, extra=extra, match=m,
612 620 editor=editor)
613 621
614 622 for name in names:
615 623 self.hook('tag', node=hex(node), tag=name, local=local)
616 624
617 625 return tagnode
618 626
619 627 def tag(self, names, node, message, local, user, date, editor=False):
620 628 '''tag a revision with one or more symbolic names.
621 629
622 630 names is a list of strings or, when adding a single tag, names may be a
623 631 string.
624 632
625 633 if local is True, the tags are stored in a per-repository file.
626 634 otherwise, they are stored in the .hgtags file, and a new
627 635 changeset is committed with the change.
628 636
629 637 keyword arguments:
630 638
631 639 local: whether to store tags in non-version-controlled file
632 640 (default False)
633 641
634 642 message: commit message to use if committing
635 643
636 644 user: name of user to use if committing
637 645
638 646 date: date tuple to use if committing'''
639 647
640 648 if not local:
641 649 m = matchmod.exact(self.root, '', ['.hgtags'])
642 650 if any(self.status(match=m, unknown=True, ignored=True)):
643 651 raise error.Abort(_('working copy of .hgtags is changed'),
644 652 hint=_('please commit .hgtags manually'))
645 653
646 654 self.tags() # instantiate the cache
647 655 self._tag(names, node, message, local, user, date, editor=editor)
648 656
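# Usage sketch (values are illustrative): create a global tag, which
# commits the .hgtags change:
#
#   repo.tag('v1.0', repo['tip'].node(), 'Added tag v1.0',
#            False, 'user@example.com', None)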
649 657 @filteredpropertycache
650 658 def _tagscache(self):
651 659 '''Returns a tagscache object that contains various tag-related
652 660 caches.'''
653 661
654 662 # This simplifies its cache management by having one decorated
655 663 # function (this one) and the rest simply fetch things from it.
656 664 class tagscache(object):
657 665 def __init__(self):
658 666 # These two define the set of tags for this repository. tags
659 667 # maps tag name to node; tagtypes maps tag name to 'global' or
660 668 # 'local'. (Global tags are defined by .hgtags across all
661 669 # heads, and local tags are defined in .hg/localtags.)
662 670 # They constitute the in-memory cache of tags.
663 671 self.tags = self.tagtypes = None
664 672
665 673 self.nodetagscache = self.tagslist = None
666 674
667 675 cache = tagscache()
668 676 cache.tags, cache.tagtypes = self._findtags()
669 677
670 678 return cache
671 679
672 680 def tags(self):
673 681 '''return a mapping of tag to node'''
674 682 t = {}
675 683 if self.changelog.filteredrevs:
676 684 tags, tt = self._findtags()
677 685 else:
678 686 tags = self._tagscache.tags
679 687 for k, v in tags.iteritems():
680 688 try:
681 689 # ignore tags to unknown nodes
682 690 self.changelog.rev(v)
683 691 t[k] = v
684 692 except (error.LookupError, ValueError):
685 693 pass
686 694 return t
687 695
688 696 def _findtags(self):
689 697 '''Do the hard work of finding tags. Return a pair of dicts
690 698 (tags, tagtypes) where tags maps tag name to node, and tagtypes
691 699 maps tag name to a string like \'global\' or \'local\'.
692 700 Subclasses or extensions are free to add their own tags, but
693 701 should be aware that the returned dicts will be retained for the
694 702 duration of the localrepo object.'''
695 703
696 704 # XXX what tagtype should subclasses/extensions use? Currently
697 705 # mq and bookmarks add tags, but do not set the tagtype at all.
698 706 # Should each extension invent its own tag type? Should there
699 707 # be one tagtype for all such "virtual" tags? Or is the status
700 708 # quo fine?
701 709
702 710 alltags = {} # map tag name to (node, hist)
703 711 tagtypes = {}
704 712
705 713 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
706 714 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
707 715
708 716 # Build the return dicts. Have to re-encode tag names because
709 717 # the tags module always uses UTF-8 (in order not to lose info
710 718 # writing to the cache), but the rest of Mercurial wants them in
711 719 # local encoding.
712 720 tags = {}
713 721 for (name, (node, hist)) in alltags.iteritems():
714 722 if node != nullid:
715 723 tags[encoding.tolocal(name)] = node
716 724 tags['tip'] = self.changelog.tip()
717 725 tagtypes = dict([(encoding.tolocal(name), value)
718 726 for (name, value) in tagtypes.iteritems()])
719 727 return (tags, tagtypes)
720 728
721 729 def tagtype(self, tagname):
722 730 '''
723 731 return the type of the given tag. result can be:
724 732
725 733 'local' : a local tag
726 734 'global' : a global tag
727 735 None : tag does not exist
728 736 '''
729 737
730 738 return self._tagscache.tagtypes.get(tagname)
731 739
732 740 def tagslist(self):
733 741 '''return a list of tags ordered by revision'''
734 742 if not self._tagscache.tagslist:
735 743 l = []
736 744 for t, n in self.tags().iteritems():
737 745 l.append((self.changelog.rev(n), t, n))
738 746 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
739 747
740 748 return self._tagscache.tagslist
741 749
742 750 def nodetags(self, node):
743 751 '''return the tags associated with a node'''
744 752 if not self._tagscache.nodetagscache:
745 753 nodetagscache = {}
746 754 for t, n in self._tagscache.tags.iteritems():
747 755 nodetagscache.setdefault(n, []).append(t)
748 756 for tags in nodetagscache.itervalues():
749 757 tags.sort()
750 758 self._tagscache.nodetagscache = nodetagscache
751 759 return self._tagscache.nodetagscache.get(node, [])
752 760
753 761 def nodebookmarks(self, node):
754 762 marks = []
755 763 for bookmark, n in self._bookmarks.iteritems():
756 764 if n == node:
757 765 marks.append(bookmark)
758 766 return sorted(marks)
759 767
760 768 def branchmap(self):
761 769 '''returns a dictionary {branch: [branchheads]} with branchheads
762 770 ordered by increasing revision number'''
763 771 branchmap.updatecache(self)
764 772 return self._branchcaches[self.filtername]
765 773
766 774 @unfilteredmethod
767 775 def revbranchcache(self):
768 776 if not self._revbranchcache:
769 777 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
770 778 return self._revbranchcache
771 779
772 780 def branchtip(self, branch, ignoremissing=False):
773 781 '''return the tip node for a given branch
774 782
775 783 If ignoremissing is True, then this method will not raise an error.
776 784 This is helpful for callers that only expect None for a missing branch
777 785 (e.g. namespace).
778 786
779 787 '''
780 788 try:
781 789 return self.branchmap().branchtip(branch)
782 790 except KeyError:
783 791 if not ignoremissing:
784 792 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
785 793 else:
786 794 pass
787 795
788 796 def lookup(self, key):
789 797 return self[key].node()
790 798
791 799 def lookupbranch(self, key, remote=None):
792 800 repo = remote or self
793 801 if key in repo.branchmap():
794 802 return key
795 803
796 804 repo = (remote and remote.local()) and remote or self
797 805 return repo[key].branch()
798 806
799 807 def known(self, nodes):
800 808 nm = self.changelog.nodemap
801 809 pc = self._phasecache
802 810 result = []
803 811 for n in nodes:
804 812 r = nm.get(n)
805 813 resp = not (r is None or pc.phase(self, r) >= phases.secret)
806 814 result.append(resp)
807 815 return result
808 816
809 817 def local(self):
810 818 return self
811 819
812 820 def publishing(self):
813 821 # it's safe (and desirable) to trust the publish flag unconditionally
814 822 # so that we don't finalize changes shared between users via ssh or nfs
815 823 return self.ui.configbool('phases', 'publish', True, untrusted=True)
816 824
817 825 def cancopy(self):
818 826 # so statichttprepo's override of local() works
819 827 if not self.local():
820 828 return False
821 829 if not self.publishing():
822 830 return True
823 831 # if publishing we can't copy if there is filtered content
824 832 return not self.filtered('visible').changelog.filteredrevs
825 833
826 834 def shared(self):
827 835 '''the type of shared repository (None if not shared)'''
828 836 if self.sharedpath != self.path:
829 837 return 'store'
830 838 return None
831 839
832 840 def join(self, f, *insidef):
833 841 return self.vfs.join(os.path.join(f, *insidef))
834 842
835 843 def wjoin(self, f, *insidef):
836 844 return self.vfs.reljoin(self.root, f, *insidef)
837 845
838 846 def file(self, f):
839 847 if f[0] == '/':
840 848 f = f[1:]
841 849 return filelog.filelog(self.svfs, f)
842 850
843 851 def changectx(self, changeid):
844 852 return self[changeid]
845 853
846 854 def parents(self, changeid=None):
847 855 '''get list of changectxs for parents of changeid'''
848 856 return self[changeid].parents()
849 857
850 858 def setparents(self, p1, p2=nullid):
851 859 self.dirstate.beginparentchange()
852 860 copies = self.dirstate.setparents(p1, p2)
853 861 pctx = self[p1]
854 862 if copies:
855 863 # Adjust copy records, the dirstate cannot do it, it
856 864 # requires access to parents manifests. Preserve them
857 865 # only for entries added to first parent.
858 866 for f in copies:
859 867 if f not in pctx and copies[f] in pctx:
860 868 self.dirstate.copy(copies[f], f)
861 869 if p2 == nullid:
862 870 for f, s in sorted(self.dirstate.copies().items()):
863 871 if f not in pctx and s not in pctx:
864 872 self.dirstate.copy(None, f)
865 873 self.dirstate.endparentchange()
866 874
867 875 def filectx(self, path, changeid=None, fileid=None):
868 876 """changeid can be a changeset revision, node, or tag.
869 877 fileid can be a file revision or node."""
870 878 return context.filectx(self, path, changeid, fileid)
871 879
872 880 def getcwd(self):
873 881 return self.dirstate.getcwd()
874 882
875 883 def pathto(self, f, cwd=None):
876 884 return self.dirstate.pathto(f, cwd)
877 885
878 886 def wfile(self, f, mode='r'):
879 887 return self.wvfs(f, mode)
880 888
881 889 def _link(self, f):
882 890 return self.wvfs.islink(f)
883 891
884 892 def _loadfilter(self, filter):
885 893 if filter not in self.filterpats:
886 894 l = []
887 895 for pat, cmd in self.ui.configitems(filter):
888 896 if cmd == '!':
889 897 continue
890 898 mf = matchmod.match(self.root, '', [pat])
891 899 fn = None
892 900 params = cmd
893 901 for name, filterfn in self._datafilters.iteritems():
894 902 if cmd.startswith(name):
895 903 fn = filterfn
896 904 params = cmd[len(name):].lstrip()
897 905 break
898 906 if not fn:
899 907 fn = lambda s, c, **kwargs: util.filter(s, c)
900 908 # Wrap old filters not supporting keyword arguments
901 909 if not inspect.getargspec(fn)[2]:
902 910 oldfn = fn
903 911 fn = lambda s, c, **kwargs: oldfn(s, c)
904 912 l.append((mf, fn, params))
905 913 self.filterpats[filter] = l
906 914 return self.filterpats[filter]
907 915
908 916 def _filter(self, filterpats, filename, data):
909 917 for mf, fn, cmd in filterpats:
910 918 if mf(filename):
911 919 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
912 920 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
913 921 break
914 922
915 923 return data
916 924
917 925 @unfilteredpropertycache
918 926 def _encodefilterpats(self):
919 927 return self._loadfilter('encode')
920 928
921 929 @unfilteredpropertycache
922 930 def _decodefilterpats(self):
923 931 return self._loadfilter('decode')
924 932
925 933 def adddatafilter(self, name, filter):
926 934 self._datafilters[name] = filter
927 935
928 936 def wread(self, filename):
929 937 if self._link(filename):
930 938 data = self.wvfs.readlink(filename)
931 939 else:
932 940 data = self.wvfs.read(filename)
933 941 return self._filter(self._encodefilterpats, filename, data)
934 942
935 943 def wwrite(self, filename, data, flags):
936 944 """write ``data`` into ``filename`` in the working directory
937 945
938 946 This returns the length of the written (possibly decoded) data.
939 947 """
940 948 data = self._filter(self._decodefilterpats, filename, data)
941 949 if 'l' in flags:
942 950 self.wvfs.symlink(data, filename)
943 951 else:
944 952 self.wvfs.write(filename, data)
945 953 if 'x' in flags:
946 954 self.wvfs.setflags(filename, False, True)
947 955 return len(data)
948 956
949 957 def wwritedata(self, filename, data):
950 958 return self._filter(self._decodefilterpats, filename, data)
951 959
952 960 def currenttransaction(self):
953 961 """return the current transaction or None if non exists"""
954 962 if self._transref:
955 963 tr = self._transref()
956 964 else:
957 965 tr = None
958 966
959 967 if tr and tr.running():
960 968 return tr
961 969 return None
962 970
963 971 def transaction(self, desc, report=None):
964 972 if (self.ui.configbool('devel', 'all-warnings')
965 973 or self.ui.configbool('devel', 'check-locks')):
966 974 l = self._lockref and self._lockref()
967 975 if l is None or not l.held:
968 976 self.ui.develwarn('transaction with no lock')
969 977 tr = self.currenttransaction()
970 978 if tr is not None:
971 979 return tr.nest()
972 980
973 981 # abort here if the journal already exists
974 982 if self.svfs.exists("journal"):
975 983 raise error.RepoError(
976 984 _("abandoned transaction found"),
977 985 hint=_("run 'hg recover' to clean up transaction"))
978 986
979 987 # make journal.dirstate contain in-memory changes at this point
980 988 self.dirstate.write(None)
981 989
982 990 idbase = "%.40f#%f" % (random.random(), time.time())
983 991 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
984 992 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
985 993
986 994 self._writejournal(desc)
987 995 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
988 996 if report:
989 997 rp = report
990 998 else:
991 999 rp = self.ui.warn
992 1000 vfsmap = {'plain': self.vfs} # root of .hg/
993 1001 # we must avoid cyclic reference between repo and transaction.
994 1002 reporef = weakref.ref(self)
995 1003 def validate(tr):
996 1004 """will run pre-closing hooks"""
997 1005 reporef().hook('pretxnclose', throw=True,
998 1006 txnname=desc, **tr.hookargs)
999 1007 def releasefn(tr, success):
1000 1008 repo = reporef()
1001 1009 if success:
1002 1010 # this should be explicitly invoked here, because
1003 1011 # in-memory changes aren't written out at closing
1004 1012 # transaction, if tr.addfilegenerator (via
1005 1013 # dirstate.write or so) isn't invoked while
1006 1014 # transaction running
1007 1015 repo.dirstate.write(None)
1008 1016 else:
1009 1017 # prevent in-memory changes from being written out at
1010 1018 # the end of outer wlock scope or so
1011 1019 repo.dirstate.invalidate()
1012 1020
1013 1021 # discard all changes (including ones already written
1014 1022 # out) in this transaction
1015 1023 repo.vfs.rename('journal.dirstate', 'dirstate')
1016 1024
1017 1025 repo.invalidate(clearfilecache=True)
1018 1026
1019 1027 tr = transaction.transaction(rp, self.svfs, vfsmap,
1020 1028 "journal",
1021 1029 "undo",
1022 1030 aftertrans(renames),
1023 1031 self.store.createmode,
1024 1032 validator=validate,
1025 1033 releasefn=releasefn)
1026 1034
1027 1035 tr.hookargs['txnid'] = txnid
1028 1036 # note: writing the fncache only during finalize means that the file is
1029 1037 # outdated when running hooks. As fncache is used for streaming clone,
1030 1038 # this is not expected to break anything that happens during the hooks.
1031 1039 tr.addfinalize('flush-fncache', self.store.write)
1032 1040 def txnclosehook(tr2):
1033 1041 """To be run if transaction is successful, will schedule a hook run
1034 1042 """
1035 1043 def hook():
1036 1044 reporef().hook('txnclose', throw=False, txnname=desc,
1037 1045 **tr2.hookargs)
1038 1046 reporef()._afterlock(hook)
1039 1047 tr.addfinalize('txnclose-hook', txnclosehook)
1040 1048 def txnaborthook(tr2):
1041 1049 """To be run if transaction is aborted
1042 1050 """
1043 1051 reporef().hook('txnabort', throw=False, txnname=desc,
1044 1052 **tr2.hookargs)
1045 1053 tr.addabort('txnabort-hook', txnaborthook)
1046 1054 # avoid eager cache invalidation. in-memory data should be identical
1047 1055 # to stored data if transaction has no error.
1048 1056 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1049 1057 self._transref = weakref.ref(tr)
1050 1058 return tr
1051 1059
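# Typical calling pattern for transaction() (sketch; the caller is
# assumed to already hold the store lock):
#
#   tr = repo.transaction('my-operation')
#   try:
#       ...                            # write store data in the transaction
#       tr.close()                     # commit the transaction
#   finally:
#       tr.release()                   # abort if close() was never reached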
1052 1060 def _journalfiles(self):
1053 1061 return ((self.svfs, 'journal'),
1054 1062 (self.vfs, 'journal.dirstate'),
1055 1063 (self.vfs, 'journal.branch'),
1056 1064 (self.vfs, 'journal.desc'),
1057 1065 (self.vfs, 'journal.bookmarks'),
1058 1066 (self.svfs, 'journal.phaseroots'))
1059 1067
1060 1068 def undofiles(self):
1061 1069 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1062 1070
1063 1071 def _writejournal(self, desc):
1064 1072 self.vfs.write("journal.dirstate",
1065 1073 self.vfs.tryread("dirstate"))
1066 1074 self.vfs.write("journal.branch",
1067 1075 encoding.fromlocal(self.dirstate.branch()))
1068 1076 self.vfs.write("journal.desc",
1069 1077 "%d\n%s\n" % (len(self), desc))
1070 1078 self.vfs.write("journal.bookmarks",
1071 1079 self.vfs.tryread("bookmarks"))
1072 1080 self.svfs.write("journal.phaseroots",
1073 1081 self.svfs.tryread("phaseroots"))
1074 1082
1075 1083 def recover(self):
1076 1084 lock = self.lock()
1077 1085 try:
1078 1086 if self.svfs.exists("journal"):
1079 1087 self.ui.status(_("rolling back interrupted transaction\n"))
1080 1088 vfsmap = {'': self.svfs,
1081 1089 'plain': self.vfs,}
1082 1090 transaction.rollback(self.svfs, vfsmap, "journal",
1083 1091 self.ui.warn)
1084 1092 self.invalidate()
1085 1093 return True
1086 1094 else:
1087 1095 self.ui.warn(_("no interrupted transaction available\n"))
1088 1096 return False
1089 1097 finally:
1090 1098 lock.release()
1091 1099
1092 1100 def rollback(self, dryrun=False, force=False):
1093 1101 wlock = lock = dsguard = None
1094 1102 try:
1095 1103 wlock = self.wlock()
1096 1104 lock = self.lock()
1097 1105 if self.svfs.exists("undo"):
1098 1106 dsguard = cmdutil.dirstateguard(self, 'rollback')
1099 1107
1100 1108 return self._rollback(dryrun, force, dsguard)
1101 1109 else:
1102 1110 self.ui.warn(_("no rollback information available\n"))
1103 1111 return 1
1104 1112 finally:
1105 1113 release(dsguard, lock, wlock)
1106 1114
1107 1115 @unfilteredmethod # Until we get smarter cache management
1108 1116 def _rollback(self, dryrun, force, dsguard):
1109 1117 ui = self.ui
1110 1118 try:
1111 1119 args = self.vfs.read('undo.desc').splitlines()
1112 1120 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1113 1121 if len(args) >= 3:
1114 1122 detail = args[2]
1115 1123 oldtip = oldlen - 1
1116 1124
1117 1125 if detail and ui.verbose:
1118 1126 msg = (_('repository tip rolled back to revision %s'
1119 1127 ' (undo %s: %s)\n')
1120 1128 % (oldtip, desc, detail))
1121 1129 else:
1122 1130 msg = (_('repository tip rolled back to revision %s'
1123 1131 ' (undo %s)\n')
1124 1132 % (oldtip, desc))
1125 1133 except IOError:
1126 1134 msg = _('rolling back unknown transaction\n')
1127 1135 desc = None
1128 1136
1129 1137 if not force and self['.'] != self['tip'] and desc == 'commit':
1130 1138 raise error.Abort(
1131 1139 _('rollback of last commit while not checked out '
1132 1140 'may lose data'), hint=_('use -f to force'))
1133 1141
1134 1142 ui.status(msg)
1135 1143 if dryrun:
1136 1144 return 0
1137 1145
1138 1146 parents = self.dirstate.parents()
1139 1147 self.destroying()
1140 1148 vfsmap = {'plain': self.vfs, '': self.svfs}
1141 1149 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1142 1150 if self.vfs.exists('undo.bookmarks'):
1143 1151 self.vfs.rename('undo.bookmarks', 'bookmarks')
1144 1152 if self.svfs.exists('undo.phaseroots'):
1145 1153 self.svfs.rename('undo.phaseroots', 'phaseroots')
1146 1154 self.invalidate()
1147 1155
1148 1156 parentgone = (parents[0] not in self.changelog.nodemap or
1149 1157 parents[1] not in self.changelog.nodemap)
1150 1158 if parentgone:
1151 1159 # prevent dirstateguard from overwriting already restored one
1152 1160 dsguard.close()
1153 1161
1154 1162 self.vfs.rename('undo.dirstate', 'dirstate')
1155 1163 try:
1156 1164 branch = self.vfs.read('undo.branch')
1157 1165 self.dirstate.setbranch(encoding.tolocal(branch))
1158 1166 except IOError:
1159 1167 ui.warn(_('named branch could not be reset: '
1160 1168 'current branch is still \'%s\'\n')
1161 1169 % self.dirstate.branch())
1162 1170
1163 1171 self.dirstate.invalidate()
1164 1172 parents = tuple([p.rev() for p in self.parents()])
1165 1173 if len(parents) > 1:
1166 1174 ui.status(_('working directory now based on '
1167 1175 'revisions %d and %d\n') % parents)
1168 1176 else:
1169 1177 ui.status(_('working directory now based on '
1170 1178 'revision %d\n') % parents)
1171 1179 mergemod.mergestate.clean(self, self['.'].node())
1172 1180
1173 1181 # TODO: if we know which new heads may result from this rollback, pass
1174 1182 # them to destroy(), which will prevent the branchhead cache from being
1175 1183 # invalidated.
1176 1184 self.destroyed()
1177 1185 return 0
1178 1186
1179 1187 def invalidatecaches(self):
1180 1188
1181 1189 if '_tagscache' in vars(self):
1182 1190 # can't use delattr on proxy
1183 1191 del self.__dict__['_tagscache']
1184 1192
1185 1193 self.unfiltered()._branchcaches.clear()
1186 1194 self.invalidatevolatilesets()
1187 1195
1188 1196 def invalidatevolatilesets(self):
1189 1197 self.filteredrevcache.clear()
1190 1198 obsolete.clearobscaches(self)
1191 1199
1192 1200 def invalidatedirstate(self):
1193 1201 '''Invalidates the dirstate, causing the next call to dirstate
1194 1202 to check if it was modified since the last time it was read,
1195 1203 rereading it if it has.
1196 1204
1197 1205 This is different from dirstate.invalidate() in that it doesn't always
1198 1206 reread the dirstate. Use dirstate.invalidate() if you want to
1199 1207 explicitly read the dirstate again (i.e. restoring it to a previous
1200 1208 known good state).'''
1201 1209 if hasunfilteredcache(self, 'dirstate'):
1202 1210 for k in self.dirstate._filecache:
1203 1211 try:
1204 1212 delattr(self.dirstate, k)
1205 1213 except AttributeError:
1206 1214 pass
1207 1215 delattr(self.unfiltered(), 'dirstate')
1208 1216
1209 1217 def invalidate(self, clearfilecache=False):
1210 1218 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1211 1219 for k in self._filecache.keys():
1212 1220 # dirstate is invalidated separately in invalidatedirstate()
1213 1221 if k == 'dirstate':
1214 1222 continue
1215 1223
1216 1224 if clearfilecache:
1217 1225 del self._filecache[k]
1218 1226 try:
1219 1227 delattr(unfiltered, k)
1220 1228 except AttributeError:
1221 1229 pass
1222 1230 self.invalidatecaches()
1223 1231 self.store.invalidatecaches()
1224 1232
1225 1233 def invalidateall(self):
1226 1234 '''Fully invalidates both store and non-store parts, causing the
1227 1235 subsequent operation to reread any outside changes.'''
1228 1236 # extension should hook this to invalidate its caches
1229 1237 self.invalidate()
1230 1238 self.invalidatedirstate()
1231 1239
1232 1240 def _refreshfilecachestats(self, tr):
1233 1241 """Reload stats of cached files so that they are flagged as valid"""
1234 1242 for k, ce in self._filecache.items():
1235 1243 if k == 'dirstate' or k not in self.__dict__:
1236 1244 continue
1237 1245 ce.refresh()
1238 1246
1239 1247 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1240 1248 inheritchecker=None, parentenvvar=None):
1241 1249 parentlock = None
1242 1250 # the contents of parentenvvar are used by the underlying lock to
1243 1251 # determine whether it can be inherited
1244 1252 if parentenvvar is not None:
1245 1253 parentlock = os.environ.get(parentenvvar)
1246 1254 try:
1247 1255 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1248 1256 acquirefn=acquirefn, desc=desc,
1249 1257 inheritchecker=inheritchecker,
1250 1258 parentlock=parentlock)
1251 1259 except error.LockHeld as inst:
1252 1260 if not wait:
1253 1261 raise
1254 1262 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1255 1263 (desc, inst.locker))
1256 1264 # default to 600 seconds timeout
1257 1265 l = lockmod.lock(vfs, lockname,
1258 1266 int(self.ui.config("ui", "timeout", "600")),
1259 1267 releasefn=releasefn, acquirefn=acquirefn,
1260 1268 desc=desc)
1261 1269 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1262 1270 return l
1263 1271
1264 1272 def _afterlock(self, callback):
1265 1273 """add a callback to be run when the repository is fully unlocked
1266 1274
1267 1275 The callback will be executed when the outermost lock is released
1268 1276 (with wlock being higher level than 'lock')."""
1269 1277 for ref in (self._wlockref, self._lockref):
1270 1278 l = ref and ref()
1271 1279 if l and l.held:
1272 1280 l.postrelease.append(callback)
1273 1281 break
1274 1282 else: # no lock has been found.
1275 1283 callback()
1276 1284
1277 1285 def lock(self, wait=True):
1278 1286 '''Lock the repository store (.hg/store) and return a weak reference
1279 1287 to the lock. Use this before modifying the store (e.g. committing or
1280 1288 stripping). If you are opening a transaction, get a lock as well.
1281 1289
1282 1290 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1283 1291 'wlock' first to avoid a deadlock hazard.'''
1284 1292 l = self._lockref and self._lockref()
1285 1293 if l is not None and l.held:
1286 1294 l.lock()
1287 1295 return l
1288 1296
1289 1297 l = self._lock(self.svfs, "lock", wait, None,
1290 1298 self.invalidate, _('repository %s') % self.origroot)
1291 1299 self._lockref = weakref.ref(l)
1292 1300 return l
1293 1301
1294 1302 def _wlockchecktransaction(self):
1295 1303 if self.currenttransaction() is not None:
1296 1304 raise error.LockInheritanceContractViolation(
1297 1305 'wlock cannot be inherited in the middle of a transaction')
1298 1306
1299 1307 def wlock(self, wait=True):
1300 1308 '''Lock the non-store parts of the repository (everything under
1301 1309 .hg except .hg/store) and return a weak reference to the lock.
1302 1310
1303 1311 Use this before modifying files in .hg.
1304 1312
1305 1313 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1306 1314 'wlock' first to avoid a deadlock hazard.'''
1307 1315 l = self._wlockref and self._wlockref()
1308 1316 if l is not None and l.held:
1309 1317 l.lock()
1310 1318 return l
1311 1319
1312 1320 # We do not need to check for non-waiting lock acquisition. Such
1313 1321 # acquisition would not cause a deadlock as it would just fail.
1314 1322 if wait and (self.ui.configbool('devel', 'all-warnings')
1315 1323 or self.ui.configbool('devel', 'check-locks')):
1316 1324 l = self._lockref and self._lockref()
1317 1325 if l is not None and l.held:
1318 1326 self.ui.develwarn('"wlock" acquired after "lock"')
1319 1327
1320 1328 def unlock():
1321 1329 if self.dirstate.pendingparentchange():
1322 1330 self.dirstate.invalidate()
1323 1331 else:
1324 1332 self.dirstate.write(None)
1325 1333
1326 1334 self._filecache['dirstate'].refresh()
1327 1335
1328 1336 l = self._lock(self.vfs, "wlock", wait, unlock,
1329 1337 self.invalidatedirstate, _('working directory of %s') %
1330 1338 self.origroot,
1331 1339 inheritchecker=self._wlockchecktransaction,
1332 1340 parentenvvar='HG_WLOCK_LOCKER')
1333 1341 self._wlockref = weakref.ref(l)
1334 1342 return l
1335 1343
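# Lock-ordering sketch per the docstrings above: take 'wlock' before
# 'lock' when both are needed; release() (imported from lock above)
# tears them down in the order given:
#
#   wlock = lock = None
#   try:
#       wlock = repo.wlock()
#       lock = repo.lock()
#       ...                            # modify working copy and store
#   finally:
#       release(lock, wlock)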
1336 1344 def _currentlock(self, lockref):
1337 1345 """Returns the lock if it's held, or None if it's not."""
1338 1346 if lockref is None:
1339 1347 return None
1340 1348 l = lockref()
1341 1349 if l is None or not l.held:
1342 1350 return None
1343 1351 return l
1344 1352
1345 1353 def currentwlock(self):
1346 1354 """Returns the wlock if it's held, or None if it's not."""
1347 1355 return self._currentlock(self._wlockref)
1348 1356
1349 1357 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1350 1358 """
1351 1359 commit an individual file as part of a larger transaction
1352 1360 """
1353 1361
1354 1362 fname = fctx.path()
1355 1363 fparent1 = manifest1.get(fname, nullid)
1356 1364 fparent2 = manifest2.get(fname, nullid)
1357 1365 if isinstance(fctx, context.filectx):
1358 1366 node = fctx.filenode()
1359 1367 if node in [fparent1, fparent2]:
1360 1368 self.ui.debug('reusing %s filelog entry\n' % fname)
1361 1369 return node
1362 1370
1363 1371 flog = self.file(fname)
1364 1372 meta = {}
1365 1373 copy = fctx.renamed()
1366 1374 if copy and copy[0] != fname:
1367 1375 # Mark the new revision of this file as a copy of another
1368 1376 # file. This copy data will effectively act as a parent
1369 1377 # of this new revision. If this is a merge, the first
1370 1378 # parent will be the nullid (meaning "look up the copy data")
1371 1379 # and the second one will be the other parent. For example:
1372 1380 #
1373 1381 # 0 --- 1 --- 3 rev1 changes file foo
1374 1382 # \ / rev2 renames foo to bar and changes it
1375 1383 # \- 2 -/ rev3 should have bar with all changes and
1376 1384 # should record that bar descends from
1377 1385 # bar in rev2 and foo in rev1
1378 1386 #
1379 1387 # this allows this merge to succeed:
1380 1388 #
1381 1389 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1382 1390 # \ / merging rev3 and rev4 should use bar@rev2
1383 1391 # \- 2 --- 4 as the merge base
1384 1392 #
1385 1393
1386 1394 cfname = copy[0]
1387 1395 crev = manifest1.get(cfname)
1388 1396 newfparent = fparent2
1389 1397
1390 1398 if manifest2: # branch merge
1391 1399 if fparent2 == nullid or crev is None: # copied on remote side
1392 1400 if cfname in manifest2:
1393 1401 crev = manifest2[cfname]
1394 1402 newfparent = fparent1
1395 1403
1396 1404 # Here, we used to search backwards through history to try to find
1397 1405 # where the file copy came from if the source of a copy was not in
1398 1406 # the parent directory. However, this doesn't actually make sense to
1399 1407 # do (what does a copy from something not in your working copy even
1400 1408 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1401 1409 # the user that copy information was dropped, so if they didn't
1402 1410 # expect this outcome it can be fixed, but this is the correct
1403 1411 # behavior in this circumstance.
1404 1412
1405 1413 if crev:
1406 1414 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1407 1415 meta["copy"] = cfname
1408 1416 meta["copyrev"] = hex(crev)
1409 1417 fparent1, fparent2 = nullid, newfparent
1410 1418 else:
1411 1419 self.ui.warn(_("warning: can't find ancestor for '%s' "
1412 1420 "copied from '%s'!\n") % (fname, cfname))
1413 1421
1414 1422 elif fparent1 == nullid:
1415 1423 fparent1, fparent2 = fparent2, nullid
1416 1424 elif fparent2 != nullid:
1417 1425 # is one parent an ancestor of the other?
1418 1426 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1419 1427 if fparent1 in fparentancestors:
1420 1428 fparent1, fparent2 = fparent2, nullid
1421 1429 elif fparent2 in fparentancestors:
1422 1430 fparent2 = nullid
1423 1431
1424 1432 # is the file changed?
1425 1433 text = fctx.data()
1426 1434 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1427 1435 changelist.append(fname)
1428 1436 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1429 1437 # are just the flags changed during merge?
1430 1438 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1431 1439 changelist.append(fname)
1432 1440
1433 1441 return fparent1
1434 1442
1435 1443 @unfilteredmethod
1436 1444 def commit(self, text="", user=None, date=None, match=None, force=False,
1437 1445 editor=False, extra=None):
1438 1446 """Add a new revision to current repository.
1439 1447
1440 1448 Revision information is gathered from the working directory,
1441 1449 match can be used to filter the committed files. If editor is
1442 1450 supplied, it is called to get a commit message.
1443 1451 """
1444 1452 if extra is None:
1445 1453 extra = {}
1446 1454
1447 1455 def fail(f, msg):
1448 1456 raise error.Abort('%s: %s' % (f, msg))
1449 1457
1450 1458 if not match:
1451 1459 match = matchmod.always(self.root, '')
1452 1460
1453 1461 if not force:
1454 1462 vdirs = []
1455 1463 match.explicitdir = vdirs.append
1456 1464 match.bad = fail
1457 1465
1458 1466 wlock = lock = tr = None
1459 1467 try:
1460 1468 wlock = self.wlock()
1461 1469 wctx = self[None]
1462 1470 merge = len(wctx.parents()) > 1
1463 1471
1464 1472 if not force and merge and match.ispartial():
1465 1473 raise error.Abort(_('cannot partially commit a merge '
1466 1474 '(do not specify files or patterns)'))
1467 1475
1468 1476 status = self.status(match=match, clean=force)
1469 1477 if force:
1470 1478 status.modified.extend(status.clean) # mq may commit clean files
1471 1479
1472 1480 # check subrepos
1473 1481 subs = []
1474 1482 commitsubs = set()
1475 1483 newstate = wctx.substate.copy()
1476 1484 # only manage subrepos and .hgsubstate if .hgsub is present
1477 1485 if '.hgsub' in wctx:
1478 1486 # we'll decide whether to track this ourselves, thanks
1479 1487 for c in status.modified, status.added, status.removed:
1480 1488 if '.hgsubstate' in c:
1481 1489 c.remove('.hgsubstate')
1482 1490
1483 1491 # compare current state to last committed state
1484 1492 # build new substate based on last committed state
1485 1493 oldstate = wctx.p1().substate
1486 1494 for s in sorted(newstate.keys()):
1487 1495 if not match(s):
1488 1496 # ignore working copy, use old state if present
1489 1497 if s in oldstate:
1490 1498 newstate[s] = oldstate[s]
1491 1499 continue
1492 1500 if not force:
1493 1501 raise error.Abort(
1494 1502 _("commit with new subrepo %s excluded") % s)
1495 1503 dirtyreason = wctx.sub(s).dirtyreason(True)
1496 1504 if dirtyreason:
1497 1505 if not self.ui.configbool('ui', 'commitsubrepos'):
1498 1506 raise error.Abort(dirtyreason,
1499 1507 hint=_("use --subrepos for recursive commit"))
1500 1508 subs.append(s)
1501 1509 commitsubs.add(s)
1502 1510 else:
1503 1511 bs = wctx.sub(s).basestate()
1504 1512 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1505 1513 if oldstate.get(s, (None, None, None))[1] != bs:
1506 1514 subs.append(s)
1507 1515
1508 1516 # check for removed subrepos
1509 1517 for p in wctx.parents():
1510 1518 r = [s for s in p.substate if s not in newstate]
1511 1519 subs += [s for s in r if match(s)]
1512 1520 if subs:
1513 1521 if (not match('.hgsub') and
1514 1522 '.hgsub' in (wctx.modified() + wctx.added())):
1515 1523 raise error.Abort(
1516 1524 _("can't commit subrepos without .hgsub"))
1517 1525 status.modified.insert(0, '.hgsubstate')
1518 1526
1519 1527 elif '.hgsub' in status.removed:
1520 1528 # clean up .hgsubstate when .hgsub is removed
1521 1529 if ('.hgsubstate' in wctx and
1522 1530 '.hgsubstate' not in (status.modified + status.added +
1523 1531 status.removed)):
1524 1532 status.removed.insert(0, '.hgsubstate')
1525 1533
1526 1534 # make sure all explicit patterns are matched
1527 1535 if not force and (match.isexact() or match.prefix()):
1528 1536 matched = set(status.modified + status.added + status.removed)
1529 1537
1530 1538 for f in match.files():
1531 1539 f = self.dirstate.normalize(f)
1532 1540 if f == '.' or f in matched or f in wctx.substate:
1533 1541 continue
1534 1542 if f in status.deleted:
1535 1543 fail(f, _('file not found!'))
1536 1544 if f in vdirs: # visited directory
1537 1545 d = f + '/'
1538 1546 for mf in matched:
1539 1547 if mf.startswith(d):
1540 1548 break
1541 1549 else:
1542 1550 fail(f, _("no match under directory!"))
1543 1551 elif f not in self.dirstate:
1544 1552 fail(f, _("file not tracked!"))
1545 1553
1546 1554 cctx = context.workingcommitctx(self, status,
1547 1555 text, user, date, extra)
1548 1556
1549 1557 # internal config: ui.allowemptycommit
1550 1558 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1551 1559 or extra.get('close') or merge or cctx.files()
1552 1560 or self.ui.configbool('ui', 'allowemptycommit'))
1553 1561 if not allowemptycommit:
1554 1562 return None
1555 1563
1556 1564 if merge and cctx.deleted():
1557 1565 raise error.Abort(_("cannot commit merge with missing files"))
1558 1566
1559 1567 unresolved, driverresolved = False, False
1560 1568 ms = mergemod.mergestate.read(self)
1561 1569 for f in status.modified:
1562 1570 if f in ms:
1563 1571 if ms[f] == 'u':
1564 1572 unresolved = True
1565 1573 elif ms[f] == 'd':
1566 1574 driverresolved = True
1567 1575
1568 1576 if unresolved:
1569 1577 raise error.Abort(_('unresolved merge conflicts '
1570 1578 '(see "hg help resolve")'))
1571 1579 if driverresolved or ms.mdstate() != 's':
1572 1580 raise error.Abort(_('driver-resolved merge conflicts'),
1573 1581 hint=_('run "hg resolve --all" to resolve'))
1574 1582
1575 1583 if editor:
1576 1584 cctx._text = editor(self, cctx, subs)
1577 1585 edited = (text != cctx._text)
1578 1586
1579 1587 # Save commit message in case this transaction gets rolled back
1580 1588 # (e.g. by a pretxncommit hook). Leave the content alone on
1581 1589 # the assumption that the user will use the same editor again.
1582 1590 msgfn = self.savecommitmessage(cctx._text)
1583 1591
1584 1592 # commit subs and write new state
1585 1593 if subs:
1586 1594 for s in sorted(commitsubs):
1587 1595 sub = wctx.sub(s)
1588 1596 self.ui.status(_('committing subrepository %s\n') %
1589 1597 subrepo.subrelpath(sub))
1590 1598 sr = sub.commit(cctx._text, user, date)
1591 1599 newstate[s] = (newstate[s][0], sr)
1592 1600 subrepo.writestate(self, newstate)
1593 1601
1594 1602 p1, p2 = self.dirstate.parents()
1595 1603 lock = self.lock()
1596 1604 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1597 1605 try:
1598 1606 self.hook("precommit", throw=True, parent1=hookp1,
1599 1607 parent2=hookp2)
1600 1608 tr = self.transaction('commit')
1601 1609 ret = self.commitctx(cctx, True)
1602 1610 except: # re-raises
1603 1611 if edited:
1604 1612 self.ui.write(
1605 1613 _('note: commit message saved in %s\n') % msgfn)
1606 1614 raise
1607 1615 # update bookmarks, dirstate and mergestate
1608 1616 bookmarks.update(self, [p1, p2], ret)
1609 1617 cctx.markcommitted(ret)
1610 1618 ms.reset()
1611 1619 tr.close()
1612 1620
1613 1621 finally:
1614 1622 lockmod.release(tr, lock, wlock)
1615 1623
1616 1624 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1617 1625             # hack for commands that use a temporary commit (e.g. histedit):
1618 1626             # the temporary commit may have been stripped before the hook runs
1619 1627 if self.changelog.hasnode(ret):
1620 1628 self.hook("commit", node=node, parent1=parent1,
1621 1629 parent2=parent2)
1622 1630 self._afterlock(commithook)
1623 1631 return ret
1624 1632
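    # A minimal usage sketch for commit() (illustrative only, not part of
    # localrepo.py; assumes an existing repository at the hypothetical path
    # '/tmp/repo' with pending working-directory changes):
    #
    #     from mercurial import ui as uimod, hg
    #     repo = hg.repository(uimod.ui(), '/tmp/repo')
    #     node = repo.commit(text='example message',
    #                        user='alice <alice@example.com>')
    #     # commit() returns the new changelog node, or None when there is
    #     # nothing to commit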
1625 1633 @unfilteredmethod
1626 1634 def commitctx(self, ctx, error=False):
1627 1635 """Add a new revision to current repository.
1628 1636 Revision information is passed via the context argument.
1629 1637 """
1630 1638
1631 1639 tr = None
1632 1640 p1, p2 = ctx.p1(), ctx.p2()
1633 1641 user = ctx.user()
1634 1642
1635 1643 lock = self.lock()
1636 1644 try:
1637 1645 tr = self.transaction("commit")
1638 1646 trp = weakref.proxy(tr)
1639 1647
1640 1648 if ctx.files():
1641 1649 m1 = p1.manifest()
1642 1650 m2 = p2.manifest()
1643 1651 m = m1.copy()
1644 1652
1645 1653 # check in files
1646 1654 added = []
1647 1655 changed = []
1648 1656 removed = list(ctx.removed())
1649 1657 linkrev = len(self)
1650 1658 self.ui.note(_("committing files:\n"))
1651 1659 for f in sorted(ctx.modified() + ctx.added()):
1652 1660 self.ui.note(f + "\n")
1653 1661 try:
1654 1662 fctx = ctx[f]
1655 1663 if fctx is None:
1656 1664 removed.append(f)
1657 1665 else:
1658 1666 added.append(f)
1659 1667 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1660 1668 trp, changed)
1661 1669 m.setflag(f, fctx.flags())
1662 1670 except OSError as inst:
1663 1671 self.ui.warn(_("trouble committing %s!\n") % f)
1664 1672 raise
1665 1673 except IOError as inst:
1666 1674 errcode = getattr(inst, 'errno', errno.ENOENT)
1667 1675                     if error or (errcode and errcode != errno.ENOENT):
1668 1676 self.ui.warn(_("trouble committing %s!\n") % f)
1669 1677 raise
1670 1678
1671 1679 # update manifest
1672 1680 self.ui.note(_("committing manifest\n"))
1673 1681 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1674 1682 drop = [f for f in removed if f in m]
1675 1683 for f in drop:
1676 1684 del m[f]
1677 1685 mn = self.manifest.add(m, trp, linkrev,
1678 1686 p1.manifestnode(), p2.manifestnode(),
1679 1687 added, drop)
1680 1688 files = changed + removed
1681 1689 else:
1682 1690 mn = p1.manifestnode()
1683 1691 files = []
1684 1692
1685 1693 # update changelog
1686 1694 self.ui.note(_("committing changelog\n"))
1687 1695 self.changelog.delayupdate(tr)
1688 1696 n = self.changelog.add(mn, files, ctx.description(),
1689 1697 trp, p1.node(), p2.node(),
1690 1698 user, ctx.date(), ctx.extra().copy())
1691 1699 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1692 1700 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1693 1701 parent2=xp2)
1694 1702             # set the new commit in its proper phase
1695 1703 targetphase = subrepo.newcommitphase(self.ui, ctx)
1696 1704 if targetphase:
1697 1705                 # retracting the boundary does not alter parent changesets;
1698 1706                 # if a parent has a higher phase, the resulting phase will
1699 1707                 # be compliant anyway
1700 1708 #
1701 1709 # if minimal phase was 0 we don't need to retract anything
1702 1710 phases.retractboundary(self, tr, targetphase, [n])
1703 1711 tr.close()
1704 1712 branchmap.updatecache(self.filtered('served'))
1705 1713 return n
1706 1714 finally:
1707 1715 if tr:
1708 1716 tr.release()
1709 1717 lock.release()
1710 1718
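    # commitctx() is the lower-level entry point: it takes a fully formed
    # context instead of reading the working directory. A hedged sketch
    # using context.memctx to create a commit in memory (the file name and
    # contents are invented for illustration):
    #
    #     from mercurial import context
    #     from mercurial.node import nullid
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(repo, path, 'new contents\n')
    #     mctx = context.memctx(repo, (repo['tip'].node(), nullid),
    #                           'in-memory commit', ['a.txt'], getfilectx,
    #                           user='alice <alice@example.com>')
    #     node = repo.commitctx(mctx)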
1711 1719 @unfilteredmethod
1712 1720 def destroying(self):
1713 1721 '''Inform the repository that nodes are about to be destroyed.
1714 1722 Intended for use by strip and rollback, so there's a common
1715 1723 place for anything that has to be done before destroying history.
1716 1724
1717 1725 This is mostly useful for saving state that is in memory and waiting
1718 1726 to be flushed when the current lock is released. Because a call to
1719 1727 destroyed is imminent, the repo will be invalidated causing those
1720 1728 changes to stay in memory (waiting for the next unlock), or vanish
1721 1729 completely.
1722 1730 '''
1723 1731 # When using the same lock to commit and strip, the phasecache is left
1724 1732 # dirty after committing. Then when we strip, the repo is invalidated,
1725 1733 # causing those changes to disappear.
1726 1734 if '_phasecache' in vars(self):
1727 1735 self._phasecache.write()
1728 1736
1729 1737 @unfilteredmethod
1730 1738 def destroyed(self):
1731 1739 '''Inform the repository that nodes have been destroyed.
1732 1740 Intended for use by strip and rollback, so there's a common
1733 1741 place for anything that has to be done after destroying history.
1734 1742 '''
1735 1743 # When one tries to:
1736 1744 # 1) destroy nodes thus calling this method (e.g. strip)
1737 1745 # 2) use phasecache somewhere (e.g. commit)
1738 1746 #
1739 1747 # then 2) will fail because the phasecache contains nodes that were
1740 1748 # removed. We can either remove phasecache from the filecache,
1741 1749 # causing it to reload next time it is accessed, or simply filter
1742 1750 # the removed nodes now and write the updated cache.
1743 1751 self._phasecache.filterunknown(self)
1744 1752 self._phasecache.write()
1745 1753
1746 1754         # update the 'served' branch cache to help read-only server processes
1747 1755 # Thanks to branchcache collaboration this is done from the nearest
1748 1756 # filtered subset and it is expected to be fast.
1749 1757 branchmap.updatecache(self.filtered('served'))
1750 1758
1751 1759 # Ensure the persistent tag cache is updated. Doing it now
1752 1760 # means that the tag cache only has to worry about destroyed
1753 1761 # heads immediately after a strip/rollback. That in turn
1754 1762 # guarantees that "cachetip == currenttip" (comparing both rev
1755 1763 # and node) always means no nodes have been added or destroyed.
1756 1764
1757 1765 # XXX this is suboptimal when qrefresh'ing: we strip the current
1758 1766 # head, refresh the tag cache, then immediately add a new head.
1759 1767 # But I think doing it this way is necessary for the "instant
1760 1768 # tag cache retrieval" case to work.
1761 1769 self.invalidate()
1762 1770
1763 1771 def walk(self, match, node=None):
1764 1772 '''
1765 1773 walk recursively through the directory tree or a given
1766 1774 changeset, finding all files matched by the match
1767 1775 function
1768 1776 '''
1769 1777 return self[node].walk(match)
1770 1778
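    # A hedged example of walk() over the working directory ('glob:**.py'
    # is an arbitrary pattern chosen for illustration):
    #
    #     from mercurial import match as matchmod
    #     m = matchmod.match(repo.root, '', ['glob:**.py'])
    #     for fname in repo.walk(m):
    #         print fname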
1771 1779 def status(self, node1='.', node2=None, match=None,
1772 1780 ignored=False, clean=False, unknown=False,
1773 1781 listsubrepos=False):
1774 1782 '''a convenience method that calls node1.status(node2)'''
1775 1783 return self[node1].status(node2, match, ignored, clean, unknown,
1776 1784 listsubrepos)
1777 1785
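    # status() returns an scmutil.status tuple; a small sketch of reading
    # it (attribute names per scmutil.status):
    #
    #     st = repo.status()                    # working dir vs. '.'
    #     for f in st.modified:
    #         repo.ui.write('M %s\n' % f)
    #     # st.added, st.removed, st.deleted, etc. are available likewise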
1778 1786 def heads(self, start=None):
1779 1787 heads = self.changelog.heads(start)
1780 1788 # sort the output in rev descending order
1781 1789 return sorted(heads, key=self.changelog.rev, reverse=True)
1782 1790
1783 1791 def branchheads(self, branch=None, start=None, closed=False):
1784 1792 '''return a (possibly filtered) list of heads for the given branch
1785 1793
1786 1794 Heads are returned in topological order, from newest to oldest.
1787 1795 If branch is None, use the dirstate branch.
1788 1796 If start is not None, return only heads reachable from start.
1789 1797 If closed is True, return heads that are marked as closed as well.
1790 1798 '''
1791 1799 if branch is None:
1792 1800 branch = self[None].branch()
1793 1801 branches = self.branchmap()
1794 1802 if branch not in branches:
1795 1803 return []
1796 1804 # the cache returns heads ordered lowest to highest
1797 1805 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1798 1806 if start is not None:
1799 1807 # filter out the heads that cannot be reached from startrev
1800 1808 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1801 1809 bheads = [h for h in bheads if h in fbheads]
1802 1810 return bheads
1803 1811
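    # Illustrative use of branchheads() ('default' is the conventional
    # branch name; heads come back newest first):
    #
    #     from mercurial.node import short
    #     for h in repo.branchheads('default'):
    #         repo.ui.write('%s\n' % short(h))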
1804 1812 def branches(self, nodes):
1805 1813 if not nodes:
1806 1814 nodes = [self.changelog.tip()]
1807 1815 b = []
1808 1816 for n in nodes:
1809 1817 t = n
1810 1818 while True:
1811 1819 p = self.changelog.parents(n)
1812 1820 if p[1] != nullid or p[0] == nullid:
1813 1821 b.append((t, n, p[0], p[1]))
1814 1822 break
1815 1823 n = p[0]
1816 1824 return b
1817 1825
1818 1826 def between(self, pairs):
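        # For each (top, bottom) pair, walk first parents from top toward
        # bottom, recording nodes at exponentially growing distances
        # (1, 2, 4, ...); part of the legacy wire-protocol discovery.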
1819 1827 r = []
1820 1828
1821 1829 for top, bottom in pairs:
1822 1830 n, l, i = top, [], 0
1823 1831 f = 1
1824 1832
1825 1833 while n != bottom and n != nullid:
1826 1834 p = self.changelog.parents(n)[0]
1827 1835 if i == f:
1828 1836 l.append(n)
1829 1837 f = f * 2
1830 1838 n = p
1831 1839 i += 1
1832 1840
1833 1841 r.append(l)
1834 1842
1835 1843 return r
1836 1844
1837 1845 def checkpush(self, pushop):
1838 1846 """Extensions can override this function if additional checks have
1839 1847         to be performed before pushing, or call it if they override the
1840 1848         push command.
1841 1849 """
1842 1850 pass
1843 1851
1844 1852 @unfilteredpropertycache
1845 1853 def prepushoutgoinghooks(self):
1846 1854         """Return a util.hooks object consisting of "(repo, remote, outgoing)"
1847 1855         functions, which are called before pushing changesets.
1848 1856 """
1849 1857 return util.hooks()
1850 1858
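    # A hedged sketch of how an extension might register a pre-push check
    # ('myext' and the abort condition are invented for illustration):
    #
    #     def checkoutgoing(repo, remote, outgoing):
    #         if len(outgoing.missing) > 100:
    #             raise error.Abort('refusing to push more than 100 csets')
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)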
1851 1859 def clone(self, remote, heads=[], stream=None):
1852 1860 '''clone remote repository.
1853 1861
1854 1862 keyword arguments:
1855 1863 heads: list of revs to clone (forces use of pull)
1856 1864 stream: use streaming clone if possible'''
1857 1865 # internal config: ui.quietbookmarkmove
1858 1866 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1859 1867 try:
1860 1868 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1861 1869 pullop = exchange.pull(self, remote, heads,
1862 1870 streamclonerequested=stream)
1863 1871 return pullop.cgresult
1864 1872 finally:
1865 1873 self.ui.restoreconfig(quiet)
1866 1874
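    # Example (sketch): cloning from another local repository via a peer
    # ('/tmp/src' is a hypothetical path; the return value is the pull
    # result code from exchange.pull):
    #
    #     from mercurial import hg
    #     other = hg.peer(repo.ui, {}, '/tmp/src')
    #     cgresult = repo.clone(other)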
1867 1875 def pushkey(self, namespace, key, old, new):
1868 1876 try:
1869 1877 tr = self.currenttransaction()
1870 1878 hookargs = {}
1871 1879 if tr is not None:
1872 1880 hookargs.update(tr.hookargs)
1873 1881 hookargs['namespace'] = namespace
1874 1882 hookargs['key'] = key
1875 1883 hookargs['old'] = old
1876 1884 hookargs['new'] = new
1877 1885 self.hook('prepushkey', throw=True, **hookargs)
1878 1886 except error.HookAbort as exc:
1879 1887 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1880 1888 if exc.hint:
1881 1889 self.ui.write_err(_("(%s)\n") % exc.hint)
1882 1890 return False
1883 1891 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1884 1892 ret = pushkey.push(self, namespace, key, old, new)
1885 1893 def runhook():
1886 1894 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1887 1895 ret=ret)
1888 1896 self._afterlock(runhook)
1889 1897 return ret
1890 1898
1891 1899 def listkeys(self, namespace):
1892 1900 self.hook('prelistkeys', throw=True, namespace=namespace)
1893 1901 self.ui.debug('listing keys for "%s"\n' % namespace)
1894 1902 values = pushkey.list(self, namespace)
1895 1903 self.hook('listkeys', namespace=namespace, values=values)
1896 1904 return values
1897 1905
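    # pushkey/listkeys power the exchange of bookmarks, phases, etc. A
    # small sketch over the 'bookmarks' namespace (the bookmark name
    # 'feature' is invented; values are hex nodes, '' when absent):
    #
    #     marks = repo.listkeys('bookmarks')
    #     repo.pushkey('bookmarks', 'feature',
    #                  marks.get('feature', ''), repo['tip'].hex())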
1898 1906 def debugwireargs(self, one, two, three=None, four=None, five=None):
1899 1907 '''used to test argument passing over the wire'''
1900 1908 return "%s %s %s %s %s" % (one, two, three, four, five)
1901 1909
1902 1910 def savecommitmessage(self, text):
1903 1911 fp = self.vfs('last-message.txt', 'wb')
1904 1912 try:
1905 1913 fp.write(text)
1906 1914 finally:
1907 1915 fp.close()
1908 1916 return self.pathto(fp.name[len(self.root) + 1:])
1909 1917
1910 1918 # used to avoid circular references so destructors work
1911 1919 def aftertrans(files):
1912 1920 renamefiles = [tuple(t) for t in files]
1913 1921 def a():
1914 1922 for vfs, src, dest in renamefiles:
1915 1923 try:
1916 1924 vfs.rename(src, dest)
1917 1925 except OSError: # journal file does not yet exist
1918 1926 pass
1919 1927 return a
1920 1928
1921 1929 def undoname(fn):
1922 1930 base, name = os.path.split(fn)
1923 1931 assert name.startswith('journal')
1924 1932 return os.path.join(base, name.replace('journal', 'undo', 1))
1925 1933
1926 1934 def instance(ui, path, create):
1927 1935 return localrepository(ui, util.urllocalpath(path), create)
1928 1936
1929 1937 def islocal(path):
1930 1938 return True