localrepo: move new repo requirements into standalone function (API)...
Gregory Szorc
r28164:ad11edef default
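This changeset replaces the inline requirements computation in localrepository.__init__ (the removed block below, old lines 245-247 and 285-300) with a call to a standalone newreporequirements() function, so that a single function can be wrapped to control the requirements of newly created repositories. The function body itself lies outside this hunk; the following is a minimal sketch reconstructed from the removed inline block. The module-level placement, the docstring, and the nesting of the fncache/dotencode checks under usestore are assumptions (the rendered diff collapses indentation); the config keys and the scmutil.gdinitconfig() call are taken verbatim from the removed code, and scmutil is already imported at the top of localrepo.py.

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    A sketch: extensions could wrap a function like this to add custom
    requirements for new repositories.
    """
    ui = repo.ui
    # base requirement for any new repo (was self._baserequirements(create))
    requirements = set(['revlogv1'])

    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        # fncache only makes sense with a store, and dotencode only with
        # fncache; this nesting is assumed, not visible in the rendering
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements

With a function of this shape in place, the create branch of __init__ reduces to the single added line visible in the hunk: self.requirements = newreporequirements(self).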
@@ -1,1971 +1,1977 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import inspect
12 12 import os
13 13 import random
14 14 import time
15 15 import urllib
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 propertycache = util.propertycache
62 62 filecache = scmutil.filecache
63 63
64 64 class repofilecache(filecache):
65 65 """All filecache usage on repo are done for logic that should be unfiltered
66 66 """
67 67
68 68 def __get__(self, repo, type=None):
69 69 return super(repofilecache, self).__get__(repo.unfiltered(), type)
70 70 def __set__(self, repo, value):
71 71 return super(repofilecache, self).__set__(repo.unfiltered(), value)
72 72 def __delete__(self, repo):
73 73 return super(repofilecache, self).__delete__(repo.unfiltered())
74 74
75 75 class storecache(repofilecache):
76 76 """filecache for files in the store"""
77 77 def join(self, obj, fname):
78 78 return obj.sjoin(fname)
79 79
80 80 class unfilteredpropertycache(propertycache):
81 81 """propertycache that apply to unfiltered repo only"""
82 82
83 83 def __get__(self, repo, type=None):
84 84 unfi = repo.unfiltered()
85 85 if unfi is repo:
86 86 return super(unfilteredpropertycache, self).__get__(unfi)
87 87 return getattr(unfi, self.name)
88 88
89 89 class filteredpropertycache(propertycache):
90 90 """propertycache that must take filtering in account"""
91 91
92 92 def cachevalue(self, obj, value):
93 93 object.__setattr__(obj, self.name, value)
94 94
95 95
96 96 def hasunfilteredcache(repo, name):
97 97 """check if a repo has an unfilteredpropertycache value for <name>"""
98 98 return name in vars(repo.unfiltered())
99 99
100 100 def unfilteredmethod(orig):
101 101 """decorate method that always need to be run on unfiltered version"""
102 102 def wrapper(repo, *args, **kwargs):
103 103 return orig(repo.unfiltered(), *args, **kwargs)
104 104 return wrapper
105 105
106 106 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
107 107 'unbundle'))
108 108 legacycaps = moderncaps.union(set(['changegroupsubset']))
109 109
110 110 class localpeer(peer.peerrepository):
111 111 '''peer for a local repo; reflects only the most recent API'''
112 112
113 113 def __init__(self, repo, caps=moderncaps):
114 114 peer.peerrepository.__init__(self)
115 115 self._repo = repo.filtered('served')
116 116 self.ui = repo.ui
117 117 self._caps = repo._restrictcapabilities(caps)
118 118 self.requirements = repo.requirements
119 119 self.supportedformats = repo.supportedformats
120 120
121 121 def close(self):
122 122 self._repo.close()
123 123
124 124 def _capabilities(self):
125 125 return self._caps
126 126
127 127 def local(self):
128 128 return self._repo
129 129
130 130 def canpush(self):
131 131 return True
132 132
133 133 def url(self):
134 134 return self._repo.url()
135 135
136 136 def lookup(self, key):
137 137 return self._repo.lookup(key)
138 138
139 139 def branchmap(self):
140 140 return self._repo.branchmap()
141 141
142 142 def heads(self):
143 143 return self._repo.heads()
144 144
145 145 def known(self, nodes):
146 146 return self._repo.known(nodes)
147 147
148 148 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
149 149 **kwargs):
150 150 cg = exchange.getbundle(self._repo, source, heads=heads,
151 151 common=common, bundlecaps=bundlecaps, **kwargs)
152 152 if bundlecaps is not None and 'HG20' in bundlecaps:
153 153 # When requesting a bundle2, getbundle returns a stream to make the
154 154 # wire level function happier. We need to build a proper object
155 155 # from it in local peer.
156 156 cg = bundle2.getunbundler(self.ui, cg)
157 157 return cg
158 158
159 159 # TODO We might want to move the next two calls into legacypeer and add
160 160 # unbundle instead.
161 161
162 162 def unbundle(self, cg, heads, url):
163 163 """apply a bundle on a repo
164 164
165 165 This function handles the repo locking itself."""
166 166 try:
167 167 try:
168 168 cg = exchange.readbundle(self.ui, cg, None)
169 169 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
170 170 if util.safehasattr(ret, 'getchunks'):
171 171 # This is a bundle20 object, turn it into an unbundler.
172 172 # This little dance should be dropped eventually when the
173 173 # API is finally improved.
174 174 stream = util.chunkbuffer(ret.getchunks())
175 175 ret = bundle2.getunbundler(self.ui, stream)
176 176 return ret
177 177 except Exception as exc:
178 178 # If the exception contains output salvaged from a bundle2
179 179 # reply, we need to make sure it is printed before continuing
180 180 # to fail. So we build a bundle2 with such output and consume
181 181 # it directly.
182 182 #
183 183 # This is not very elegant but allows a "simple" solution for
184 184 # issue4594
185 185 output = getattr(exc, '_bundle2salvagedoutput', ())
186 186 if output:
187 187 bundler = bundle2.bundle20(self._repo.ui)
188 188 for out in output:
189 189 bundler.addpart(out)
190 190 stream = util.chunkbuffer(bundler.getchunks())
191 191 b = bundle2.getunbundler(self.ui, stream)
192 192 bundle2.processbundle(self._repo, b)
193 193 raise
194 194 except error.PushRaced as exc:
195 195 raise error.ResponseError(_('push failed:'), str(exc))
196 196
197 197 def lock(self):
198 198 return self._repo.lock()
199 199
200 200 def addchangegroup(self, cg, source, url):
201 201 return cg.apply(self._repo, source, url)
202 202
203 203 def pushkey(self, namespace, key, old, new):
204 204 return self._repo.pushkey(namespace, key, old, new)
205 205
206 206 def listkeys(self, namespace):
207 207 return self._repo.listkeys(namespace)
208 208
209 209 def debugwireargs(self, one, two, three=None, four=None, five=None):
210 210 '''used to test argument passing over the wire'''
211 211 return "%s %s %s %s %s" % (one, two, three, four, five)
212 212
213 213 class locallegacypeer(localpeer):
214 214 '''peer extension which implements legacy methods too; used for tests with
215 215 restricted capabilities'''
216 216
217 217 def __init__(self, repo):
218 218 localpeer.__init__(self, repo, caps=legacycaps)
219 219
220 220 def branches(self, nodes):
221 221 return self._repo.branches(nodes)
222 222
223 223 def between(self, pairs):
224 224 return self._repo.between(pairs)
225 225
226 226 def changegroup(self, basenodes, source):
227 227 return changegroup.changegroup(self._repo, basenodes, source)
228 228
229 229 def changegroupsubset(self, bases, heads, source):
230 230 return changegroup.changegroupsubset(self._repo, bases, heads, source)
231 231
232 232 class localrepository(object):
233 233
234 234 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
235 235 'manifestv2'))
236 236 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
237 237 'dotencode'))
238 238 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
239 239 filtername = None
240 240
241 241 # a list of (ui, featureset) functions.
242 242 # only functions defined in modules of enabled extensions are invoked
243 243 featuresetupfuncs = set()
244 244
245 def _baserequirements(self, create):
246 return ['revlogv1']
247
248 245 def __init__(self, baseui, path=None, create=False):
249 246 self.requirements = set()
250 247 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
251 248 self.wopener = self.wvfs
252 249 self.root = self.wvfs.base
253 250 self.path = self.wvfs.join(".hg")
254 251 self.origroot = path
255 252 self.auditor = pathutil.pathauditor(self.root, self._checknested)
256 253 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
257 254 realfs=False)
258 255 self.vfs = scmutil.vfs(self.path)
259 256 self.opener = self.vfs
260 257 self.baseui = baseui
261 258 self.ui = baseui.copy()
262 259 self.ui.copy = baseui.copy # prevent copying repo configuration
263 260 # A list of callbacks to shape the phase if no data were found.
264 261 # Callbacks are in the form: func(repo, roots) --> processed root.
265 262 # This list is to be filled by extensions during repo setup
266 263 self._phasedefaults = []
267 264 try:
268 265 self.ui.readconfig(self.join("hgrc"), self.root)
269 266 extensions.loadall(self.ui)
270 267 except IOError:
271 268 pass
272 269
273 270 if self.featuresetupfuncs:
274 271 self.supported = set(self._basesupported) # use private copy
275 272 extmods = set(m.__name__ for n, m
276 273 in extensions.extensions(self.ui))
277 274 for setupfunc in self.featuresetupfuncs:
278 275 if setupfunc.__module__ in extmods:
279 276 setupfunc(self.ui, self.supported)
280 277 else:
281 278 self.supported = self._basesupported
282 279
283 280 if not self.vfs.isdir():
284 281 if create:
285 requirements = set(self._baserequirements(create))
286 if self.ui.configbool('format', 'usestore', True):
287 requirements.add("store")
288 if self.ui.configbool('format', 'usefncache', True):
289 requirements.add("fncache")
290 if self.ui.configbool('format', 'dotencode', True):
291 requirements.add('dotencode')
292
293 if scmutil.gdinitconfig(self.ui):
294 requirements.add("generaldelta")
295 if self.ui.configbool('experimental', 'treemanifest', False):
296 requirements.add("treemanifest")
297 if self.ui.configbool('experimental', 'manifestv2', False):
298 requirements.add("manifestv2")
299
300 self.requirements = requirements
282 self.requirements = newreporequirements(self)
301 283
302 284 if not self.wvfs.exists():
303 285 self.wvfs.makedirs()
304 286 self.vfs.makedir(notindexed=True)
305 287
306 if 'store' in requirements:
288 if 'store' in self.requirements:
307 289 self.vfs.mkdir("store")
308 290
309 291 # create an invalid changelog
310 292 self.vfs.append(
311 293 "00changelog.i",
312 294 '\0\0\0\2' # represents revlogv2
313 295 ' dummy changelog to prevent using the old repo layout'
314 296 )
315 297 else:
316 298 raise error.RepoError(_("repository %s not found") % path)
317 299 elif create:
318 300 raise error.RepoError(_("repository %s already exists") % path)
319 301 else:
320 302 try:
321 303 self.requirements = scmutil.readrequires(
322 304 self.vfs, self.supported)
323 305 except IOError as inst:
324 306 if inst.errno != errno.ENOENT:
325 307 raise
326 308
327 309 self.sharedpath = self.path
328 310 try:
329 311 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
330 312 realpath=True)
331 313 s = vfs.base
332 314 if not vfs.exists():
333 315 raise error.RepoError(
334 316 _('.hg/sharedpath points to nonexistent directory %s') % s)
335 317 self.sharedpath = s
336 318 except IOError as inst:
337 319 if inst.errno != errno.ENOENT:
338 320 raise
339 321
340 322 self.store = store.store(
341 323 self.requirements, self.sharedpath, scmutil.vfs)
342 324 self.spath = self.store.path
343 325 self.svfs = self.store.vfs
344 326 self.sjoin = self.store.join
345 327 self.vfs.createmode = self.store.createmode
346 328 self._applyopenerreqs()
347 329 if create:
348 330 self._writerequirements()
349 331
350 332 self._dirstatevalidatewarned = False
351 333
352 334 self._branchcaches = {}
353 335 self._revbranchcache = None
354 336 self.filterpats = {}
355 337 self._datafilters = {}
356 338 self._transref = self._lockref = self._wlockref = None
357 339
358 340 # A cache for various files under .hg/ that tracks file changes,
359 341 # (used by the filecache decorator)
360 342 #
361 343 # Maps a property name to its util.filecacheentry
362 344 self._filecache = {}
363 345
364 346 # hold sets of revision to be filtered
365 347 # should be cleared when something might have changed the filter value:
366 348 # - new changesets,
367 349 # - phase change,
368 350 # - new obsolescence marker,
369 351 # - working directory parent change,
370 352 # - bookmark changes
371 353 self.filteredrevcache = {}
372 354
373 355 # generic mapping between names and nodes
374 356 self.names = namespaces.namespaces()
375 357
376 358 def close(self):
377 359 self._writecaches()
378 360
379 361 def _writecaches(self):
380 362 if self._revbranchcache:
381 363 self._revbranchcache.write()
382 364
383 365 def _restrictcapabilities(self, caps):
384 366 if self.ui.configbool('experimental', 'bundle2-advertise', True):
385 367 caps = set(caps)
386 368 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
387 369 caps.add('bundle2=' + urllib.quote(capsblob))
388 370 return caps
389 371
390 372 def _applyopenerreqs(self):
391 373 self.svfs.options = dict((r, 1) for r in self.requirements
392 374 if r in self.openerreqs)
393 375 # experimental config: format.chunkcachesize
394 376 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
395 377 if chunkcachesize is not None:
396 378 self.svfs.options['chunkcachesize'] = chunkcachesize
397 379 # experimental config: format.maxchainlen
398 380 maxchainlen = self.ui.configint('format', 'maxchainlen')
399 381 if maxchainlen is not None:
400 382 self.svfs.options['maxchainlen'] = maxchainlen
401 383 # experimental config: format.manifestcachesize
402 384 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
403 385 if manifestcachesize is not None:
404 386 self.svfs.options['manifestcachesize'] = manifestcachesize
405 387 # experimental config: format.aggressivemergedeltas
406 388 aggressivemergedeltas = self.ui.configbool('format',
407 389 'aggressivemergedeltas', False)
408 390 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
409 391 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
410 392
411 393 def _writerequirements(self):
412 394 scmutil.writerequires(self.vfs, self.requirements)
413 395
414 396 def _checknested(self, path):
415 397 """Determine if path is a legal nested repository."""
416 398 if not path.startswith(self.root):
417 399 return False
418 400 subpath = path[len(self.root) + 1:]
419 401 normsubpath = util.pconvert(subpath)
420 402
421 403 # XXX: Checking against the current working copy is wrong in
422 404 # the sense that it can reject things like
423 405 #
424 406 # $ hg cat -r 10 sub/x.txt
425 407 #
426 408 # if sub/ is no longer a subrepository in the working copy
427 409 # parent revision.
428 410 #
429 411 # However, it can of course also allow things that would have
430 412 # been rejected before, such as the above cat command if sub/
431 413 # is a subrepository now, but was a normal directory before.
432 414 # The old path auditor would have rejected by mistake since it
433 415 # panics when it sees sub/.hg/.
434 416 #
435 417 # All in all, checking against the working copy seems sensible
436 418 # since we want to prevent access to nested repositories on
437 419 # the filesystem *now*.
438 420 ctx = self[None]
439 421 parts = util.splitpath(subpath)
440 422 while parts:
441 423 prefix = '/'.join(parts)
442 424 if prefix in ctx.substate:
443 425 if prefix == normsubpath:
444 426 return True
445 427 else:
446 428 sub = ctx.sub(prefix)
447 429 return sub.checknested(subpath[len(prefix) + 1:])
448 430 else:
449 431 parts.pop()
450 432 return False
451 433
452 434 def peer(self):
453 435 return localpeer(self) # not cached to avoid reference cycle
454 436
455 437 def unfiltered(self):
456 438 """Return unfiltered version of the repository
457 439
458 440 Intended to be overwritten by filtered repo."""
459 441 return self
460 442
461 443 def filtered(self, name):
462 444 """Return a filtered version of a repository"""
463 445 # build a new class with the mixin and the current class
464 446 # (possibly subclass of the repo)
465 447 class proxycls(repoview.repoview, self.unfiltered().__class__):
466 448 pass
467 449 return proxycls(self, name)
468 450
469 451 @repofilecache('bookmarks', 'bookmarks.current')
470 452 def _bookmarks(self):
471 453 return bookmarks.bmstore(self)
472 454
473 455 @property
474 456 def _activebookmark(self):
475 457 return self._bookmarks.active
476 458
477 459 def bookmarkheads(self, bookmark):
478 460 name = bookmark.split('@', 1)[0]
479 461 heads = []
480 462 for mark, n in self._bookmarks.iteritems():
481 463 if mark.split('@', 1)[0] == name:
482 464 heads.append(n)
483 465 return heads
484 466
485 467 # _phaserevs and _phasesets depend on changelog. what we need is to
486 468 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
487 469 # can't be easily expressed in filecache mechanism.
488 470 @storecache('phaseroots', '00changelog.i')
489 471 def _phasecache(self):
490 472 return phases.phasecache(self, self._phasedefaults)
491 473
492 474 @storecache('obsstore')
493 475 def obsstore(self):
494 476 # read default format for new obsstore.
495 477 # developer config: format.obsstore-version
496 478 defaultformat = self.ui.configint('format', 'obsstore-version', None)
497 479 # rely on obsstore class default when possible.
498 480 kwargs = {}
499 481 if defaultformat is not None:
500 482 kwargs['defaultformat'] = defaultformat
501 483 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
502 484 store = obsolete.obsstore(self.svfs, readonly=readonly,
503 485 **kwargs)
504 486 if store and readonly:
505 487 self.ui.warn(
506 488 _('obsolete feature not enabled but %i markers found!\n')
507 489 % len(list(store)))
508 490 return store
509 491
510 492 @storecache('00changelog.i')
511 493 def changelog(self):
512 494 c = changelog.changelog(self.svfs)
513 495 if 'HG_PENDING' in os.environ:
514 496 p = os.environ['HG_PENDING']
515 497 if p.startswith(self.root):
516 498 c.readpending('00changelog.i.a')
517 499 return c
518 500
519 501 @storecache('00manifest.i')
520 502 def manifest(self):
521 503 return manifest.manifest(self.svfs)
522 504
523 505 def dirlog(self, dir):
524 506 return self.manifest.dirlog(dir)
525 507
526 508 @repofilecache('dirstate')
527 509 def dirstate(self):
528 510 return dirstate.dirstate(self.vfs, self.ui, self.root,
529 511 self._dirstatevalidate)
530 512
531 513 def _dirstatevalidate(self, node):
532 514 try:
533 515 self.changelog.rev(node)
534 516 return node
535 517 except error.LookupError:
536 518 if not self._dirstatevalidatewarned:
537 519 self._dirstatevalidatewarned = True
538 520 self.ui.warn(_("warning: ignoring unknown"
539 521 " working parent %s!\n") % short(node))
540 522 return nullid
541 523
542 524 def __getitem__(self, changeid):
543 525 if changeid is None or changeid == wdirrev:
544 526 return context.workingctx(self)
545 527 if isinstance(changeid, slice):
546 528 return [context.changectx(self, i)
547 529 for i in xrange(*changeid.indices(len(self)))
548 530 if i not in self.changelog.filteredrevs]
549 531 return context.changectx(self, changeid)
550 532
551 533 def __contains__(self, changeid):
552 534 try:
553 535 self[changeid]
554 536 return True
555 537 except error.RepoLookupError:
556 538 return False
557 539
558 540 def __nonzero__(self):
559 541 return True
560 542
561 543 def __len__(self):
562 544 return len(self.changelog)
563 545
564 546 def __iter__(self):
565 547 return iter(self.changelog)
566 548
567 549 def revs(self, expr, *args):
568 550 '''Find revisions matching a revset.
569 551
570 552 The revset is specified as a string ``expr`` that may contain
571 553 %-formatting to escape certain types. See ``revset.formatspec``.
572 554
573 555 Return a revset.abstractsmartset, which is a list-like interface
574 556 that contains integer revisions.
575 557 '''
576 558 expr = revset.formatspec(expr, *args)
577 559 m = revset.match(None, expr)
578 560 return m(self)
579 561
580 562 def set(self, expr, *args):
581 563 '''Find revisions matching a revset and emit changectx instances.
582 564
583 565 This is a convenience wrapper around ``revs()`` that iterates the
584 566 result and is a generator of changectx instances.
585 567 '''
586 568 for r in self.revs(expr, *args):
587 569 yield self[r]
588 570
589 571 def url(self):
590 572 return 'file:' + self.root
591 573
592 574 def hook(self, name, throw=False, **args):
593 575 """Call a hook, passing this repo instance.
594 576
595 577 This is a convenience method to aid invoking hooks. Extensions likely
596 578 won't call this unless they have registered a custom hook or are
597 579 replacing code that is expected to call a hook.
598 580 """
599 581 return hook.hook(self.ui, self, name, throw, **args)
600 582
601 583 @unfilteredmethod
602 584 def _tag(self, names, node, message, local, user, date, extra=None,
603 585 editor=False):
604 586 if isinstance(names, str):
605 587 names = (names,)
606 588
607 589 branches = self.branchmap()
608 590 for name in names:
609 591 self.hook('pretag', throw=True, node=hex(node), tag=name,
610 592 local=local)
611 593 if name in branches:
612 594 self.ui.warn(_("warning: tag %s conflicts with existing"
613 595 " branch name\n") % name)
614 596
615 597 def writetags(fp, names, munge, prevtags):
616 598 fp.seek(0, 2)
617 599 if prevtags and prevtags[-1] != '\n':
618 600 fp.write('\n')
619 601 for name in names:
620 602 if munge:
621 603 m = munge(name)
622 604 else:
623 605 m = name
624 606
625 607 if (self._tagscache.tagtypes and
626 608 name in self._tagscache.tagtypes):
627 609 old = self.tags().get(name, nullid)
628 610 fp.write('%s %s\n' % (hex(old), m))
629 611 fp.write('%s %s\n' % (hex(node), m))
630 612 fp.close()
631 613
632 614 prevtags = ''
633 615 if local:
634 616 try:
635 617 fp = self.vfs('localtags', 'r+')
636 618 except IOError:
637 619 fp = self.vfs('localtags', 'a')
638 620 else:
639 621 prevtags = fp.read()
640 622
641 623 # local tags are stored in the current charset
642 624 writetags(fp, names, None, prevtags)
643 625 for name in names:
644 626 self.hook('tag', node=hex(node), tag=name, local=local)
645 627 return
646 628
647 629 try:
648 630 fp = self.wfile('.hgtags', 'rb+')
649 631 except IOError as e:
650 632 if e.errno != errno.ENOENT:
651 633 raise
652 634 fp = self.wfile('.hgtags', 'ab')
653 635 else:
654 636 prevtags = fp.read()
655 637
656 638 # committed tags are stored in UTF-8
657 639 writetags(fp, names, encoding.fromlocal, prevtags)
658 640
659 641 fp.close()
660 642
661 643 self.invalidatecaches()
662 644
663 645 if '.hgtags' not in self.dirstate:
664 646 self[None].add(['.hgtags'])
665 647
666 648 m = matchmod.exact(self.root, '', ['.hgtags'])
667 649 tagnode = self.commit(message, user, date, extra=extra, match=m,
668 650 editor=editor)
669 651
670 652 for name in names:
671 653 self.hook('tag', node=hex(node), tag=name, local=local)
672 654
673 655 return tagnode
674 656
675 657 def tag(self, names, node, message, local, user, date, editor=False):
676 658 '''tag a revision with one or more symbolic names.
677 659
678 660 names is a list of strings or, when adding a single tag, names may be a
679 661 string.
680 662
681 663 if local is True, the tags are stored in a per-repository file.
682 664 otherwise, they are stored in the .hgtags file, and a new
683 665 changeset is committed with the change.
684 666
685 667 keyword arguments:
686 668
687 669 local: whether to store tags in non-version-controlled file
688 670 (default False)
689 671
690 672 message: commit message to use if committing
691 673
692 674 user: name of user to use if committing
693 675
694 676 date: date tuple to use if committing'''
695 677
696 678 if not local:
697 679 m = matchmod.exact(self.root, '', ['.hgtags'])
698 680 if any(self.status(match=m, unknown=True, ignored=True)):
699 681 raise error.Abort(_('working copy of .hgtags is changed'),
700 682 hint=_('please commit .hgtags manually'))
701 683
702 684 self.tags() # instantiate the cache
703 685 self._tag(names, node, message, local, user, date, editor=editor)
704 686
705 687 @filteredpropertycache
706 688 def _tagscache(self):
707 689 '''Returns a tagscache object that contains various tags related
708 690 caches.'''
709 691
710 692 # This simplifies its cache management by having one decorated
711 693 # function (this one) and the rest simply fetch things from it.
712 694 class tagscache(object):
713 695 def __init__(self):
714 696 # These two define the set of tags for this repository. tags
715 697 # maps tag name to node; tagtypes maps tag name to 'global' or
716 698 # 'local'. (Global tags are defined by .hgtags across all
717 699 # heads, and local tags are defined in .hg/localtags.)
718 700 # They constitute the in-memory cache of tags.
719 701 self.tags = self.tagtypes = None
720 702
721 703 self.nodetagscache = self.tagslist = None
722 704
723 705 cache = tagscache()
724 706 cache.tags, cache.tagtypes = self._findtags()
725 707
726 708 return cache
727 709
728 710 def tags(self):
729 711 '''return a mapping of tag to node'''
730 712 t = {}
731 713 if self.changelog.filteredrevs:
732 714 tags, tt = self._findtags()
733 715 else:
734 716 tags = self._tagscache.tags
735 717 for k, v in tags.iteritems():
736 718 try:
737 719 # ignore tags to unknown nodes
738 720 self.changelog.rev(v)
739 721 t[k] = v
740 722 except (error.LookupError, ValueError):
741 723 pass
742 724 return t
743 725
744 726 def _findtags(self):
745 727 '''Do the hard work of finding tags. Return a pair of dicts
746 728 (tags, tagtypes) where tags maps tag name to node, and tagtypes
747 729 maps tag name to a string like \'global\' or \'local\'.
748 730 Subclasses or extensions are free to add their own tags, but
749 731 should be aware that the returned dicts will be retained for the
750 732 duration of the localrepo object.'''
751 733
752 734 # XXX what tagtype should subclasses/extensions use? Currently
753 735 # mq and bookmarks add tags, but do not set the tagtype at all.
754 736 # Should each extension invent its own tag type? Should there
755 737 # be one tagtype for all such "virtual" tags? Or is the status
756 738 # quo fine?
757 739
758 740 alltags = {} # map tag name to (node, hist)
759 741 tagtypes = {}
760 742
761 743 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
762 744 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
763 745
764 746 # Build the return dicts. Have to re-encode tag names because
765 747 # the tags module always uses UTF-8 (in order not to lose info
766 748 # writing to the cache), but the rest of Mercurial wants them in
767 749 # local encoding.
768 750 tags = {}
769 751 for (name, (node, hist)) in alltags.iteritems():
770 752 if node != nullid:
771 753 tags[encoding.tolocal(name)] = node
772 754 tags['tip'] = self.changelog.tip()
773 755 tagtypes = dict([(encoding.tolocal(name), value)
774 756 for (name, value) in tagtypes.iteritems()])
775 757 return (tags, tagtypes)
776 758
777 759 def tagtype(self, tagname):
778 760 '''
779 761 return the type of the given tag. result can be:
780 762
781 763 'local' : a local tag
782 764 'global' : a global tag
783 765 None : tag does not exist
784 766 '''
785 767
786 768 return self._tagscache.tagtypes.get(tagname)
787 769
788 770 def tagslist(self):
789 771 '''return a list of tags ordered by revision'''
790 772 if not self._tagscache.tagslist:
791 773 l = []
792 774 for t, n in self.tags().iteritems():
793 775 l.append((self.changelog.rev(n), t, n))
794 776 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
795 777
796 778 return self._tagscache.tagslist
797 779
798 780 def nodetags(self, node):
799 781 '''return the tags associated with a node'''
800 782 if not self._tagscache.nodetagscache:
801 783 nodetagscache = {}
802 784 for t, n in self._tagscache.tags.iteritems():
803 785 nodetagscache.setdefault(n, []).append(t)
804 786 for tags in nodetagscache.itervalues():
805 787 tags.sort()
806 788 self._tagscache.nodetagscache = nodetagscache
807 789 return self._tagscache.nodetagscache.get(node, [])
808 790
809 791 def nodebookmarks(self, node):
810 792 """return the list of bookmarks pointing to the specified node"""
811 793 marks = []
812 794 for bookmark, n in self._bookmarks.iteritems():
813 795 if n == node:
814 796 marks.append(bookmark)
815 797 return sorted(marks)
816 798
817 799 def branchmap(self):
818 800 '''returns a dictionary {branch: [branchheads]} with branchheads
819 801 ordered by increasing revision number'''
820 802 branchmap.updatecache(self)
821 803 return self._branchcaches[self.filtername]
822 804
823 805 @unfilteredmethod
824 806 def revbranchcache(self):
825 807 if not self._revbranchcache:
826 808 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
827 809 return self._revbranchcache
828 810
829 811 def branchtip(self, branch, ignoremissing=False):
830 812 '''return the tip node for a given branch
831 813
832 814 If ignoremissing is True, then this method will not raise an error.
833 815 This is helpful for callers that only expect None for a missing branch
834 816 (e.g. namespace).
835 817
836 818 '''
837 819 try:
838 820 return self.branchmap().branchtip(branch)
839 821 except KeyError:
840 822 if not ignoremissing:
841 823 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
842 824 else:
843 825 pass
844 826
845 827 def lookup(self, key):
846 828 return self[key].node()
847 829
848 830 def lookupbranch(self, key, remote=None):
849 831 repo = remote or self
850 832 if key in repo.branchmap():
851 833 return key
852 834
853 835 repo = (remote and remote.local()) and remote or self
854 836 return repo[key].branch()
855 837
856 838 def known(self, nodes):
857 839 cl = self.changelog
858 840 nm = cl.nodemap
859 841 filtered = cl.filteredrevs
860 842 result = []
861 843 for n in nodes:
862 844 r = nm.get(n)
863 845 resp = not (r is None or r in filtered)
864 846 result.append(resp)
865 847 return result
866 848
867 849 def local(self):
868 850 return self
869 851
870 852 def publishing(self):
871 853 # it's safe (and desirable) to trust the publish flag unconditionally
872 854 # so that we don't finalize changes shared between users via ssh or nfs
873 855 return self.ui.configbool('phases', 'publish', True, untrusted=True)
874 856
875 857 def cancopy(self):
876 858 # so statichttprepo's override of local() works
877 859 if not self.local():
878 860 return False
879 861 if not self.publishing():
880 862 return True
881 863 # if publishing we can't copy if there is filtered content
882 864 return not self.filtered('visible').changelog.filteredrevs
883 865
884 866 def shared(self):
885 867 '''the type of shared repository (None if not shared)'''
886 868 if self.sharedpath != self.path:
887 869 return 'store'
888 870 return None
889 871
890 872 def join(self, f, *insidef):
891 873 return self.vfs.join(os.path.join(f, *insidef))
892 874
893 875 def wjoin(self, f, *insidef):
894 876 return self.vfs.reljoin(self.root, f, *insidef)
895 877
896 878 def file(self, f):
897 879 if f[0] == '/':
898 880 f = f[1:]
899 881 return filelog.filelog(self.svfs, f)
900 882
901 883 def parents(self, changeid=None):
902 884 '''get list of changectxs for parents of changeid'''
903 885 msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
904 886 self.ui.deprecwarn(msg, '3.7')
905 887 return self[changeid].parents()
906 888
907 889 def changectx(self, changeid):
908 890 return self[changeid]
909 891
910 892 def setparents(self, p1, p2=nullid):
911 893 self.dirstate.beginparentchange()
912 894 copies = self.dirstate.setparents(p1, p2)
913 895 pctx = self[p1]
914 896 if copies:
915 897 # Adjust copy records; the dirstate cannot do it, as it
916 898 # requires access to the parents' manifests. Preserve them
917 899 # only for entries added to the first parent.
918 900 for f in copies:
919 901 if f not in pctx and copies[f] in pctx:
920 902 self.dirstate.copy(copies[f], f)
921 903 if p2 == nullid:
922 904 for f, s in sorted(self.dirstate.copies().items()):
923 905 if f not in pctx and s not in pctx:
924 906 self.dirstate.copy(None, f)
925 907 self.dirstate.endparentchange()
926 908
927 909 def filectx(self, path, changeid=None, fileid=None):
928 910 """changeid can be a changeset revision, node, or tag.
929 911 fileid can be a file revision or node."""
930 912 return context.filectx(self, path, changeid, fileid)
931 913
932 914 def getcwd(self):
933 915 return self.dirstate.getcwd()
934 916
935 917 def pathto(self, f, cwd=None):
936 918 return self.dirstate.pathto(f, cwd)
937 919
938 920 def wfile(self, f, mode='r'):
939 921 return self.wvfs(f, mode)
940 922
941 923 def _link(self, f):
942 924 return self.wvfs.islink(f)
943 925
944 926 def _loadfilter(self, filter):
945 927 if filter not in self.filterpats:
946 928 l = []
947 929 for pat, cmd in self.ui.configitems(filter):
948 930 if cmd == '!':
949 931 continue
950 932 mf = matchmod.match(self.root, '', [pat])
951 933 fn = None
952 934 params = cmd
953 935 for name, filterfn in self._datafilters.iteritems():
954 936 if cmd.startswith(name):
955 937 fn = filterfn
956 938 params = cmd[len(name):].lstrip()
957 939 break
958 940 if not fn:
959 941 fn = lambda s, c, **kwargs: util.filter(s, c)
960 942 # Wrap old filters not supporting keyword arguments
961 943 if not inspect.getargspec(fn)[2]:
962 944 oldfn = fn
963 945 fn = lambda s, c, **kwargs: oldfn(s, c)
964 946 l.append((mf, fn, params))
965 947 self.filterpats[filter] = l
966 948 return self.filterpats[filter]
967 949
968 950 def _filter(self, filterpats, filename, data):
969 951 for mf, fn, cmd in filterpats:
970 952 if mf(filename):
971 953 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
972 954 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
973 955 break
974 956
975 957 return data
976 958
977 959 @unfilteredpropertycache
978 960 def _encodefilterpats(self):
979 961 return self._loadfilter('encode')
980 962
981 963 @unfilteredpropertycache
982 964 def _decodefilterpats(self):
983 965 return self._loadfilter('decode')
984 966
985 967 def adddatafilter(self, name, filter):
986 968 self._datafilters[name] = filter
987 969
988 970 def wread(self, filename):
989 971 if self._link(filename):
990 972 data = self.wvfs.readlink(filename)
991 973 else:
992 974 data = self.wvfs.read(filename)
993 975 return self._filter(self._encodefilterpats, filename, data)
994 976
995 977 def wwrite(self, filename, data, flags):
996 978 """write ``data`` into ``filename`` in the working directory
997 979
998 980 This returns the length of the written (maybe decoded) data.
999 981 """
1000 982 data = self._filter(self._decodefilterpats, filename, data)
1001 983 if 'l' in flags:
1002 984 self.wvfs.symlink(data, filename)
1003 985 else:
1004 986 self.wvfs.write(filename, data)
1005 987 if 'x' in flags:
1006 988 self.wvfs.setflags(filename, False, True)
1007 989 return len(data)
1008 990
1009 991 def wwritedata(self, filename, data):
1010 992 return self._filter(self._decodefilterpats, filename, data)
1011 993
1012 994 def currenttransaction(self):
1013 995 """return the current transaction or None if non exists"""
1014 996 if self._transref:
1015 997 tr = self._transref()
1016 998 else:
1017 999 tr = None
1018 1000
1019 1001 if tr and tr.running():
1020 1002 return tr
1021 1003 return None
1022 1004
1023 1005 def transaction(self, desc, report=None):
1024 1006 if (self.ui.configbool('devel', 'all-warnings')
1025 1007 or self.ui.configbool('devel', 'check-locks')):
1026 1008 l = self._lockref and self._lockref()
1027 1009 if l is None or not l.held:
1028 1010 self.ui.develwarn('transaction with no lock')
1029 1011 tr = self.currenttransaction()
1030 1012 if tr is not None:
1031 1013 return tr.nest()
1032 1014
1033 1015 # abort here if the journal already exists
1034 1016 if self.svfs.exists("journal"):
1035 1017 raise error.RepoError(
1036 1018 _("abandoned transaction found"),
1037 1019 hint=_("run 'hg recover' to clean up transaction"))
1038 1020
1039 1021 # make journal.dirstate contain in-memory changes at this point
1040 1022 self.dirstate.write(None)
1041 1023
1042 1024 idbase = "%.40f#%f" % (random.random(), time.time())
1043 1025 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1044 1026 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1045 1027
1046 1028 self._writejournal(desc)
1047 1029 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1048 1030 if report:
1049 1031 rp = report
1050 1032 else:
1051 1033 rp = self.ui.warn
1052 1034 vfsmap = {'plain': self.vfs} # root of .hg/
1053 1035 # we must avoid cyclic reference between repo and transaction.
1054 1036 reporef = weakref.ref(self)
1055 1037 def validate(tr):
1056 1038 """will run pre-closing hooks"""
1057 1039 reporef().hook('pretxnclose', throw=True,
1058 1040 txnname=desc, **tr.hookargs)
1059 1041 def releasefn(tr, success):
1060 1042 repo = reporef()
1061 1043 if success:
1062 1044 # this should be explicitly invoked here, because
1063 1045 # in-memory changes aren't written out when closing
1064 1046 # the transaction, if tr.addfilegenerator (via
1065 1047 # dirstate.write or so) isn't invoked while the
1066 1048 # transaction is running
1067 1049 repo.dirstate.write(None)
1068 1050 else:
1069 1051 # prevent in-memory changes from being written out at
1070 1052 # the end of outer wlock scope or so
1071 1053 repo.dirstate.invalidate()
1072 1054
1073 1055 # discard all changes (including ones already written
1074 1056 # out) in this transaction
1075 1057 repo.vfs.rename('journal.dirstate', 'dirstate')
1076 1058
1077 1059 repo.invalidate(clearfilecache=True)
1078 1060
1079 1061 tr = transaction.transaction(rp, self.svfs, vfsmap,
1080 1062 "journal",
1081 1063 "undo",
1082 1064 aftertrans(renames),
1083 1065 self.store.createmode,
1084 1066 validator=validate,
1085 1067 releasefn=releasefn)
1086 1068
1087 1069 tr.hookargs['txnid'] = txnid
1088 1070 # note: writing the fncache only during finalize means that the file is
1089 1071 # outdated when running hooks. As fncache is used for streaming clone,
1090 1072 # this is not expected to break anything that happens during the hooks.
1091 1073 tr.addfinalize('flush-fncache', self.store.write)
1092 1074 def txnclosehook(tr2):
1093 1075 """To be run if transaction is successful, will schedule a hook run
1094 1076 """
1095 1077 # Don't reference tr2 in hook() so we don't hold a reference.
1096 1078 # This reduces memory consumption when there are multiple
1097 1079 # transactions per lock. This can likely go away if issue5045
1098 1080 # fixes the function accumulation.
1099 1081 hookargs = tr2.hookargs
1100 1082
1101 1083 def hook():
1102 1084 reporef().hook('txnclose', throw=False, txnname=desc,
1103 1085 **hookargs)
1104 1086 reporef()._afterlock(hook)
1105 1087 tr.addfinalize('txnclose-hook', txnclosehook)
1106 1088 def txnaborthook(tr2):
1107 1089 """To be run if transaction is aborted
1108 1090 """
1109 1091 reporef().hook('txnabort', throw=False, txnname=desc,
1110 1092 **tr2.hookargs)
1111 1093 tr.addabort('txnabort-hook', txnaborthook)
1112 1094 # avoid eager cache invalidation. in-memory data should be identical
1113 1095 # to stored data if transaction has no error.
1114 1096 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1115 1097 self._transref = weakref.ref(tr)
1116 1098 return tr
1117 1099
1118 1100 def _journalfiles(self):
1119 1101 return ((self.svfs, 'journal'),
1120 1102 (self.vfs, 'journal.dirstate'),
1121 1103 (self.vfs, 'journal.branch'),
1122 1104 (self.vfs, 'journal.desc'),
1123 1105 (self.vfs, 'journal.bookmarks'),
1124 1106 (self.svfs, 'journal.phaseroots'))
1125 1107
1126 1108 def undofiles(self):
1127 1109 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1128 1110
1129 1111 def _writejournal(self, desc):
1130 1112 self.vfs.write("journal.dirstate",
1131 1113 self.vfs.tryread("dirstate"))
1132 1114 self.vfs.write("journal.branch",
1133 1115 encoding.fromlocal(self.dirstate.branch()))
1134 1116 self.vfs.write("journal.desc",
1135 1117 "%d\n%s\n" % (len(self), desc))
1136 1118 self.vfs.write("journal.bookmarks",
1137 1119 self.vfs.tryread("bookmarks"))
1138 1120 self.svfs.write("journal.phaseroots",
1139 1121 self.svfs.tryread("phaseroots"))
1140 1122
1141 1123 def recover(self):
1142 1124 with self.lock():
1143 1125 if self.svfs.exists("journal"):
1144 1126 self.ui.status(_("rolling back interrupted transaction\n"))
1145 1127 vfsmap = {'': self.svfs,
1146 1128 'plain': self.vfs,}
1147 1129 transaction.rollback(self.svfs, vfsmap, "journal",
1148 1130 self.ui.warn)
1149 1131 self.invalidate()
1150 1132 return True
1151 1133 else:
1152 1134 self.ui.warn(_("no interrupted transaction available\n"))
1153 1135 return False
1154 1136
1155 1137 def rollback(self, dryrun=False, force=False):
1156 1138 wlock = lock = dsguard = None
1157 1139 try:
1158 1140 wlock = self.wlock()
1159 1141 lock = self.lock()
1160 1142 if self.svfs.exists("undo"):
1161 1143 dsguard = cmdutil.dirstateguard(self, 'rollback')
1162 1144
1163 1145 return self._rollback(dryrun, force, dsguard)
1164 1146 else:
1165 1147 self.ui.warn(_("no rollback information available\n"))
1166 1148 return 1
1167 1149 finally:
1168 1150 release(dsguard, lock, wlock)
1169 1151
1170 1152 @unfilteredmethod # Until we get smarter cache management
1171 1153 def _rollback(self, dryrun, force, dsguard):
1172 1154 ui = self.ui
1173 1155 try:
1174 1156 args = self.vfs.read('undo.desc').splitlines()
1175 1157 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1176 1158 if len(args) >= 3:
1177 1159 detail = args[2]
1178 1160 oldtip = oldlen - 1
1179 1161
1180 1162 if detail and ui.verbose:
1181 1163 msg = (_('repository tip rolled back to revision %s'
1182 1164 ' (undo %s: %s)\n')
1183 1165 % (oldtip, desc, detail))
1184 1166 else:
1185 1167 msg = (_('repository tip rolled back to revision %s'
1186 1168 ' (undo %s)\n')
1187 1169 % (oldtip, desc))
1188 1170 except IOError:
1189 1171 msg = _('rolling back unknown transaction\n')
1190 1172 desc = None
1191 1173
1192 1174 if not force and self['.'] != self['tip'] and desc == 'commit':
1193 1175 raise error.Abort(
1194 1176 _('rollback of last commit while not checked out '
1195 1177 'may lose data'), hint=_('use -f to force'))
1196 1178
1197 1179 ui.status(msg)
1198 1180 if dryrun:
1199 1181 return 0
1200 1182
1201 1183 parents = self.dirstate.parents()
1202 1184 self.destroying()
1203 1185 vfsmap = {'plain': self.vfs, '': self.svfs}
1204 1186 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1205 1187 if self.vfs.exists('undo.bookmarks'):
1206 1188 self.vfs.rename('undo.bookmarks', 'bookmarks')
1207 1189 if self.svfs.exists('undo.phaseroots'):
1208 1190 self.svfs.rename('undo.phaseroots', 'phaseroots')
1209 1191 self.invalidate()
1210 1192
1211 1193 parentgone = (parents[0] not in self.changelog.nodemap or
1212 1194 parents[1] not in self.changelog.nodemap)
1213 1195 if parentgone:
1214 1196 # prevent dirstateguard from overwriting already restored one
1215 1197 dsguard.close()
1216 1198
1217 1199 self.vfs.rename('undo.dirstate', 'dirstate')
1218 1200 try:
1219 1201 branch = self.vfs.read('undo.branch')
1220 1202 self.dirstate.setbranch(encoding.tolocal(branch))
1221 1203 except IOError:
1222 1204 ui.warn(_('named branch could not be reset: '
1223 1205 'current branch is still \'%s\'\n')
1224 1206 % self.dirstate.branch())
1225 1207
1226 1208 self.dirstate.invalidate()
1227 1209 parents = tuple([p.rev() for p in self[None].parents()])
1228 1210 if len(parents) > 1:
1229 1211 ui.status(_('working directory now based on '
1230 1212 'revisions %d and %d\n') % parents)
1231 1213 else:
1232 1214 ui.status(_('working directory now based on '
1233 1215 'revision %d\n') % parents)
1234 1216 mergemod.mergestate.clean(self, self['.'].node())
1235 1217
1236 1218 # TODO: if we know which new heads may result from this rollback, pass
1237 1219 # them to destroy(), which will prevent the branchhead cache from being
1238 1220 # invalidated.
1239 1221 self.destroyed()
1240 1222 return 0
1241 1223
1242 1224 def invalidatecaches(self):
1243 1225
1244 1226 if '_tagscache' in vars(self):
1245 1227 # can't use delattr on proxy
1246 1228 del self.__dict__['_tagscache']
1247 1229
1248 1230 self.unfiltered()._branchcaches.clear()
1249 1231 self.invalidatevolatilesets()
1250 1232
1251 1233 def invalidatevolatilesets(self):
1252 1234 self.filteredrevcache.clear()
1253 1235 obsolete.clearobscaches(self)
1254 1236
1255 1237 def invalidatedirstate(self):
1256 1238 '''Invalidates the dirstate, causing the next call to dirstate
1257 1239 to check if it was modified since the last time it was read,
1258 1240 rereading it if it has.
1259 1241
1260 1242 This is different from dirstate.invalidate() in that it doesn't
1261 1243 always reread the dirstate. Use dirstate.invalidate() if you want to
1262 1244 explicitly read the dirstate again (i.e. restoring it to a previous
1263 1245 known good state).'''
1264 1246 if hasunfilteredcache(self, 'dirstate'):
1265 1247 for k in self.dirstate._filecache:
1266 1248 try:
1267 1249 delattr(self.dirstate, k)
1268 1250 except AttributeError:
1269 1251 pass
1270 1252 delattr(self.unfiltered(), 'dirstate')
1271 1253
1272 1254 def invalidate(self, clearfilecache=False):
1273 1255 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1274 1256 for k in self._filecache.keys():
1275 1257 # dirstate is invalidated separately in invalidatedirstate()
1276 1258 if k == 'dirstate':
1277 1259 continue
1278 1260
1279 1261 if clearfilecache:
1280 1262 del self._filecache[k]
1281 1263 try:
1282 1264 delattr(unfiltered, k)
1283 1265 except AttributeError:
1284 1266 pass
1285 1267 self.invalidatecaches()
1286 1268 self.store.invalidatecaches()
1287 1269
1288 1270 def invalidateall(self):
1289 1271 '''Fully invalidates both store and non-store parts, causing the
1290 1272 subsequent operation to reread any outside changes.'''
1291 1273 # extension should hook this to invalidate its caches
1292 1274 self.invalidate()
1293 1275 self.invalidatedirstate()
1294 1276
1295 1277 def _refreshfilecachestats(self, tr):
1296 1278 """Reload stats of cached files so that they are flagged as valid"""
1297 1279 for k, ce in self._filecache.items():
1298 1280 if k == 'dirstate' or k not in self.__dict__:
1299 1281 continue
1300 1282 ce.refresh()
1301 1283
1302 1284 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1303 1285 inheritchecker=None, parentenvvar=None):
1304 1286 parentlock = None
1305 1287 # the contents of parentenvvar are used by the underlying lock to
1306 1288 # determine whether it can be inherited
1307 1289 if parentenvvar is not None:
1308 1290 parentlock = os.environ.get(parentenvvar)
1309 1291 try:
1310 1292 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1311 1293 acquirefn=acquirefn, desc=desc,
1312 1294 inheritchecker=inheritchecker,
1313 1295 parentlock=parentlock)
1314 1296 except error.LockHeld as inst:
1315 1297 if not wait:
1316 1298 raise
1317 1299 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1318 1300 (desc, inst.locker))
1319 1301 # default to 600 seconds timeout
1320 1302 l = lockmod.lock(vfs, lockname,
1321 1303 int(self.ui.config("ui", "timeout", "600")),
1322 1304 releasefn=releasefn, acquirefn=acquirefn,
1323 1305 desc=desc)
1324 1306 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1325 1307 return l
1326 1308
1327 1309 def _afterlock(self, callback):
1328 1310 """add a callback to be run when the repository is fully unlocked
1329 1311
1330 1312 The callback will be executed when the outermost lock is released
1331 1313 (with wlock being higher level than 'lock')."""
1332 1314 for ref in (self._wlockref, self._lockref):
1333 1315 l = ref and ref()
1334 1316 if l and l.held:
1335 1317 l.postrelease.append(callback)
1336 1318 break
1337 1319 else: # no lock has been found.
1338 1320 callback()
1339 1321
1340 1322 def lock(self, wait=True):
1341 1323 '''Lock the repository store (.hg/store) and return a weak reference
1342 1324 to the lock. Use this before modifying the store (e.g. committing or
1343 1325 stripping). If you are opening a transaction, get a lock as well.
1344 1326
1345 1327 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1346 1328 'wlock' first to avoid a dead-lock hazard.'''
1347 1329 l = self._lockref and self._lockref()
1348 1330 if l is not None and l.held:
1349 1331 l.lock()
1350 1332 return l
1351 1333
1352 1334 l = self._lock(self.svfs, "lock", wait, None,
1353 1335 self.invalidate, _('repository %s') % self.origroot)
1354 1336 self._lockref = weakref.ref(l)
1355 1337 return l
1356 1338
1357 1339 def _wlockchecktransaction(self):
1358 1340 if self.currenttransaction() is not None:
1359 1341 raise error.LockInheritanceContractViolation(
1360 1342 'wlock cannot be inherited in the middle of a transaction')
1361 1343
1362 1344 def wlock(self, wait=True):
1363 1345 '''Lock the non-store parts of the repository (everything under
1364 1346 .hg except .hg/store) and return a weak reference to the lock.
1365 1347
1366 1348 Use this before modifying files in .hg.
1367 1349
1368 1350 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1369 1351 'wlock' first to avoid a dead-lock hazard.'''
1370 1352 l = self._wlockref and self._wlockref()
1371 1353 if l is not None and l.held:
1372 1354 l.lock()
1373 1355 return l
1374 1356
1375 1357 # We do not need to check for non-waiting lock acquisition. Such
1376 1358 # acquisition would not cause dead-lock as it would just fail.
1377 1359 if wait and (self.ui.configbool('devel', 'all-warnings')
1378 1360 or self.ui.configbool('devel', 'check-locks')):
1379 1361 l = self._lockref and self._lockref()
1380 1362 if l is not None and l.held:
1381 1363 self.ui.develwarn('"wlock" acquired after "lock"')
1382 1364
1383 1365 def unlock():
1384 1366 if self.dirstate.pendingparentchange():
1385 1367 self.dirstate.invalidate()
1386 1368 else:
1387 1369 self.dirstate.write(None)
1388 1370
1389 1371 self._filecache['dirstate'].refresh()
1390 1372
1391 1373 l = self._lock(self.vfs, "wlock", wait, unlock,
1392 1374 self.invalidatedirstate, _('working directory of %s') %
1393 1375 self.origroot,
1394 1376 inheritchecker=self._wlockchecktransaction,
1395 1377 parentenvvar='HG_WLOCK_LOCKER')
1396 1378 self._wlockref = weakref.ref(l)
1397 1379 return l
1398 1380
1399 1381 def _currentlock(self, lockref):
1400 1382 """Returns the lock if it's held, or None if it's not."""
1401 1383 if lockref is None:
1402 1384 return None
1403 1385 l = lockref()
1404 1386 if l is None or not l.held:
1405 1387 return None
1406 1388 return l
1407 1389
1408 1390 def currentwlock(self):
1409 1391 """Returns the wlock if it's held, or None if it's not."""
1410 1392 return self._currentlock(self._wlockref)
1411 1393
1412 1394 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1413 1395 """
1414 1396 commit an individual file as part of a larger transaction
1415 1397 """
1416 1398
1417 1399 fname = fctx.path()
1418 1400 fparent1 = manifest1.get(fname, nullid)
1419 1401 fparent2 = manifest2.get(fname, nullid)
1420 1402 if isinstance(fctx, context.filectx):
1421 1403 node = fctx.filenode()
1422 1404 if node in [fparent1, fparent2]:
1423 1405 self.ui.debug('reusing %s filelog entry\n' % fname)
1424 1406 return node
1425 1407
1426 1408 flog = self.file(fname)
1427 1409 meta = {}
1428 1410 copy = fctx.renamed()
1429 1411 if copy and copy[0] != fname:
1430 1412 # Mark the new revision of this file as a copy of another
1431 1413 # file. This copy data will effectively act as a parent
1432 1414 # of this new revision. If this is a merge, the first
1433 1415 # parent will be the nullid (meaning "look up the copy data")
1434 1416 # and the second one will be the other parent. For example:
1435 1417 #
1436 1418 # 0 --- 1 --- 3 rev1 changes file foo
1437 1419 # \ / rev2 renames foo to bar and changes it
1438 1420 # \- 2 -/ rev3 should have bar with all changes and
1439 1421 # should record that bar descends from
1440 1422 # bar in rev2 and foo in rev1
1441 1423 #
1442 1424 # this allows this merge to succeed:
1443 1425 #
1444 1426 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1445 1427 # \ / merging rev3 and rev4 should use bar@rev2
1446 1428 # \- 2 --- 4 as the merge base
1447 1429 #
1448 1430
1449 1431 cfname = copy[0]
1450 1432 crev = manifest1.get(cfname)
1451 1433 newfparent = fparent2
1452 1434
1453 1435 if manifest2: # branch merge
1454 1436 if fparent2 == nullid or crev is None: # copied on remote side
1455 1437 if cfname in manifest2:
1456 1438 crev = manifest2[cfname]
1457 1439 newfparent = fparent1
1458 1440
1459 1441 # Here, we used to search backwards through history to try to find
1460 1442 # where the file copy came from if the source of a copy was not in
1461 1443 # the parent directory. However, this doesn't actually make sense to
1462 1444 # do (what does a copy from something not in your working copy even
1463 1445 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1464 1446 # the user that copy information was dropped, so if they didn't
1465 1447 # expect this outcome it can be fixed, but this is the correct
1466 1448 # behavior in this circumstance.
1467 1449
1468 1450 if crev:
1469 1451 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1470 1452 meta["copy"] = cfname
1471 1453 meta["copyrev"] = hex(crev)
1472 1454 fparent1, fparent2 = nullid, newfparent
1473 1455 else:
1474 1456 self.ui.warn(_("warning: can't find ancestor for '%s' "
1475 1457 "copied from '%s'!\n") % (fname, cfname))
1476 1458
1477 1459 elif fparent1 == nullid:
1478 1460 fparent1, fparent2 = fparent2, nullid
1479 1461 elif fparent2 != nullid:
1480 1462 # is one parent an ancestor of the other?
1481 1463 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1482 1464 if fparent1 in fparentancestors:
1483 1465 fparent1, fparent2 = fparent2, nullid
1484 1466 elif fparent2 in fparentancestors:
1485 1467 fparent2 = nullid
1486 1468
1487 1469 # is the file changed?
1488 1470 text = fctx.data()
1489 1471 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1490 1472 changelist.append(fname)
1491 1473 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1492 1474 # are just the flags changed during merge?
1493 1475 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1494 1476 changelist.append(fname)
1495 1477
1496 1478 return fparent1
1497 1479
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                        continue
                    if not force:
                        raise error.Abort(
                            _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped before
            # the hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

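    # Minimal usage sketch (assumes an existing repository object `repo`;
    # not part of the original module): commit everything modified in the
    # working directory with a message.
    #
    #   node = repo.commit(text='fix frobnication', user='alice <a@e.org>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
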
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

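    # Sketch (not in the original code): commitctx() is the lower-level
    # entry point used by commit() above; any object implementing the
    # changectx API (e.g. context.memctx for in-memory commits) can be
    # committed directly:
    #
    #   n = repo.commitctx(ctx)    # returns the new changelog node
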
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

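    # Usage sketch (the match object here is built for illustration): list
    # the files of a revision that match a pattern.
    #
    #   m = matchmod.match(repo.root, '', ['glob:*.py'])
    #   for f in repo.walk(m, node='tip'):
    #       repo.ui.write(f + '\n')
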
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

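    # Usage sketch (the revision names are hypothetical): compare two
    # changesets and inspect the per-category file lists of the result.
    #
    #   st = repo.status('1.0', 'tip')
    #   for f in st.modified:
    #       repo.ui.write('%s changed\n' % f)
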
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

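    # Usage sketch: print every head of the 'default' branch, including
    # heads whose branch was closed.
    #
    #   for node in repo.branchheads('default', closed=True):
    #       repo.ui.write('%s\n' % hex(node))
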
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
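        # Descriptive note (added): for each (top, bottom) pair this walks
        # the first-parent chain from top towards bottom, collecting nodes
        # at exponentially growing distances (1, 2, 4, 8, ...); old-style
        # wire-protocol discovery uses this to sample the history graph.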
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()

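    # Sketch of how an extension might register such a hook (the function
    # and source names here are hypothetical):
    #
    #   def _checkoutgoing(repo, remote, outgoing):
    #       ...
    #   repo.prepushoutgoinghooks.add('myextension', _checkoutgoing)
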
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

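    # Usage sketch (key and node values are hypothetical): bookmarks are
    # one standard pushkey namespace; moving a bookmark through pushkey
    # looks roughly like
    #
    #   ok = repo.pushkey('bookmarks', 'mybook', oldhexnode, newhexnode)
    #
    # and a false value is returned if a prepushkey hook aborted the change.
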
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

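    # Descriptive note (added): the message is written to
    # .hg/last-message.txt; commit() above points the user at this file
    # when the transaction is rolled back, e.g. by a pretxncommit hook.
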
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
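
# Sketch of the wrapping the docstring above describes (the extension and
# requirement names are hypothetical):
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       reqs.add('myext-fancyformat')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)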