localrepo: drop force check from checkcommitpatterns...
timeless
r28814:1f65f291 default
@@ -1,1983 +1,1982 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import inspect
12 12 import os
13 13 import random
14 14 import time
15 15 import urllib
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 propertycache = util.propertycache
62 62 filecache = scmutil.filecache
63 63
64 64 class repofilecache(filecache):
65 65 """All filecache usage on repo are done for logic that should be unfiltered
66 66 """
67 67
68 68 def __get__(self, repo, type=None):
69 69 return super(repofilecache, self).__get__(repo.unfiltered(), type)
70 70 def __set__(self, repo, value):
71 71 return super(repofilecache, self).__set__(repo.unfiltered(), value)
72 72 def __delete__(self, repo):
73 73 return super(repofilecache, self).__delete__(repo.unfiltered())
74 74
75 75 class storecache(repofilecache):
76 76 """filecache for files in the store"""
77 77 def join(self, obj, fname):
78 78 return obj.sjoin(fname)
79 79
80 80 class unfilteredpropertycache(propertycache):
81 81 """propertycache that apply to unfiltered repo only"""
82 82
83 83 def __get__(self, repo, type=None):
84 84 unfi = repo.unfiltered()
85 85 if unfi is repo:
86 86 return super(unfilteredpropertycache, self).__get__(unfi)
87 87 return getattr(unfi, self.name)
88 88
89 89 class filteredpropertycache(propertycache):
90 90 """propertycache that must take filtering in account"""
91 91
92 92 def cachevalue(self, obj, value):
93 93 object.__setattr__(obj, self.name, value)
94 94
95 95
96 96 def hasunfilteredcache(repo, name):
97 97 """check if a repo has an unfilteredpropertycache value for <name>"""
98 98 return name in vars(repo.unfiltered())
99 99
100 100 def unfilteredmethod(orig):
101 101 """decorate method that always need to be run on unfiltered version"""
102 102 def wrapper(repo, *args, **kwargs):
103 103 return orig(repo.unfiltered(), *args, **kwargs)
104 104 return wrapper
105 105
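All four helpers above route caching through repo.unfiltered(), so a single cache entry serves every filtered view of the repository. A rough sketch of how they are meant to be combined on a repo class; examplerepo, _countall, _example and the 'example' file name are hypothetical, not part of this module:

    class examplerepo(localrepository):
        @repofilecache('example')    # re-read only when .hg/example changes on disk
        def _example(self):
            return self.vfs.tryread('example')

        @unfilteredmethod            # the body always sees the unfiltered repo
        def _countall(self):
            return len(self.changelog)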
106 106 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
107 107 'unbundle'))
108 108 legacycaps = moderncaps.union(set(['changegroupsubset']))
109 109
110 110 class localpeer(peer.peerrepository):
111 111 '''peer for a local repo; reflects only the most recent API'''
112 112
113 113 def __init__(self, repo, caps=moderncaps):
114 114 peer.peerrepository.__init__(self)
115 115 self._repo = repo.filtered('served')
116 116 self.ui = repo.ui
117 117 self._caps = repo._restrictcapabilities(caps)
118 118 self.requirements = repo.requirements
119 119 self.supportedformats = repo.supportedformats
120 120
121 121 def close(self):
122 122 self._repo.close()
123 123
124 124 def _capabilities(self):
125 125 return self._caps
126 126
127 127 def local(self):
128 128 return self._repo
129 129
130 130 def canpush(self):
131 131 return True
132 132
133 133 def url(self):
134 134 return self._repo.url()
135 135
136 136 def lookup(self, key):
137 137 return self._repo.lookup(key)
138 138
139 139 def branchmap(self):
140 140 return self._repo.branchmap()
141 141
142 142 def heads(self):
143 143 return self._repo.heads()
144 144
145 145 def known(self, nodes):
146 146 return self._repo.known(nodes)
147 147
148 148 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
149 149 **kwargs):
150 150 cg = exchange.getbundle(self._repo, source, heads=heads,
151 151 common=common, bundlecaps=bundlecaps, **kwargs)
152 152 if bundlecaps is not None and 'HG20' in bundlecaps:
153 153 # When requesting a bundle2, getbundle returns a stream to make the
154 154 # wire level function happier. We need to build a proper object
155 155 # from it in local peer.
156 156 cg = bundle2.getunbundler(self.ui, cg)
157 157 return cg
158 158
159 159 # TODO We might want to move the next two calls into legacypeer and add
160 160 # unbundle instead.
161 161
162 162 def unbundle(self, cg, heads, url):
163 163 """apply a bundle on a repo
164 164
165 165 This function handles the repo locking itself."""
166 166 try:
167 167 try:
168 168 cg = exchange.readbundle(self.ui, cg, None)
169 169 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
170 170 if util.safehasattr(ret, 'getchunks'):
171 171 # This is a bundle20 object, turn it into an unbundler.
172 172 # This little dance should be dropped eventually when the
173 173 # API is finally improved.
174 174 stream = util.chunkbuffer(ret.getchunks())
175 175 ret = bundle2.getunbundler(self.ui, stream)
176 176 return ret
177 177 except Exception as exc:
178 178 # If the exception contains output salvaged from a bundle2
179 179 # reply, we need to make sure it is printed before continuing
180 180 # to fail. So we build a bundle2 with such output and consume
181 181 # it directly.
182 182 #
183 183 # This is not very elegant but allows a "simple" solution for
184 184 # issue4594
185 185 output = getattr(exc, '_bundle2salvagedoutput', ())
186 186 if output:
187 187 bundler = bundle2.bundle20(self._repo.ui)
188 188 for out in output:
189 189 bundler.addpart(out)
190 190 stream = util.chunkbuffer(bundler.getchunks())
191 191 b = bundle2.getunbundler(self.ui, stream)
192 192 bundle2.processbundle(self._repo, b)
193 193 raise
194 194 except error.PushRaced as exc:
195 195 raise error.ResponseError(_('push failed:'), str(exc))
196 196
197 197 def lock(self):
198 198 return self._repo.lock()
199 199
200 200 def addchangegroup(self, cg, source, url):
201 201 return cg.apply(self._repo, source, url)
202 202
203 203 def pushkey(self, namespace, key, old, new):
204 204 return self._repo.pushkey(namespace, key, old, new)
205 205
206 206 def listkeys(self, namespace):
207 207 return self._repo.listkeys(namespace)
208 208
209 209 def debugwireargs(self, one, two, three=None, four=None, five=None):
210 210 '''used to test argument passing over the wire'''
211 211 return "%s %s %s %s %s" % (one, two, three, four, five)
212 212
213 213 class locallegacypeer(localpeer):
214 214 '''peer extension which implements legacy methods too; used for tests with
215 215 restricted capabilities'''
216 216
217 217 def __init__(self, repo):
218 218 localpeer.__init__(self, repo, caps=legacycaps)
219 219
220 220 def branches(self, nodes):
221 221 return self._repo.branches(nodes)
222 222
223 223 def between(self, pairs):
224 224 return self._repo.between(pairs)
225 225
226 226 def changegroup(self, basenodes, source):
227 227 return changegroup.changegroup(self._repo, basenodes, source)
228 228
229 229 def changegroupsubset(self, bases, heads, source):
230 230 return changegroup.changegroupsubset(self._repo, bases, heads, source)
231 231
232 232 class localrepository(object):
233 233
234 234 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
235 235 'manifestv2'))
236 236 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
237 237 'dotencode'))
238 238 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
239 239 filtername = None
240 240
241 241 # a list of (ui, featureset) functions.
242 242 # only functions defined in module of enabled extensions are invoked
243 243 featuresetupfuncs = set()
244 244
245 245 def __init__(self, baseui, path=None, create=False):
246 246 self.requirements = set()
247 247 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
248 248 self.wopener = self.wvfs
249 249 self.root = self.wvfs.base
250 250 self.path = self.wvfs.join(".hg")
251 251 self.origroot = path
252 252 self.auditor = pathutil.pathauditor(self.root, self._checknested)
253 253 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
254 254 realfs=False)
255 255 self.vfs = scmutil.vfs(self.path)
256 256 self.opener = self.vfs
257 257 self.baseui = baseui
258 258 self.ui = baseui.copy()
259 259 self.ui.copy = baseui.copy # prevent copying repo configuration
260 260 # A list of callbacks to shape the phase if no data were found.
261 261 # Callbacks are in the form: func(repo, roots) --> processed root.
262 262 # This list is to be filled by extensions during repo setup.
263 263 self._phasedefaults = []
264 264 try:
265 265 self.ui.readconfig(self.join("hgrc"), self.root)
266 266 extensions.loadall(self.ui)
267 267 except IOError:
268 268 pass
269 269
270 270 if self.featuresetupfuncs:
271 271 self.supported = set(self._basesupported) # use private copy
272 272 extmods = set(m.__name__ for n, m
273 273 in extensions.extensions(self.ui))
274 274 for setupfunc in self.featuresetupfuncs:
275 275 if setupfunc.__module__ in extmods:
276 276 setupfunc(self.ui, self.supported)
277 277 else:
278 278 self.supported = self._basesupported
279 279
280 280 if not self.vfs.isdir():
281 281 if create:
282 282 self.requirements = newreporequirements(self)
283 283
284 284 if not self.wvfs.exists():
285 285 self.wvfs.makedirs()
286 286 self.vfs.makedir(notindexed=True)
287 287
288 288 if 'store' in self.requirements:
289 289 self.vfs.mkdir("store")
290 290
291 291 # create an invalid changelog
292 292 self.vfs.append(
293 293 "00changelog.i",
294 294 '\0\0\0\2' # represents revlogv2
295 295 ' dummy changelog to prevent using the old repo layout'
296 296 )
297 297 else:
298 298 raise error.RepoError(_("repository %s not found") % path)
299 299 elif create:
300 300 raise error.RepoError(_("repository %s already exists") % path)
301 301 else:
302 302 try:
303 303 self.requirements = scmutil.readrequires(
304 304 self.vfs, self.supported)
305 305 except IOError as inst:
306 306 if inst.errno != errno.ENOENT:
307 307 raise
308 308
309 309 self.sharedpath = self.path
310 310 try:
311 311 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
312 312 realpath=True)
313 313 s = vfs.base
314 314 if not vfs.exists():
315 315 raise error.RepoError(
316 316 _('.hg/sharedpath points to nonexistent directory %s') % s)
317 317 self.sharedpath = s
318 318 except IOError as inst:
319 319 if inst.errno != errno.ENOENT:
320 320 raise
321 321
322 322 self.store = store.store(
323 323 self.requirements, self.sharedpath, scmutil.vfs)
324 324 self.spath = self.store.path
325 325 self.svfs = self.store.vfs
326 326 self.sjoin = self.store.join
327 327 self.vfs.createmode = self.store.createmode
328 328 self._applyopenerreqs()
329 329 if create:
330 330 self._writerequirements()
331 331
332 332 self._dirstatevalidatewarned = False
333 333
334 334 self._branchcaches = {}
335 335 self._revbranchcache = None
336 336 self.filterpats = {}
337 337 self._datafilters = {}
338 338 self._transref = self._lockref = self._wlockref = None
339 339
340 340 # A cache for various files under .hg/ that tracks file changes,
341 341 # (used by the filecache decorator)
342 342 #
343 343 # Maps a property name to its util.filecacheentry
344 344 self._filecache = {}
345 345
346 346 # hold sets of revisions to be filtered
347 347 # should be cleared when something might have changed the filter value:
348 348 # - new changesets,
349 349 # - phase change,
350 350 # - new obsolescence marker,
351 351 # - working directory parent change,
352 352 # - bookmark changes
353 353 self.filteredrevcache = {}
354 354
355 355 # generic mapping between names and nodes
356 356 self.names = namespaces.namespaces()
357 357
358 358 def close(self):
359 359 self._writecaches()
360 360
361 361 def _writecaches(self):
362 362 if self._revbranchcache:
363 363 self._revbranchcache.write()
364 364
365 365 def _restrictcapabilities(self, caps):
366 366 if self.ui.configbool('experimental', 'bundle2-advertise', True):
367 367 caps = set(caps)
368 368 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
369 369 caps.add('bundle2=' + urllib.quote(capsblob))
370 370 return caps
371 371
372 372 def _applyopenerreqs(self):
373 373 self.svfs.options = dict((r, 1) for r in self.requirements
374 374 if r in self.openerreqs)
375 375 # experimental config: format.chunkcachesize
376 376 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
377 377 if chunkcachesize is not None:
378 378 self.svfs.options['chunkcachesize'] = chunkcachesize
379 379 # experimental config: format.maxchainlen
380 380 maxchainlen = self.ui.configint('format', 'maxchainlen')
381 381 if maxchainlen is not None:
382 382 self.svfs.options['maxchainlen'] = maxchainlen
383 383 # experimental config: format.manifestcachesize
384 384 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
385 385 if manifestcachesize is not None:
386 386 self.svfs.options['manifestcachesize'] = manifestcachesize
387 387 # experimental config: format.aggressivemergedeltas
388 388 aggressivemergedeltas = self.ui.configbool('format',
389 389 'aggressivemergedeltas', False)
390 390 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
391 391 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
392 392
393 393 def _writerequirements(self):
394 394 scmutil.writerequires(self.vfs, self.requirements)
395 395
396 396 def _checknested(self, path):
397 397 """Determine if path is a legal nested repository."""
398 398 if not path.startswith(self.root):
399 399 return False
400 400 subpath = path[len(self.root) + 1:]
401 401 normsubpath = util.pconvert(subpath)
402 402
403 403 # XXX: Checking against the current working copy is wrong in
404 404 # the sense that it can reject things like
405 405 #
406 406 # $ hg cat -r 10 sub/x.txt
407 407 #
408 408 # if sub/ is no longer a subrepository in the working copy
409 409 # parent revision.
410 410 #
411 411 # However, it can of course also allow things that would have
412 412 # been rejected before, such as the above cat command if sub/
413 413 # is a subrepository now, but was a normal directory before.
414 414 # The old path auditor would have rejected by mistake since it
415 415 # panics when it sees sub/.hg/.
416 416 #
417 417 # All in all, checking against the working copy seems sensible
418 418 # since we want to prevent access to nested repositories on
419 419 # the filesystem *now*.
420 420 ctx = self[None]
421 421 parts = util.splitpath(subpath)
422 422 while parts:
423 423 prefix = '/'.join(parts)
424 424 if prefix in ctx.substate:
425 425 if prefix == normsubpath:
426 426 return True
427 427 else:
428 428 sub = ctx.sub(prefix)
429 429 return sub.checknested(subpath[len(prefix) + 1:])
430 430 else:
431 431 parts.pop()
432 432 return False
433 433
434 434 def peer(self):
435 435 return localpeer(self) # not cached to avoid reference cycle
436 436
437 437 def unfiltered(self):
438 438 """Return unfiltered version of the repository
439 439
440 440 Intended to be overwritten by filtered repo."""
441 441 return self
442 442
443 443 def filtered(self, name):
444 444 """Return a filtered version of a repository"""
445 445 # build a new class with the mixin and the current class
446 446 # (possibly subclass of the repo)
447 447 class proxycls(repoview.repoview, self.unfiltered().__class__):
448 448 pass
449 449 return proxycls(self, name)
450 450
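A rough usage sketch of the view mechanism above; 'visible' and 'served' are standard repoview filter names, and repo is assumed to be an existing localrepository:

    visible = repo.filtered('visible')   # hides hidden (e.g. obsolete) changesets
    served = repo.filtered('served')     # additionally hides secret changesets
    assert visible.unfiltered() is repo.unfiltered()
    nall, nserved = len(repo.unfiltered()), len(served)  # lengths differ per view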
451 451 @repofilecache('bookmarks', 'bookmarks.current')
452 452 def _bookmarks(self):
453 453 return bookmarks.bmstore(self)
454 454
455 455 @property
456 456 def _activebookmark(self):
457 457 return self._bookmarks.active
458 458
459 459 def bookmarkheads(self, bookmark):
460 460 name = bookmark.split('@', 1)[0]
461 461 heads = []
462 462 for mark, n in self._bookmarks.iteritems():
463 463 if mark.split('@', 1)[0] == name:
464 464 heads.append(n)
465 465 return heads
466 466
467 467 # _phaserevs and _phasesets depend on changelog. what we need is to
468 468 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
469 469 # can't be easily expressed in filecache mechanism.
470 470 @storecache('phaseroots', '00changelog.i')
471 471 def _phasecache(self):
472 472 return phases.phasecache(self, self._phasedefaults)
473 473
474 474 @storecache('obsstore')
475 475 def obsstore(self):
476 476 # read default format for new obsstore.
477 477 # developer config: format.obsstore-version
478 478 defaultformat = self.ui.configint('format', 'obsstore-version', None)
479 479 # rely on obsstore class default when possible.
480 480 kwargs = {}
481 481 if defaultformat is not None:
482 482 kwargs['defaultformat'] = defaultformat
483 483 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
484 484 store = obsolete.obsstore(self.svfs, readonly=readonly,
485 485 **kwargs)
486 486 if store and readonly:
487 487 self.ui.warn(
488 488 _('obsolete feature not enabled but %i markers found!\n')
489 489 % len(list(store)))
490 490 return store
491 491
492 492 @storecache('00changelog.i')
493 493 def changelog(self):
494 494 c = changelog.changelog(self.svfs)
495 495 if 'HG_PENDING' in os.environ:
496 496 p = os.environ['HG_PENDING']
497 497 if p.startswith(self.root):
498 498 c.readpending('00changelog.i.a')
499 499 return c
500 500
501 501 @storecache('00manifest.i')
502 502 def manifest(self):
503 503 return manifest.manifest(self.svfs)
504 504
505 505 def dirlog(self, dir):
506 506 return self.manifest.dirlog(dir)
507 507
508 508 @repofilecache('dirstate')
509 509 def dirstate(self):
510 510 return dirstate.dirstate(self.vfs, self.ui, self.root,
511 511 self._dirstatevalidate)
512 512
513 513 def _dirstatevalidate(self, node):
514 514 try:
515 515 self.changelog.rev(node)
516 516 return node
517 517 except error.LookupError:
518 518 if not self._dirstatevalidatewarned:
519 519 self._dirstatevalidatewarned = True
520 520 self.ui.warn(_("warning: ignoring unknown"
521 521 " working parent %s!\n") % short(node))
522 522 return nullid
523 523
524 524 def __getitem__(self, changeid):
525 525 if changeid is None or changeid == wdirrev:
526 526 return context.workingctx(self)
527 527 if isinstance(changeid, slice):
528 528 return [context.changectx(self, i)
529 529 for i in xrange(*changeid.indices(len(self)))
530 530 if i not in self.changelog.filteredrevs]
531 531 return context.changectx(self, changeid)
532 532
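The lookup forms accepted by __getitem__ above, as a sketch (repo is assumed to be an open repository):

    wctx = repo[None]     # working directory context
    tip = repo['tip']     # symbolic name, resolved via changectx
    first = repo[0]       # revision number
    recent = repo[-3:]    # slice -> list of changectx, filtered revs skipped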
533 533 def __contains__(self, changeid):
534 534 try:
535 535 self[changeid]
536 536 return True
537 537 except error.RepoLookupError:
538 538 return False
539 539
540 540 def __nonzero__(self):
541 541 return True
542 542
543 543 def __len__(self):
544 544 return len(self.changelog)
545 545
546 546 def __iter__(self):
547 547 return iter(self.changelog)
548 548
549 549 def revs(self, expr, *args):
550 550 '''Find revisions matching a revset.
551 551
552 552 The revset is specified as a string ``expr`` that may contain
553 553 %-formatting to escape certain types. See ``revset.formatspec``.
554 554
555 555 Return a revset.abstractsmartset, which is a list-like interface
556 556 that contains integer revisions.
557 557 '''
558 558 expr = revset.formatspec(expr, *args)
559 559 m = revset.match(None, expr)
560 560 return m(self)
561 561
562 562 def set(self, expr, *args):
563 563 '''Find revisions matching a revset and emit changectx instances.
564 564
565 565 This is a convenience wrapper around ``revs()`` that iterates the
566 566 result and is a generator of changectx instances.
567 567 '''
568 568 for r in self.revs(expr, *args):
569 569 yield self[r]
570 570
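Typical use of revs() and set() with the %-formatting mentioned in the docstring; the revset expressions here are illustrative:

    heads = repo.revs('head() and not closed()')   # abstractsmartset of ints
    for ctx in repo.set('%ld and branch(%s)', heads, 'default'):
        repo.ui.write('%d:%s\n' % (ctx.rev(), ctx))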
571 571 def url(self):
572 572 return 'file:' + self.root
573 573
574 574 def hook(self, name, throw=False, **args):
575 575 """Call a hook, passing this repo instance.
576 576
577 577 This is a convenience method to aid invoking hooks. Extensions likely
578 578 won't call this unless they have registered a custom hook or are
579 579 replacing code that is expected to call a hook.
580 580 """
581 581 return hook.hook(self.ui, self, name, throw, **args)
582 582
583 583 @unfilteredmethod
584 584 def _tag(self, names, node, message, local, user, date, extra=None,
585 585 editor=False):
586 586 if isinstance(names, str):
587 587 names = (names,)
588 588
589 589 branches = self.branchmap()
590 590 for name in names:
591 591 self.hook('pretag', throw=True, node=hex(node), tag=name,
592 592 local=local)
593 593 if name in branches:
594 594 self.ui.warn(_("warning: tag %s conflicts with existing"
595 595 " branch name\n") % name)
596 596
597 597 def writetags(fp, names, munge, prevtags):
598 598 fp.seek(0, 2)
599 599 if prevtags and prevtags[-1] != '\n':
600 600 fp.write('\n')
601 601 for name in names:
602 602 if munge:
603 603 m = munge(name)
604 604 else:
605 605 m = name
606 606
607 607 if (self._tagscache.tagtypes and
608 608 name in self._tagscache.tagtypes):
609 609 old = self.tags().get(name, nullid)
610 610 fp.write('%s %s\n' % (hex(old), m))
611 611 fp.write('%s %s\n' % (hex(node), m))
612 612 fp.close()
613 613
614 614 prevtags = ''
615 615 if local:
616 616 try:
617 617 fp = self.vfs('localtags', 'r+')
618 618 except IOError:
619 619 fp = self.vfs('localtags', 'a')
620 620 else:
621 621 prevtags = fp.read()
622 622
623 623 # local tags are stored in the current charset
624 624 writetags(fp, names, None, prevtags)
625 625 for name in names:
626 626 self.hook('tag', node=hex(node), tag=name, local=local)
627 627 return
628 628
629 629 try:
630 630 fp = self.wfile('.hgtags', 'rb+')
631 631 except IOError as e:
632 632 if e.errno != errno.ENOENT:
633 633 raise
634 634 fp = self.wfile('.hgtags', 'ab')
635 635 else:
636 636 prevtags = fp.read()
637 637
638 638 # committed tags are stored in UTF-8
639 639 writetags(fp, names, encoding.fromlocal, prevtags)
640 640
641 641 fp.close()
642 642
643 643 self.invalidatecaches()
644 644
645 645 if '.hgtags' not in self.dirstate:
646 646 self[None].add(['.hgtags'])
647 647
648 648 m = matchmod.exact(self.root, '', ['.hgtags'])
649 649 tagnode = self.commit(message, user, date, extra=extra, match=m,
650 650 editor=editor)
651 651
652 652 for name in names:
653 653 self.hook('tag', node=hex(node), tag=name, local=local)
654 654
655 655 return tagnode
656 656
657 657 def tag(self, names, node, message, local, user, date, editor=False):
658 658 '''tag a revision with one or more symbolic names.
659 659
660 660 names is a list of strings or, when adding a single tag, names may be a
661 661 string.
662 662
663 663 if local is True, the tags are stored in a per-repository file.
664 664 otherwise, they are stored in the .hgtags file, and a new
665 665 changeset is committed with the change.
666 666
667 667 keyword arguments:
668 668
669 669 local: whether to store tags in non-version-controlled file
670 670 (default False)
671 671
672 672 message: commit message to use if committing
673 673
674 674 user: name of user to use if committing
675 675
676 676 date: date tuple to use if committing'''
677 677
678 678 if not local:
679 679 m = matchmod.exact(self.root, '', ['.hgtags'])
680 680 if any(self.status(match=m, unknown=True, ignored=True)):
681 681 raise error.Abort(_('working copy of .hgtags is changed'),
682 682 hint=_('please commit .hgtags manually'))
683 683
684 684 self.tags() # instantiate the cache
685 685 self._tag(names, node, message, local, user, date, editor=editor)
686 686
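A sketch of the two tagging modes the docstring describes; the tag names, user and node below are placeholders:

    node = repo['tip'].node()
    # global tag: writes .hgtags and commits a new changeset
    repo.tag(['v1.0'], node, 'Added tag v1.0', False, 'alice', None)
    # local tag: only writes .hg/localtags, no commit is made
    repo.tag('nightly', node, '', True, 'alice', None)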
687 687 @filteredpropertycache
688 688 def _tagscache(self):
689 689 '''Returns a tagscache object that contains various tag-related
690 690 caches.'''
691 691
692 692 # This simplifies its cache management by having one decorated
693 693 # function (this one) and the rest simply fetch things from it.
694 694 class tagscache(object):
695 695 def __init__(self):
696 696 # These two define the set of tags for this repository. tags
697 697 # maps tag name to node; tagtypes maps tag name to 'global' or
698 698 # 'local'. (Global tags are defined by .hgtags across all
699 699 # heads, and local tags are defined in .hg/localtags.)
700 700 # They constitute the in-memory cache of tags.
701 701 self.tags = self.tagtypes = None
702 702
703 703 self.nodetagscache = self.tagslist = None
704 704
705 705 cache = tagscache()
706 706 cache.tags, cache.tagtypes = self._findtags()
707 707
708 708 return cache
709 709
710 710 def tags(self):
711 711 '''return a mapping of tag to node'''
712 712 t = {}
713 713 if self.changelog.filteredrevs:
714 714 tags, tt = self._findtags()
715 715 else:
716 716 tags = self._tagscache.tags
717 717 for k, v in tags.iteritems():
718 718 try:
719 719 # ignore tags to unknown nodes
720 720 self.changelog.rev(v)
721 721 t[k] = v
722 722 except (error.LookupError, ValueError):
723 723 pass
724 724 return t
725 725
726 726 def _findtags(self):
727 727 '''Do the hard work of finding tags. Return a pair of dicts
728 728 (tags, tagtypes) where tags maps tag name to node, and tagtypes
729 729 maps tag name to a string like \'global\' or \'local\'.
730 730 Subclasses or extensions are free to add their own tags, but
731 731 should be aware that the returned dicts will be retained for the
732 732 duration of the localrepo object.'''
733 733
734 734 # XXX what tagtype should subclasses/extensions use? Currently
735 735 # mq and bookmarks add tags, but do not set the tagtype at all.
736 736 # Should each extension invent its own tag type? Should there
737 737 # be one tagtype for all such "virtual" tags? Or is the status
738 738 # quo fine?
739 739
740 740 alltags = {} # map tag name to (node, hist)
741 741 tagtypes = {}
742 742
743 743 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
744 744 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
745 745
746 746 # Build the return dicts. Have to re-encode tag names because
747 747 # the tags module always uses UTF-8 (in order not to lose info
748 748 # writing to the cache), but the rest of Mercurial wants them in
749 749 # local encoding.
750 750 tags = {}
751 751 for (name, (node, hist)) in alltags.iteritems():
752 752 if node != nullid:
753 753 tags[encoding.tolocal(name)] = node
754 754 tags['tip'] = self.changelog.tip()
755 755 tagtypes = dict([(encoding.tolocal(name), value)
756 756 for (name, value) in tagtypes.iteritems()])
757 757 return (tags, tagtypes)
758 758
759 759 def tagtype(self, tagname):
760 760 '''
761 761 return the type of the given tag. result can be:
762 762
763 763 'local' : a local tag
764 764 'global' : a global tag
765 765 None : tag does not exist
766 766 '''
767 767
768 768 return self._tagscache.tagtypes.get(tagname)
769 769
770 770 def tagslist(self):
771 771 '''return a list of tags ordered by revision'''
772 772 if not self._tagscache.tagslist:
773 773 l = []
774 774 for t, n in self.tags().iteritems():
775 775 l.append((self.changelog.rev(n), t, n))
776 776 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
777 777
778 778 return self._tagscache.tagslist
779 779
780 780 def nodetags(self, node):
781 781 '''return the tags associated with a node'''
782 782 if not self._tagscache.nodetagscache:
783 783 nodetagscache = {}
784 784 for t, n in self._tagscache.tags.iteritems():
785 785 nodetagscache.setdefault(n, []).append(t)
786 786 for tags in nodetagscache.itervalues():
787 787 tags.sort()
788 788 self._tagscache.nodetagscache = nodetagscache
789 789 return self._tagscache.nodetagscache.get(node, [])
790 790
791 791 def nodebookmarks(self, node):
792 792 """return the list of bookmarks pointing to the specified node"""
793 793 marks = []
794 794 for bookmark, n in self._bookmarks.iteritems():
795 795 if n == node:
796 796 marks.append(bookmark)
797 797 return sorted(marks)
798 798
799 799 def branchmap(self):
800 800 '''returns a dictionary {branch: [branchheads]} with branchheads
801 801 ordered by increasing revision number'''
802 802 branchmap.updatecache(self)
803 803 return self._branchcaches[self.filtername]
804 804
805 805 @unfilteredmethod
806 806 def revbranchcache(self):
807 807 if not self._revbranchcache:
808 808 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
809 809 return self._revbranchcache
810 810
811 811 def branchtip(self, branch, ignoremissing=False):
812 812 '''return the tip node for a given branch
813 813
814 814 If ignoremissing is True, then this method will not raise an error.
815 815 This is helpful for callers that only expect None for a missing branch
816 816 (e.g. namespace).
817 817
818 818 '''
819 819 try:
820 820 return self.branchmap().branchtip(branch)
821 821 except KeyError:
822 822 if not ignoremissing:
823 823 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
824 824 else:
825 825 pass
826 826
827 827 def lookup(self, key):
828 828 return self[key].node()
829 829
830 830 def lookupbranch(self, key, remote=None):
831 831 repo = remote or self
832 832 if key in repo.branchmap():
833 833 return key
834 834
835 835 repo = (remote and remote.local()) and remote or self
836 836 return repo[key].branch()
837 837
838 838 def known(self, nodes):
839 839 cl = self.changelog
840 840 nm = cl.nodemap
841 841 filtered = cl.filteredrevs
842 842 result = []
843 843 for n in nodes:
844 844 r = nm.get(n)
845 845 resp = not (r is None or r in filtered)
846 846 result.append(resp)
847 847 return result
848 848
849 849 def local(self):
850 850 return self
851 851
852 852 def publishing(self):
853 853 # it's safe (and desirable) to trust the publish flag unconditionally
854 854 # so that we don't finalize changes shared between users via ssh or nfs
855 855 return self.ui.configbool('phases', 'publish', True, untrusted=True)
856 856
857 857 def cancopy(self):
858 858 # so statichttprepo's override of local() works
859 859 if not self.local():
860 860 return False
861 861 if not self.publishing():
862 862 return True
863 863 # if publishing we can't copy if there is filtered content
864 864 return not self.filtered('visible').changelog.filteredrevs
865 865
866 866 def shared(self):
867 867 '''the type of shared repository (None if not shared)'''
868 868 if self.sharedpath != self.path:
869 869 return 'store'
870 870 return None
871 871
872 872 def join(self, f, *insidef):
873 873 return self.vfs.join(os.path.join(f, *insidef))
874 874
875 875 def wjoin(self, f, *insidef):
876 876 return self.vfs.reljoin(self.root, f, *insidef)
877 877
878 878 def file(self, f):
879 879 if f[0] == '/':
880 880 f = f[1:]
881 881 return filelog.filelog(self.svfs, f)
882 882
883 883 def parents(self, changeid=None):
884 884 '''get list of changectxs for parents of changeid'''
885 885 msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
886 886 self.ui.deprecwarn(msg, '3.7')
887 887 return self[changeid].parents()
888 888
889 889 def changectx(self, changeid):
890 890 return self[changeid]
891 891
892 892 def setparents(self, p1, p2=nullid):
893 893 self.dirstate.beginparentchange()
894 894 copies = self.dirstate.setparents(p1, p2)
895 895 pctx = self[p1]
896 896 if copies:
897 897 # Adjust copy records, the dirstate cannot do it, it
898 898 # requires access to parents manifests. Preserve them
899 899 # only for entries added to first parent.
900 900 for f in copies:
901 901 if f not in pctx and copies[f] in pctx:
902 902 self.dirstate.copy(copies[f], f)
903 903 if p2 == nullid:
904 904 for f, s in sorted(self.dirstate.copies().items()):
905 905 if f not in pctx and s not in pctx:
906 906 self.dirstate.copy(None, f)
907 907 self.dirstate.endparentchange()
908 908
909 909 def filectx(self, path, changeid=None, fileid=None):
910 910 """changeid can be a changeset revision, node, or tag.
911 911 fileid can be a file revision or node."""
912 912 return context.filectx(self, path, changeid, fileid)
913 913
914 914 def getcwd(self):
915 915 return self.dirstate.getcwd()
916 916
917 917 def pathto(self, f, cwd=None):
918 918 return self.dirstate.pathto(f, cwd)
919 919
920 920 def wfile(self, f, mode='r'):
921 921 return self.wvfs(f, mode)
922 922
923 923 def _link(self, f):
924 924 return self.wvfs.islink(f)
925 925
926 926 def _loadfilter(self, filter):
927 927 if filter not in self.filterpats:
928 928 l = []
929 929 for pat, cmd in self.ui.configitems(filter):
930 930 if cmd == '!':
931 931 continue
932 932 mf = matchmod.match(self.root, '', [pat])
933 933 fn = None
934 934 params = cmd
935 935 for name, filterfn in self._datafilters.iteritems():
936 936 if cmd.startswith(name):
937 937 fn = filterfn
938 938 params = cmd[len(name):].lstrip()
939 939 break
940 940 if not fn:
941 941 fn = lambda s, c, **kwargs: util.filter(s, c)
942 942 # Wrap old filters not supporting keyword arguments
943 943 if not inspect.getargspec(fn)[2]:
944 944 oldfn = fn
945 945 fn = lambda s, c, **kwargs: oldfn(s, c)
946 946 l.append((mf, fn, params))
947 947 self.filterpats[filter] = l
948 948 return self.filterpats[filter]
949 949
950 950 def _filter(self, filterpats, filename, data):
951 951 for mf, fn, cmd in filterpats:
952 952 if mf(filename):
953 953 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
954 954 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
955 955 break
956 956
957 957 return data
958 958
959 959 @unfilteredpropertycache
960 960 def _encodefilterpats(self):
961 961 return self._loadfilter('encode')
962 962
963 963 @unfilteredpropertycache
964 964 def _decodefilterpats(self):
965 965 return self._loadfilter('decode')
966 966
967 967 def adddatafilter(self, name, filter):
968 968 self._datafilters[name] = filter
969 969
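How a filter registered through adddatafilter() pairs with the [encode]/[decode] configuration read by _loadfilter() below; 'upper:' and upperfilter are hypothetical names:

    def upperfilter(s, params, **kwargs):   # params: text after the filter name
        return s.upper()
    repo.adddatafilter('upper:', upperfilter)
    # with this in hgrc:
    #   [encode]
    #   **.txt = upper:
    # wread('foo.txt') now returns the uppercased data.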
970 970 def wread(self, filename):
971 971 if self._link(filename):
972 972 data = self.wvfs.readlink(filename)
973 973 else:
974 974 data = self.wvfs.read(filename)
975 975 return self._filter(self._encodefilterpats, filename, data)
976 976
977 977 def wwrite(self, filename, data, flags, backgroundclose=False):
978 978 """write ``data`` into ``filename`` in the working directory
979 979
980 980 This returns the length of the written (maybe decoded) data.
981 981 """
982 982 data = self._filter(self._decodefilterpats, filename, data)
983 983 if 'l' in flags:
984 984 self.wvfs.symlink(data, filename)
985 985 else:
986 986 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
987 987 if 'x' in flags:
988 988 self.wvfs.setflags(filename, False, True)
989 989 return len(data)
990 990
991 991 def wwritedata(self, filename, data):
992 992 return self._filter(self._decodefilterpats, filename, data)
993 993
994 994 def currenttransaction(self):
995 995 """return the current transaction or None if non exists"""
996 996 if self._transref:
997 997 tr = self._transref()
998 998 else:
999 999 tr = None
1000 1000
1001 1001 if tr and tr.running():
1002 1002 return tr
1003 1003 return None
1004 1004
1005 1005 def transaction(self, desc, report=None):
1006 1006 if (self.ui.configbool('devel', 'all-warnings')
1007 1007 or self.ui.configbool('devel', 'check-locks')):
1008 1008 l = self._lockref and self._lockref()
1009 1009 if l is None or not l.held:
1010 1010 self.ui.develwarn('transaction with no lock')
1011 1011 tr = self.currenttransaction()
1012 1012 if tr is not None:
1013 1013 return tr.nest()
1014 1014
1015 1015 # abort here if the journal already exists
1016 1016 if self.svfs.exists("journal"):
1017 1017 raise error.RepoError(
1018 1018 _("abandoned transaction found"),
1019 1019 hint=_("run 'hg recover' to clean up transaction"))
1020 1020
1021 1021 # make journal.dirstate contain in-memory changes at this point
1022 1022 self.dirstate.write(None)
1023 1023
1024 1024 idbase = "%.40f#%f" % (random.random(), time.time())
1025 1025 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1026 1026 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1027 1027
1028 1028 self._writejournal(desc)
1029 1029 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1030 1030 if report:
1031 1031 rp = report
1032 1032 else:
1033 1033 rp = self.ui.warn
1034 1034 vfsmap = {'plain': self.vfs} # root of .hg/
1035 1035 # we must avoid cyclic reference between repo and transaction.
1036 1036 reporef = weakref.ref(self)
1037 1037 def validate(tr):
1038 1038 """will run pre-closing hooks"""
1039 1039 reporef().hook('pretxnclose', throw=True,
1040 1040 txnname=desc, **tr.hookargs)
1041 1041 def releasefn(tr, success):
1042 1042 repo = reporef()
1043 1043 if success:
1044 1044 # this should be explicitly invoked here, because
1045 1045 # in-memory changes aren't written out when closing the
1046 1046 # transaction if tr.addfilegenerator (via
1047 1047 # dirstate.write or so) wasn't invoked while the
1048 1048 # transaction was running
1049 1049 repo.dirstate.write(None)
1050 1050 else:
1051 1051 # prevent in-memory changes from being written out at
1052 1052 # the end of outer wlock scope or so
1053 1053 repo.dirstate.invalidate()
1054 1054
1055 1055 # discard all changes (including ones already written
1056 1056 # out) in this transaction
1057 1057 repo.vfs.rename('journal.dirstate', 'dirstate')
1058 1058
1059 1059 repo.invalidate(clearfilecache=True)
1060 1060
1061 1061 tr = transaction.transaction(rp, self.svfs, vfsmap,
1062 1062 "journal",
1063 1063 "undo",
1064 1064 aftertrans(renames),
1065 1065 self.store.createmode,
1066 1066 validator=validate,
1067 1067 releasefn=releasefn)
1068 1068
1069 1069 tr.hookargs['txnid'] = txnid
1070 1070 # note: writing the fncache only during finalize means that the file is
1071 1071 # outdated when running hooks. As fncache is used for streaming clones,
1072 1072 # this is not expected to break anything that happens during the hooks.
1073 1073 tr.addfinalize('flush-fncache', self.store.write)
1074 1074 def txnclosehook(tr2):
1075 1075 """To be run if transaction is successful, will schedule a hook run
1076 1076 """
1077 1077 # Don't reference tr2 in hook() so we don't hold a reference.
1078 1078 # This reduces memory consumption when there are multiple
1079 1079 # transactions per lock. This can likely go away if issue5045
1080 1080 # fixes the function accumulation.
1081 1081 hookargs = tr2.hookargs
1082 1082
1083 1083 def hook():
1084 1084 reporef().hook('txnclose', throw=False, txnname=desc,
1085 1085 **hookargs)
1086 1086 reporef()._afterlock(hook)
1087 1087 tr.addfinalize('txnclose-hook', txnclosehook)
1088 1088 def txnaborthook(tr2):
1089 1089 """To be run if transaction is aborted
1090 1090 """
1091 1091 reporef().hook('txnabort', throw=False, txnname=desc,
1092 1092 **tr2.hookargs)
1093 1093 tr.addabort('txnabort-hook', txnaborthook)
1094 1094 # avoid eager cache invalidation. in-memory data should be identical
1095 1095 # to stored data if transaction has no error.
1096 1096 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1097 1097 self._transref = weakref.ref(tr)
1098 1098 return tr
1099 1099
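The lifecycle callers are expected to follow around the machinery above, as a sketch (assumes the store lock is already held, which the devel warning at the top of transaction() checks for):

    tr = repo.transaction('example')
    try:
        # ... write store data through repo.svfs ...
        tr.close()      # runs validate(), the finalizers, then the txnclose hook
    finally:
        tr.release()    # if not closed, aborts: txnabort hook, journal rollback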
1100 1100 def _journalfiles(self):
1101 1101 return ((self.svfs, 'journal'),
1102 1102 (self.vfs, 'journal.dirstate'),
1103 1103 (self.vfs, 'journal.branch'),
1104 1104 (self.vfs, 'journal.desc'),
1105 1105 (self.vfs, 'journal.bookmarks'),
1106 1106 (self.svfs, 'journal.phaseroots'))
1107 1107
1108 1108 def undofiles(self):
1109 1109 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1110 1110
1111 1111 def _writejournal(self, desc):
1112 1112 self.vfs.write("journal.dirstate",
1113 1113 self.vfs.tryread("dirstate"))
1114 1114 self.vfs.write("journal.branch",
1115 1115 encoding.fromlocal(self.dirstate.branch()))
1116 1116 self.vfs.write("journal.desc",
1117 1117 "%d\n%s\n" % (len(self), desc))
1118 1118 self.vfs.write("journal.bookmarks",
1119 1119 self.vfs.tryread("bookmarks"))
1120 1120 self.svfs.write("journal.phaseroots",
1121 1121 self.svfs.tryread("phaseroots"))
1122 1122
1123 1123 def recover(self):
1124 1124 with self.lock():
1125 1125 if self.svfs.exists("journal"):
1126 1126 self.ui.status(_("rolling back interrupted transaction\n"))
1127 1127 vfsmap = {'': self.svfs,
1128 1128 'plain': self.vfs,}
1129 1129 transaction.rollback(self.svfs, vfsmap, "journal",
1130 1130 self.ui.warn)
1131 1131 self.invalidate()
1132 1132 return True
1133 1133 else:
1134 1134 self.ui.warn(_("no interrupted transaction available\n"))
1135 1135 return False
1136 1136
1137 1137 def rollback(self, dryrun=False, force=False):
1138 1138 wlock = lock = dsguard = None
1139 1139 try:
1140 1140 wlock = self.wlock()
1141 1141 lock = self.lock()
1142 1142 if self.svfs.exists("undo"):
1143 1143 dsguard = cmdutil.dirstateguard(self, 'rollback')
1144 1144
1145 1145 return self._rollback(dryrun, force, dsguard)
1146 1146 else:
1147 1147 self.ui.warn(_("no rollback information available\n"))
1148 1148 return 1
1149 1149 finally:
1150 1150 release(dsguard, lock, wlock)
1151 1151
1152 1152 @unfilteredmethod # Until we get smarter cache management
1153 1153 def _rollback(self, dryrun, force, dsguard):
1154 1154 ui = self.ui
1155 1155 try:
1156 1156 args = self.vfs.read('undo.desc').splitlines()
1157 1157 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1158 1158 if len(args) >= 3:
1159 1159 detail = args[2]
1160 1160 oldtip = oldlen - 1
1161 1161
1162 1162 if detail and ui.verbose:
1163 1163 msg = (_('repository tip rolled back to revision %s'
1164 1164 ' (undo %s: %s)\n')
1165 1165 % (oldtip, desc, detail))
1166 1166 else:
1167 1167 msg = (_('repository tip rolled back to revision %s'
1168 1168 ' (undo %s)\n')
1169 1169 % (oldtip, desc))
1170 1170 except IOError:
1171 1171 msg = _('rolling back unknown transaction\n')
1172 1172 desc = None
1173 1173
1174 1174 if not force and self['.'] != self['tip'] and desc == 'commit':
1175 1175 raise error.Abort(
1176 1176 _('rollback of last commit while not checked out '
1177 1177 'may lose data'), hint=_('use -f to force'))
1178 1178
1179 1179 ui.status(msg)
1180 1180 if dryrun:
1181 1181 return 0
1182 1182
1183 1183 parents = self.dirstate.parents()
1184 1184 self.destroying()
1185 1185 vfsmap = {'plain': self.vfs, '': self.svfs}
1186 1186 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1187 1187 if self.vfs.exists('undo.bookmarks'):
1188 1188 self.vfs.rename('undo.bookmarks', 'bookmarks')
1189 1189 if self.svfs.exists('undo.phaseroots'):
1190 1190 self.svfs.rename('undo.phaseroots', 'phaseroots')
1191 1191 self.invalidate()
1192 1192
1193 1193 parentgone = (parents[0] not in self.changelog.nodemap or
1194 1194 parents[1] not in self.changelog.nodemap)
1195 1195 if parentgone:
1196 1196 # prevent dirstateguard from overwriting already restored one
1197 1197 dsguard.close()
1198 1198
1199 1199 self.vfs.rename('undo.dirstate', 'dirstate')
1200 1200 try:
1201 1201 branch = self.vfs.read('undo.branch')
1202 1202 self.dirstate.setbranch(encoding.tolocal(branch))
1203 1203 except IOError:
1204 1204 ui.warn(_('named branch could not be reset: '
1205 1205 'current branch is still \'%s\'\n')
1206 1206 % self.dirstate.branch())
1207 1207
1208 1208 self.dirstate.invalidate()
1209 1209 parents = tuple([p.rev() for p in self[None].parents()])
1210 1210 if len(parents) > 1:
1211 1211 ui.status(_('working directory now based on '
1212 1212 'revisions %d and %d\n') % parents)
1213 1213 else:
1214 1214 ui.status(_('working directory now based on '
1215 1215 'revision %d\n') % parents)
1216 1216 mergemod.mergestate.clean(self, self['.'].node())
1217 1217
1218 1218 # TODO: if we know which new heads may result from this rollback, pass
1219 1219 # them to destroy(), which will prevent the branchhead cache from being
1220 1220 # invalidated.
1221 1221 self.destroyed()
1222 1222 return 0
1223 1223
1224 1224 def invalidatecaches(self):
1225 1225
1226 1226 if '_tagscache' in vars(self):
1227 1227 # can't use delattr on proxy
1228 1228 del self.__dict__['_tagscache']
1229 1229
1230 1230 self.unfiltered()._branchcaches.clear()
1231 1231 self.invalidatevolatilesets()
1232 1232
1233 1233 def invalidatevolatilesets(self):
1234 1234 self.filteredrevcache.clear()
1235 1235 obsolete.clearobscaches(self)
1236 1236
1237 1237 def invalidatedirstate(self):
1238 1238 '''Invalidates the dirstate, causing the next call to dirstate
1239 1239 to check whether it has been modified since the last time it was
1240 1240 read, rereading it if it has.
1241 1241
1242 1242 This differs from dirstate.invalidate() in that it doesn't always
1243 1243 reread the dirstate. Use dirstate.invalidate() if you want to
1244 1244 explicitly reread the dirstate (i.e. to restore it to a previous
1245 1245 known good state).'''
1246 1246 if hasunfilteredcache(self, 'dirstate'):
1247 1247 for k in self.dirstate._filecache:
1248 1248 try:
1249 1249 delattr(self.dirstate, k)
1250 1250 except AttributeError:
1251 1251 pass
1252 1252 delattr(self.unfiltered(), 'dirstate')
1253 1253
1254 1254 def invalidate(self, clearfilecache=False):
1255 1255 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1256 1256 for k in self._filecache.keys():
1257 1257 # dirstate is invalidated separately in invalidatedirstate()
1258 1258 if k == 'dirstate':
1259 1259 continue
1260 1260
1261 1261 if clearfilecache:
1262 1262 del self._filecache[k]
1263 1263 try:
1264 1264 delattr(unfiltered, k)
1265 1265 except AttributeError:
1266 1266 pass
1267 1267 self.invalidatecaches()
1268 1268 self.store.invalidatecaches()
1269 1269
1270 1270 def invalidateall(self):
1271 1271 '''Fully invalidates both store and non-store parts, causing the
1272 1272 subsequent operation to reread any outside changes.'''
1273 1273 # extension should hook this to invalidate its caches
1274 1274 self.invalidate()
1275 1275 self.invalidatedirstate()
1276 1276
1277 1277 def _refreshfilecachestats(self, tr):
1278 1278 """Reload stats of cached files so that they are flagged as valid"""
1279 1279 for k, ce in self._filecache.items():
1280 1280 if k == 'dirstate' or k not in self.__dict__:
1281 1281 continue
1282 1282 ce.refresh()
1283 1283
1284 1284 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1285 1285 inheritchecker=None, parentenvvar=None):
1286 1286 parentlock = None
1287 1287 # the contents of parentenvvar are used by the underlying lock to
1288 1288 # determine whether it can be inherited
1289 1289 if parentenvvar is not None:
1290 1290 parentlock = os.environ.get(parentenvvar)
1291 1291 try:
1292 1292 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1293 1293 acquirefn=acquirefn, desc=desc,
1294 1294 inheritchecker=inheritchecker,
1295 1295 parentlock=parentlock)
1296 1296 except error.LockHeld as inst:
1297 1297 if not wait:
1298 1298 raise
1299 1299 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1300 1300 (desc, inst.locker))
1301 1301 # default to 600 seconds timeout
1302 1302 l = lockmod.lock(vfs, lockname,
1303 1303 int(self.ui.config("ui", "timeout", "600")),
1304 1304 releasefn=releasefn, acquirefn=acquirefn,
1305 1305 desc=desc)
1306 1306 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1307 1307 return l
1308 1308
1309 1309 def _afterlock(self, callback):
1310 1310 """add a callback to be run when the repository is fully unlocked
1311 1311
1312 1312 The callback will be executed when the outermost lock is released
1313 1313 (with wlock being higher level than 'lock')."""
1314 1314 for ref in (self._wlockref, self._lockref):
1315 1315 l = ref and ref()
1316 1316 if l and l.held:
1317 1317 l.postrelease.append(callback)
1318 1318 break
1319 1319 else: # no lock has been found.
1320 1320 callback()
1321 1321
1322 1322 def lock(self, wait=True):
1323 1323 '''Lock the repository store (.hg/store) and return a weak reference
1324 1324 to the lock. Use this before modifying the store (e.g. committing or
1325 1325 stripping). If you are opening a transaction, get a lock as well.
1326 1326
1327 1327 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1328 1328 'wlock' first to avoid a deadlock hazard.'''
1329 1329 l = self._lockref and self._lockref()
1330 1330 if l is not None and l.held:
1331 1331 l.lock()
1332 1332 return l
1333 1333
1334 1334 l = self._lock(self.svfs, "lock", wait, None,
1335 1335 self.invalidate, _('repository %s') % self.origroot)
1336 1336 self._lockref = weakref.ref(l)
1337 1337 return l
1338 1338
1339 1339 def _wlockchecktransaction(self):
1340 1340 if self.currenttransaction() is not None:
1341 1341 raise error.LockInheritanceContractViolation(
1342 1342 'wlock cannot be inherited in the middle of a transaction')
1343 1343
1344 1344 def wlock(self, wait=True):
1345 1345 '''Lock the non-store parts of the repository (everything under
1346 1346 .hg except .hg/store) and return a weak reference to the lock.
1347 1347
1348 1348 Use this before modifying files in .hg.
1349 1349
1350 1350 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1351 1351 'wlock' first to avoid a deadlock hazard.'''
1352 1352 l = self._wlockref and self._wlockref()
1353 1353 if l is not None and l.held:
1354 1354 l.lock()
1355 1355 return l
1356 1356
1357 1357 # We do not need to check for non-waiting lock acquisition. Such
1358 1358 # acquisition would not cause a deadlock; it would just fail.
1359 1359 if wait and (self.ui.configbool('devel', 'all-warnings')
1360 1360 or self.ui.configbool('devel', 'check-locks')):
1361 1361 l = self._lockref and self._lockref()
1362 1362 if l is not None and l.held:
1363 1363 self.ui.develwarn('"wlock" acquired after "lock"')
1364 1364
1365 1365 def unlock():
1366 1366 if self.dirstate.pendingparentchange():
1367 1367 self.dirstate.invalidate()
1368 1368 else:
1369 1369 self.dirstate.write(None)
1370 1370
1371 1371 self._filecache['dirstate'].refresh()
1372 1372
1373 1373 l = self._lock(self.vfs, "wlock", wait, unlock,
1374 1374 self.invalidatedirstate, _('working directory of %s') %
1375 1375 self.origroot,
1376 1376 inheritchecker=self._wlockchecktransaction,
1377 1377 parentenvvar='HG_WLOCK_LOCKER')
1378 1378 self._wlockref = weakref.ref(l)
1379 1379 return l
1380 1380
1381 1381 def _currentlock(self, lockref):
1382 1382 """Returns the lock if it's held, or None if it's not."""
1383 1383 if lockref is None:
1384 1384 return None
1385 1385 l = lockref()
1386 1386 if l is None or not l.held:
1387 1387 return None
1388 1388 return l
1389 1389
1390 1390 def currentwlock(self):
1391 1391 """Returns the wlock if it's held, or None if it's not."""
1392 1392 return self._currentlock(self._wlockref)
1393 1393
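The acquisition order both lock docstrings insist on, in sketch form (release is the lockmod.release alias defined at the top of this module):

    wlock = lock = None
    try:
        wlock = repo.wlock()   # always first: the working directory lock
        lock = repo.lock()     # then the store lock
        # ... modify the working directory and the store ...
    finally:
        release(lock, wlock)   # release in reverse acquisition order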
1394 1394 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1395 1395 """
1396 1396 commit an individual file as part of a larger transaction
1397 1397 """
1398 1398
1399 1399 fname = fctx.path()
1400 1400 fparent1 = manifest1.get(fname, nullid)
1401 1401 fparent2 = manifest2.get(fname, nullid)
1402 1402 if isinstance(fctx, context.filectx):
1403 1403 node = fctx.filenode()
1404 1404 if node in [fparent1, fparent2]:
1405 1405 self.ui.debug('reusing %s filelog entry\n' % fname)
1406 1406 return node
1407 1407
1408 1408 flog = self.file(fname)
1409 1409 meta = {}
1410 1410 copy = fctx.renamed()
1411 1411 if copy and copy[0] != fname:
1412 1412 # Mark the new revision of this file as a copy of another
1413 1413 # file. This copy data will effectively act as a parent
1414 1414 # of this new revision. If this is a merge, the first
1415 1415 # parent will be the nullid (meaning "look up the copy data")
1416 1416 # and the second one will be the other parent. For example:
1417 1417 #
1418 1418 # 0 --- 1 --- 3 rev1 changes file foo
1419 1419 # \ / rev2 renames foo to bar and changes it
1420 1420 # \- 2 -/ rev3 should have bar with all changes and
1421 1421 # should record that bar descends from
1422 1422 # bar in rev2 and foo in rev1
1423 1423 #
1424 1424 # this allows this merge to succeed:
1425 1425 #
1426 1426 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1427 1427 # \ / merging rev3 and rev4 should use bar@rev2
1428 1428 # \- 2 --- 4 as the merge base
1429 1429 #
1430 1430
1431 1431 cfname = copy[0]
1432 1432 crev = manifest1.get(cfname)
1433 1433 newfparent = fparent2
1434 1434
1435 1435 if manifest2: # branch merge
1436 1436 if fparent2 == nullid or crev is None: # copied on remote side
1437 1437 if cfname in manifest2:
1438 1438 crev = manifest2[cfname]
1439 1439 newfparent = fparent1
1440 1440
1441 1441 # Here, we used to search backwards through history to try to find
1442 1442 # where the file copy came from if the source of a copy was not in
1443 1443 # the parent directory. However, this doesn't actually make sense to
1444 1444 # do (what does a copy from something not in your working copy even
1445 1445 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1446 1446 # the user that copy information was dropped, so if they didn't
1447 1447 # expect this outcome it can be fixed, but this is the correct
1448 1448 # behavior in this circumstance.
1449 1449
1450 1450 if crev:
1451 1451 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1452 1452 meta["copy"] = cfname
1453 1453 meta["copyrev"] = hex(crev)
1454 1454 fparent1, fparent2 = nullid, newfparent
1455 1455 else:
1456 1456 self.ui.warn(_("warning: can't find ancestor for '%s' "
1457 1457 "copied from '%s'!\n") % (fname, cfname))
1458 1458
1459 1459 elif fparent1 == nullid:
1460 1460 fparent1, fparent2 = fparent2, nullid
1461 1461 elif fparent2 != nullid:
1462 1462 # is one parent an ancestor of the other?
1463 1463 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1464 1464 if fparent1 in fparentancestors:
1465 1465 fparent1, fparent2 = fparent2, nullid
1466 1466 elif fparent2 in fparentancestors:
1467 1467 fparent2 = nullid
1468 1468
1469 1469 # is the file changed?
1470 1470 text = fctx.data()
1471 1471 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1472 1472 changelist.append(fname)
1473 1473 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1474 1474 # are just the flags changed during merge?
1475 1475 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1476 1476 changelist.append(fname)
1477 1477
1478 1478 return fparent1
1479 1479
1480 1480 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1481 1481 """check for commit arguments that aren't commitable"""
1482 force = False
1483 if not force and (match.isexact() or match.prefix()):
1482 if match.isexact() or match.prefix():
1484 1483 matched = set(status.modified + status.added + status.removed)
1485 1484
1486 1485 for f in match.files():
1487 1486 f = self.dirstate.normalize(f)
1488 1487 if f == '.' or f in matched or f in wctx.substate:
1489 1488 continue
1490 1489 if f in status.deleted:
1491 1490 fail(f, _('file not found!'))
1492 1491 if f in vdirs: # visited directory
1493 1492 d = f + '/'
1494 1493 for mf in matched:
1495 1494 if mf.startswith(d):
1496 1495 break
1497 1496 else:
1498 1497 fail(f, _("no match under directory!"))
1499 1498 elif f not in self.dirstate:
1500 1499 fail(f, _("file not tracked!"))
1501 1500
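The force flag this changeset removes was dead code here: checkcommitpatterns() has a single call site, in commit() below, and that caller already skips the call entirely when force is set:

    # sole call site, unchanged later in this file:
    if not force:
        self.checkcommitpatterns(wctx, vdirs, match, status, fail)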
1502 1501 @unfilteredmethod
1503 1502 def commit(self, text="", user=None, date=None, match=None, force=False,
1504 1503 editor=False, extra=None):
1505 1504 """Add a new revision to current repository.
1506 1505
1507 1506 Revision information is gathered from the working directory,
1508 1507 match can be used to filter the committed files. If editor is
1509 1508 supplied, it is called to get a commit message.
1510 1509 """
1511 1510 if extra is None:
1512 1511 extra = {}
1513 1512
1514 1513 def fail(f, msg):
1515 1514 raise error.Abort('%s: %s' % (f, msg))
1516 1515
1517 1516 if not match:
1518 1517 match = matchmod.always(self.root, '')
1519 1518
1520 1519 if not force:
1521 1520 vdirs = []
1522 1521 match.explicitdir = vdirs.append
1523 1522 match.bad = fail
1524 1523
1525 1524 wlock = lock = tr = None
1526 1525 try:
1527 1526 wlock = self.wlock()
1528 1527 lock = self.lock() # for recent changelog (see issue4368)
1529 1528
1530 1529 wctx = self[None]
1531 1530 merge = len(wctx.parents()) > 1
1532 1531
1533 1532 if not force and merge and match.ispartial():
1534 1533 raise error.Abort(_('cannot partially commit a merge '
1535 1534 '(do not specify files or patterns)'))
1536 1535
1537 1536 status = self.status(match=match, clean=force)
1538 1537 if force:
1539 1538 status.modified.extend(status.clean) # mq may commit clean files
1540 1539
1541 1540 # check subrepos
1542 1541 subs = []
1543 1542 commitsubs = set()
1544 1543 newstate = wctx.substate.copy()
1545 1544 # only manage subrepos and .hgsubstate if .hgsub is present
1546 1545 if '.hgsub' in wctx:
1547 1546 # we'll decide whether to track this ourselves, thanks
1548 1547 for c in status.modified, status.added, status.removed:
1549 1548 if '.hgsubstate' in c:
1550 1549 c.remove('.hgsubstate')
1551 1550
1552 1551 # compare current state to last committed state
1553 1552 # build new substate based on last committed state
1554 1553 oldstate = wctx.p1().substate
1555 1554 for s in sorted(newstate.keys()):
1556 1555 if not match(s):
1557 1556 # ignore working copy, use old state if present
1558 1557 if s in oldstate:
1559 1558 newstate[s] = oldstate[s]
1560 1559 continue
1561 1560 if not force:
1562 1561 raise error.Abort(
1563 1562 _("commit with new subrepo %s excluded") % s)
1564 1563 dirtyreason = wctx.sub(s).dirtyreason(True)
1565 1564 if dirtyreason:
1566 1565 if not self.ui.configbool('ui', 'commitsubrepos'):
1567 1566 raise error.Abort(dirtyreason,
1568 1567 hint=_("use --subrepos for recursive commit"))
1569 1568 subs.append(s)
1570 1569 commitsubs.add(s)
1571 1570 else:
1572 1571 bs = wctx.sub(s).basestate()
1573 1572 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1574 1573 if oldstate.get(s, (None, None, None))[1] != bs:
1575 1574 subs.append(s)
1576 1575
1577 1576 # check for removed subrepos
1578 1577 for p in wctx.parents():
1579 1578 r = [s for s in p.substate if s not in newstate]
1580 1579 subs += [s for s in r if match(s)]
1581 1580 if subs:
1582 1581 if (not match('.hgsub') and
1583 1582 '.hgsub' in (wctx.modified() + wctx.added())):
1584 1583 raise error.Abort(
1585 1584 _("can't commit subrepos without .hgsub"))
1586 1585 status.modified.insert(0, '.hgsubstate')
1587 1586
1588 1587 elif '.hgsub' in status.removed:
1589 1588 # clean up .hgsubstate when .hgsub is removed
1590 1589 if ('.hgsubstate' in wctx and
1591 1590 '.hgsubstate' not in (status.modified + status.added +
1592 1591 status.removed)):
1593 1592 status.removed.insert(0, '.hgsubstate')
1594 1593
1595 1594 # make sure all explicit patterns are matched
1596 1595 if not force:
1597 1596 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1598 1597
1599 1598 cctx = context.workingcommitctx(self, status,
1600 1599 text, user, date, extra)
1601 1600
1602 1601 # internal config: ui.allowemptycommit
1603 1602 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1604 1603 or extra.get('close') or merge or cctx.files()
1605 1604 or self.ui.configbool('ui', 'allowemptycommit'))
1606 1605 if not allowemptycommit:
1607 1606 return None
1608 1607
1609 1608 if merge and cctx.deleted():
1610 1609 raise error.Abort(_("cannot commit merge with missing files"))
1611 1610
1612 1611 ms = mergemod.mergestate.read(self)
1613 1612
1614 1613 if list(ms.unresolved()):
1615 1614 raise error.Abort(_('unresolved merge conflicts '
1616 1615 '(see "hg help resolve")'))
1617 1616 if ms.mdstate() != 's' or list(ms.driverresolved()):
1618 1617 raise error.Abort(_('driver-resolved merge conflicts'),
1619 1618 hint=_('run "hg resolve --all" to resolve'))
1620 1619
1621 1620 if editor:
1622 1621 cctx._text = editor(self, cctx, subs)
1623 1622 edited = (text != cctx._text)
1624 1623
1625 1624 # Save commit message in case this transaction gets rolled back
1626 1625 # (e.g. by a pretxncommit hook). Leave the content alone on
1627 1626 # the assumption that the user will use the same editor again.
1628 1627 msgfn = self.savecommitmessage(cctx._text)
1629 1628
1630 1629 # commit subs and write new state
1631 1630 if subs:
1632 1631 for s in sorted(commitsubs):
1633 1632 sub = wctx.sub(s)
1634 1633 self.ui.status(_('committing subrepository %s\n') %
1635 1634 subrepo.subrelpath(sub))
1636 1635 sr = sub.commit(cctx._text, user, date)
1637 1636 newstate[s] = (newstate[s][0], sr)
1638 1637 subrepo.writestate(self, newstate)
1639 1638
1640 1639 p1, p2 = self.dirstate.parents()
1641 1640 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1642 1641 try:
1643 1642 self.hook("precommit", throw=True, parent1=hookp1,
1644 1643 parent2=hookp2)
1645 1644 tr = self.transaction('commit')
1646 1645 ret = self.commitctx(cctx, True)
1647 1646 except: # re-raises
1648 1647 if edited:
1649 1648 self.ui.write(
1650 1649 _('note: commit message saved in %s\n') % msgfn)
1651 1650 raise
1652 1651 # update bookmarks, dirstate and mergestate
1653 1652 bookmarks.update(self, [p1, p2], ret)
1654 1653 cctx.markcommitted(ret)
1655 1654 ms.reset()
1656 1655 tr.close()
1657 1656
1658 1657 finally:
1659 1658 lockmod.release(tr, lock, wlock)
1660 1659
1661 1660 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1662 1661 # hack for commands that use a temporary commit (e.g. histedit):
1663 1662 # the temporary commit may have been stripped before the hook runs
1664 1663 if self.changelog.hasnode(ret):
1665 1664 self.hook("commit", node=node, parent1=parent1,
1666 1665 parent2=parent2)
1667 1666 self._afterlock(commithook)
1668 1667 return ret
1669 1668
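For orientation: from Python, commit() is reached through a repository object. A minimal, hypothetical driver (the path and metadata below are made up; `hg commit` remains the supported interface):

    # Sketch only: drive commit() directly from the API.
    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')   # assumed path
    node = repo.commit(text='example message',
                       user='alice <a@example.com>')
    if node is None:
        print('nothing to commit')   # commit() returns None for no-ops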
1670 1669 @unfilteredmethod
1671 1670 def commitctx(self, ctx, error=False):
1672 1671 """Add a new revision to current repository.
1673 1672 Revision information is passed via the context argument.
1674 1673 """
1675 1674
1676 1675 tr = None
1677 1676 p1, p2 = ctx.p1(), ctx.p2()
1678 1677 user = ctx.user()
1679 1678
1680 1679 lock = self.lock()
1681 1680 try:
1682 1681 tr = self.transaction("commit")
1683 1682 trp = weakref.proxy(tr)
1684 1683
1685 1684 if ctx.files():
1686 1685 m1 = p1.manifest()
1687 1686 m2 = p2.manifest()
1688 1687 m = m1.copy()
1689 1688
1690 1689 # check in files
1691 1690 added = []
1692 1691 changed = []
1693 1692 removed = list(ctx.removed())
1694 1693 linkrev = len(self)
1695 1694 self.ui.note(_("committing files:\n"))
1696 1695 for f in sorted(ctx.modified() + ctx.added()):
1697 1696 self.ui.note(f + "\n")
1698 1697 try:
1699 1698 fctx = ctx[f]
1700 1699 if fctx is None:
1701 1700 removed.append(f)
1702 1701 else:
1703 1702 added.append(f)
1704 1703 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1705 1704 trp, changed)
1706 1705 m.setflag(f, fctx.flags())
1707 1706 except OSError as inst:
1708 1707 self.ui.warn(_("trouble committing %s!\n") % f)
1709 1708 raise
1710 1709 except IOError as inst:
1711 1710 errcode = getattr(inst, 'errno', errno.ENOENT)
1712 1711 if error or errcode and errcode != errno.ENOENT:
1713 1712 self.ui.warn(_("trouble committing %s!\n") % f)
1714 1713 raise
1715 1714
1716 1715 # update manifest
1717 1716 self.ui.note(_("committing manifest\n"))
1718 1717 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1719 1718 drop = [f for f in removed if f in m]
1720 1719 for f in drop:
1721 1720 del m[f]
1722 1721 mn = self.manifest.add(m, trp, linkrev,
1723 1722 p1.manifestnode(), p2.manifestnode(),
1724 1723 added, drop)
1725 1724 files = changed + removed
1726 1725 else:
1727 1726 mn = p1.manifestnode()
1728 1727 files = []
1729 1728
1730 1729 # update changelog
1731 1730 self.ui.note(_("committing changelog\n"))
1732 1731 self.changelog.delayupdate(tr)
1733 1732 n = self.changelog.add(mn, files, ctx.description(),
1734 1733 trp, p1.node(), p2.node(),
1735 1734 user, ctx.date(), ctx.extra().copy())
1736 1735 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1737 1736 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1738 1737 parent2=xp2)
1739 1738 # set the new commit in its proper phase
1740 1739 targetphase = subrepo.newcommitphase(self.ui, ctx)
1741 1740 if targetphase:
1742 1741 # retracting the boundary does not alter parent changesets.
1743 1742 # if a parent has a higher phase, the resulting phase will
1744 1743 # be compliant anyway
1745 1744 #
1746 1745 # if the minimal phase was 0 we don't need to retract anything
1747 1746 phases.retractboundary(self, tr, targetphase, [n])
1748 1747 tr.close()
1749 1748 branchmap.updatecache(self.filtered('served'))
1750 1749 return n
1751 1750 finally:
1752 1751 if tr:
1753 1752 tr.release()
1754 1753 lock.release()
1755 1754
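commitctx() is also the backend for commits built entirely in memory with context.memctx, as used by convert and histedit. A sketch, assuming an existing `repo` object (the file name and content are made up):

    # Sketch: create a one-file commit without touching the working copy.
    from mercurial import context

    def filectxfn(repo, memctx, path):
        # return the in-memory content for each file in the commit
        return context.memfilectx(repo, path, 'new content\n')

    mctx = context.memctx(repo, (repo['.'].node(), None),
                          'in-memory commit', ['touched.txt'],
                          filectxfn, user='alice <a@example.com>')
    node = repo.commitctx(mctx)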
1756 1755 @unfilteredmethod
1757 1756 def destroying(self):
1758 1757 '''Inform the repository that nodes are about to be destroyed.
1759 1758 Intended for use by strip and rollback, so there's a common
1760 1759 place for anything that has to be done before destroying history.
1761 1760
1762 1761 This is mostly useful for saving state that is in memory and waiting
1763 1762 to be flushed when the current lock is released. Because a call to
1764 1763 destroyed is imminent, the repo will be invalidated, causing those
1765 1764 changes either to stay in memory (waiting for the next unlock) or to
1766 1765 vanish completely.
1767 1766 '''
1768 1767 # When using the same lock to commit and strip, the phasecache is left
1769 1768 # dirty after committing. Then when we strip, the repo is invalidated,
1770 1769 # causing those changes to disappear.
1771 1770 if '_phasecache' in vars(self):
1772 1771 self._phasecache.write()
1773 1772
1774 1773 @unfilteredmethod
1775 1774 def destroyed(self):
1776 1775 '''Inform the repository that nodes have been destroyed.
1777 1776 Intended for use by strip and rollback, so there's a common
1778 1777 place for anything that has to be done after destroying history.
1779 1778 '''
1780 1779 # When one tries to:
1781 1780 # 1) destroy nodes thus calling this method (e.g. strip)
1782 1781 # 2) use phasecache somewhere (e.g. commit)
1783 1782 #
1784 1783 # then 2) will fail because the phasecache contains nodes that were
1785 1784 # removed. We can either remove phasecache from the filecache,
1786 1785 # causing it to reload next time it is accessed, or simply filter
1787 1786 # the removed nodes now and write the updated cache.
1788 1787 self._phasecache.filterunknown(self)
1789 1788 self._phasecache.write()
1790 1789
1791 1790 # update the 'served' branch cache to help read only server process
1792 1791 # Thanks to branchcache collaboration this is done from the nearest
1793 1792 # filtered subset and it is expected to be fast.
1794 1793 branchmap.updatecache(self.filtered('served'))
1795 1794
1796 1795 # Ensure the persistent tag cache is updated. Doing it now
1797 1796 # means that the tag cache only has to worry about destroyed
1798 1797 # heads immediately after a strip/rollback. That in turn
1799 1798 # guarantees that "cachetip == currenttip" (comparing both rev
1800 1799 # and node) always means no nodes have been added or destroyed.
1801 1800
1802 1801 # XXX this is suboptimal when qrefresh'ing: we strip the current
1803 1802 # head, refresh the tag cache, then immediately add a new head.
1804 1803 # But I think doing it this way is necessary for the "instant
1805 1804 # tag cache retrieval" case to work.
1806 1805 self.invalidate()
1807 1806
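An extension that maintains its own cache faces the same staleness problem described above. One sketch, assuming the usual wrapfunction idiom (`_mycache` is a made-up attribute):

    # Sketch: invalidate a private cache whenever history is destroyed.
    from mercurial import extensions, localrepo

    def _destroyed(orig, repo):
        ret = orig(repo)
        repo._mycache = None     # rebuild lazily after the strip
        return ret

    def uisetup(ui):
        extensions.wrapfunction(localrepo.localrepository, 'destroyed',
                                _destroyed)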
1808 1807 def walk(self, match, node=None):
1809 1808 '''
1810 1809 walk recursively through the directory tree or a given
1811 1810 changeset, finding all files matched by the match
1812 1811 function
1813 1812 '''
1814 1813 return self[node].walk(match)
1815 1814
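A small usage sketch for walk(), assuming an existing `repo` (the glob pattern is illustrative):

    # List tracked Python files in the working directory.
    from mercurial import match as matchmod

    m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    for f in repo.walk(m):       # node=None walks the working directory
        print(f)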
1816 1815 def status(self, node1='.', node2=None, match=None,
1817 1816 ignored=False, clean=False, unknown=False,
1818 1817 listsubrepos=False):
1819 1818 '''a convenience method that calls node1.status(node2)'''
1820 1819 return self[node1].status(node2, match, ignored, clean, unknown,
1821 1820 listsubrepos)
1822 1821
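The returned status object exposes named file lists; a sketch, assuming an existing `repo`:

    # Working directory vs. its first parent, including unknown files.
    st = repo.status(unknown=True)
    print(st.modified, st.added, st.removed, st.unknown)

    # Changes introduced by the working directory's parent commit.
    p = repo['.']
    st = repo.status(p.p1().node(), p.node())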
1823 1822 def heads(self, start=None):
1824 1823 heads = self.changelog.heads(start)
1825 1824 # sort the output in rev descending order
1826 1825 return sorted(heads, key=self.changelog.rev, reverse=True)
1827 1826
1828 1827 def branchheads(self, branch=None, start=None, closed=False):
1829 1828 '''return a (possibly filtered) list of heads for the given branch
1830 1829
1831 1830 Heads are returned in topological order, from newest to oldest.
1832 1831 If branch is None, use the dirstate branch.
1833 1832 If start is not None, return only heads reachable from start.
1834 1833 If closed is True, return heads that are marked as closed as well.
1835 1834 '''
1836 1835 if branch is None:
1837 1836 branch = self[None].branch()
1838 1837 branches = self.branchmap()
1839 1838 if branch not in branches:
1840 1839 return []
1841 1840 # the cache returns heads ordered lowest to highest
1842 1841 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1843 1842 if start is not None:
1844 1843 # filter out the heads that cannot be reached from startrev
1845 1844 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1846 1845 bheads = [h for h in bheads if h in fbheads]
1847 1846 return bheads
1848 1847
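A usage sketch, assuming an existing `repo` with a 'default' branch:

    # Print the heads of the 'default' branch, newest first,
    # including heads that were closed.
    from mercurial.node import short

    for h in repo.branchheads('default', closed=True):
        print(short(h))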
1849 1848 def branches(self, nodes):
1850 1849 if not nodes:
1851 1850 nodes = [self.changelog.tip()]
1852 1851 b = []
1853 1852 for n in nodes:
1854 1853 t = n
1855 1854 while True:
1856 1855 p = self.changelog.parents(n)
1857 1856 if p[1] != nullid or p[0] == nullid:
1858 1857 b.append((t, n, p[0], p[1]))
1859 1858 break
1860 1859 n = p[0]
1861 1860 return b
1862 1861
1863 1862 def between(self, pairs):
1864 1863 r = []
1865 1864
1866 1865 for top, bottom in pairs:
1867 1866 n, l, i = top, [], 0
1868 1867 f = 1
1869 1868
1870 1869 while n != bottom and n != nullid:
1871 1870 p = self.changelog.parents(n)[0]
1872 1871 if i == f:
1873 1872 l.append(n)
1874 1873 f = f * 2
1875 1874 n = p
1876 1875 i += 1
1877 1876
1878 1877 r.append(l)
1879 1878
1880 1879 return r
1881 1880
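between() feeds the legacy discovery wire protocol: for each (top, bottom) pair it walks first parents down from top and keeps the nodes at distances 1, 2, 4, 8, ... from top. A stand-alone sketch of that sampling on a plain list standing in for a first-parent chain:

    # chain[0] is "top", chain[-1] is "bottom"; keep exponentially
    # spaced nodes, mirroring the loop above.
    def sample(chain):
        l, f = [], 1
        for i, n in enumerate(chain[:-1]):
            if i == f:
                l.append(n)
                f *= 2
        return l

    print(sample(list(range(10, 0, -1))))   # -> [9, 8, 6, 2]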
1882 1881 def checkpush(self, pushop):
1883 1882 """Extensions can override this function if additional checks have
1884 1883 to be performed before pushing, or call it if they override the
1885 1884 push command.
1886 1885 """
1887 1886 pass
1888 1887
1889 1888 @unfilteredpropertycache
1890 1889 def prepushoutgoinghooks(self):
1891 1890 """Return util.hooks consists of "(repo, remote, outgoing)"
1892 1891 functions, which are called before pushing changesets.
1893 1892 """
1894 1893 return util.hooks()
1895 1894
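A sketch of registering such a hook from an extension (the size policy and extension name are made up; the hook receives (repo, remote, outgoing) as documented above):

    # Refuse oversized pushes before any data is sent.
    from mercurial import error

    def checkoutgoing(repo, remote, outgoing):
        if len(outgoing.missing) > 100:
            raise error.Abort('refusing to push more than 100 changesets')

    def reposetup(ui, repo):
        repo.prepushoutgoinghooks.add('myext', checkoutgoing)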
1896 1895 def pushkey(self, namespace, key, old, new):
1897 1896 try:
1898 1897 tr = self.currenttransaction()
1899 1898 hookargs = {}
1900 1899 if tr is not None:
1901 1900 hookargs.update(tr.hookargs)
1902 1901 hookargs['namespace'] = namespace
1903 1902 hookargs['key'] = key
1904 1903 hookargs['old'] = old
1905 1904 hookargs['new'] = new
1906 1905 self.hook('prepushkey', throw=True, **hookargs)
1907 1906 except error.HookAbort as exc:
1908 1907 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1909 1908 if exc.hint:
1910 1909 self.ui.write_err(_("(%s)\n") % exc.hint)
1911 1910 return False
1912 1911 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1913 1912 ret = pushkey.push(self, namespace, key, old, new)
1914 1913 def runhook():
1915 1914 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1916 1915 ret=ret)
1917 1916 self._afterlock(runhook)
1918 1917 return ret
1919 1918
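Bookmarks are one pushkey namespace, so moving a bookmark through this method fires the prepushkey/pushkey hooks shown above. A sketch, assuming an existing `repo` (the bookmark name is made up; an empty 'old' value means the key does not exist yet):

    from mercurial.node import hex

    ok = repo.pushkey('bookmarks', 'mybook', '',
                      hex(repo['tip'].node()))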
1920 1919 def listkeys(self, namespace):
1921 1920 self.hook('prelistkeys', throw=True, namespace=namespace)
1922 1921 self.ui.debug('listing keys for "%s"\n' % namespace)
1923 1922 values = pushkey.list(self, namespace)
1924 1923 self.hook('listkeys', namespace=namespace, values=values)
1925 1924 return values
1926 1925
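A quick sketch querying the same namespaces a remote client would, assuming an existing `repo`:

    print(repo.listkeys('namespaces'))   # available pushkey namespaces
    print(repo.listkeys('bookmarks'))    # {name: hex nodeid}
    print(repo.listkeys('phases'))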
1927 1926 def debugwireargs(self, one, two, three=None, four=None, five=None):
1928 1927 '''used to test argument passing over the wire'''
1929 1928 return "%s %s %s %s %s" % (one, two, three, four, five)
1930 1929
1931 1930 def savecommitmessage(self, text):
1932 1931 fp = self.vfs('last-message.txt', 'wb')
1933 1932 try:
1934 1933 fp.write(text)
1935 1934 finally:
1936 1935 fp.close()
1937 1936 return self.pathto(fp.name[len(self.root) + 1:])
1938 1937
1939 1938 # used to avoid circular references so destructors work
1940 1939 def aftertrans(files):
1941 1940 renamefiles = [tuple(t) for t in files]
1942 1941 def a():
1943 1942 for vfs, src, dest in renamefiles:
1944 1943 try:
1945 1944 vfs.rename(src, dest)
1946 1945 except OSError: # journal file does not yet exist
1947 1946 pass
1948 1947 return a
1949 1948
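The point of the tuple copy above is that the returned closure holds no reference back to the repo, so the repo's destructors can still run while the callback is pending. An illustrative sketch of the journal-to-undo rename it performs (the vfs and file names mirror the commit transaction, but treat this as an assumption):

    callback = aftertrans([(repo.svfs, 'journal', 'undo')])
    callback()   # renames journal -> undo once the transaction is done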
1950 1949 def undoname(fn):
1951 1950 base, name = os.path.split(fn)
1952 1951 assert name.startswith('journal')
1953 1952 return os.path.join(base, name.replace('journal', 'undo', 1))
1954 1953
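Worked examples of undoname(); only the leading 'journal' in the basename is replaced:

    print(undoname('.hg/store/journal'))             # .hg/store/undo
    print(undoname('.hg/store/journal.phaseroots'))  # .hg/store/undo.phaseroots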
1955 1954 def instance(ui, path, create):
1956 1955 return localrepository(ui, util.urllocalpath(path), create)
1957 1956
1958 1957 def islocal(path):
1959 1958 return True
1960 1959
1961 1960 def newreporequirements(repo):
1962 1961 """Determine the set of requirements for a new local repository.
1963 1962
1964 1963 Extensions can wrap this function to specify custom requirements for
1965 1964 new repositories.
1966 1965 """
1967 1966 ui = repo.ui
1968 1967 requirements = set(['revlogv1'])
1969 1968 if ui.configbool('format', 'usestore', True):
1970 1969 requirements.add('store')
1971 1970 if ui.configbool('format', 'usefncache', True):
1972 1971 requirements.add('fncache')
1973 1972 if ui.configbool('format', 'dotencode', True):
1974 1973 requirements.add('dotencode')
1975 1974
1976 1975 if scmutil.gdinitconfig(ui):
1977 1976 requirements.add('generaldelta')
1978 1977 if ui.configbool('experimental', 'treemanifest', False):
1979 1978 requirements.add('treemanifest')
1980 1979 if ui.configbool('experimental', 'manifestv2', False):
1981 1980 requirements.add('manifestv2')
1982 1981
1983 1982 return requirements
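As the docstring says, extensions can wrap this function. A sketch of adding a custom requirement to newly created repositories (the 'exp-myext-format' requirement and the config knob are made up):

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, repo):
        reqs = orig(repo)
        if repo.ui.configbool('myext', 'enabled', False):
            reqs.add('exp-myext-format')
        return reqs

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)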