localrepo: jettison parents() method per deprecation policy (API)
Augie Fackler
r29075:3f0177d2 default
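This changeset removes localrepository.parents(), which had been deprecated since Mercurial 3.7, per the project's deprecation policy. Callers should look up a changectx and ask it for its parents, as the deprecation warning in the deleted code itself suggests. A minimal migration sketch (assuming an existing localrepository instance named repo):

    # before (removed by this change): ctxs = repo.parents()
    ctxs = repo[None].parents()   # parents of the working directory
    ctxs = repo['.'].parents()    # parents of any changeid, here '.'
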
@@ -1,1983 +1,1977 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import inspect
12 12 import os
13 13 import random
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 hex,
20 20 nullid,
21 21 short,
22 22 wdirrev,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 cmdutil,
31 31 context,
32 32 dirstate,
33 33 encoding,
34 34 error,
35 35 exchange,
36 36 extensions,
37 37 filelog,
38 38 hook,
39 39 lock as lockmod,
40 40 manifest,
41 41 match as matchmod,
42 42 merge as mergemod,
43 43 namespaces,
44 44 obsolete,
45 45 pathutil,
46 46 peer,
47 47 phases,
48 48 pushkey,
49 49 repoview,
50 50 revset,
51 51 scmutil,
52 52 store,
53 53 subrepo,
54 54 tags as tagsmod,
55 55 transaction,
56 56 util,
57 57 )
58 58
59 59 release = lockmod.release
60 60 propertycache = util.propertycache
61 61 urlerr = util.urlerr
62 62 urlreq = util.urlreq
63 63 filecache = scmutil.filecache
64 64
65 65 class repofilecache(filecache):
66 66 """All filecache usage on repo are done for logic that should be unfiltered
67 67 """
68 68
69 69 def __get__(self, repo, type=None):
70 70 return super(repofilecache, self).__get__(repo.unfiltered(), type)
71 71 def __set__(self, repo, value):
72 72 return super(repofilecache, self).__set__(repo.unfiltered(), value)
73 73 def __delete__(self, repo):
74 74 return super(repofilecache, self).__delete__(repo.unfiltered())
75 75
76 76 class storecache(repofilecache):
77 77 """filecache for files in the store"""
78 78 def join(self, obj, fname):
79 79 return obj.sjoin(fname)
80 80
81 81 class unfilteredpropertycache(propertycache):
82 82 """propertycache that apply to unfiltered repo only"""
83 83
84 84 def __get__(self, repo, type=None):
85 85 unfi = repo.unfiltered()
86 86 if unfi is repo:
87 87 return super(unfilteredpropertycache, self).__get__(unfi)
88 88 return getattr(unfi, self.name)
89 89
90 90 class filteredpropertycache(propertycache):
91 91 """propertycache that must take filtering in account"""
92 92
93 93 def cachevalue(self, obj, value):
94 94 object.__setattr__(obj, self.name, value)
95 95
96 96
97 97 def hasunfilteredcache(repo, name):
98 98 """check if a repo has an unfilteredpropertycache value for <name>"""
99 99 return name in vars(repo.unfiltered())
100 100
101 101 def unfilteredmethod(orig):
102 102 """decorate method that always need to be run on unfiltered version"""
103 103 def wrapper(repo, *args, **kwargs):
104 104 return orig(repo.unfiltered(), *args, **kwargs)
105 105 return wrapper
106 106
107 107 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
108 108 'unbundle'))
109 109 legacycaps = moderncaps.union(set(['changegroupsubset']))
110 110
111 111 class localpeer(peer.peerrepository):
112 112 '''peer for a local repo; reflects only the most recent API'''
113 113
114 114 def __init__(self, repo, caps=moderncaps):
115 115 peer.peerrepository.__init__(self)
116 116 self._repo = repo.filtered('served')
117 117 self.ui = repo.ui
118 118 self._caps = repo._restrictcapabilities(caps)
119 119 self.requirements = repo.requirements
120 120 self.supportedformats = repo.supportedformats
121 121
122 122 def close(self):
123 123 self._repo.close()
124 124
125 125 def _capabilities(self):
126 126 return self._caps
127 127
128 128 def local(self):
129 129 return self._repo
130 130
131 131 def canpush(self):
132 132 return True
133 133
134 134 def url(self):
135 135 return self._repo.url()
136 136
137 137 def lookup(self, key):
138 138 return self._repo.lookup(key)
139 139
140 140 def branchmap(self):
141 141 return self._repo.branchmap()
142 142
143 143 def heads(self):
144 144 return self._repo.heads()
145 145
146 146 def known(self, nodes):
147 147 return self._repo.known(nodes)
148 148
149 149 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
150 150 **kwargs):
151 151 cg = exchange.getbundle(self._repo, source, heads=heads,
152 152 common=common, bundlecaps=bundlecaps, **kwargs)
153 153 if bundlecaps is not None and 'HG20' in bundlecaps:
154 154 # When requesting a bundle2, getbundle returns a stream to make the
155 155 # wire level function happier. We need to build a proper object
156 156 # from it in local peer.
157 157 cg = bundle2.getunbundler(self.ui, cg)
158 158 return cg
159 159
160 160 # TODO We might want to move the next two calls into legacypeer and add
161 161 # unbundle instead.
162 162
163 163 def unbundle(self, cg, heads, url):
164 164 """apply a bundle on a repo
165 165
166 166 This function handles the repo locking itself."""
167 167 try:
168 168 try:
169 169 cg = exchange.readbundle(self.ui, cg, None)
170 170 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
171 171 if util.safehasattr(ret, 'getchunks'):
172 172 # This is a bundle20 object, turn it into an unbundler.
173 173 # This little dance should be dropped eventually when the
174 174 # API is finally improved.
175 175 stream = util.chunkbuffer(ret.getchunks())
176 176 ret = bundle2.getunbundler(self.ui, stream)
177 177 return ret
178 178 except Exception as exc:
179 179 # If the exception contains output salvaged from a bundle2
180 180 # reply, we need to make sure it is printed before continuing
181 181 # to fail. So we build a bundle2 with such output and consume
182 182 # it directly.
183 183 #
184 184 # This is not very elegant but allows a "simple" solution for
185 185 # issue4594
186 186 output = getattr(exc, '_bundle2salvagedoutput', ())
187 187 if output:
188 188 bundler = bundle2.bundle20(self._repo.ui)
189 189 for out in output:
190 190 bundler.addpart(out)
191 191 stream = util.chunkbuffer(bundler.getchunks())
192 192 b = bundle2.getunbundler(self.ui, stream)
193 193 bundle2.processbundle(self._repo, b)
194 194 raise
195 195 except error.PushRaced as exc:
196 196 raise error.ResponseError(_('push failed:'), str(exc))
197 197
198 198 def lock(self):
199 199 return self._repo.lock()
200 200
201 201 def addchangegroup(self, cg, source, url):
202 202 return cg.apply(self._repo, source, url)
203 203
204 204 def pushkey(self, namespace, key, old, new):
205 205 return self._repo.pushkey(namespace, key, old, new)
206 206
207 207 def listkeys(self, namespace):
208 208 return self._repo.listkeys(namespace)
209 209
210 210 def debugwireargs(self, one, two, three=None, four=None, five=None):
211 211 '''used to test argument passing over the wire'''
212 212 return "%s %s %s %s %s" % (one, two, three, four, five)
213 213
214 214 class locallegacypeer(localpeer):
215 215 '''peer extension which implements legacy methods too; used for tests with
216 216 restricted capabilities'''
217 217
218 218 def __init__(self, repo):
219 219 localpeer.__init__(self, repo, caps=legacycaps)
220 220
221 221 def branches(self, nodes):
222 222 return self._repo.branches(nodes)
223 223
224 224 def between(self, pairs):
225 225 return self._repo.between(pairs)
226 226
227 227 def changegroup(self, basenodes, source):
228 228 return changegroup.changegroup(self._repo, basenodes, source)
229 229
230 230 def changegroupsubset(self, bases, heads, source):
231 231 return changegroup.changegroupsubset(self._repo, bases, heads, source)
232 232
233 233 class localrepository(object):
234 234
235 235 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
236 236 'manifestv2'))
237 237 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
238 238 'dotencode'))
239 239 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
240 240 filtername = None
241 241
242 242 # a list of (ui, featureset) functions.
243 243 # only functions defined in module of enabled extensions are invoked
244 244 featuresetupfuncs = set()
245 245
246 246 def __init__(self, baseui, path=None, create=False):
247 247 self.requirements = set()
248 248 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
249 249 self.wopener = self.wvfs
250 250 self.root = self.wvfs.base
251 251 self.path = self.wvfs.join(".hg")
252 252 self.origroot = path
253 253 self.auditor = pathutil.pathauditor(self.root, self._checknested)
254 254 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
255 255 realfs=False)
256 256 self.vfs = scmutil.vfs(self.path)
257 257 self.opener = self.vfs
258 258 self.baseui = baseui
259 259 self.ui = baseui.copy()
260 260 self.ui.copy = baseui.copy # prevent copying repo configuration
261 261 # A list of callbacks to shape the phase if no data were found.
262 262 # Callbacks are in the form: func(repo, roots) --> processed root.
263 263 # This list is to be filled by extensions during repo setup
264 264 self._phasedefaults = []
265 265 try:
266 266 self.ui.readconfig(self.join("hgrc"), self.root)
267 267 extensions.loadall(self.ui)
268 268 except IOError:
269 269 pass
270 270
271 271 if self.featuresetupfuncs:
272 272 self.supported = set(self._basesupported) # use private copy
273 273 extmods = set(m.__name__ for n, m
274 274 in extensions.extensions(self.ui))
275 275 for setupfunc in self.featuresetupfuncs:
276 276 if setupfunc.__module__ in extmods:
277 277 setupfunc(self.ui, self.supported)
278 278 else:
279 279 self.supported = self._basesupported
280 280
281 281 if not self.vfs.isdir():
282 282 if create:
283 283 self.requirements = newreporequirements(self)
284 284
285 285 if not self.wvfs.exists():
286 286 self.wvfs.makedirs()
287 287 self.vfs.makedir(notindexed=True)
288 288
289 289 if 'store' in self.requirements:
290 290 self.vfs.mkdir("store")
291 291
292 292 # create an invalid changelog
293 293 self.vfs.append(
294 294 "00changelog.i",
295 295 '\0\0\0\2' # represents revlogv2
296 296 ' dummy changelog to prevent using the old repo layout'
297 297 )
298 298 else:
299 299 raise error.RepoError(_("repository %s not found") % path)
300 300 elif create:
301 301 raise error.RepoError(_("repository %s already exists") % path)
302 302 else:
303 303 try:
304 304 self.requirements = scmutil.readrequires(
305 305 self.vfs, self.supported)
306 306 except IOError as inst:
307 307 if inst.errno != errno.ENOENT:
308 308 raise
309 309
310 310 self.sharedpath = self.path
311 311 try:
312 312 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
313 313 realpath=True)
314 314 s = vfs.base
315 315 if not vfs.exists():
316 316 raise error.RepoError(
317 317 _('.hg/sharedpath points to nonexistent directory %s') % s)
318 318 self.sharedpath = s
319 319 except IOError as inst:
320 320 if inst.errno != errno.ENOENT:
321 321 raise
322 322
323 323 self.store = store.store(
324 324 self.requirements, self.sharedpath, scmutil.vfs)
325 325 self.spath = self.store.path
326 326 self.svfs = self.store.vfs
327 327 self.sjoin = self.store.join
328 328 self.vfs.createmode = self.store.createmode
329 329 self._applyopenerreqs()
330 330 if create:
331 331 self._writerequirements()
332 332
333 333 self._dirstatevalidatewarned = False
334 334
335 335 self._branchcaches = {}
336 336 self._revbranchcache = None
337 337 self.filterpats = {}
338 338 self._datafilters = {}
339 339 self._transref = self._lockref = self._wlockref = None
340 340
341 341 # A cache for various files under .hg/ that tracks file changes,
342 342 # (used by the filecache decorator)
343 343 #
344 344 # Maps a property name to its util.filecacheentry
345 345 self._filecache = {}
346 346
347 347 # hold sets of revisions to be filtered
348 348 # should be cleared when something might have changed the filter value:
349 349 # - new changesets,
350 350 # - phase change,
351 351 # - new obsolescence marker,
352 352 # - working directory parent change,
353 353 # - bookmark changes
354 354 self.filteredrevcache = {}
355 355
356 356 # generic mapping between names and nodes
357 357 self.names = namespaces.namespaces()
358 358
359 359 def close(self):
360 360 self._writecaches()
361 361
362 362 def _writecaches(self):
363 363 if self._revbranchcache:
364 364 self._revbranchcache.write()
365 365
366 366 def _restrictcapabilities(self, caps):
367 367 if self.ui.configbool('experimental', 'bundle2-advertise', True):
368 368 caps = set(caps)
369 369 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
370 370 caps.add('bundle2=' + urlreq.quote(capsblob))
371 371 return caps
372 372
373 373 def _applyopenerreqs(self):
374 374 self.svfs.options = dict((r, 1) for r in self.requirements
375 375 if r in self.openerreqs)
376 376 # experimental config: format.chunkcachesize
377 377 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
378 378 if chunkcachesize is not None:
379 379 self.svfs.options['chunkcachesize'] = chunkcachesize
380 380 # experimental config: format.maxchainlen
381 381 maxchainlen = self.ui.configint('format', 'maxchainlen')
382 382 if maxchainlen is not None:
383 383 self.svfs.options['maxchainlen'] = maxchainlen
384 384 # experimental config: format.manifestcachesize
385 385 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
386 386 if manifestcachesize is not None:
387 387 self.svfs.options['manifestcachesize'] = manifestcachesize
388 388 # experimental config: format.aggressivemergedeltas
389 389 aggressivemergedeltas = self.ui.configbool('format',
390 390 'aggressivemergedeltas', False)
391 391 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
392 392 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
393 393
394 394 def _writerequirements(self):
395 395 scmutil.writerequires(self.vfs, self.requirements)
396 396
397 397 def _checknested(self, path):
398 398 """Determine if path is a legal nested repository."""
399 399 if not path.startswith(self.root):
400 400 return False
401 401 subpath = path[len(self.root) + 1:]
402 402 normsubpath = util.pconvert(subpath)
403 403
404 404 # XXX: Checking against the current working copy is wrong in
405 405 # the sense that it can reject things like
406 406 #
407 407 # $ hg cat -r 10 sub/x.txt
408 408 #
409 409 # if sub/ is no longer a subrepository in the working copy
410 410 # parent revision.
411 411 #
412 412 # However, it can of course also allow things that would have
413 413 # been rejected before, such as the above cat command if sub/
414 414 # is a subrepository now, but was a normal directory before.
415 415 # The old path auditor would have rejected by mistake since it
416 416 # panics when it sees sub/.hg/.
417 417 #
418 418 # All in all, checking against the working copy seems sensible
419 419 # since we want to prevent access to nested repositories on
420 420 # the filesystem *now*.
421 421 ctx = self[None]
422 422 parts = util.splitpath(subpath)
423 423 while parts:
424 424 prefix = '/'.join(parts)
425 425 if prefix in ctx.substate:
426 426 if prefix == normsubpath:
427 427 return True
428 428 else:
429 429 sub = ctx.sub(prefix)
430 430 return sub.checknested(subpath[len(prefix) + 1:])
431 431 else:
432 432 parts.pop()
433 433 return False
434 434
435 435 def peer(self):
436 436 return localpeer(self) # not cached to avoid reference cycle
437 437
438 438 def unfiltered(self):
439 439 """Return unfiltered version of the repository
440 440
441 441 Intended to be overwritten by filtered repo."""
442 442 return self
443 443
444 444 def filtered(self, name):
445 445 """Return a filtered version of a repository"""
446 446 # build a new class with the mixin and the current class
447 447 # (possibly subclass of the repo)
448 448 class proxycls(repoview.repoview, self.unfiltered().__class__):
449 449 pass
450 450 return proxycls(self, name)
451 451
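# A sketch of filtered() in use ('served' and 'visible' are filter names
# referenced elsewhere in this file; 'repo' is an assumed instance):
#   repo.filtered('visible')   # view hiding filtered (e.g. hidden) revisions
#   repo.filtered('served')    # the view localpeer serves to clients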
452 452 @repofilecache('bookmarks', 'bookmarks.current')
453 453 def _bookmarks(self):
454 454 return bookmarks.bmstore(self)
455 455
456 456 @property
457 457 def _activebookmark(self):
458 458 return self._bookmarks.active
459 459
460 460 def bookmarkheads(self, bookmark):
461 461 name = bookmark.split('@', 1)[0]
462 462 heads = []
463 463 for mark, n in self._bookmarks.iteritems():
464 464 if mark.split('@', 1)[0] == name:
465 465 heads.append(n)
466 466 return heads
467 467
468 468 # _phaserevs and _phasesets depend on changelog. what we need is to
469 469 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
470 470 # can't be easily expressed in the filecache mechanism.
471 471 @storecache('phaseroots', '00changelog.i')
472 472 def _phasecache(self):
473 473 return phases.phasecache(self, self._phasedefaults)
474 474
475 475 @storecache('obsstore')
476 476 def obsstore(self):
477 477 # read default format for new obsstore.
478 478 # developer config: format.obsstore-version
479 479 defaultformat = self.ui.configint('format', 'obsstore-version', None)
480 480 # rely on obsstore class default when possible.
481 481 kwargs = {}
482 482 if defaultformat is not None:
483 483 kwargs['defaultformat'] = defaultformat
484 484 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
485 485 store = obsolete.obsstore(self.svfs, readonly=readonly,
486 486 **kwargs)
487 487 if store and readonly:
488 488 self.ui.warn(
489 489 _('obsolete feature not enabled but %i markers found!\n')
490 490 % len(list(store)))
491 491 return store
492 492
493 493 @storecache('00changelog.i')
494 494 def changelog(self):
495 495 c = changelog.changelog(self.svfs)
496 496 if 'HG_PENDING' in os.environ:
497 497 p = os.environ['HG_PENDING']
498 498 if p.startswith(self.root):
499 499 c.readpending('00changelog.i.a')
500 500 return c
501 501
502 502 @storecache('00manifest.i')
503 503 def manifest(self):
504 504 return manifest.manifest(self.svfs)
505 505
506 506 def dirlog(self, dir):
507 507 return self.manifest.dirlog(dir)
508 508
509 509 @repofilecache('dirstate')
510 510 def dirstate(self):
511 511 return dirstate.dirstate(self.vfs, self.ui, self.root,
512 512 self._dirstatevalidate)
513 513
514 514 def _dirstatevalidate(self, node):
515 515 try:
516 516 self.changelog.rev(node)
517 517 return node
518 518 except error.LookupError:
519 519 if not self._dirstatevalidatewarned:
520 520 self._dirstatevalidatewarned = True
521 521 self.ui.warn(_("warning: ignoring unknown"
522 522 " working parent %s!\n") % short(node))
523 523 return nullid
524 524
525 525 def __getitem__(self, changeid):
526 526 if changeid is None or changeid == wdirrev:
527 527 return context.workingctx(self)
528 528 if isinstance(changeid, slice):
529 529 return [context.changectx(self, i)
530 530 for i in xrange(*changeid.indices(len(self)))
531 531 if i not in self.changelog.filteredrevs]
532 532 return context.changectx(self, changeid)
533 533
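# Illustrative __getitem__ lookups (a sketch, assuming a 'repo' instance):
#   repo[None]    # workingctx for the working directory
#   repo[0]       # changectx by integer revision
#   repo['tip']   # changectx by symbolic name
#   repo[0:5]     # list of changectxs, skipping filtered revisions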
534 534 def __contains__(self, changeid):
535 535 try:
536 536 self[changeid]
537 537 return True
538 538 except error.RepoLookupError:
539 539 return False
540 540
541 541 def __nonzero__(self):
542 542 return True
543 543
544 544 def __len__(self):
545 545 return len(self.changelog)
546 546
547 547 def __iter__(self):
548 548 return iter(self.changelog)
549 549
550 550 def revs(self, expr, *args):
551 551 '''Find revisions matching a revset.
552 552
553 553 The revset is specified as a string ``expr`` that may contain
554 554 %-formatting to escape certain types. See ``revset.formatspec``.
555 555
556 556 Return a revset.abstractsmartset, which is a list-like interface
557 557 that contains integer revisions.
558 558 '''
559 559 expr = revset.formatspec(expr, *args)
560 560 m = revset.match(None, expr)
561 561 return m(self)
562 562
563 563 def set(self, expr, *args):
564 564 '''Find revisions matching a revset and emit changectx instances.
565 565
566 566 This is a convenience wrapper around ``revs()`` that iterates the
567 567 result and is a generator of changectx instances.
568 568 '''
569 569 for r in self.revs(expr, *args):
570 570 yield self[r]
571 571
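# A usage sketch for revs()/set(); see revset.formatspec for the
# %-escaping ('repo' is an assumed instance):
#   for rev in repo.revs('branch(%s) and not merge()', 'default'):
#       ...   # integer revisions
#   for ctx in repo.set('ancestors(%d)', 5):
#       ...   # changectx instances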
572 572 def url(self):
573 573 return 'file:' + self.root
574 574
575 575 def hook(self, name, throw=False, **args):
576 576 """Call a hook, passing this repo instance.
577 577
578 578 This is a convenience method to aid invoking hooks. Extensions likely
579 579 won't call this unless they have registered a custom hook or are
580 580 replacing code that is expected to call a hook.
581 581 """
582 582 return hook.hook(self.ui, self, name, throw, **args)
583 583
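# A sketch of hook() (assumes 'repo' and a hex node string 'n'):
#   repo.hook('pretxncommit', throw=True, node=n)  # throw=True raises on failure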
584 584 @unfilteredmethod
585 585 def _tag(self, names, node, message, local, user, date, extra=None,
586 586 editor=False):
587 587 if isinstance(names, str):
588 588 names = (names,)
589 589
590 590 branches = self.branchmap()
591 591 for name in names:
592 592 self.hook('pretag', throw=True, node=hex(node), tag=name,
593 593 local=local)
594 594 if name in branches:
595 595 self.ui.warn(_("warning: tag %s conflicts with existing"
596 596 " branch name\n") % name)
597 597
598 598 def writetags(fp, names, munge, prevtags):
599 599 fp.seek(0, 2)
600 600 if prevtags and prevtags[-1] != '\n':
601 601 fp.write('\n')
602 602 for name in names:
603 603 if munge:
604 604 m = munge(name)
605 605 else:
606 606 m = name
607 607
608 608 if (self._tagscache.tagtypes and
609 609 name in self._tagscache.tagtypes):
610 610 old = self.tags().get(name, nullid)
611 611 fp.write('%s %s\n' % (hex(old), m))
612 612 fp.write('%s %s\n' % (hex(node), m))
613 613 fp.close()
614 614
615 615 prevtags = ''
616 616 if local:
617 617 try:
618 618 fp = self.vfs('localtags', 'r+')
619 619 except IOError:
620 620 fp = self.vfs('localtags', 'a')
621 621 else:
622 622 prevtags = fp.read()
623 623
624 624 # local tags are stored in the current charset
625 625 writetags(fp, names, None, prevtags)
626 626 for name in names:
627 627 self.hook('tag', node=hex(node), tag=name, local=local)
628 628 return
629 629
630 630 try:
631 631 fp = self.wfile('.hgtags', 'rb+')
632 632 except IOError as e:
633 633 if e.errno != errno.ENOENT:
634 634 raise
635 635 fp = self.wfile('.hgtags', 'ab')
636 636 else:
637 637 prevtags = fp.read()
638 638
639 639 # committed tags are stored in UTF-8
640 640 writetags(fp, names, encoding.fromlocal, prevtags)
641 641
642 642 fp.close()
643 643
644 644 self.invalidatecaches()
645 645
646 646 if '.hgtags' not in self.dirstate:
647 647 self[None].add(['.hgtags'])
648 648
649 649 m = matchmod.exact(self.root, '', ['.hgtags'])
650 650 tagnode = self.commit(message, user, date, extra=extra, match=m,
651 651 editor=editor)
652 652
653 653 for name in names:
654 654 self.hook('tag', node=hex(node), tag=name, local=local)
655 655
656 656 return tagnode
657 657
658 658 def tag(self, names, node, message, local, user, date, editor=False):
659 659 '''tag a revision with one or more symbolic names.
660 660
661 661 names is a list of strings or, when adding a single tag, names may be a
662 662 string.
663 663
664 664 if local is True, the tags are stored in a per-repository file.
665 665 otherwise, they are stored in the .hgtags file, and a new
666 666 changeset is committed with the change.
667 667
668 668 keyword arguments:
669 669
670 670 local: whether to store tags in non-version-controlled file
671 671 (default False)
672 672
673 673 message: commit message to use if committing
674 674
675 675 user: name of user to use if committing
676 676
677 677 date: date tuple to use if committing'''
678 678
679 679 if not local:
680 680 m = matchmod.exact(self.root, '', ['.hgtags'])
681 681 if any(self.status(match=m, unknown=True, ignored=True)):
682 682 raise error.Abort(_('working copy of .hgtags is changed'),
683 683 hint=_('please commit .hgtags manually'))
684 684
685 685 self.tags() # instantiate the cache
686 686 self._tag(names, node, message, local, user, date, editor=editor)
687 687
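# A tagging sketch per the docstring above (assumes 'repo' and a binary
# 'node'; positional arguments are names, node, message, local, user, date):
#   repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)  # commits .hgtags
#   repo.tag('wip', node, '', True, None, None)                  # .hg/localtags only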
688 688 @filteredpropertycache
689 689 def _tagscache(self):
690 690 '''Returns a tagscache object that contains various tags related
691 691 caches.'''
692 692
693 693 # This simplifies its cache management by having one decorated
694 694 # function (this one) and the rest simply fetch things from it.
695 695 class tagscache(object):
696 696 def __init__(self):
697 697 # These two define the set of tags for this repository. tags
698 698 # maps tag name to node; tagtypes maps tag name to 'global' or
699 699 # 'local'. (Global tags are defined by .hgtags across all
700 700 # heads, and local tags are defined in .hg/localtags.)
701 701 # They constitute the in-memory cache of tags.
702 702 self.tags = self.tagtypes = None
703 703
704 704 self.nodetagscache = self.tagslist = None
705 705
706 706 cache = tagscache()
707 707 cache.tags, cache.tagtypes = self._findtags()
708 708
709 709 return cache
710 710
711 711 def tags(self):
712 712 '''return a mapping of tag to node'''
713 713 t = {}
714 714 if self.changelog.filteredrevs:
715 715 tags, tt = self._findtags()
716 716 else:
717 717 tags = self._tagscache.tags
718 718 for k, v in tags.iteritems():
719 719 try:
720 720 # ignore tags to unknown nodes
721 721 self.changelog.rev(v)
722 722 t[k] = v
723 723 except (error.LookupError, ValueError):
724 724 pass
725 725 return t
726 726
727 727 def _findtags(self):
728 728 '''Do the hard work of finding tags. Return a pair of dicts
729 729 (tags, tagtypes) where tags maps tag name to node, and tagtypes
730 730 maps tag name to a string like \'global\' or \'local\'.
731 731 Subclasses or extensions are free to add their own tags, but
732 732 should be aware that the returned dicts will be retained for the
733 733 duration of the localrepo object.'''
734 734
735 735 # XXX what tagtype should subclasses/extensions use? Currently
736 736 # mq and bookmarks add tags, but do not set the tagtype at all.
737 737 # Should each extension invent its own tag type? Should there
738 738 # be one tagtype for all such "virtual" tags? Or is the status
739 739 # quo fine?
740 740
741 741 alltags = {} # map tag name to (node, hist)
742 742 tagtypes = {}
743 743
744 744 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
745 745 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
746 746
747 747 # Build the return dicts. Have to re-encode tag names because
748 748 # the tags module always uses UTF-8 (in order not to lose info
749 749 # writing to the cache), but the rest of Mercurial wants them in
750 750 # local encoding.
751 751 tags = {}
752 752 for (name, (node, hist)) in alltags.iteritems():
753 753 if node != nullid:
754 754 tags[encoding.tolocal(name)] = node
755 755 tags['tip'] = self.changelog.tip()
756 756 tagtypes = dict([(encoding.tolocal(name), value)
757 757 for (name, value) in tagtypes.iteritems()])
758 758 return (tags, tagtypes)
759 759
760 760 def tagtype(self, tagname):
761 761 '''
762 762 return the type of the given tag. result can be:
763 763
764 764 'local' : a local tag
765 765 'global' : a global tag
766 766 None : tag does not exist
767 767 '''
768 768
769 769 return self._tagscache.tagtypes.get(tagname)
770 770
771 771 def tagslist(self):
772 772 '''return a list of tags ordered by revision'''
773 773 if not self._tagscache.tagslist:
774 774 l = []
775 775 for t, n in self.tags().iteritems():
776 776 l.append((self.changelog.rev(n), t, n))
777 777 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
778 778
779 779 return self._tagscache.tagslist
780 780
781 781 def nodetags(self, node):
782 782 '''return the tags associated with a node'''
783 783 if not self._tagscache.nodetagscache:
784 784 nodetagscache = {}
785 785 for t, n in self._tagscache.tags.iteritems():
786 786 nodetagscache.setdefault(n, []).append(t)
787 787 for tags in nodetagscache.itervalues():
788 788 tags.sort()
789 789 self._tagscache.nodetagscache = nodetagscache
790 790 return self._tagscache.nodetagscache.get(node, [])
791 791
792 792 def nodebookmarks(self, node):
793 793 """return the list of bookmarks pointing to the specified node"""
794 794 marks = []
795 795 for bookmark, n in self._bookmarks.iteritems():
796 796 if n == node:
797 797 marks.append(bookmark)
798 798 return sorted(marks)
799 799
800 800 def branchmap(self):
801 801 '''returns a dictionary {branch: [branchheads]} with branchheads
802 802 ordered by increasing revision number'''
803 803 branchmap.updatecache(self)
804 804 return self._branchcaches[self.filtername]
805 805
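# Shape of the returned branchmap per the docstring above (a sketch):
#   {'default': [head1node, head2node], 'stable': [headnode]}
# with heads ordered by increasing revision number.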
806 806 @unfilteredmethod
807 807 def revbranchcache(self):
808 808 if not self._revbranchcache:
809 809 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
810 810 return self._revbranchcache
811 811
812 812 def branchtip(self, branch, ignoremissing=False):
813 813 '''return the tip node for a given branch
814 814
815 815 If ignoremissing is True, then this method will not raise an error.
816 816 This is helpful for callers that only expect None for a missing branch
817 817 (e.g. namespace).
818 818
819 819 '''
820 820 try:
821 821 return self.branchmap().branchtip(branch)
822 822 except KeyError:
823 823 if not ignoremissing:
824 824 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
825 825 else:
826 826 pass
827 827
828 828 def lookup(self, key):
829 829 return self[key].node()
830 830
831 831 def lookupbranch(self, key, remote=None):
832 832 repo = remote or self
833 833 if key in repo.branchmap():
834 834 return key
835 835
836 836 repo = (remote and remote.local()) and remote or self
837 837 return repo[key].branch()
838 838
839 839 def known(self, nodes):
840 840 cl = self.changelog
841 841 nm = cl.nodemap
842 842 filtered = cl.filteredrevs
843 843 result = []
844 844 for n in nodes:
845 845 r = nm.get(n)
846 846 resp = not (r is None or r in filtered)
847 847 result.append(resp)
848 848 return result
849 849
850 850 def local(self):
851 851 return self
852 852
853 853 def publishing(self):
854 854 # it's safe (and desirable) to trust the publish flag unconditionally
855 855 # so that we don't finalize changes shared between users via ssh or nfs
856 856 return self.ui.configbool('phases', 'publish', True, untrusted=True)
857 857
858 858 def cancopy(self):
859 859 # so statichttprepo's override of local() works
860 860 if not self.local():
861 861 return False
862 862 if not self.publishing():
863 863 return True
864 864 # if publishing we can't copy if there is filtered content
865 865 return not self.filtered('visible').changelog.filteredrevs
866 866
867 867 def shared(self):
868 868 '''the type of shared repository (None if not shared)'''
869 869 if self.sharedpath != self.path:
870 870 return 'store'
871 871 return None
872 872
873 873 def join(self, f, *insidef):
874 874 return self.vfs.join(os.path.join(f, *insidef))
875 875
876 876 def wjoin(self, f, *insidef):
877 877 return self.vfs.reljoin(self.root, f, *insidef)
878 878
879 879 def file(self, f):
880 880 if f[0] == '/':
881 881 f = f[1:]
882 882 return filelog.filelog(self.svfs, f)
883 883
884 def parents(self, changeid=None):
885 '''get list of changectxs for parents of changeid'''
886 msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
887 self.ui.deprecwarn(msg, '3.7')
888 return self[changeid].parents()
889
890 884 def changectx(self, changeid):
891 885 return self[changeid]
892 886
893 887 def setparents(self, p1, p2=nullid):
894 888 self.dirstate.beginparentchange()
895 889 copies = self.dirstate.setparents(p1, p2)
896 890 pctx = self[p1]
897 891 if copies:
898 892 # Adjust copy records: the dirstate cannot do it, as it
899 893 # requires access to the parents' manifests. Preserve them
900 894 # only for entries added to the first parent.
901 895 for f in copies:
902 896 if f not in pctx and copies[f] in pctx:
903 897 self.dirstate.copy(copies[f], f)
904 898 if p2 == nullid:
905 899 for f, s in sorted(self.dirstate.copies().items()):
906 900 if f not in pctx and s not in pctx:
907 901 self.dirstate.copy(None, f)
908 902 self.dirstate.endparentchange()
909 903
910 904 def filectx(self, path, changeid=None, fileid=None):
911 905 """changeid can be a changeset revision, node, or tag.
912 906 fileid can be a file revision or node."""
913 907 return context.filectx(self, path, changeid, fileid)
914 908
915 909 def getcwd(self):
916 910 return self.dirstate.getcwd()
917 911
918 912 def pathto(self, f, cwd=None):
919 913 return self.dirstate.pathto(f, cwd)
920 914
921 915 def wfile(self, f, mode='r'):
922 916 return self.wvfs(f, mode)
923 917
924 918 def _link(self, f):
925 919 return self.wvfs.islink(f)
926 920
927 921 def _loadfilter(self, filter):
928 922 if filter not in self.filterpats:
929 923 l = []
930 924 for pat, cmd in self.ui.configitems(filter):
931 925 if cmd == '!':
932 926 continue
933 927 mf = matchmod.match(self.root, '', [pat])
934 928 fn = None
935 929 params = cmd
936 930 for name, filterfn in self._datafilters.iteritems():
937 931 if cmd.startswith(name):
938 932 fn = filterfn
939 933 params = cmd[len(name):].lstrip()
940 934 break
941 935 if not fn:
942 936 fn = lambda s, c, **kwargs: util.filter(s, c)
943 937 # Wrap old filters not supporting keyword arguments
944 938 if not inspect.getargspec(fn)[2]:
945 939 oldfn = fn
946 940 fn = lambda s, c, **kwargs: oldfn(s, c)
947 941 l.append((mf, fn, params))
948 942 self.filterpats[filter] = l
949 943 return self.filterpats[filter]
950 944
951 945 def _filter(self, filterpats, filename, data):
952 946 for mf, fn, cmd in filterpats:
953 947 if mf(filename):
954 948 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
955 949 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
956 950 break
957 951
958 952 return data
959 953
960 954 @unfilteredpropertycache
961 955 def _encodefilterpats(self):
962 956 return self._loadfilter('encode')
963 957
964 958 @unfilteredpropertycache
965 959 def _decodefilterpats(self):
966 960 return self._loadfilter('decode')
967 961
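# The encode/decode filters above come from hgrc sections; a hypothetical
# configuration (patterns map to filter commands, piped by default):
#   [encode]
#   **.txt = dos2unix
#   [decode]
#   **.txt = unix2dos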
968 962 def adddatafilter(self, name, filter):
969 963 self._datafilters[name] = filter
970 964
971 965 def wread(self, filename):
972 966 if self._link(filename):
973 967 data = self.wvfs.readlink(filename)
974 968 else:
975 969 data = self.wvfs.read(filename)
976 970 return self._filter(self._encodefilterpats, filename, data)
977 971
978 972 def wwrite(self, filename, data, flags, backgroundclose=False):
979 973 """write ``data`` into ``filename`` in the working directory
980 974
981 975 This returns length of written (maybe decoded) data.
982 976 """
983 977 data = self._filter(self._decodefilterpats, filename, data)
984 978 if 'l' in flags:
985 979 self.wvfs.symlink(data, filename)
986 980 else:
987 981 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
988 982 if 'x' in flags:
989 983 self.wvfs.setflags(filename, False, True)
990 984 return len(data)
991 985
992 986 def wwritedata(self, filename, data):
993 987 return self._filter(self._decodefilterpats, filename, data)
994 988
995 989 def currenttransaction(self):
996 990 """return the current transaction or None if non exists"""
997 991 if self._transref:
998 992 tr = self._transref()
999 993 else:
1000 994 tr = None
1001 995
1002 996 if tr and tr.running():
1003 997 return tr
1004 998 return None
1005 999
1006 1000 def transaction(self, desc, report=None):
1007 1001 if (self.ui.configbool('devel', 'all-warnings')
1008 1002 or self.ui.configbool('devel', 'check-locks')):
1009 1003 l = self._lockref and self._lockref()
1010 1004 if l is None or not l.held:
1011 1005 self.ui.develwarn('transaction with no lock')
1012 1006 tr = self.currenttransaction()
1013 1007 if tr is not None:
1014 1008 return tr.nest()
1015 1009
1016 1010 # abort here if the journal already exists
1017 1011 if self.svfs.exists("journal"):
1018 1012 raise error.RepoError(
1019 1013 _("abandoned transaction found"),
1020 1014 hint=_("run 'hg recover' to clean up transaction"))
1021 1015
1022 1016 # make journal.dirstate contain in-memory changes at this point
1023 1017 self.dirstate.write(None)
1024 1018
1025 1019 idbase = "%.40f#%f" % (random.random(), time.time())
1026 1020 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1027 1021 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1028 1022
1029 1023 self._writejournal(desc)
1030 1024 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1031 1025 if report:
1032 1026 rp = report
1033 1027 else:
1034 1028 rp = self.ui.warn
1035 1029 vfsmap = {'plain': self.vfs} # root of .hg/
1036 1030 # we must avoid cyclic reference between repo and transaction.
1037 1031 reporef = weakref.ref(self)
1038 1032 def validate(tr):
1039 1033 """will run pre-closing hooks"""
1040 1034 reporef().hook('pretxnclose', throw=True,
1041 1035 txnname=desc, **tr.hookargs)
1042 1036 def releasefn(tr, success):
1043 1037 repo = reporef()
1044 1038 if success:
1045 1039 # this should be explicitly invoked here, because
1046 1040 # in-memory changes aren't written out at closing
1047 1041 # transaction, if tr.addfilegenerator (via
1048 1042 # dirstate.write or so) isn't invoked while
1049 1043 # the transaction is running
1050 1044 repo.dirstate.write(None)
1051 1045 else:
1052 1046 # prevent in-memory changes from being written out at
1053 1047 # the end of outer wlock scope or so
1054 1048 repo.dirstate.invalidate()
1055 1049
1056 1050 # discard all changes (including ones already written
1057 1051 # out) in this transaction
1058 1052 repo.vfs.rename('journal.dirstate', 'dirstate')
1059 1053
1060 1054 repo.invalidate(clearfilecache=True)
1061 1055
1062 1056 tr = transaction.transaction(rp, self.svfs, vfsmap,
1063 1057 "journal",
1064 1058 "undo",
1065 1059 aftertrans(renames),
1066 1060 self.store.createmode,
1067 1061 validator=validate,
1068 1062 releasefn=releasefn)
1069 1063
1070 1064 tr.hookargs['txnid'] = txnid
1071 1065 # note: writing the fncache only during finalize means that the file is
1072 1066 # outdated when running hooks. As fncache is used for streaming clone,
1073 1067 # this is not expected to break anything that happens during the hooks.
1074 1068 tr.addfinalize('flush-fncache', self.store.write)
1075 1069 def txnclosehook(tr2):
1076 1070 """To be run if transaction is successful, will schedule a hook run
1077 1071 """
1078 1072 # Don't reference tr2 in hook() so we don't hold a reference.
1079 1073 # This reduces memory consumption when there are multiple
1080 1074 # transactions per lock. This can likely go away if issue5045
1081 1075 # fixes the function accumulation.
1082 1076 hookargs = tr2.hookargs
1083 1077
1084 1078 def hook():
1085 1079 reporef().hook('txnclose', throw=False, txnname=desc,
1086 1080 **hookargs)
1087 1081 reporef()._afterlock(hook)
1088 1082 tr.addfinalize('txnclose-hook', txnclosehook)
1089 1083 def txnaborthook(tr2):
1090 1084 """To be run if transaction is aborted
1091 1085 """
1092 1086 reporef().hook('txnabort', throw=False, txnname=desc,
1093 1087 **tr2.hookargs)
1094 1088 tr.addabort('txnabort-hook', txnaborthook)
1095 1089 # avoid eager cache invalidation. in-memory data should be identical
1096 1090 # to stored data if transaction has no error.
1097 1091 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1098 1092 self._transref = weakref.ref(tr)
1099 1093 return tr
1100 1094
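# A transaction-lifecycle sketch (an assumed caller, mirroring the
# 'with self.lock():' style used in recover() below):
#   with repo.lock():
#       tr = repo.transaction('my-operation')
#       try:
#           ...            # write store data through tr
#           tr.close()     # finalizers run; txnclose fires after unlock
#       finally:
#           tr.release()   # runs txnabort hooks if close() was not reached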
1101 1095 def _journalfiles(self):
1102 1096 return ((self.svfs, 'journal'),
1103 1097 (self.vfs, 'journal.dirstate'),
1104 1098 (self.vfs, 'journal.branch'),
1105 1099 (self.vfs, 'journal.desc'),
1106 1100 (self.vfs, 'journal.bookmarks'),
1107 1101 (self.svfs, 'journal.phaseroots'))
1108 1102
1109 1103 def undofiles(self):
1110 1104 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1111 1105
1112 1106 def _writejournal(self, desc):
1113 1107 self.vfs.write("journal.dirstate",
1114 1108 self.vfs.tryread("dirstate"))
1115 1109 self.vfs.write("journal.branch",
1116 1110 encoding.fromlocal(self.dirstate.branch()))
1117 1111 self.vfs.write("journal.desc",
1118 1112 "%d\n%s\n" % (len(self), desc))
1119 1113 self.vfs.write("journal.bookmarks",
1120 1114 self.vfs.tryread("bookmarks"))
1121 1115 self.svfs.write("journal.phaseroots",
1122 1116 self.svfs.tryread("phaseroots"))
1123 1117
1124 1118 def recover(self):
1125 1119 with self.lock():
1126 1120 if self.svfs.exists("journal"):
1127 1121 self.ui.status(_("rolling back interrupted transaction\n"))
1128 1122 vfsmap = {'': self.svfs,
1129 1123 'plain': self.vfs,}
1130 1124 transaction.rollback(self.svfs, vfsmap, "journal",
1131 1125 self.ui.warn)
1132 1126 self.invalidate()
1133 1127 return True
1134 1128 else:
1135 1129 self.ui.warn(_("no interrupted transaction available\n"))
1136 1130 return False
1137 1131
1138 1132 def rollback(self, dryrun=False, force=False):
1139 1133 wlock = lock = dsguard = None
1140 1134 try:
1141 1135 wlock = self.wlock()
1142 1136 lock = self.lock()
1143 1137 if self.svfs.exists("undo"):
1144 1138 dsguard = cmdutil.dirstateguard(self, 'rollback')
1145 1139
1146 1140 return self._rollback(dryrun, force, dsguard)
1147 1141 else:
1148 1142 self.ui.warn(_("no rollback information available\n"))
1149 1143 return 1
1150 1144 finally:
1151 1145 release(dsguard, lock, wlock)
1152 1146
1153 1147 @unfilteredmethod # Until we get smarter cache management
1154 1148 def _rollback(self, dryrun, force, dsguard):
1155 1149 ui = self.ui
1156 1150 try:
1157 1151 args = self.vfs.read('undo.desc').splitlines()
1158 1152 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1159 1153 if len(args) >= 3:
1160 1154 detail = args[2]
1161 1155 oldtip = oldlen - 1
1162 1156
1163 1157 if detail and ui.verbose:
1164 1158 msg = (_('repository tip rolled back to revision %s'
1165 1159 ' (undo %s: %s)\n')
1166 1160 % (oldtip, desc, detail))
1167 1161 else:
1168 1162 msg = (_('repository tip rolled back to revision %s'
1169 1163 ' (undo %s)\n')
1170 1164 % (oldtip, desc))
1171 1165 except IOError:
1172 1166 msg = _('rolling back unknown transaction\n')
1173 1167 desc = None
1174 1168
1175 1169 if not force and self['.'] != self['tip'] and desc == 'commit':
1176 1170 raise error.Abort(
1177 1171 _('rollback of last commit while not checked out '
1178 1172 'may lose data'), hint=_('use -f to force'))
1179 1173
1180 1174 ui.status(msg)
1181 1175 if dryrun:
1182 1176 return 0
1183 1177
1184 1178 parents = self.dirstate.parents()
1185 1179 self.destroying()
1186 1180 vfsmap = {'plain': self.vfs, '': self.svfs}
1187 1181 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1188 1182 if self.vfs.exists('undo.bookmarks'):
1189 1183 self.vfs.rename('undo.bookmarks', 'bookmarks')
1190 1184 if self.svfs.exists('undo.phaseroots'):
1191 1185 self.svfs.rename('undo.phaseroots', 'phaseroots')
1192 1186 self.invalidate()
1193 1187
1194 1188 parentgone = (parents[0] not in self.changelog.nodemap or
1195 1189 parents[1] not in self.changelog.nodemap)
1196 1190 if parentgone:
1197 1191 # prevent dirstateguard from overwriting already restored one
1198 1192 dsguard.close()
1199 1193
1200 1194 self.vfs.rename('undo.dirstate', 'dirstate')
1201 1195 try:
1202 1196 branch = self.vfs.read('undo.branch')
1203 1197 self.dirstate.setbranch(encoding.tolocal(branch))
1204 1198 except IOError:
1205 1199 ui.warn(_('named branch could not be reset: '
1206 1200 'current branch is still \'%s\'\n')
1207 1201 % self.dirstate.branch())
1208 1202
1209 1203 self.dirstate.invalidate()
1210 1204 parents = tuple([p.rev() for p in self[None].parents()])
1211 1205 if len(parents) > 1:
1212 1206 ui.status(_('working directory now based on '
1213 1207 'revisions %d and %d\n') % parents)
1214 1208 else:
1215 1209 ui.status(_('working directory now based on '
1216 1210 'revision %d\n') % parents)
1217 1211 mergemod.mergestate.clean(self, self['.'].node())
1218 1212
1219 1213 # TODO: if we know which new heads may result from this rollback, pass
1220 1214 # them to destroy(), which will prevent the branchhead cache from being
1221 1215 # invalidated.
1222 1216 self.destroyed()
1223 1217 return 0
1224 1218
1225 1219 def invalidatecaches(self):
1226 1220
1227 1221 if '_tagscache' in vars(self):
1228 1222 # can't use delattr on proxy
1229 1223 del self.__dict__['_tagscache']
1230 1224
1231 1225 self.unfiltered()._branchcaches.clear()
1232 1226 self.invalidatevolatilesets()
1233 1227
1234 1228 def invalidatevolatilesets(self):
1235 1229 self.filteredrevcache.clear()
1236 1230 obsolete.clearobscaches(self)
1237 1231
1238 1232 def invalidatedirstate(self):
1239 1233 '''Invalidates the dirstate, causing the next call to dirstate
1240 1234 to check if it was modified since the last time it was read,
1241 1235 rereading it if it has.
1242 1236
1243 1237 This is different from dirstate.invalidate() in that it doesn't always
1244 1238 reread the dirstate. Use dirstate.invalidate() if you want to
1245 1239 explicitly read the dirstate again (i.e. restoring it to a previous
1246 1240 known good state).'''
1247 1241 if hasunfilteredcache(self, 'dirstate'):
1248 1242 for k in self.dirstate._filecache:
1249 1243 try:
1250 1244 delattr(self.dirstate, k)
1251 1245 except AttributeError:
1252 1246 pass
1253 1247 delattr(self.unfiltered(), 'dirstate')
1254 1248
1255 1249 def invalidate(self, clearfilecache=False):
1256 1250 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1257 1251 for k in self._filecache.keys():
1258 1252 # dirstate is invalidated separately in invalidatedirstate()
1259 1253 if k == 'dirstate':
1260 1254 continue
1261 1255
1262 1256 if clearfilecache:
1263 1257 del self._filecache[k]
1264 1258 try:
1265 1259 delattr(unfiltered, k)
1266 1260 except AttributeError:
1267 1261 pass
1268 1262 self.invalidatecaches()
1269 1263 self.store.invalidatecaches()
1270 1264
1271 1265 def invalidateall(self):
1272 1266 '''Fully invalidates both store and non-store parts, causing the
1273 1267 subsequent operation to reread any outside changes.'''
1274 1268 # extension should hook this to invalidate its caches
1275 1269 self.invalidate()
1276 1270 self.invalidatedirstate()
1277 1271
1278 1272 def _refreshfilecachestats(self, tr):
1279 1273 """Reload stats of cached files so that they are flagged as valid"""
1280 1274 for k, ce in self._filecache.items():
1281 1275 if k == 'dirstate' or k not in self.__dict__:
1282 1276 continue
1283 1277 ce.refresh()
1284 1278
1285 1279 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1286 1280 inheritchecker=None, parentenvvar=None):
1287 1281 parentlock = None
1288 1282 # the contents of parentenvvar are used by the underlying lock to
1289 1283 # determine whether it can be inherited
1290 1284 if parentenvvar is not None:
1291 1285 parentlock = os.environ.get(parentenvvar)
1292 1286 try:
1293 1287 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1294 1288 acquirefn=acquirefn, desc=desc,
1295 1289 inheritchecker=inheritchecker,
1296 1290 parentlock=parentlock)
1297 1291 except error.LockHeld as inst:
1298 1292 if not wait:
1299 1293 raise
1300 1294 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1301 1295 (desc, inst.locker))
1302 1296 # default to 600 seconds timeout
1303 1297 l = lockmod.lock(vfs, lockname,
1304 1298 int(self.ui.config("ui", "timeout", "600")),
1305 1299 releasefn=releasefn, acquirefn=acquirefn,
1306 1300 desc=desc)
1307 1301 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1308 1302 return l
1309 1303
1310 1304 def _afterlock(self, callback):
1311 1305 """add a callback to be run when the repository is fully unlocked
1312 1306
1313 1307 The callback will be executed when the outermost lock is released
1314 1308 (with wlock being higher level than 'lock')."""
1315 1309 for ref in (self._wlockref, self._lockref):
1316 1310 l = ref and ref()
1317 1311 if l and l.held:
1318 1312 l.postrelease.append(callback)
1319 1313 break
1320 1314 else: # no lock has been found.
1321 1315 callback()
1322 1316
1323 1317 def lock(self, wait=True):
1324 1318 '''Lock the repository store (.hg/store) and return a weak reference
1325 1319 to the lock. Use this before modifying the store (e.g. committing or
1326 1320 stripping). If you are opening a transaction, get a lock as well.)
1327 1321
1328 1322 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1329 1323 'wlock' first to avoid a dead-lock hazard.'''
1330 1324 l = self._lockref and self._lockref()
1331 1325 if l is not None and l.held:
1332 1326 l.lock()
1333 1327 return l
1334 1328
1335 1329 l = self._lock(self.svfs, "lock", wait, None,
1336 1330 self.invalidate, _('repository %s') % self.origroot)
1337 1331 self._lockref = weakref.ref(l)
1338 1332 return l
1339 1333
1340 1334 def _wlockchecktransaction(self):
1341 1335 if self.currenttransaction() is not None:
1342 1336 raise error.LockInheritanceContractViolation(
1343 1337 'wlock cannot be inherited in the middle of a transaction')
1344 1338
1345 1339 def wlock(self, wait=True):
1346 1340 '''Lock the non-store parts of the repository (everything under
1347 1341 .hg except .hg/store) and return a weak reference to the lock.
1348 1342
1349 1343 Use this before modifying files in .hg.
1350 1344
1351 1345 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1352 1346 'wlock' first to avoid a dead-lock hazard.'''
1353 1347 l = self._wlockref and self._wlockref()
1354 1348 if l is not None and l.held:
1355 1349 l.lock()
1356 1350 return l
1357 1351
1358 1352 # We do not need to check for non-waiting lock acquisition. Such
1359 1353 # acquisition would not cause a dead-lock as it would just fail.
1360 1354 if wait and (self.ui.configbool('devel', 'all-warnings')
1361 1355 or self.ui.configbool('devel', 'check-locks')):
1362 1356 l = self._lockref and self._lockref()
1363 1357 if l is not None and l.held:
1364 1358 self.ui.develwarn('"wlock" acquired after "lock"')
1365 1359
1366 1360 def unlock():
1367 1361 if self.dirstate.pendingparentchange():
1368 1362 self.dirstate.invalidate()
1369 1363 else:
1370 1364 self.dirstate.write(None)
1371 1365
1372 1366 self._filecache['dirstate'].refresh()
1373 1367
1374 1368 l = self._lock(self.vfs, "wlock", wait, unlock,
1375 1369 self.invalidatedirstate, _('working directory of %s') %
1376 1370 self.origroot,
1377 1371 inheritchecker=self._wlockchecktransaction,
1378 1372 parentenvvar='HG_WLOCK_LOCKER')
1379 1373 self._wlockref = weakref.ref(l)
1380 1374 return l
1381 1375
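# Lock-ordering sketch per the docstrings above (assumes 'repo'):
#   with repo.wlock():      # always acquire wlock first...
#       with repo.lock():   # ...then the store lock
#           ...             # safe to modify .hg and .hg/store
# Acquiring them in the other order triggers the develwarn above.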
1382 1376 def _currentlock(self, lockref):
1383 1377 """Returns the lock if it's held, or None if it's not."""
1384 1378 if lockref is None:
1385 1379 return None
1386 1380 l = lockref()
1387 1381 if l is None or not l.held:
1388 1382 return None
1389 1383 return l
1390 1384
1391 1385 def currentwlock(self):
1392 1386 """Returns the wlock if it's held, or None if it's not."""
1393 1387 return self._currentlock(self._wlockref)
1394 1388
1395 1389 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1396 1390 """
1397 1391 commit an individual file as part of a larger transaction
1398 1392 """
1399 1393
1400 1394 fname = fctx.path()
1401 1395 fparent1 = manifest1.get(fname, nullid)
1402 1396 fparent2 = manifest2.get(fname, nullid)
1403 1397 if isinstance(fctx, context.filectx):
1404 1398 node = fctx.filenode()
1405 1399 if node in [fparent1, fparent2]:
1406 1400 self.ui.debug('reusing %s filelog entry\n' % fname)
1407 1401 return node
1408 1402
1409 1403 flog = self.file(fname)
1410 1404 meta = {}
1411 1405 copy = fctx.renamed()
1412 1406 if copy and copy[0] != fname:
1413 1407 # Mark the new revision of this file as a copy of another
1414 1408 # file. This copy data will effectively act as a parent
1415 1409 # of this new revision. If this is a merge, the first
1416 1410 # parent will be the nullid (meaning "look up the copy data")
1417 1411 # and the second one will be the other parent. For example:
1418 1412 #
1419 1413 # 0 --- 1 --- 3 rev1 changes file foo
1420 1414 # \ / rev2 renames foo to bar and changes it
1421 1415 # \- 2 -/ rev3 should have bar with all changes and
1422 1416 # should record that bar descends from
1423 1417 # bar in rev2 and foo in rev1
1424 1418 #
1425 1419 # this allows this merge to succeed:
1426 1420 #
1427 1421 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1428 1422 # \ / merging rev3 and rev4 should use bar@rev2
1429 1423 # \- 2 --- 4 as the merge base
1430 1424 #
1431 1425
1432 1426 cfname = copy[0]
1433 1427 crev = manifest1.get(cfname)
1434 1428 newfparent = fparent2
1435 1429
1436 1430 if manifest2: # branch merge
1437 1431 if fparent2 == nullid or crev is None: # copied on remote side
1438 1432 if cfname in manifest2:
1439 1433 crev = manifest2[cfname]
1440 1434 newfparent = fparent1
1441 1435
1442 1436 # Here, we used to search backwards through history to try to find
1443 1437 # where the file copy came from if the source of a copy was not in
1444 1438 # the parent directory. However, this doesn't actually make sense to
1445 1439 # do (what does a copy from something not in your working copy even
1446 1440 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1447 1441 # the user that copy information was dropped, so if they didn't
1448 1442 # expect this outcome it can be fixed, but this is the correct
1449 1443 # behavior in this circumstance.
1450 1444
1451 1445 if crev:
1452 1446 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1453 1447 meta["copy"] = cfname
1454 1448 meta["copyrev"] = hex(crev)
1455 1449 fparent1, fparent2 = nullid, newfparent
1456 1450 else:
1457 1451 self.ui.warn(_("warning: can't find ancestor for '%s' "
1458 1452 "copied from '%s'!\n") % (fname, cfname))
1459 1453
1460 1454 elif fparent1 == nullid:
1461 1455 fparent1, fparent2 = fparent2, nullid
1462 1456 elif fparent2 != nullid:
1463 1457 # is one parent an ancestor of the other?
1464 1458 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1465 1459 if fparent1 in fparentancestors:
1466 1460 fparent1, fparent2 = fparent2, nullid
1467 1461 elif fparent2 in fparentancestors:
1468 1462 fparent2 = nullid
1469 1463
1470 1464 # is the file changed?
1471 1465 text = fctx.data()
1472 1466 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1473 1467 changelist.append(fname)
1474 1468 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1475 1469 # are just the flags changed during merge?
1476 1470 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1477 1471 changelist.append(fname)
1478 1472
1479 1473 return fparent1
1480 1474
1481 1475 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1482 1476 """check for commit arguments that aren't commitable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook fires after the lock is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

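    # Illustrative sketch (not part of the original module): driving commit()
    # from a script. The repository path, user and message are hypothetical;
    # commit() returns None when there is nothing to commit.
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   node = repo.commit(text='fix frobnicator', user='alice <a@example.org>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
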
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # put the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent already has a higher phase, the resulting phase
                # will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

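    # Illustrative sketch (not part of the original module): commitctx() also
    # accepts an in-memory context, which is how tools such as convert create
    # commits without touching the working directory. The file name, content,
    # message and user below are hypothetical; see mercurial.context.memctx
    # for the authoritative signature.
    #
    #   from mercurial import context
    #   from mercurial.node import nullid
    #   def filectxfn(repo, mctx, path):
    #       return context.memfilectx(repo, path, 'file content\n')
    #   mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                         'synthetic commit', ['a.txt'], filectxfn,
    #                         user='bot <bot@example.org>')
    #   node = repo.commitctx(mctx)
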
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration, this is done from the nearest
        # filtered subset and is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

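    # Illustrative sketch (not part of the original module): walk() and
    # status() are thin conveniences over context objects. A hypothetical
    # caller:
    #
    #   from mercurial import match as matchmod
    #   m = matchmod.always(repo.root, '')
    #   for f in repo.walk(m, node='tip'):   # files in the tip changeset
    #       repo.ui.write('%s\n' % f)
    #   st = repo.status()                   # working directory vs. parent
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
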
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

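    # Illustrative sketch (not part of the original module); the branch name
    # is hypothetical:
    #
    #   from mercurial.node import short
    #   for h in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(h))   # newest head first
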
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

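    # Worked example (not part of the original module): for each (top, bottom)
    # pair, between() walks first parents from top towards bottom and records
    # the nodes whose distance from top is a power of two (1, 2, 4, 8, ...).
    # With a strictly linear history of revisions 0..10:
    #
    #   res = repo.between([(repo[10].node(), repo[0].node())])
    #   # res[0] == [repo[9].node(), repo[8].node(),
    #   #            repo[6].node(), repo[2].node()]
    #
    # This exponentially spaced sample is what the legacy discovery protocol
    # uses to narrow down the common ancestors of two repositories.
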
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

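    # Illustrative sketch (not part of the original module): an extension
    # could enforce a push policy by wrapping checkpush(). The policy below
    # is hypothetical.
    #
    #   from mercurial import error, extensions, localrepo
    #   def _checkpush(orig, repo, pushop):
    #       if pushop.remote.url().startswith('http:'):
    #           raise error.Abort('pushing over plain http is forbidden')
    #       return orig(repo, pushop)
    #   def uisetup(ui):
    #       extensions.wrapfunction(localrepo.localrepository, 'checkpush',
    #                               _checkpush)
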
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

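    # Illustrative sketch (not part of the original module): the pushkey
    # protocol transports flat string dictionaries per namespace. The
    # bookmark name below is hypothetical; in the 'bookmarks' namespace the
    # old/new values are hex nodes, with '' meaning "not currently set".
    #
    #   marks = repo.listkeys('bookmarks')        # {name: hexnode}
    #   ok = repo.pushkey('bookmarks', 'stable',
    #                     marks.get('stable', ''), repo['tip'].hex())
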
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

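    # Illustrative sketch (not part of the original module): the message below
    # is hypothetical. savecommitmessage() writes the text to
    # .hg/last-message.txt and returns the path relative to the cwd, which
    # commit() reports when a transaction is rolled back.
    #
    #   msgfn = repo.savecommitmessage('WIP: draft message\n')
    #   repo.ui.status('message saved in %s\n' % msgfn)
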
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
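
# Illustrative sketch (not part of the original module): an extension adding
# a custom requirement via the wrap point mentioned in the docstring above.
# The config knob and requirement name are hypothetical.
#
#   from mercurial import extensions, localrepo
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'fancystore', False):
#           reqs.add('exp-myext-fancystore')
#       return reqs
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)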