localrepo: switch to mergestate.clean()...
Siddharth Agarwal
r26989:a65ea44f default
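This commit replaces the two-step merge-state reset in localrepo._rollback() with a single call to the mergestate.clean() classmethod (see the changed lines around old line 1171 below). A minimal sketch of the idea, assuming clean() simply constructs a mergestate and resets it to the given node; the classmethod body shown here is illustrative, not the actual merge.py implementation:

    # before: construct a merge state, then reset it to the new working parent
    ms = mergemod.mergestate(self)
    ms.reset(self['.'].node())

    # after: a single call that returns an already-clean merge state
    mergemod.mergestate.clean(self, self['.'].node())

    # hypothetical shape of such a classmethod (illustration only):
    class mergestate(object):
        @classmethod
        def clean(cls, repo, node=None):
            """Initialize a brand-new merge state, discarding any
            existing state."""
            ms = cls(repo)
            ms.reset(node)
            return ms

The call site no longer needs the intermediate ms variable, and the reset-to-node intent is named explicitly.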
@@ -1,1927 +1,1926 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, wdirrev, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset, cmdutil
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.getunbundler(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 try:
129 129 cg = exchange.readbundle(self.ui, cg, None)
130 130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 131 if util.safehasattr(ret, 'getchunks'):
132 132 # This is a bundle20 object, turn it into an unbundler.
133 133 # This little dance should be dropped eventually when the
134 134 # API is finally improved.
135 135 stream = util.chunkbuffer(ret.getchunks())
136 136 ret = bundle2.getunbundler(self.ui, stream)
137 137 return ret
138 138 except Exception as exc:
139 139 # If the exception contains output salvaged from a bundle2
140 140 # reply, we need to make sure it is printed before continuing
141 141 # to fail. So we build a bundle2 with such output and consume
142 142 # it directly.
143 143 #
144 144 # This is not very elegant but allows a "simple" solution for
145 145 # issue4594
146 146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 147 if output:
148 148 bundler = bundle2.bundle20(self._repo.ui)
149 149 for out in output:
150 150 bundler.addpart(out)
151 151 stream = util.chunkbuffer(bundler.getchunks())
152 152 b = bundle2.getunbundler(self.ui, stream)
153 153 bundle2.processbundle(self._repo, b)
154 154 raise
155 155 except error.PushRaced as exc:
156 156 raise error.ResponseError(_('push failed:'), str(exc))
157 157
158 158 def lock(self):
159 159 return self._repo.lock()
160 160
161 161 def addchangegroup(self, cg, source, url):
162 162 return cg.apply(self._repo, source, url)
163 163
164 164 def pushkey(self, namespace, key, old, new):
165 165 return self._repo.pushkey(namespace, key, old, new)
166 166
167 167 def listkeys(self, namespace):
168 168 return self._repo.listkeys(namespace)
169 169
170 170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 171 '''used to test argument passing over the wire'''
172 172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
174 174 class locallegacypeer(localpeer):
175 175 '''peer extension which implements legacy methods too; used for tests with
176 176 restricted capabilities'''
177 177
178 178 def __init__(self, repo):
179 179 localpeer.__init__(self, repo, caps=legacycaps)
180 180
181 181 def branches(self, nodes):
182 182 return self._repo.branches(nodes)
183 183
184 184 def between(self, pairs):
185 185 return self._repo.between(pairs)
186 186
187 187 def changegroup(self, basenodes, source):
188 188 return changegroup.changegroup(self._repo, basenodes, source)
189 189
190 190 def changegroupsubset(self, bases, heads, source):
191 191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 196 'manifestv2'))
197 197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 198 'dotencode'))
199 199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 200 filtername = None
201 201
202 202 # a list of (ui, featureset) functions.
203 203 # only functions defined in module of enabled extensions are invoked
204 204 featuresetupfuncs = set()
205 205
206 206 def _baserequirements(self, create):
207 207 return ['revlogv1']
208 208
209 209 def __init__(self, baseui, path=None, create=False):
210 210 self.requirements = set()
211 211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 212 self.wopener = self.wvfs
213 213 self.root = self.wvfs.base
214 214 self.path = self.wvfs.join(".hg")
215 215 self.origroot = path
216 216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 217 self.vfs = scmutil.vfs(self.path)
218 218 self.opener = self.vfs
219 219 self.baseui = baseui
220 220 self.ui = baseui.copy()
221 221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 222 # A list of callbacks to shape the phase if no data were found.
223 223 # Callbacks are in the form: func(repo, roots) --> processed root.
224 224 # This list is to be filled by extensions during repo setup
225 225 self._phasedefaults = []
226 226 try:
227 227 self.ui.readconfig(self.join("hgrc"), self.root)
228 228 extensions.loadall(self.ui)
229 229 except IOError:
230 230 pass
231 231
232 232 if self.featuresetupfuncs:
233 233 self.supported = set(self._basesupported) # use private copy
234 234 extmods = set(m.__name__ for n, m
235 235 in extensions.extensions(self.ui))
236 236 for setupfunc in self.featuresetupfuncs:
237 237 if setupfunc.__module__ in extmods:
238 238 setupfunc(self.ui, self.supported)
239 239 else:
240 240 self.supported = self._basesupported
241 241
242 242 if not self.vfs.isdir():
243 243 if create:
244 244 if not self.wvfs.exists():
245 245 self.wvfs.makedirs()
246 246 self.vfs.makedir(notindexed=True)
247 247 self.requirements.update(self._baserequirements(create))
248 248 if self.ui.configbool('format', 'usestore', True):
249 249 self.vfs.mkdir("store")
250 250 self.requirements.add("store")
251 251 if self.ui.configbool('format', 'usefncache', True):
252 252 self.requirements.add("fncache")
253 253 if self.ui.configbool('format', 'dotencode', True):
254 254 self.requirements.add('dotencode')
255 255 # create an invalid changelog
256 256 self.vfs.append(
257 257 "00changelog.i",
258 258 '\0\0\0\2' # represents revlogv2
259 259 ' dummy changelog to prevent using the old repo layout'
260 260 )
261 261 if scmutil.gdinitconfig(self.ui):
262 262 self.requirements.add("generaldelta")
263 263 if self.ui.configbool('experimental', 'treemanifest', False):
264 264 self.requirements.add("treemanifest")
265 265 if self.ui.configbool('experimental', 'manifestv2', False):
266 266 self.requirements.add("manifestv2")
267 267 else:
268 268 raise error.RepoError(_("repository %s not found") % path)
269 269 elif create:
270 270 raise error.RepoError(_("repository %s already exists") % path)
271 271 else:
272 272 try:
273 273 self.requirements = scmutil.readrequires(
274 274 self.vfs, self.supported)
275 275 except IOError as inst:
276 276 if inst.errno != errno.ENOENT:
277 277 raise
278 278
279 279 self.sharedpath = self.path
280 280 try:
281 281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
282 282 realpath=True)
283 283 s = vfs.base
284 284 if not vfs.exists():
285 285 raise error.RepoError(
286 286 _('.hg/sharedpath points to nonexistent directory %s') % s)
287 287 self.sharedpath = s
288 288 except IOError as inst:
289 289 if inst.errno != errno.ENOENT:
290 290 raise
291 291
292 292 self.store = store.store(
293 293 self.requirements, self.sharedpath, scmutil.vfs)
294 294 self.spath = self.store.path
295 295 self.svfs = self.store.vfs
296 296 self.sjoin = self.store.join
297 297 self.vfs.createmode = self.store.createmode
298 298 self._applyopenerreqs()
299 299 if create:
300 300 self._writerequirements()
301 301
302 302 self._dirstatevalidatewarned = False
303 303
304 304 self._branchcaches = {}
305 305 self._revbranchcache = None
306 306 self.filterpats = {}
307 307 self._datafilters = {}
308 308 self._transref = self._lockref = self._wlockref = None
309 309
310 310 # A cache for various files under .hg/ that tracks file changes,
311 311 # (used by the filecache decorator)
312 312 #
313 313 # Maps a property name to its util.filecacheentry
314 314 self._filecache = {}
315 315
316 316 # hold sets of revision to be filtered
317 317 # should be cleared when something might have changed the filter value:
318 318 # - new changesets,
319 319 # - phase change,
320 320 # - new obsolescence marker,
321 321 # - working directory parent change,
322 322 # - bookmark changes
323 323 self.filteredrevcache = {}
324 324
325 325 # generic mapping between names and nodes
326 326 self.names = namespaces.namespaces()
327 327
328 328 def close(self):
329 329 self._writecaches()
330 330
331 331 def _writecaches(self):
332 332 if self._revbranchcache:
333 333 self._revbranchcache.write()
334 334
335 335 def _restrictcapabilities(self, caps):
336 336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 337 caps = set(caps)
338 338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 339 caps.add('bundle2=' + urllib.quote(capsblob))
340 340 return caps
341 341
342 342 def _applyopenerreqs(self):
343 343 self.svfs.options = dict((r, 1) for r in self.requirements
344 344 if r in self.openerreqs)
345 345 # experimental config: format.chunkcachesize
346 346 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
347 347 if chunkcachesize is not None:
348 348 self.svfs.options['chunkcachesize'] = chunkcachesize
349 349 # experimental config: format.maxchainlen
350 350 maxchainlen = self.ui.configint('format', 'maxchainlen')
351 351 if maxchainlen is not None:
352 352 self.svfs.options['maxchainlen'] = maxchainlen
353 353 # experimental config: format.manifestcachesize
354 354 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
355 355 if manifestcachesize is not None:
356 356 self.svfs.options['manifestcachesize'] = manifestcachesize
357 357 # experimental config: format.aggressivemergedeltas
358 358 aggressivemergedeltas = self.ui.configbool('format',
359 359 'aggressivemergedeltas', False)
360 360 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
361 361 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
362 362
363 363 def _writerequirements(self):
364 364 scmutil.writerequires(self.vfs, self.requirements)
365 365
366 366 def _checknested(self, path):
367 367 """Determine if path is a legal nested repository."""
368 368 if not path.startswith(self.root):
369 369 return False
370 370 subpath = path[len(self.root) + 1:]
371 371 normsubpath = util.pconvert(subpath)
372 372
373 373 # XXX: Checking against the current working copy is wrong in
374 374 # the sense that it can reject things like
375 375 #
376 376 # $ hg cat -r 10 sub/x.txt
377 377 #
378 378 # if sub/ is no longer a subrepository in the working copy
379 379 # parent revision.
380 380 #
381 381 # However, it can of course also allow things that would have
382 382 # been rejected before, such as the above cat command if sub/
383 383 # is a subrepository now, but was a normal directory before.
384 384 # The old path auditor would have rejected by mistake since it
385 385 # panics when it sees sub/.hg/.
386 386 #
387 387 # All in all, checking against the working copy seems sensible
388 388 # since we want to prevent access to nested repositories on
389 389 # the filesystem *now*.
390 390 ctx = self[None]
391 391 parts = util.splitpath(subpath)
392 392 while parts:
393 393 prefix = '/'.join(parts)
394 394 if prefix in ctx.substate:
395 395 if prefix == normsubpath:
396 396 return True
397 397 else:
398 398 sub = ctx.sub(prefix)
399 399 return sub.checknested(subpath[len(prefix) + 1:])
400 400 else:
401 401 parts.pop()
402 402 return False
403 403
404 404 def peer(self):
405 405 return localpeer(self) # not cached to avoid reference cycle
406 406
407 407 def unfiltered(self):
408 408 """Return unfiltered version of the repository
409 409
410 410 Intended to be overridden by the filtered repo."""
411 411 return self
412 412
413 413 def filtered(self, name):
414 414 """Return a filtered version of a repository"""
415 415 # build a new class with the mixin and the current class
416 416 # (possibly subclass of the repo)
417 417 class proxycls(repoview.repoview, self.unfiltered().__class__):
418 418 pass
419 419 return proxycls(self, name)
420 420
421 421 @repofilecache('bookmarks')
422 422 def _bookmarks(self):
423 423 return bookmarks.bmstore(self)
424 424
425 425 @repofilecache('bookmarks.current')
426 426 def _activebookmark(self):
427 427 return bookmarks.readactive(self)
428 428
429 429 def bookmarkheads(self, bookmark):
430 430 name = bookmark.split('@', 1)[0]
431 431 heads = []
432 432 for mark, n in self._bookmarks.iteritems():
433 433 if mark.split('@', 1)[0] == name:
434 434 heads.append(n)
435 435 return heads
436 436
437 437 # _phaserevs and _phasesets depend on changelog. what we need is to
438 438 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
439 439 # can't be easily expressed in filecache mechanism.
440 440 @storecache('phaseroots', '00changelog.i')
441 441 def _phasecache(self):
442 442 return phases.phasecache(self, self._phasedefaults)
443 443
444 444 @storecache('obsstore')
445 445 def obsstore(self):
446 446 # read default format for new obsstore.
447 447 # developer config: format.obsstore-version
448 448 defaultformat = self.ui.configint('format', 'obsstore-version', None)
449 449 # rely on obsstore class default when possible.
450 450 kwargs = {}
451 451 if defaultformat is not None:
452 452 kwargs['defaultformat'] = defaultformat
453 453 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
454 454 store = obsolete.obsstore(self.svfs, readonly=readonly,
455 455 **kwargs)
456 456 if store and readonly:
457 457 self.ui.warn(
458 458 _('obsolete feature not enabled but %i markers found!\n')
459 459 % len(list(store)))
460 460 return store
461 461
462 462 @storecache('00changelog.i')
463 463 def changelog(self):
464 464 c = changelog.changelog(self.svfs)
465 465 if 'HG_PENDING' in os.environ:
466 466 p = os.environ['HG_PENDING']
467 467 if p.startswith(self.root):
468 468 c.readpending('00changelog.i.a')
469 469 return c
470 470
471 471 @storecache('00manifest.i')
472 472 def manifest(self):
473 473 return manifest.manifest(self.svfs)
474 474
475 475 def dirlog(self, dir):
476 476 return self.manifest.dirlog(dir)
477 477
478 478 @repofilecache('dirstate')
479 479 def dirstate(self):
480 480 return dirstate.dirstate(self.vfs, self.ui, self.root,
481 481 self._dirstatevalidate)
482 482
483 483 def _dirstatevalidate(self, node):
484 484 try:
485 485 self.changelog.rev(node)
486 486 return node
487 487 except error.LookupError:
488 488 if not self._dirstatevalidatewarned:
489 489 self._dirstatevalidatewarned = True
490 490 self.ui.warn(_("warning: ignoring unknown"
491 491 " working parent %s!\n") % short(node))
492 492 return nullid
493 493
494 494 def __getitem__(self, changeid):
495 495 if changeid is None or changeid == wdirrev:
496 496 return context.workingctx(self)
497 497 if isinstance(changeid, slice):
498 498 return [context.changectx(self, i)
499 499 for i in xrange(*changeid.indices(len(self)))
500 500 if i not in self.changelog.filteredrevs]
501 501 return context.changectx(self, changeid)
502 502
503 503 def __contains__(self, changeid):
504 504 try:
505 505 self[changeid]
506 506 return True
507 507 except error.RepoLookupError:
508 508 return False
509 509
510 510 def __nonzero__(self):
511 511 return True
512 512
513 513 def __len__(self):
514 514 return len(self.changelog)
515 515
516 516 def __iter__(self):
517 517 return iter(self.changelog)
518 518
519 519 def revs(self, expr, *args):
520 520 '''Return a list of revisions matching the given revset'''
521 521 expr = revset.formatspec(expr, *args)
522 522 m = revset.match(None, expr)
523 523 return m(self)
524 524
525 525 def set(self, expr, *args):
526 526 '''
527 527 Yield a context for each matching revision, after doing arg
528 528 replacement via revset.formatspec
529 529 '''
530 530 for r in self.revs(expr, *args):
531 531 yield self[r]
532 532
533 533 def url(self):
534 534 return 'file:' + self.root
535 535
536 536 def hook(self, name, throw=False, **args):
537 537 """Call a hook, passing this repo instance.
538 538
539 539 This is a convenience method to aid invoking hooks. Extensions likely
540 540 won't call this unless they have registered a custom hook or are
541 541 replacing code that is expected to call a hook.
542 542 """
543 543 return hook.hook(self.ui, self, name, throw, **args)
544 544
545 545 @unfilteredmethod
546 546 def _tag(self, names, node, message, local, user, date, extra=None,
547 547 editor=False):
548 548 if isinstance(names, str):
549 549 names = (names,)
550 550
551 551 branches = self.branchmap()
552 552 for name in names:
553 553 self.hook('pretag', throw=True, node=hex(node), tag=name,
554 554 local=local)
555 555 if name in branches:
556 556 self.ui.warn(_("warning: tag %s conflicts with existing"
557 557 " branch name\n") % name)
558 558
559 559 def writetags(fp, names, munge, prevtags):
560 560 fp.seek(0, 2)
561 561 if prevtags and prevtags[-1] != '\n':
562 562 fp.write('\n')
563 563 for name in names:
564 564 if munge:
565 565 m = munge(name)
566 566 else:
567 567 m = name
568 568
569 569 if (self._tagscache.tagtypes and
570 570 name in self._tagscache.tagtypes):
571 571 old = self.tags().get(name, nullid)
572 572 fp.write('%s %s\n' % (hex(old), m))
573 573 fp.write('%s %s\n' % (hex(node), m))
574 574 fp.close()
575 575
576 576 prevtags = ''
577 577 if local:
578 578 try:
579 579 fp = self.vfs('localtags', 'r+')
580 580 except IOError:
581 581 fp = self.vfs('localtags', 'a')
582 582 else:
583 583 prevtags = fp.read()
584 584
585 585 # local tags are stored in the current charset
586 586 writetags(fp, names, None, prevtags)
587 587 for name in names:
588 588 self.hook('tag', node=hex(node), tag=name, local=local)
589 589 return
590 590
591 591 try:
592 592 fp = self.wfile('.hgtags', 'rb+')
593 593 except IOError as e:
594 594 if e.errno != errno.ENOENT:
595 595 raise
596 596 fp = self.wfile('.hgtags', 'ab')
597 597 else:
598 598 prevtags = fp.read()
599 599
600 600 # committed tags are stored in UTF-8
601 601 writetags(fp, names, encoding.fromlocal, prevtags)
602 602
603 603 fp.close()
604 604
605 605 self.invalidatecaches()
606 606
607 607 if '.hgtags' not in self.dirstate:
608 608 self[None].add(['.hgtags'])
609 609
610 610 m = matchmod.exact(self.root, '', ['.hgtags'])
611 611 tagnode = self.commit(message, user, date, extra=extra, match=m,
612 612 editor=editor)
613 613
614 614 for name in names:
615 615 self.hook('tag', node=hex(node), tag=name, local=local)
616 616
617 617 return tagnode
618 618
619 619 def tag(self, names, node, message, local, user, date, editor=False):
620 620 '''tag a revision with one or more symbolic names.
621 621
622 622 names is a list of strings or, when adding a single tag, names may be a
623 623 string.
624 624
625 625 if local is True, the tags are stored in a per-repository file.
626 626 otherwise, they are stored in the .hgtags file, and a new
627 627 changeset is committed with the change.
628 628
629 629 keyword arguments:
630 630
631 631 local: whether to store tags in non-version-controlled file
632 632 (default False)
633 633
634 634 message: commit message to use if committing
635 635
636 636 user: name of user to use if committing
637 637
638 638 date: date tuple to use if committing'''
639 639
640 640 if not local:
641 641 m = matchmod.exact(self.root, '', ['.hgtags'])
642 642 if any(self.status(match=m, unknown=True, ignored=True)):
643 643 raise error.Abort(_('working copy of .hgtags is changed'),
644 644 hint=_('please commit .hgtags manually'))
645 645
646 646 self.tags() # instantiate the cache
647 647 self._tag(names, node, message, local, user, date, editor=editor)
648 648
649 649 @filteredpropertycache
650 650 def _tagscache(self):
651 651 '''Returns a tagscache object that contains various tag-related
652 652 caches.'''
653 653
654 654 # This simplifies its cache management by having one decorated
655 655 # function (this one) and the rest simply fetch things from it.
656 656 class tagscache(object):
657 657 def __init__(self):
658 658 # These two define the set of tags for this repository. tags
659 659 # maps tag name to node; tagtypes maps tag name to 'global' or
660 660 # 'local'. (Global tags are defined by .hgtags across all
661 661 # heads, and local tags are defined in .hg/localtags.)
662 662 # They constitute the in-memory cache of tags.
663 663 self.tags = self.tagtypes = None
664 664
665 665 self.nodetagscache = self.tagslist = None
666 666
667 667 cache = tagscache()
668 668 cache.tags, cache.tagtypes = self._findtags()
669 669
670 670 return cache
671 671
672 672 def tags(self):
673 673 '''return a mapping of tag to node'''
674 674 t = {}
675 675 if self.changelog.filteredrevs:
676 676 tags, tt = self._findtags()
677 677 else:
678 678 tags = self._tagscache.tags
679 679 for k, v in tags.iteritems():
680 680 try:
681 681 # ignore tags to unknown nodes
682 682 self.changelog.rev(v)
683 683 t[k] = v
684 684 except (error.LookupError, ValueError):
685 685 pass
686 686 return t
687 687
688 688 def _findtags(self):
689 689 '''Do the hard work of finding tags. Return a pair of dicts
690 690 (tags, tagtypes) where tags maps tag name to node, and tagtypes
691 691 maps tag name to a string like \'global\' or \'local\'.
692 692 Subclasses or extensions are free to add their own tags, but
693 693 should be aware that the returned dicts will be retained for the
694 694 duration of the localrepo object.'''
695 695
696 696 # XXX what tagtype should subclasses/extensions use? Currently
697 697 # mq and bookmarks add tags, but do not set the tagtype at all.
698 698 # Should each extension invent its own tag type? Should there
699 699 # be one tagtype for all such "virtual" tags? Or is the status
700 700 # quo fine?
701 701
702 702 alltags = {} # map tag name to (node, hist)
703 703 tagtypes = {}
704 704
705 705 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
706 706 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
707 707
708 708 # Build the return dicts. Have to re-encode tag names because
709 709 # the tags module always uses UTF-8 (in order not to lose info
710 710 # writing to the cache), but the rest of Mercurial wants them in
711 711 # local encoding.
712 712 tags = {}
713 713 for (name, (node, hist)) in alltags.iteritems():
714 714 if node != nullid:
715 715 tags[encoding.tolocal(name)] = node
716 716 tags['tip'] = self.changelog.tip()
717 717 tagtypes = dict([(encoding.tolocal(name), value)
718 718 for (name, value) in tagtypes.iteritems()])
719 719 return (tags, tagtypes)
720 720
721 721 def tagtype(self, tagname):
722 722 '''
723 723 return the type of the given tag. result can be:
724 724
725 725 'local' : a local tag
726 726 'global' : a global tag
727 727 None : tag does not exist
728 728 '''
729 729
730 730 return self._tagscache.tagtypes.get(tagname)
731 731
732 732 def tagslist(self):
733 733 '''return a list of tags ordered by revision'''
734 734 if not self._tagscache.tagslist:
735 735 l = []
736 736 for t, n in self.tags().iteritems():
737 737 l.append((self.changelog.rev(n), t, n))
738 738 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
739 739
740 740 return self._tagscache.tagslist
741 741
742 742 def nodetags(self, node):
743 743 '''return the tags associated with a node'''
744 744 if not self._tagscache.nodetagscache:
745 745 nodetagscache = {}
746 746 for t, n in self._tagscache.tags.iteritems():
747 747 nodetagscache.setdefault(n, []).append(t)
748 748 for tags in nodetagscache.itervalues():
749 749 tags.sort()
750 750 self._tagscache.nodetagscache = nodetagscache
751 751 return self._tagscache.nodetagscache.get(node, [])
752 752
753 753 def nodebookmarks(self, node):
754 754 marks = []
755 755 for bookmark, n in self._bookmarks.iteritems():
756 756 if n == node:
757 757 marks.append(bookmark)
758 758 return sorted(marks)
759 759
760 760 def branchmap(self):
761 761 '''returns a dictionary {branch: [branchheads]} with branchheads
762 762 ordered by increasing revision number'''
763 763 branchmap.updatecache(self)
764 764 return self._branchcaches[self.filtername]
765 765
766 766 @unfilteredmethod
767 767 def revbranchcache(self):
768 768 if not self._revbranchcache:
769 769 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
770 770 return self._revbranchcache
771 771
772 772 def branchtip(self, branch, ignoremissing=False):
773 773 '''return the tip node for a given branch
774 774
775 775 If ignoremissing is True, then this method will not raise an error.
776 776 This is helpful for callers that only expect None for a missing branch
777 777 (e.g. namespace).
778 778
779 779 '''
780 780 try:
781 781 return self.branchmap().branchtip(branch)
782 782 except KeyError:
783 783 if not ignoremissing:
784 784 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
785 785 else:
786 786 pass
787 787
788 788 def lookup(self, key):
789 789 return self[key].node()
790 790
791 791 def lookupbranch(self, key, remote=None):
792 792 repo = remote or self
793 793 if key in repo.branchmap():
794 794 return key
795 795
796 796 repo = (remote and remote.local()) and remote or self
797 797 return repo[key].branch()
798 798
799 799 def known(self, nodes):
800 800 nm = self.changelog.nodemap
801 801 pc = self._phasecache
802 802 result = []
803 803 for n in nodes:
804 804 r = nm.get(n)
805 805 resp = not (r is None or pc.phase(self, r) >= phases.secret)
806 806 result.append(resp)
807 807 return result
808 808
809 809 def local(self):
810 810 return self
811 811
812 812 def publishing(self):
813 813 # it's safe (and desirable) to trust the publish flag unconditionally
814 814 # so that we don't finalize changes shared between users via ssh or nfs
815 815 return self.ui.configbool('phases', 'publish', True, untrusted=True)
816 816
817 817 def cancopy(self):
818 818 # so statichttprepo's override of local() works
819 819 if not self.local():
820 820 return False
821 821 if not self.publishing():
822 822 return True
823 823 # if publishing we can't copy if there is filtered content
824 824 return not self.filtered('visible').changelog.filteredrevs
825 825
826 826 def shared(self):
827 827 '''the type of shared repository (None if not shared)'''
828 828 if self.sharedpath != self.path:
829 829 return 'store'
830 830 return None
831 831
832 832 def join(self, f, *insidef):
833 833 return self.vfs.join(os.path.join(f, *insidef))
834 834
835 835 def wjoin(self, f, *insidef):
836 836 return self.vfs.reljoin(self.root, f, *insidef)
837 837
838 838 def file(self, f):
839 839 if f[0] == '/':
840 840 f = f[1:]
841 841 return filelog.filelog(self.svfs, f)
842 842
843 843 def changectx(self, changeid):
844 844 return self[changeid]
845 845
846 846 def parents(self, changeid=None):
847 847 '''get list of changectxs for parents of changeid'''
848 848 return self[changeid].parents()
849 849
850 850 def setparents(self, p1, p2=nullid):
851 851 self.dirstate.beginparentchange()
852 852 copies = self.dirstate.setparents(p1, p2)
853 853 pctx = self[p1]
854 854 if copies:
855 855 # Adjust copy records, the dirstate cannot do it, it
856 856 # requires access to parents manifests. Preserve them
857 857 # only for entries added to first parent.
858 858 for f in copies:
859 859 if f not in pctx and copies[f] in pctx:
860 860 self.dirstate.copy(copies[f], f)
861 861 if p2 == nullid:
862 862 for f, s in sorted(self.dirstate.copies().items()):
863 863 if f not in pctx and s not in pctx:
864 864 self.dirstate.copy(None, f)
865 865 self.dirstate.endparentchange()
866 866
867 867 def filectx(self, path, changeid=None, fileid=None):
868 868 """changeid can be a changeset revision, node, or tag.
869 869 fileid can be a file revision or node."""
870 870 return context.filectx(self, path, changeid, fileid)
871 871
872 872 def getcwd(self):
873 873 return self.dirstate.getcwd()
874 874
875 875 def pathto(self, f, cwd=None):
876 876 return self.dirstate.pathto(f, cwd)
877 877
878 878 def wfile(self, f, mode='r'):
879 879 return self.wvfs(f, mode)
880 880
881 881 def _link(self, f):
882 882 return self.wvfs.islink(f)
883 883
884 884 def _loadfilter(self, filter):
885 885 if filter not in self.filterpats:
886 886 l = []
887 887 for pat, cmd in self.ui.configitems(filter):
888 888 if cmd == '!':
889 889 continue
890 890 mf = matchmod.match(self.root, '', [pat])
891 891 fn = None
892 892 params = cmd
893 893 for name, filterfn in self._datafilters.iteritems():
894 894 if cmd.startswith(name):
895 895 fn = filterfn
896 896 params = cmd[len(name):].lstrip()
897 897 break
898 898 if not fn:
899 899 fn = lambda s, c, **kwargs: util.filter(s, c)
900 900 # Wrap old filters not supporting keyword arguments
901 901 if not inspect.getargspec(fn)[2]:
902 902 oldfn = fn
903 903 fn = lambda s, c, **kwargs: oldfn(s, c)
904 904 l.append((mf, fn, params))
905 905 self.filterpats[filter] = l
906 906 return self.filterpats[filter]
907 907
908 908 def _filter(self, filterpats, filename, data):
909 909 for mf, fn, cmd in filterpats:
910 910 if mf(filename):
911 911 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
912 912 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
913 913 break
914 914
915 915 return data
916 916
917 917 @unfilteredpropertycache
918 918 def _encodefilterpats(self):
919 919 return self._loadfilter('encode')
920 920
921 921 @unfilteredpropertycache
922 922 def _decodefilterpats(self):
923 923 return self._loadfilter('decode')
924 924
925 925 def adddatafilter(self, name, filter):
926 926 self._datafilters[name] = filter
927 927
928 928 def wread(self, filename):
929 929 if self._link(filename):
930 930 data = self.wvfs.readlink(filename)
931 931 else:
932 932 data = self.wvfs.read(filename)
933 933 return self._filter(self._encodefilterpats, filename, data)
934 934
935 935 def wwrite(self, filename, data, flags):
936 936 """write ``data`` into ``filename`` in the working directory
937 937
938 938 This returns the length of the written (possibly decoded) data.
939 939 """
940 940 data = self._filter(self._decodefilterpats, filename, data)
941 941 if 'l' in flags:
942 942 self.wvfs.symlink(data, filename)
943 943 else:
944 944 self.wvfs.write(filename, data)
945 945 if 'x' in flags:
946 946 self.wvfs.setflags(filename, False, True)
947 947 return len(data)
948 948
949 949 def wwritedata(self, filename, data):
950 950 return self._filter(self._decodefilterpats, filename, data)
951 951
952 952 def currenttransaction(self):
953 953 """return the current transaction or None if non exists"""
954 954 if self._transref:
955 955 tr = self._transref()
956 956 else:
957 957 tr = None
958 958
959 959 if tr and tr.running():
960 960 return tr
961 961 return None
962 962
963 963 def transaction(self, desc, report=None):
964 964 if (self.ui.configbool('devel', 'all-warnings')
965 965 or self.ui.configbool('devel', 'check-locks')):
966 966 l = self._lockref and self._lockref()
967 967 if l is None or not l.held:
968 968 self.ui.develwarn('transaction with no lock')
969 969 tr = self.currenttransaction()
970 970 if tr is not None:
971 971 return tr.nest()
972 972
973 973 # abort here if the journal already exists
974 974 if self.svfs.exists("journal"):
975 975 raise error.RepoError(
976 976 _("abandoned transaction found"),
977 977 hint=_("run 'hg recover' to clean up transaction"))
978 978
979 979 # make journal.dirstate contain in-memory changes at this point
980 980 self.dirstate.write(None)
981 981
982 982 idbase = "%.40f#%f" % (random.random(), time.time())
983 983 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
984 984 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
985 985
986 986 self._writejournal(desc)
987 987 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
988 988 if report:
989 989 rp = report
990 990 else:
991 991 rp = self.ui.warn
992 992 vfsmap = {'plain': self.vfs} # root of .hg/
993 993 # we must avoid cyclic reference between repo and transaction.
994 994 reporef = weakref.ref(self)
995 995 def validate(tr):
996 996 """will run pre-closing hooks"""
997 997 reporef().hook('pretxnclose', throw=True,
998 998 txnname=desc, **tr.hookargs)
999 999 def releasefn(tr, success):
1000 1000 repo = reporef()
1001 1001 if success:
1002 1002 # this should be explicitly invoked here, because
1003 1003 # in-memory changes aren't written out when closing the
1004 1004 # transaction if tr.addfilegenerator (via
1005 1005 # dirstate.write or so) wasn't invoked while the
1006 1006 # transaction was running
1007 1007 repo.dirstate.write(None)
1008 1008 else:
1009 1009 # prevent in-memory changes from being written out at
1010 1010 # the end of outer wlock scope or so
1011 1011 repo.dirstate.invalidate()
1012 1012
1013 1013 # discard all changes (including ones already written
1014 1014 # out) in this transaction
1015 1015 repo.vfs.rename('journal.dirstate', 'dirstate')
1016 1016
1017 1017 repo.invalidate(clearfilecache=True)
1018 1018
1019 1019 tr = transaction.transaction(rp, self.svfs, vfsmap,
1020 1020 "journal",
1021 1021 "undo",
1022 1022 aftertrans(renames),
1023 1023 self.store.createmode,
1024 1024 validator=validate,
1025 1025 releasefn=releasefn)
1026 1026
1027 1027 tr.hookargs['txnid'] = txnid
1028 1028 # note: writing the fncache only during finalize means that the file is
1029 1029 # outdated when running hooks. As fncache is used for streaming clone,
1030 1030 # this is not expected to break anything that happens during the hooks.
1031 1031 tr.addfinalize('flush-fncache', self.store.write)
1032 1032 def txnclosehook(tr2):
1033 1033 """To be run if transaction is successful, will schedule a hook run
1034 1034 """
1035 1035 def hook():
1036 1036 reporef().hook('txnclose', throw=False, txnname=desc,
1037 1037 **tr2.hookargs)
1038 1038 reporef()._afterlock(hook)
1039 1039 tr.addfinalize('txnclose-hook', txnclosehook)
1040 1040 def txnaborthook(tr2):
1041 1041 """To be run if transaction is aborted
1042 1042 """
1043 1043 reporef().hook('txnabort', throw=False, txnname=desc,
1044 1044 **tr2.hookargs)
1045 1045 tr.addabort('txnabort-hook', txnaborthook)
1046 1046 # avoid eager cache invalidation. in-memory data should be identical
1047 1047 # to stored data if transaction has no error.
1048 1048 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1049 1049 self._transref = weakref.ref(tr)
1050 1050 return tr
1051 1051
1052 1052 def _journalfiles(self):
1053 1053 return ((self.svfs, 'journal'),
1054 1054 (self.vfs, 'journal.dirstate'),
1055 1055 (self.vfs, 'journal.branch'),
1056 1056 (self.vfs, 'journal.desc'),
1057 1057 (self.vfs, 'journal.bookmarks'),
1058 1058 (self.svfs, 'journal.phaseroots'))
1059 1059
1060 1060 def undofiles(self):
1061 1061 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1062 1062
1063 1063 def _writejournal(self, desc):
1064 1064 self.vfs.write("journal.dirstate",
1065 1065 self.vfs.tryread("dirstate"))
1066 1066 self.vfs.write("journal.branch",
1067 1067 encoding.fromlocal(self.dirstate.branch()))
1068 1068 self.vfs.write("journal.desc",
1069 1069 "%d\n%s\n" % (len(self), desc))
1070 1070 self.vfs.write("journal.bookmarks",
1071 1071 self.vfs.tryread("bookmarks"))
1072 1072 self.svfs.write("journal.phaseroots",
1073 1073 self.svfs.tryread("phaseroots"))
1074 1074
1075 1075 def recover(self):
1076 1076 lock = self.lock()
1077 1077 try:
1078 1078 if self.svfs.exists("journal"):
1079 1079 self.ui.status(_("rolling back interrupted transaction\n"))
1080 1080 vfsmap = {'': self.svfs,
1081 1081 'plain': self.vfs,}
1082 1082 transaction.rollback(self.svfs, vfsmap, "journal",
1083 1083 self.ui.warn)
1084 1084 self.invalidate()
1085 1085 return True
1086 1086 else:
1087 1087 self.ui.warn(_("no interrupted transaction available\n"))
1088 1088 return False
1089 1089 finally:
1090 1090 lock.release()
1091 1091
1092 1092 def rollback(self, dryrun=False, force=False):
1093 1093 wlock = lock = dsguard = None
1094 1094 try:
1095 1095 wlock = self.wlock()
1096 1096 lock = self.lock()
1097 1097 if self.svfs.exists("undo"):
1098 1098 dsguard = cmdutil.dirstateguard(self, 'rollback')
1099 1099
1100 1100 return self._rollback(dryrun, force, dsguard)
1101 1101 else:
1102 1102 self.ui.warn(_("no rollback information available\n"))
1103 1103 return 1
1104 1104 finally:
1105 1105 release(dsguard, lock, wlock)
1106 1106
1107 1107 @unfilteredmethod # Until we get smarter cache management
1108 1108 def _rollback(self, dryrun, force, dsguard):
1109 1109 ui = self.ui
1110 1110 try:
1111 1111 args = self.vfs.read('undo.desc').splitlines()
1112 1112 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1113 1113 if len(args) >= 3:
1114 1114 detail = args[2]
1115 1115 oldtip = oldlen - 1
1116 1116
1117 1117 if detail and ui.verbose:
1118 1118 msg = (_('repository tip rolled back to revision %s'
1119 1119 ' (undo %s: %s)\n')
1120 1120 % (oldtip, desc, detail))
1121 1121 else:
1122 1122 msg = (_('repository tip rolled back to revision %s'
1123 1123 ' (undo %s)\n')
1124 1124 % (oldtip, desc))
1125 1125 except IOError:
1126 1126 msg = _('rolling back unknown transaction\n')
1127 1127 desc = None
1128 1128
1129 1129 if not force and self['.'] != self['tip'] and desc == 'commit':
1130 1130 raise error.Abort(
1131 1131 _('rollback of last commit while not checked out '
1132 1132 'may lose data'), hint=_('use -f to force'))
1133 1133
1134 1134 ui.status(msg)
1135 1135 if dryrun:
1136 1136 return 0
1137 1137
1138 1138 parents = self.dirstate.parents()
1139 1139 self.destroying()
1140 1140 vfsmap = {'plain': self.vfs, '': self.svfs}
1141 1141 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1142 1142 if self.vfs.exists('undo.bookmarks'):
1143 1143 self.vfs.rename('undo.bookmarks', 'bookmarks')
1144 1144 if self.svfs.exists('undo.phaseroots'):
1145 1145 self.svfs.rename('undo.phaseroots', 'phaseroots')
1146 1146 self.invalidate()
1147 1147
1148 1148 parentgone = (parents[0] not in self.changelog.nodemap or
1149 1149 parents[1] not in self.changelog.nodemap)
1150 1150 if parentgone:
1151 1151 # prevent dirstateguard from overwriting already restored one
1152 1152 dsguard.close()
1153 1153
1154 1154 self.vfs.rename('undo.dirstate', 'dirstate')
1155 1155 try:
1156 1156 branch = self.vfs.read('undo.branch')
1157 1157 self.dirstate.setbranch(encoding.tolocal(branch))
1158 1158 except IOError:
1159 1159 ui.warn(_('named branch could not be reset: '
1160 1160 'current branch is still \'%s\'\n')
1161 1161 % self.dirstate.branch())
1162 1162
1163 1163 self.dirstate.invalidate()
1164 1164 parents = tuple([p.rev() for p in self.parents()])
1165 1165 if len(parents) > 1:
1166 1166 ui.status(_('working directory now based on '
1167 1167 'revisions %d and %d\n') % parents)
1168 1168 else:
1169 1169 ui.status(_('working directory now based on '
1170 1170 'revision %d\n') % parents)
1171 - ms = mergemod.mergestate(self)
1172 - ms.reset(self['.'].node())
1171 + mergemod.mergestate.clean(self, self['.'].node())
1173 1172
1174 1173 # TODO: if we know which new heads may result from this rollback, pass
1175 1174 # them to destroy(), which will prevent the branchhead cache from being
1176 1175 # invalidated.
1177 1176 self.destroyed()
1178 1177 return 0
1179 1178
1180 1179 def invalidatecaches(self):
1181 1180
1182 1181 if '_tagscache' in vars(self):
1183 1182 # can't use delattr on proxy
1184 1183 del self.__dict__['_tagscache']
1185 1184
1186 1185 self.unfiltered()._branchcaches.clear()
1187 1186 self.invalidatevolatilesets()
1188 1187
1189 1188 def invalidatevolatilesets(self):
1190 1189 self.filteredrevcache.clear()
1191 1190 obsolete.clearobscaches(self)
1192 1191
1193 1192 def invalidatedirstate(self):
1194 1193 '''Invalidates the dirstate, causing the next call to dirstate
1195 1194 to check if it was modified since the last time it was read,
1196 1195 rereading it if it has.
1197 1196
1198 1197 This is different from dirstate.invalidate() in that it doesn't
1199 1198 always reread the dirstate. Use dirstate.invalidate() if you want to
1200 1199 explicitly read the dirstate again (i.e. restoring it to a previous
1201 1200 known good state).'''
1202 1201 if hasunfilteredcache(self, 'dirstate'):
1203 1202 for k in self.dirstate._filecache:
1204 1203 try:
1205 1204 delattr(self.dirstate, k)
1206 1205 except AttributeError:
1207 1206 pass
1208 1207 delattr(self.unfiltered(), 'dirstate')
1209 1208
1210 1209 def invalidate(self, clearfilecache=False):
1211 1210 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1212 1211 for k in self._filecache.keys():
1213 1212 # dirstate is invalidated separately in invalidatedirstate()
1214 1213 if k == 'dirstate':
1215 1214 continue
1216 1215
1217 1216 if clearfilecache:
1218 1217 del self._filecache[k]
1219 1218 try:
1220 1219 delattr(unfiltered, k)
1221 1220 except AttributeError:
1222 1221 pass
1223 1222 self.invalidatecaches()
1224 1223 self.store.invalidatecaches()
1225 1224
1226 1225 def invalidateall(self):
1227 1226 '''Fully invalidates both store and non-store parts, causing the
1228 1227 subsequent operation to reread any outside changes.'''
1229 1228 # extension should hook this to invalidate its caches
1230 1229 self.invalidate()
1231 1230 self.invalidatedirstate()
1232 1231
1233 1232 def _refreshfilecachestats(self, tr):
1234 1233 """Reload stats of cached files so that they are flagged as valid"""
1235 1234 for k, ce in self._filecache.items():
1236 1235 if k == 'dirstate' or k not in self.__dict__:
1237 1236 continue
1238 1237 ce.refresh()
1239 1238
1240 1239 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1241 1240 inheritchecker=None, parentenvvar=None):
1242 1241 parentlock = None
1243 1242 # the contents of parentenvvar are used by the underlying lock to
1244 1243 # determine whether it can be inherited
1245 1244 if parentenvvar is not None:
1246 1245 parentlock = os.environ.get(parentenvvar)
1247 1246 try:
1248 1247 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1249 1248 acquirefn=acquirefn, desc=desc,
1250 1249 inheritchecker=inheritchecker,
1251 1250 parentlock=parentlock)
1252 1251 except error.LockHeld as inst:
1253 1252 if not wait:
1254 1253 raise
1255 1254 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1256 1255 (desc, inst.locker))
1257 1256 # default to 600 seconds timeout
1258 1257 l = lockmod.lock(vfs, lockname,
1259 1258 int(self.ui.config("ui", "timeout", "600")),
1260 1259 releasefn=releasefn, acquirefn=acquirefn,
1261 1260 desc=desc)
1262 1261 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1263 1262 return l
1264 1263
1265 1264 def _afterlock(self, callback):
1266 1265 """add a callback to be run when the repository is fully unlocked
1267 1266
1268 1267 The callback will be executed when the outermost lock is released
1269 1268 (with wlock being higher level than 'lock')."""
1270 1269 for ref in (self._wlockref, self._lockref):
1271 1270 l = ref and ref()
1272 1271 if l and l.held:
1273 1272 l.postrelease.append(callback)
1274 1273 break
1275 1274 else: # no lock has been found.
1276 1275 callback()
1277 1276
1278 1277 def lock(self, wait=True):
1279 1278 '''Lock the repository store (.hg/store) and return a weak reference
1280 1279 to the lock. Use this before modifying the store (e.g. committing or
1281 1280 stripping). If you are opening a transaction, get a lock as well.
1282 1281
1283 1282 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1284 1283 'wlock' first to avoid a dead-lock hazard.'''
1285 1284 l = self._lockref and self._lockref()
1286 1285 if l is not None and l.held:
1287 1286 l.lock()
1288 1287 return l
1289 1288
1290 1289 l = self._lock(self.svfs, "lock", wait, None,
1291 1290 self.invalidate, _('repository %s') % self.origroot)
1292 1291 self._lockref = weakref.ref(l)
1293 1292 return l
1294 1293
1295 1294 def _wlockchecktransaction(self):
1296 1295 if self.currenttransaction() is not None:
1297 1296 raise error.LockInheritanceContractViolation(
1298 1297 'wlock cannot be inherited in the middle of a transaction')
1299 1298
1300 1299 def wlock(self, wait=True):
1301 1300 '''Lock the non-store parts of the repository (everything under
1302 1301 .hg except .hg/store) and return a weak reference to the lock.
1303 1302
1304 1303 Use this before modifying files in .hg.
1305 1304
1306 1305 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1307 1306 'wlock' first to avoid a dead-lock hazard.'''
1308 1307 l = self._wlockref and self._wlockref()
1309 1308 if l is not None and l.held:
1310 1309 l.lock()
1311 1310 return l
1312 1311
1313 1312 # We do not need to check for non-waiting lock acquisition. Such
1314 1313 # acquisition would not cause a dead-lock, as it would just fail.
1315 1314 if wait and (self.ui.configbool('devel', 'all-warnings')
1316 1315 or self.ui.configbool('devel', 'check-locks')):
1317 1316 l = self._lockref and self._lockref()
1318 1317 if l is not None and l.held:
1319 1318 self.ui.develwarn('"wlock" acquired after "lock"')
1320 1319
1321 1320 def unlock():
1322 1321 if self.dirstate.pendingparentchange():
1323 1322 self.dirstate.invalidate()
1324 1323 else:
1325 1324 self.dirstate.write(None)
1326 1325
1327 1326 self._filecache['dirstate'].refresh()
1328 1327
1329 1328 l = self._lock(self.vfs, "wlock", wait, unlock,
1330 1329 self.invalidatedirstate, _('working directory of %s') %
1331 1330 self.origroot,
1332 1331 inheritchecker=self._wlockchecktransaction,
1333 1332 parentenvvar='HG_WLOCK_LOCKER')
1334 1333 self._wlockref = weakref.ref(l)
1335 1334 return l
1336 1335
1337 1336 def _currentlock(self, lockref):
1338 1337 """Returns the lock if it's held, or None if it's not."""
1339 1338 if lockref is None:
1340 1339 return None
1341 1340 l = lockref()
1342 1341 if l is None or not l.held:
1343 1342 return None
1344 1343 return l
1345 1344
1346 1345 def currentwlock(self):
1347 1346 """Returns the wlock if it's held, or None if it's not."""
1348 1347 return self._currentlock(self._wlockref)
1349 1348
1350 1349 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1351 1350 """
1352 1351 commit an individual file as part of a larger transaction
1353 1352 """
1354 1353
1355 1354 fname = fctx.path()
1356 1355 fparent1 = manifest1.get(fname, nullid)
1357 1356 fparent2 = manifest2.get(fname, nullid)
1358 1357 if isinstance(fctx, context.filectx):
1359 1358 node = fctx.filenode()
1360 1359 if node in [fparent1, fparent2]:
1361 1360 self.ui.debug('reusing %s filelog entry\n' % fname)
1362 1361 return node
1363 1362
1364 1363 flog = self.file(fname)
1365 1364 meta = {}
1366 1365 copy = fctx.renamed()
1367 1366 if copy and copy[0] != fname:
1368 1367 # Mark the new revision of this file as a copy of another
1369 1368 # file. This copy data will effectively act as a parent
1370 1369 # of this new revision. If this is a merge, the first
1371 1370 # parent will be the nullid (meaning "look up the copy data")
1372 1371 # and the second one will be the other parent. For example:
1373 1372 #
1374 1373 # 0 --- 1 --- 3 rev1 changes file foo
1375 1374 # \ / rev2 renames foo to bar and changes it
1376 1375 # \- 2 -/ rev3 should have bar with all changes and
1377 1376 # should record that bar descends from
1378 1377 # bar in rev2 and foo in rev1
1379 1378 #
1380 1379 # this allows this merge to succeed:
1381 1380 #
1382 1381 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1383 1382 # \ / merging rev3 and rev4 should use bar@rev2
1384 1383 # \- 2 --- 4 as the merge base
1385 1384 #
1386 1385
1387 1386 cfname = copy[0]
1388 1387 crev = manifest1.get(cfname)
1389 1388 newfparent = fparent2
1390 1389
1391 1390 if manifest2: # branch merge
1392 1391 if fparent2 == nullid or crev is None: # copied on remote side
1393 1392 if cfname in manifest2:
1394 1393 crev = manifest2[cfname]
1395 1394 newfparent = fparent1
1396 1395
1397 1396 # Here, we used to search backwards through history to try to find
1398 1397 # where the file copy came from if the source of a copy was not in
1399 1398 # the parent directory. However, this doesn't actually make sense to
1400 1399 # do (what does a copy from something not in your working copy even
1401 1400 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1402 1401 # the user that copy information was dropped, so if they didn't
1403 1402 # expect this outcome it can be fixed, but this is the correct
1404 1403 # behavior in this circumstance.
1405 1404
1406 1405 if crev:
1407 1406 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1408 1407 meta["copy"] = cfname
1409 1408 meta["copyrev"] = hex(crev)
1410 1409 fparent1, fparent2 = nullid, newfparent
1411 1410 else:
1412 1411 self.ui.warn(_("warning: can't find ancestor for '%s' "
1413 1412 "copied from '%s'!\n") % (fname, cfname))
1414 1413
1415 1414 elif fparent1 == nullid:
1416 1415 fparent1, fparent2 = fparent2, nullid
1417 1416 elif fparent2 != nullid:
1418 1417 # is one parent an ancestor of the other?
1419 1418 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1420 1419 if fparent1 in fparentancestors:
1421 1420 fparent1, fparent2 = fparent2, nullid
1422 1421 elif fparent2 in fparentancestors:
1423 1422 fparent2 = nullid
1424 1423
1425 1424 # is the file changed?
1426 1425 text = fctx.data()
1427 1426 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1428 1427 changelist.append(fname)
1429 1428 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1430 1429 # are just the flags changed during merge?
1431 1430 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1432 1431 changelist.append(fname)
1433 1432
1434 1433 return fparent1
1435 1434
1436 1435 @unfilteredmethod
1437 1436 def commit(self, text="", user=None, date=None, match=None, force=False,
1438 1437 editor=False, extra=None):
1439 1438 """Add a new revision to current repository.
1440 1439
1441 1440 Revision information is gathered from the working directory,
1442 1441 match can be used to filter the committed files. If editor is
1443 1442 supplied, it is called to get a commit message.
1444 1443 """
1445 1444 if extra is None:
1446 1445 extra = {}
1447 1446
1448 1447 def fail(f, msg):
1449 1448 raise error.Abort('%s: %s' % (f, msg))
1450 1449
1451 1450 if not match:
1452 1451 match = matchmod.always(self.root, '')
1453 1452
1454 1453 if not force:
1455 1454 vdirs = []
1456 1455 match.explicitdir = vdirs.append
1457 1456 match.bad = fail
1458 1457
1459 1458 wlock = self.wlock()
1460 1459 try:
1461 1460 wctx = self[None]
1462 1461 merge = len(wctx.parents()) > 1
1463 1462
1464 1463 if not force and merge and match.ispartial():
1465 1464 raise error.Abort(_('cannot partially commit a merge '
1466 1465 '(do not specify files or patterns)'))
1467 1466
1468 1467 status = self.status(match=match, clean=force)
1469 1468 if force:
1470 1469 status.modified.extend(status.clean) # mq may commit clean files
1471 1470
            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            unresolved, driverresolved = False, False
            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms:
                    if ms[f] == 'u':
                        unresolved = True
                    elif ms[f] == 'd':
                        driverresolved = True

            if unresolved:
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if driverresolved or ms.mdstate() != 's':
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

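    # Illustrative usage sketch (editorial addition, not upstream code):
    # `repo' is assumed to be an existing localrepository, and the match
    # object restricts the commit to a single file.
    #
    #   m = matchmod.match(repo.root, '', ['path:foo.py'])
    #   node = repo.commit(text='tweak foo', user='alice', match=m)
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
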
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

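    # Illustrative sketch (editorial addition): commit() feeds commitctx() a
    # workingcommitctx, but any committable context works, e.g. an in-memory
    # one; the exact context.memctx/memfilectx signatures below are an
    # assumption, not something this hunk shows.
    #
    #   def getfctx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #   mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                         'log message', ['a.txt'], getfctx, user='alice')
    #   node = repo.commitctx(mctx)
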
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

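    # Editorial sketch: history-rewriting code is expected to bracket the
    # destructive step with these two calls (simplified; not a verbatim
    # quote of repair.strip()):
    #
    #   repo.destroying()
    #   # ... truncate revlogs / strip changesets here ...
    #   repo.destroyed()
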
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

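    # Illustrative sketch (editorial addition): list tracked .py files of the
    # working directory (node=None) or of a given changeset.
    #
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m):              # working directory
    #       repo.ui.write(f + '\n')
    #   files = list(repo.walk(m, node='tip'))
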
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

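    # Illustrative sketch (editorial addition): the returned status object
    # exposes lists such as modified/added/removed/deleted/unknown/clean.
    #
    #   st = repo.status(unknown=True)
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
    #   for f in st.unknown:
    #       repo.ui.write('? %s\n' % f)
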
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

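    # Illustrative sketch (editorial addition): fetch the heads of a named
    # branch, including closed ones; the newest head comes first.
    #
    #   heads = repo.branchheads('stable', closed=True)
    #   if heads:
    #       newest = heads[0]
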
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

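    # Editorial note: for each (top, bottom) pair this samples the
    # first-parent chain below `top' at exponentially growing distances
    # (1, 2, 4, 8, ... steps), which is what the legacy discovery protocol
    # expects.
    #
    #   [samples] = repo.between([(tipnode, repo.changelog.node(0))])
    #   # samples[0] is 1 step below tipnode, samples[1] 2 steps, ...
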
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

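    # Illustrative sketch (editorial addition): an extension would usually
    # hook in via extensions.wrapfunction; the 'myext' config knob below is
    # hypothetical.
    #
    #   def mycheckpush(orig, repo, pushop):
    #       if repo.ui.configbool('myext', 'blockpush'):
    #           raise error.Abort('pushing is disabled here')
    #       return orig(repo, pushop)
    #   extensions.wrapfunction(localrepository, 'checkpush', mycheckpush)
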
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

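    # Illustrative sketch (editorial addition): util.hooks exposes
    # add(source, hook), so an extension can register a pre-push check;
    # the names below are hypothetical.
    #
    #   def checkoutgoing(repo, remote, outgoing):
    #       if len(outgoing.missing) > 100:
    #           raise error.Abort('refusing to push >100 changesets')
    #   repo.prepushoutgoinghooks.add('myext', checkoutgoing)
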
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
        # internal config: ui.quietbookmarkmove
        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            pullop = exchange.pull(self, remote, heads,
                                   streamclonerequested=stream)
            return pullop.cgresult
        finally:
            self.ui.restoreconfig(quiet)

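    # Illustrative sketch (editorial addition, hedged): callers usually build
    # `remote' with hg.peer(), assuming the standard `hg' module is imported.
    #
    #   other = hg.peer(repo.ui, {}, 'https://example.com/repo')
    #   cgresult = repo.clone(other, stream=True)
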
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

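    # Illustrative sketch (editorial addition): moving a bookmark through the
    # pushkey protocol; old/new are hex nodes, with '' meaning "absent".
    #
    #   ok = repo.pushkey('bookmarks', 'feature', '', newhexnode)
    #   if not ok:
    #       repo.ui.warn('bookmark update refused\n')
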
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

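    # Illustrative sketch (editorial addition): listkeys returns a plain
    # dict, e.g. bookmark name -> hex node.
    #
    #   for name, hexnode in sorted(repo.listkeys('bookmarks').items()):
    #       repo.ui.write('%s %s\n' % (name, hexnode))
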
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

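    # Editorial example (hedged): the message lands in .hg/last-message.txt
    # and the returned path is relative to the working directory root.
    #
    #   path = repo.savecommitmessage('WIP: not ready yet\n')
    #   # path is now something like '.hg/last-message.txt'
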
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

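# Illustrative sketch (editorial addition): the transaction machinery calls
# the returned closure after a successful close, renaming journal files to
# their undo counterparts, roughly:
#
#   onclose = aftertrans([(repo.svfs, 'journal', 'undo')])
#   onclose()   # best-effort: missing journal files are skipped
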
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

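# Worked example (editorial addition): only the first 'journal' in the base
# name is rewritten.
#
#   undoname('.hg/store/journal')              -> '.hg/store/undo'
#   undoname('.hg/store/journal.backupfiles')  -> '.hg/store/undo.backupfiles'
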
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True