obsolete: add a "format.obsstore-version" config option...
Pierre-Yves David
r22852:e994b034 default
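A minimal usage sketch (not part of this changeset), assuming an hgrc that sets the new option; the variable names and the keyword argument mirror the hunk below:

    # hgrc (assumed example):
    #   [format]
    #   obsstore-version = 0
    #
    # Reading the option the same way the patched obsstore() property does:
    defaultformat = repo.ui.configint('format', 'obsstore-version', None)  # 0 here, None if unset
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat        # forwarded to the obsstore constructor
    store = obsolete.obsstore(repo.sopener, **kwargs)  # class default is used when the option is unset

Passing the value through **kwargs means the obsstore class keeps control of its own default format whenever the option is not set, as the in-line comments in the hunk note.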
@@ -1,1781 +1,1787
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
24 24 class repofilecache(filecache):
25 25 """All filecache usage on repo is done for logic that should be unfiltered
26 26 """
27 27
28 28 def __get__(self, repo, type=None):
29 29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 30 def __set__(self, repo, value):
31 31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 32 def __delete__(self, repo):
33 33 return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
35 35 class storecache(repofilecache):
36 36 """filecache for files in the store"""
37 37 def join(self, obj, fname):
38 38 return obj.sjoin(fname)
39 39
40 40 class unfilteredpropertycache(propertycache):
41 41 """propertycache that applies to the unfiltered repo only"""
42 42
43 43 def __get__(self, repo, type=None):
44 44 unfi = repo.unfiltered()
45 45 if unfi is repo:
46 46 return super(unfilteredpropertycache, self).__get__(unfi)
47 47 return getattr(unfi, self.name)
48 48
49 49 class filteredpropertycache(propertycache):
50 50 """propertycache that must take filtering into account"""
51 51
52 52 def cachevalue(self, obj, value):
53 53 object.__setattr__(obj, self.name, value)
54 54
55 55
56 56 def hasunfilteredcache(repo, name):
57 57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 58 return name in vars(repo.unfiltered())
59 59
60 60 def unfilteredmethod(orig):
61 61 """decorate a method that always needs to be run on the unfiltered version"""
62 62 def wrapper(repo, *args, **kwargs):
63 63 return orig(repo.unfiltered(), *args, **kwargs)
64 64 return wrapper
65 65
66 66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 67 'unbundle'))
68 68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
70 70 class localpeer(peer.peerrepository):
71 71 '''peer for a local repo; reflects only the most recent API'''
72 72
73 73 def __init__(self, repo, caps=moderncaps):
74 74 peer.peerrepository.__init__(self)
75 75 self._repo = repo.filtered('served')
76 76 self.ui = repo.ui
77 77 self._caps = repo._restrictcapabilities(caps)
78 78 self.requirements = repo.requirements
79 79 self.supportedformats = repo.supportedformats
80 80
81 81 def close(self):
82 82 self._repo.close()
83 83
84 84 def _capabilities(self):
85 85 return self._caps
86 86
87 87 def local(self):
88 88 return self._repo
89 89
90 90 def canpush(self):
91 91 return True
92 92
93 93 def url(self):
94 94 return self._repo.url()
95 95
96 96 def lookup(self, key):
97 97 return self._repo.lookup(key)
98 98
99 99 def branchmap(self):
100 100 return self._repo.branchmap()
101 101
102 102 def heads(self):
103 103 return self._repo.heads()
104 104
105 105 def known(self, nodes):
106 106 return self._repo.known(nodes)
107 107
108 108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 109 format='HG10', **kwargs):
110 110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 111 common=common, bundlecaps=bundlecaps, **kwargs)
112 112 if bundlecaps is not None and 'HG2X' in bundlecaps:
113 113 # When requesting a bundle2, getbundle returns a stream to make the
114 114 # wire level function happier. We need to build a proper object
115 115 # from it in local peer.
116 116 cg = bundle2.unbundle20(self.ui, cg)
117 117 return cg
118 118
119 119 # TODO We might want to move the next two calls into legacypeer and add
120 120 # unbundle instead.
121 121
122 122 def unbundle(self, cg, heads, url):
123 123 """apply a bundle on a repo
124 124
125 125 This function handles the repo locking itself."""
126 126 try:
127 127 cg = exchange.readbundle(self.ui, cg, None)
128 128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 129 if util.safehasattr(ret, 'getchunks'):
130 130 # This is a bundle20 object, turn it into an unbundler.
131 131 # This little dance should be dropped eventually when the API
132 132 # is finally improved.
133 133 stream = util.chunkbuffer(ret.getchunks())
134 134 ret = bundle2.unbundle20(self.ui, stream)
135 135 return ret
136 136 except error.PushRaced, exc:
137 137 raise error.ResponseError(_('push failed:'), str(exc))
138 138
139 139 def lock(self):
140 140 return self._repo.lock()
141 141
142 142 def addchangegroup(self, cg, source, url):
143 143 return changegroup.addchangegroup(self._repo, cg, source, url)
144 144
145 145 def pushkey(self, namespace, key, old, new):
146 146 return self._repo.pushkey(namespace, key, old, new)
147 147
148 148 def listkeys(self, namespace):
149 149 return self._repo.listkeys(namespace)
150 150
151 151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 152 '''used to test argument passing over the wire'''
153 153 return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
155 155 class locallegacypeer(localpeer):
156 156 '''peer extension which implements legacy methods too; used for tests with
157 157 restricted capabilities'''
158 158
159 159 def __init__(self, repo):
160 160 localpeer.__init__(self, repo, caps=legacycaps)
161 161
162 162 def branches(self, nodes):
163 163 return self._repo.branches(nodes)
164 164
165 165 def between(self, pairs):
166 166 return self._repo.between(pairs)
167 167
168 168 def changegroup(self, basenodes, source):
169 169 return changegroup.changegroup(self._repo, basenodes, source)
170 170
171 171 def changegroupsubset(self, bases, heads, source):
172 172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
176 176 supportedformats = set(('revlogv1', 'generaldelta'))
177 177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 178 'dotencode'))
179 179 openerreqs = set(('revlogv1', 'generaldelta'))
180 180 requirements = ['revlogv1']
181 181 filtername = None
182 182
183 183 # a list of (ui, featureset) functions.
184 184 # only functions defined in module of enabled extensions are invoked
185 185 featuresetupfuncs = set()
186 186
187 187 def _baserequirements(self, create):
188 188 return self.requirements[:]
189 189
190 190 def __init__(self, baseui, path=None, create=False):
191 191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 192 self.wopener = self.wvfs
193 193 self.root = self.wvfs.base
194 194 self.path = self.wvfs.join(".hg")
195 195 self.origroot = path
196 196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 197 self.vfs = scmutil.vfs(self.path)
198 198 self.opener = self.vfs
199 199 self.baseui = baseui
200 200 self.ui = baseui.copy()
201 201 self.ui.copy = baseui.copy # prevent copying repo configuration
202 202 # A list of callbacks to shape the phase if no data were found.
203 203 # Callbacks are in the form: func(repo, roots) --> processed root.
204 204 # This list is to be filled by extensions during repo setup
205 205 self._phasedefaults = []
206 206 try:
207 207 self.ui.readconfig(self.join("hgrc"), self.root)
208 208 extensions.loadall(self.ui)
209 209 except IOError:
210 210 pass
211 211
212 212 if self.featuresetupfuncs:
213 213 self.supported = set(self._basesupported) # use private copy
214 214 extmods = set(m.__name__ for n, m
215 215 in extensions.extensions(self.ui))
216 216 for setupfunc in self.featuresetupfuncs:
217 217 if setupfunc.__module__ in extmods:
218 218 setupfunc(self.ui, self.supported)
219 219 else:
220 220 self.supported = self._basesupported
221 221
222 222 if not self.vfs.isdir():
223 223 if create:
224 224 if not self.wvfs.exists():
225 225 self.wvfs.makedirs()
226 226 self.vfs.makedir(notindexed=True)
227 227 requirements = self._baserequirements(create)
228 228 if self.ui.configbool('format', 'usestore', True):
229 229 self.vfs.mkdir("store")
230 230 requirements.append("store")
231 231 if self.ui.configbool('format', 'usefncache', True):
232 232 requirements.append("fncache")
233 233 if self.ui.configbool('format', 'dotencode', True):
234 234 requirements.append('dotencode')
235 235 # create an invalid changelog
236 236 self.vfs.append(
237 237 "00changelog.i",
238 238 '\0\0\0\2' # represents revlogv2
239 239 ' dummy changelog to prevent using the old repo layout'
240 240 )
241 241 if self.ui.configbool('format', 'generaldelta', False):
242 242 requirements.append("generaldelta")
243 243 requirements = set(requirements)
244 244 else:
245 245 raise error.RepoError(_("repository %s not found") % path)
246 246 elif create:
247 247 raise error.RepoError(_("repository %s already exists") % path)
248 248 else:
249 249 try:
250 250 requirements = scmutil.readrequires(self.vfs, self.supported)
251 251 except IOError, inst:
252 252 if inst.errno != errno.ENOENT:
253 253 raise
254 254 requirements = set()
255 255
256 256 self.sharedpath = self.path
257 257 try:
258 258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
259 259 realpath=True)
260 260 s = vfs.base
261 261 if not vfs.exists():
262 262 raise error.RepoError(
263 263 _('.hg/sharedpath points to nonexistent directory %s') % s)
264 264 self.sharedpath = s
265 265 except IOError, inst:
266 266 if inst.errno != errno.ENOENT:
267 267 raise
268 268
269 269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
270 270 self.spath = self.store.path
271 271 self.svfs = self.store.vfs
272 272 self.sopener = self.svfs
273 273 self.sjoin = self.store.join
274 274 self.vfs.createmode = self.store.createmode
275 275 self._applyrequirements(requirements)
276 276 if create:
277 277 self._writerequirements()
278 278
279 279
280 280 self._branchcaches = {}
281 281 self.filterpats = {}
282 282 self._datafilters = {}
283 283 self._transref = self._lockref = self._wlockref = None
284 284
285 285 # A cache for various files under .hg/ that tracks file changes,
286 286 # (used by the filecache decorator)
287 287 #
288 288 # Maps a property name to its util.filecacheentry
289 289 self._filecache = {}
290 290
291 291 # hold sets of revision to be filtered
292 292 # should be cleared when something might have changed the filter value:
293 293 # - new changesets,
294 294 # - phase change,
295 295 # - new obsolescence marker,
296 296 # - working directory parent change,
297 297 # - bookmark changes
298 298 self.filteredrevcache = {}
299 299
300 300 def close(self):
301 301 pass
302 302
303 303 def _restrictcapabilities(self, caps):
304 304 # bundle2 is not ready for prime time, drop it unless explicitly
305 305 # required by the tests (or some brave tester)
306 306 if self.ui.configbool('experimental', 'bundle2-exp', False):
307 307 caps = set(caps)
308 308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
309 309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
310 310 return caps
311 311
312 312 def _applyrequirements(self, requirements):
313 313 self.requirements = requirements
314 314 self.sopener.options = dict((r, 1) for r in requirements
315 315 if r in self.openerreqs)
316 316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
317 317 if chunkcachesize is not None:
318 318 self.sopener.options['chunkcachesize'] = chunkcachesize
319 319
320 320 def _writerequirements(self):
321 321 reqfile = self.opener("requires", "w")
322 322 for r in sorted(self.requirements):
323 323 reqfile.write("%s\n" % r)
324 324 reqfile.close()
325 325
326 326 def _checknested(self, path):
327 327 """Determine if path is a legal nested repository."""
328 328 if not path.startswith(self.root):
329 329 return False
330 330 subpath = path[len(self.root) + 1:]
331 331 normsubpath = util.pconvert(subpath)
332 332
333 333 # XXX: Checking against the current working copy is wrong in
334 334 # the sense that it can reject things like
335 335 #
336 336 # $ hg cat -r 10 sub/x.txt
337 337 #
338 338 # if sub/ is no longer a subrepository in the working copy
339 339 # parent revision.
340 340 #
341 341 # However, it can of course also allow things that would have
342 342 # been rejected before, such as the above cat command if sub/
343 343 # is a subrepository now, but was a normal directory before.
344 344 # The old path auditor would have rejected by mistake since it
345 345 # panics when it sees sub/.hg/.
346 346 #
347 347 # All in all, checking against the working copy seems sensible
348 348 # since we want to prevent access to nested repositories on
349 349 # the filesystem *now*.
350 350 ctx = self[None]
351 351 parts = util.splitpath(subpath)
352 352 while parts:
353 353 prefix = '/'.join(parts)
354 354 if prefix in ctx.substate:
355 355 if prefix == normsubpath:
356 356 return True
357 357 else:
358 358 sub = ctx.sub(prefix)
359 359 return sub.checknested(subpath[len(prefix) + 1:])
360 360 else:
361 361 parts.pop()
362 362 return False
363 363
364 364 def peer(self):
365 365 return localpeer(self) # not cached to avoid reference cycle
366 366
367 367 def unfiltered(self):
368 368 """Return unfiltered version of the repository
369 369
370 370 Intended to be overwritten by filtered repo."""
371 371 return self
372 372
373 373 def filtered(self, name):
374 374 """Return a filtered version of a repository"""
375 375 # build a new class with the mixin and the current class
376 376 # (possibly subclass of the repo)
377 377 class proxycls(repoview.repoview, self.unfiltered().__class__):
378 378 pass
379 379 return proxycls(self, name)
380 380
381 381 @repofilecache('bookmarks')
382 382 def _bookmarks(self):
383 383 return bookmarks.bmstore(self)
384 384
385 385 @repofilecache('bookmarks.current')
386 386 def _bookmarkcurrent(self):
387 387 return bookmarks.readcurrent(self)
388 388
389 389 def bookmarkheads(self, bookmark):
390 390 name = bookmark.split('@', 1)[0]
391 391 heads = []
392 392 for mark, n in self._bookmarks.iteritems():
393 393 if mark.split('@', 1)[0] == name:
394 394 heads.append(n)
395 395 return heads
396 396
397 397 @storecache('phaseroots')
398 398 def _phasecache(self):
399 399 return phases.phasecache(self, self._phasedefaults)
400 400
401 401 @storecache('obsstore')
402 402 def obsstore(self):
403 store = obsolete.obsstore(self.sopener)
403 # read default format for new obsstore.
404 defaultformat = self.ui.configint('format', 'obsstore-version', None)
405 # rely on obsstore class default when possible.
406 kwargs = {}
407 if defaultformat is not None:
408 kwargs['defaultformat'] = defaultformat
409 store = obsolete.obsstore(self.sopener, **kwargs)
404 410 if store and not obsolete._enabled:
405 411 # message is rare enough to not be translated
406 412 msg = 'obsolete feature not enabled but %i markers found!\n'
407 413 self.ui.warn(msg % len(list(store)))
408 414 return store
409 415
410 416 @storecache('00changelog.i')
411 417 def changelog(self):
412 418 c = changelog.changelog(self.sopener)
413 419 if 'HG_PENDING' in os.environ:
414 420 p = os.environ['HG_PENDING']
415 421 if p.startswith(self.root):
416 422 c.readpending('00changelog.i.a')
417 423 return c
418 424
419 425 @storecache('00manifest.i')
420 426 def manifest(self):
421 427 return manifest.manifest(self.sopener)
422 428
423 429 @repofilecache('dirstate')
424 430 def dirstate(self):
425 431 warned = [0]
426 432 def validate(node):
427 433 try:
428 434 self.changelog.rev(node)
429 435 return node
430 436 except error.LookupError:
431 437 if not warned[0]:
432 438 warned[0] = True
433 439 self.ui.warn(_("warning: ignoring unknown"
434 440 " working parent %s!\n") % short(node))
435 441 return nullid
436 442
437 443 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
438 444
439 445 def __getitem__(self, changeid):
440 446 if changeid is None:
441 447 return context.workingctx(self)
442 448 return context.changectx(self, changeid)
443 449
444 450 def __contains__(self, changeid):
445 451 try:
446 452 return bool(self.lookup(changeid))
447 453 except error.RepoLookupError:
448 454 return False
449 455
450 456 def __nonzero__(self):
451 457 return True
452 458
453 459 def __len__(self):
454 460 return len(self.changelog)
455 461
456 462 def __iter__(self):
457 463 return iter(self.changelog)
458 464
459 465 def revs(self, expr, *args):
460 466 '''Return a list of revisions matching the given revset'''
461 467 expr = revset.formatspec(expr, *args)
462 468 m = revset.match(None, expr)
463 469 return m(self, revset.spanset(self))
464 470
465 471 def set(self, expr, *args):
466 472 '''
467 473 Yield a context for each matching revision, after doing arg
468 474 replacement via revset.formatspec
469 475 '''
470 476 for r in self.revs(expr, *args):
471 477 yield self[r]
472 478
473 479 def url(self):
474 480 return 'file:' + self.root
475 481
476 482 def hook(self, name, throw=False, **args):
477 483 """Call a hook, passing this repo instance.
478 484
479 485 This is a convenience method to aid invoking hooks. Extensions likely
480 486 won't call this unless they have registered a custom hook or are
481 487 replacing code that is expected to call a hook.
482 488 """
483 489 return hook.hook(self.ui, self, name, throw, **args)
484 490
485 491 @unfilteredmethod
486 492 def _tag(self, names, node, message, local, user, date, extra={},
487 493 editor=False):
488 494 if isinstance(names, str):
489 495 names = (names,)
490 496
491 497 branches = self.branchmap()
492 498 for name in names:
493 499 self.hook('pretag', throw=True, node=hex(node), tag=name,
494 500 local=local)
495 501 if name in branches:
496 502 self.ui.warn(_("warning: tag %s conflicts with existing"
497 503 " branch name\n") % name)
498 504
499 505 def writetags(fp, names, munge, prevtags):
500 506 fp.seek(0, 2)
501 507 if prevtags and prevtags[-1] != '\n':
502 508 fp.write('\n')
503 509 for name in names:
504 510 m = munge and munge(name) or name
505 511 if (self._tagscache.tagtypes and
506 512 name in self._tagscache.tagtypes):
507 513 old = self.tags().get(name, nullid)
508 514 fp.write('%s %s\n' % (hex(old), m))
509 515 fp.write('%s %s\n' % (hex(node), m))
510 516 fp.close()
511 517
512 518 prevtags = ''
513 519 if local:
514 520 try:
515 521 fp = self.opener('localtags', 'r+')
516 522 except IOError:
517 523 fp = self.opener('localtags', 'a')
518 524 else:
519 525 prevtags = fp.read()
520 526
521 527 # local tags are stored in the current charset
522 528 writetags(fp, names, None, prevtags)
523 529 for name in names:
524 530 self.hook('tag', node=hex(node), tag=name, local=local)
525 531 return
526 532
527 533 try:
528 534 fp = self.wfile('.hgtags', 'rb+')
529 535 except IOError, e:
530 536 if e.errno != errno.ENOENT:
531 537 raise
532 538 fp = self.wfile('.hgtags', 'ab')
533 539 else:
534 540 prevtags = fp.read()
535 541
536 542 # committed tags are stored in UTF-8
537 543 writetags(fp, names, encoding.fromlocal, prevtags)
538 544
539 545 fp.close()
540 546
541 547 self.invalidatecaches()
542 548
543 549 if '.hgtags' not in self.dirstate:
544 550 self[None].add(['.hgtags'])
545 551
546 552 m = matchmod.exact(self.root, '', ['.hgtags'])
547 553 tagnode = self.commit(message, user, date, extra=extra, match=m,
548 554 editor=editor)
549 555
550 556 for name in names:
551 557 self.hook('tag', node=hex(node), tag=name, local=local)
552 558
553 559 return tagnode
554 560
555 561 def tag(self, names, node, message, local, user, date, editor=False):
556 562 '''tag a revision with one or more symbolic names.
557 563
558 564 names is a list of strings or, when adding a single tag, names may be a
559 565 string.
560 566
561 567 if local is True, the tags are stored in a per-repository file.
562 568 otherwise, they are stored in the .hgtags file, and a new
563 569 changeset is committed with the change.
564 570
565 571 keyword arguments:
566 572
567 573 local: whether to store tags in non-version-controlled file
568 574 (default False)
569 575
570 576 message: commit message to use if committing
571 577
572 578 user: name of user to use if committing
573 579
574 580 date: date tuple to use if committing'''
575 581
576 582 if not local:
577 583 m = matchmod.exact(self.root, '', ['.hgtags'])
578 584 if util.any(self.status(match=m, unknown=True, ignored=True)):
579 585 raise util.Abort(_('working copy of .hgtags is changed'),
580 586 hint=_('please commit .hgtags manually'))
581 587
582 588 self.tags() # instantiate the cache
583 589 self._tag(names, node, message, local, user, date, editor=editor)
584 590
585 591 @filteredpropertycache
586 592 def _tagscache(self):
587 593 '''Returns a tagscache object that contains various tags related
588 594 caches.'''
589 595
590 596 # This simplifies its cache management by having one decorated
591 597 # function (this one) and the rest simply fetch things from it.
592 598 class tagscache(object):
593 599 def __init__(self):
594 600 # These two define the set of tags for this repository. tags
595 601 # maps tag name to node; tagtypes maps tag name to 'global' or
596 602 # 'local'. (Global tags are defined by .hgtags across all
597 603 # heads, and local tags are defined in .hg/localtags.)
598 604 # They constitute the in-memory cache of tags.
599 605 self.tags = self.tagtypes = None
600 606
601 607 self.nodetagscache = self.tagslist = None
602 608
603 609 cache = tagscache()
604 610 cache.tags, cache.tagtypes = self._findtags()
605 611
606 612 return cache
607 613
608 614 def tags(self):
609 615 '''return a mapping of tag to node'''
610 616 t = {}
611 617 if self.changelog.filteredrevs:
612 618 tags, tt = self._findtags()
613 619 else:
614 620 tags = self._tagscache.tags
615 621 for k, v in tags.iteritems():
616 622 try:
617 623 # ignore tags to unknown nodes
618 624 self.changelog.rev(v)
619 625 t[k] = v
620 626 except (error.LookupError, ValueError):
621 627 pass
622 628 return t
623 629
624 630 def _findtags(self):
625 631 '''Do the hard work of finding tags. Return a pair of dicts
626 632 (tags, tagtypes) where tags maps tag name to node, and tagtypes
627 633 maps tag name to a string like \'global\' or \'local\'.
628 634 Subclasses or extensions are free to add their own tags, but
629 635 should be aware that the returned dicts will be retained for the
630 636 duration of the localrepo object.'''
631 637
632 638 # XXX what tagtype should subclasses/extensions use? Currently
633 639 # mq and bookmarks add tags, but do not set the tagtype at all.
634 640 # Should each extension invent its own tag type? Should there
635 641 # be one tagtype for all such "virtual" tags? Or is the status
636 642 # quo fine?
637 643
638 644 alltags = {} # map tag name to (node, hist)
639 645 tagtypes = {}
640 646
641 647 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
642 648 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
643 649
644 650 # Build the return dicts. Have to re-encode tag names because
645 651 # the tags module always uses UTF-8 (in order not to lose info
646 652 # writing to the cache), but the rest of Mercurial wants them in
647 653 # local encoding.
648 654 tags = {}
649 655 for (name, (node, hist)) in alltags.iteritems():
650 656 if node != nullid:
651 657 tags[encoding.tolocal(name)] = node
652 658 tags['tip'] = self.changelog.tip()
653 659 tagtypes = dict([(encoding.tolocal(name), value)
654 660 for (name, value) in tagtypes.iteritems()])
655 661 return (tags, tagtypes)
656 662
657 663 def tagtype(self, tagname):
658 664 '''
659 665 return the type of the given tag. result can be:
660 666
661 667 'local' : a local tag
662 668 'global' : a global tag
663 669 None : tag does not exist
664 670 '''
665 671
666 672 return self._tagscache.tagtypes.get(tagname)
667 673
668 674 def tagslist(self):
669 675 '''return a list of tags ordered by revision'''
670 676 if not self._tagscache.tagslist:
671 677 l = []
672 678 for t, n in self.tags().iteritems():
673 679 l.append((self.changelog.rev(n), t, n))
674 680 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
675 681
676 682 return self._tagscache.tagslist
677 683
678 684 def nodetags(self, node):
679 685 '''return the tags associated with a node'''
680 686 if not self._tagscache.nodetagscache:
681 687 nodetagscache = {}
682 688 for t, n in self._tagscache.tags.iteritems():
683 689 nodetagscache.setdefault(n, []).append(t)
684 690 for tags in nodetagscache.itervalues():
685 691 tags.sort()
686 692 self._tagscache.nodetagscache = nodetagscache
687 693 return self._tagscache.nodetagscache.get(node, [])
688 694
689 695 def nodebookmarks(self, node):
690 696 marks = []
691 697 for bookmark, n in self._bookmarks.iteritems():
692 698 if n == node:
693 699 marks.append(bookmark)
694 700 return sorted(marks)
695 701
696 702 def branchmap(self):
697 703 '''returns a dictionary {branch: [branchheads]} with branchheads
698 704 ordered by increasing revision number'''
699 705 branchmap.updatecache(self)
700 706 return self._branchcaches[self.filtername]
701 707
702 708 def branchtip(self, branch):
703 709 '''return the tip node for a given branch'''
704 710 try:
705 711 return self.branchmap().branchtip(branch)
706 712 except KeyError:
707 713 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
708 714
709 715 def lookup(self, key):
710 716 return self[key].node()
711 717
712 718 def lookupbranch(self, key, remote=None):
713 719 repo = remote or self
714 720 if key in repo.branchmap():
715 721 return key
716 722
717 723 repo = (remote and remote.local()) and remote or self
718 724 return repo[key].branch()
719 725
720 726 def known(self, nodes):
721 727 nm = self.changelog.nodemap
722 728 pc = self._phasecache
723 729 result = []
724 730 for n in nodes:
725 731 r = nm.get(n)
726 732 resp = not (r is None or pc.phase(self, r) >= phases.secret)
727 733 result.append(resp)
728 734 return result
729 735
730 736 def local(self):
731 737 return self
732 738
733 739 def cancopy(self):
734 740 # so statichttprepo's override of local() works
735 741 if not self.local():
736 742 return False
737 743 if not self.ui.configbool('phases', 'publish', True):
738 744 return True
739 745 # if publishing we can't copy if there is filtered content
740 746 return not self.filtered('visible').changelog.filteredrevs
741 747
742 748 def join(self, f, *insidef):
743 749 return os.path.join(self.path, f, *insidef)
744 750
745 751 def wjoin(self, f, *insidef):
746 752 return os.path.join(self.root, f, *insidef)
747 753
748 754 def file(self, f):
749 755 if f[0] == '/':
750 756 f = f[1:]
751 757 return filelog.filelog(self.sopener, f)
752 758
753 759 def changectx(self, changeid):
754 760 return self[changeid]
755 761
756 762 def parents(self, changeid=None):
757 763 '''get list of changectxs for parents of changeid'''
758 764 return self[changeid].parents()
759 765
760 766 def setparents(self, p1, p2=nullid):
761 767 self.dirstate.beginparentchange()
762 768 copies = self.dirstate.setparents(p1, p2)
763 769 pctx = self[p1]
764 770 if copies:
765 771 # Adjust copy records, the dirstate cannot do it, it
766 772 # requires access to parents manifests. Preserve them
767 773 # only for entries added to first parent.
768 774 for f in copies:
769 775 if f not in pctx and copies[f] in pctx:
770 776 self.dirstate.copy(copies[f], f)
771 777 if p2 == nullid:
772 778 for f, s in sorted(self.dirstate.copies().items()):
773 779 if f not in pctx and s not in pctx:
774 780 self.dirstate.copy(None, f)
775 781 self.dirstate.endparentchange()
776 782
777 783 def filectx(self, path, changeid=None, fileid=None):
778 784 """changeid can be a changeset revision, node, or tag.
779 785 fileid can be a file revision or node."""
780 786 return context.filectx(self, path, changeid, fileid)
781 787
782 788 def getcwd(self):
783 789 return self.dirstate.getcwd()
784 790
785 791 def pathto(self, f, cwd=None):
786 792 return self.dirstate.pathto(f, cwd)
787 793
788 794 def wfile(self, f, mode='r'):
789 795 return self.wopener(f, mode)
790 796
791 797 def _link(self, f):
792 798 return self.wvfs.islink(f)
793 799
794 800 def _loadfilter(self, filter):
795 801 if filter not in self.filterpats:
796 802 l = []
797 803 for pat, cmd in self.ui.configitems(filter):
798 804 if cmd == '!':
799 805 continue
800 806 mf = matchmod.match(self.root, '', [pat])
801 807 fn = None
802 808 params = cmd
803 809 for name, filterfn in self._datafilters.iteritems():
804 810 if cmd.startswith(name):
805 811 fn = filterfn
806 812 params = cmd[len(name):].lstrip()
807 813 break
808 814 if not fn:
809 815 fn = lambda s, c, **kwargs: util.filter(s, c)
810 816 # Wrap old filters not supporting keyword arguments
811 817 if not inspect.getargspec(fn)[2]:
812 818 oldfn = fn
813 819 fn = lambda s, c, **kwargs: oldfn(s, c)
814 820 l.append((mf, fn, params))
815 821 self.filterpats[filter] = l
816 822 return self.filterpats[filter]
817 823
818 824 def _filter(self, filterpats, filename, data):
819 825 for mf, fn, cmd in filterpats:
820 826 if mf(filename):
821 827 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
822 828 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
823 829 break
824 830
825 831 return data
826 832
827 833 @unfilteredpropertycache
828 834 def _encodefilterpats(self):
829 835 return self._loadfilter('encode')
830 836
831 837 @unfilteredpropertycache
832 838 def _decodefilterpats(self):
833 839 return self._loadfilter('decode')
834 840
835 841 def adddatafilter(self, name, filter):
836 842 self._datafilters[name] = filter
837 843
838 844 def wread(self, filename):
839 845 if self._link(filename):
840 846 data = self.wvfs.readlink(filename)
841 847 else:
842 848 data = self.wopener.read(filename)
843 849 return self._filter(self._encodefilterpats, filename, data)
844 850
845 851 def wwrite(self, filename, data, flags):
846 852 data = self._filter(self._decodefilterpats, filename, data)
847 853 if 'l' in flags:
848 854 self.wopener.symlink(data, filename)
849 855 else:
850 856 self.wopener.write(filename, data)
851 857 if 'x' in flags:
852 858 self.wvfs.setflags(filename, False, True)
853 859
854 860 def wwritedata(self, filename, data):
855 861 return self._filter(self._decodefilterpats, filename, data)
856 862
857 863 def transaction(self, desc, report=None):
858 864 tr = self._transref and self._transref() or None
859 865 if tr and tr.running():
860 866 return tr.nest()
861 867
862 868 # abort here if the journal already exists
863 869 if self.svfs.exists("journal"):
864 870 raise error.RepoError(
865 871 _("abandoned transaction found"),
866 872 hint=_("run 'hg recover' to clean up transaction"))
867 873
868 874 def onclose():
869 875 self.store.write(self._transref())
870 876
871 877 self._writejournal(desc)
872 878 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
873 879 rp = report and report or self.ui.warn
874 880 tr = transaction.transaction(rp, self.sopener,
875 881 "journal",
876 882 aftertrans(renames),
877 883 self.store.createmode,
878 884 onclose)
879 885 self._transref = weakref.ref(tr)
880 886 return tr
881 887
882 888 def _journalfiles(self):
883 889 return ((self.svfs, 'journal'),
884 890 (self.vfs, 'journal.dirstate'),
885 891 (self.vfs, 'journal.branch'),
886 892 (self.vfs, 'journal.desc'),
887 893 (self.vfs, 'journal.bookmarks'),
888 894 (self.svfs, 'journal.phaseroots'))
889 895
890 896 def undofiles(self):
891 897 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
892 898
893 899 def _writejournal(self, desc):
894 900 self.opener.write("journal.dirstate",
895 901 self.opener.tryread("dirstate"))
896 902 self.opener.write("journal.branch",
897 903 encoding.fromlocal(self.dirstate.branch()))
898 904 self.opener.write("journal.desc",
899 905 "%d\n%s\n" % (len(self), desc))
900 906 self.opener.write("journal.bookmarks",
901 907 self.opener.tryread("bookmarks"))
902 908 self.sopener.write("journal.phaseroots",
903 909 self.sopener.tryread("phaseroots"))
904 910
905 911 def recover(self):
906 912 lock = self.lock()
907 913 try:
908 914 if self.svfs.exists("journal"):
909 915 self.ui.status(_("rolling back interrupted transaction\n"))
910 916 transaction.rollback(self.sopener, "journal",
911 917 self.ui.warn)
912 918 self.invalidate()
913 919 return True
914 920 else:
915 921 self.ui.warn(_("no interrupted transaction available\n"))
916 922 return False
917 923 finally:
918 924 lock.release()
919 925
920 926 def rollback(self, dryrun=False, force=False):
921 927 wlock = lock = None
922 928 try:
923 929 wlock = self.wlock()
924 930 lock = self.lock()
925 931 if self.svfs.exists("undo"):
926 932 return self._rollback(dryrun, force)
927 933 else:
928 934 self.ui.warn(_("no rollback information available\n"))
929 935 return 1
930 936 finally:
931 937 release(lock, wlock)
932 938
933 939 @unfilteredmethod # Until we get smarter cache management
934 940 def _rollback(self, dryrun, force):
935 941 ui = self.ui
936 942 try:
937 943 args = self.opener.read('undo.desc').splitlines()
938 944 (oldlen, desc, detail) = (int(args[0]), args[1], None)
939 945 if len(args) >= 3:
940 946 detail = args[2]
941 947 oldtip = oldlen - 1
942 948
943 949 if detail and ui.verbose:
944 950 msg = (_('repository tip rolled back to revision %s'
945 951 ' (undo %s: %s)\n')
946 952 % (oldtip, desc, detail))
947 953 else:
948 954 msg = (_('repository tip rolled back to revision %s'
949 955 ' (undo %s)\n')
950 956 % (oldtip, desc))
951 957 except IOError:
952 958 msg = _('rolling back unknown transaction\n')
953 959 desc = None
954 960
955 961 if not force and self['.'] != self['tip'] and desc == 'commit':
956 962 raise util.Abort(
957 963 _('rollback of last commit while not checked out '
958 964 'may lose data'), hint=_('use -f to force'))
959 965
960 966 ui.status(msg)
961 967 if dryrun:
962 968 return 0
963 969
964 970 parents = self.dirstate.parents()
965 971 self.destroying()
966 972 transaction.rollback(self.sopener, 'undo', ui.warn)
967 973 if self.vfs.exists('undo.bookmarks'):
968 974 self.vfs.rename('undo.bookmarks', 'bookmarks')
969 975 if self.svfs.exists('undo.phaseroots'):
970 976 self.svfs.rename('undo.phaseroots', 'phaseroots')
971 977 self.invalidate()
972 978
973 979 parentgone = (parents[0] not in self.changelog.nodemap or
974 980 parents[1] not in self.changelog.nodemap)
975 981 if parentgone:
976 982 self.vfs.rename('undo.dirstate', 'dirstate')
977 983 try:
978 984 branch = self.opener.read('undo.branch')
979 985 self.dirstate.setbranch(encoding.tolocal(branch))
980 986 except IOError:
981 987 ui.warn(_('named branch could not be reset: '
982 988 'current branch is still \'%s\'\n')
983 989 % self.dirstate.branch())
984 990
985 991 self.dirstate.invalidate()
986 992 parents = tuple([p.rev() for p in self.parents()])
987 993 if len(parents) > 1:
988 994 ui.status(_('working directory now based on '
989 995 'revisions %d and %d\n') % parents)
990 996 else:
991 997 ui.status(_('working directory now based on '
992 998 'revision %d\n') % parents)
993 999 # TODO: if we know which new heads may result from this rollback, pass
994 1000 # them to destroy(), which will prevent the branchhead cache from being
995 1001 # invalidated.
996 1002 self.destroyed()
997 1003 return 0
998 1004
999 1005 def invalidatecaches(self):
1000 1006
1001 1007 if '_tagscache' in vars(self):
1002 1008 # can't use delattr on proxy
1003 1009 del self.__dict__['_tagscache']
1004 1010
1005 1011 self.unfiltered()._branchcaches.clear()
1006 1012 self.invalidatevolatilesets()
1007 1013
1008 1014 def invalidatevolatilesets(self):
1009 1015 self.filteredrevcache.clear()
1010 1016 obsolete.clearobscaches(self)
1011 1017
1012 1018 def invalidatedirstate(self):
1013 1019 '''Invalidates the dirstate, causing the next call to dirstate
1014 1020 to check if it was modified since the last time it was read,
1015 1021 rereading it if it has.
1016 1022
1017 1023 This is different from dirstate.invalidate() in that it doesn't
1018 1024 always reread the dirstate. Use dirstate.invalidate() if you want to
1019 1025 explicitly read the dirstate again (i.e. restoring it to a previous
1020 1026 known good state).'''
1021 1027 if hasunfilteredcache(self, 'dirstate'):
1022 1028 for k in self.dirstate._filecache:
1023 1029 try:
1024 1030 delattr(self.dirstate, k)
1025 1031 except AttributeError:
1026 1032 pass
1027 1033 delattr(self.unfiltered(), 'dirstate')
1028 1034
1029 1035 def invalidate(self):
1030 1036 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1031 1037 for k in self._filecache:
1032 1038 # dirstate is invalidated separately in invalidatedirstate()
1033 1039 if k == 'dirstate':
1034 1040 continue
1035 1041
1036 1042 try:
1037 1043 delattr(unfiltered, k)
1038 1044 except AttributeError:
1039 1045 pass
1040 1046 self.invalidatecaches()
1041 1047 self.store.invalidatecaches()
1042 1048
1043 1049 def invalidateall(self):
1044 1050 '''Fully invalidates both store and non-store parts, causing the
1045 1051 subsequent operation to reread any outside changes.'''
1046 1052 # extension should hook this to invalidate its caches
1047 1053 self.invalidate()
1048 1054 self.invalidatedirstate()
1049 1055
1050 1056 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1051 1057 try:
1052 1058 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1053 1059 except error.LockHeld, inst:
1054 1060 if not wait:
1055 1061 raise
1056 1062 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1057 1063 (desc, inst.locker))
1058 1064 # default to 600 seconds timeout
1059 1065 l = lockmod.lock(vfs, lockname,
1060 1066 int(self.ui.config("ui", "timeout", "600")),
1061 1067 releasefn, desc=desc)
1062 1068 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1063 1069 if acquirefn:
1064 1070 acquirefn()
1065 1071 return l
1066 1072
1067 1073 def _afterlock(self, callback):
1068 1074 """add a callback to the current repository lock.
1069 1075
1070 1076 The callback will be executed on lock release."""
1071 1077 l = self._lockref and self._lockref()
1072 1078 if l:
1073 1079 l.postrelease.append(callback)
1074 1080 else:
1075 1081 callback()
1076 1082
1077 1083 def lock(self, wait=True):
1078 1084 '''Lock the repository store (.hg/store) and return a weak reference
1079 1085 to the lock. Use this before modifying the store (e.g. committing or
1080 1086 stripping). If you are opening a transaction, get a lock as well.'''
1081 1087 l = self._lockref and self._lockref()
1082 1088 if l is not None and l.held:
1083 1089 l.lock()
1084 1090 return l
1085 1091
1086 1092 def unlock():
1087 1093 for k, ce in self._filecache.items():
1088 1094 if k == 'dirstate' or k not in self.__dict__:
1089 1095 continue
1090 1096 ce.refresh()
1091 1097
1092 1098 l = self._lock(self.svfs, "lock", wait, unlock,
1093 1099 self.invalidate, _('repository %s') % self.origroot)
1094 1100 self._lockref = weakref.ref(l)
1095 1101 return l
1096 1102
1097 1103 def wlock(self, wait=True):
1098 1104 '''Lock the non-store parts of the repository (everything under
1099 1105 .hg except .hg/store) and return a weak reference to the lock.
1100 1106 Use this before modifying files in .hg.'''
1101 1107 l = self._wlockref and self._wlockref()
1102 1108 if l is not None and l.held:
1103 1109 l.lock()
1104 1110 return l
1105 1111
1106 1112 def unlock():
1107 1113 if self.dirstate.pendingparentchange():
1108 1114 self.dirstate.invalidate()
1109 1115 else:
1110 1116 self.dirstate.write()
1111 1117
1112 1118 self._filecache['dirstate'].refresh()
1113 1119
1114 1120 l = self._lock(self.vfs, "wlock", wait, unlock,
1115 1121 self.invalidatedirstate, _('working directory of %s') %
1116 1122 self.origroot)
1117 1123 self._wlockref = weakref.ref(l)
1118 1124 return l
1119 1125
1120 1126 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1121 1127 """
1122 1128 commit an individual file as part of a larger transaction
1123 1129 """
1124 1130
1125 1131 fname = fctx.path()
1126 1132 text = fctx.data()
1127 1133 flog = self.file(fname)
1128 1134 fparent1 = manifest1.get(fname, nullid)
1129 1135 fparent2 = manifest2.get(fname, nullid)
1130 1136
1131 1137 meta = {}
1132 1138 copy = fctx.renamed()
1133 1139 if copy and copy[0] != fname:
1134 1140 # Mark the new revision of this file as a copy of another
1135 1141 # file. This copy data will effectively act as a parent
1136 1142 # of this new revision. If this is a merge, the first
1137 1143 # parent will be the nullid (meaning "look up the copy data")
1138 1144 # and the second one will be the other parent. For example:
1139 1145 #
1140 1146 # 0 --- 1 --- 3 rev1 changes file foo
1141 1147 # \ / rev2 renames foo to bar and changes it
1142 1148 # \- 2 -/ rev3 should have bar with all changes and
1143 1149 # should record that bar descends from
1144 1150 # bar in rev2 and foo in rev1
1145 1151 #
1146 1152 # this allows this merge to succeed:
1147 1153 #
1148 1154 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1149 1155 # \ / merging rev3 and rev4 should use bar@rev2
1150 1156 # \- 2 --- 4 as the merge base
1151 1157 #
1152 1158
1153 1159 cfname = copy[0]
1154 1160 crev = manifest1.get(cfname)
1155 1161 newfparent = fparent2
1156 1162
1157 1163 if manifest2: # branch merge
1158 1164 if fparent2 == nullid or crev is None: # copied on remote side
1159 1165 if cfname in manifest2:
1160 1166 crev = manifest2[cfname]
1161 1167 newfparent = fparent1
1162 1168
1163 1169 # find source in nearest ancestor if we've lost track
1164 1170 if not crev:
1165 1171 self.ui.debug(" %s: searching for copy revision for %s\n" %
1166 1172 (fname, cfname))
1167 1173 for ancestor in self[None].ancestors():
1168 1174 if cfname in ancestor:
1169 1175 crev = ancestor[cfname].filenode()
1170 1176 break
1171 1177
1172 1178 if crev:
1173 1179 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1174 1180 meta["copy"] = cfname
1175 1181 meta["copyrev"] = hex(crev)
1176 1182 fparent1, fparent2 = nullid, newfparent
1177 1183 else:
1178 1184 self.ui.warn(_("warning: can't find ancestor for '%s' "
1179 1185 "copied from '%s'!\n") % (fname, cfname))
1180 1186
1181 1187 elif fparent1 == nullid:
1182 1188 fparent1, fparent2 = fparent2, nullid
1183 1189 elif fparent2 != nullid:
1184 1190 # is one parent an ancestor of the other?
1185 1191 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1186 1192 if fparent1 in fparentancestors:
1187 1193 fparent1, fparent2 = fparent2, nullid
1188 1194 elif fparent2 in fparentancestors:
1189 1195 fparent2 = nullid
1190 1196
1191 1197 # is the file changed?
1192 1198 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1193 1199 changelist.append(fname)
1194 1200 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1195 1201 # are just the flags changed during merge?
1196 1202 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1197 1203 changelist.append(fname)
1198 1204
1199 1205 return fparent1
1200 1206
1201 1207 @unfilteredmethod
1202 1208 def commit(self, text="", user=None, date=None, match=None, force=False,
1203 1209 editor=False, extra={}):
1204 1210 """Add a new revision to current repository.
1205 1211
1206 1212 Revision information is gathered from the working directory,
1207 1213 match can be used to filter the committed files. If editor is
1208 1214 supplied, it is called to get a commit message.
1209 1215 """
1210 1216
1211 1217 def fail(f, msg):
1212 1218 raise util.Abort('%s: %s' % (f, msg))
1213 1219
1214 1220 if not match:
1215 1221 match = matchmod.always(self.root, '')
1216 1222
1217 1223 if not force:
1218 1224 vdirs = []
1219 1225 match.explicitdir = vdirs.append
1220 1226 match.bad = fail
1221 1227
1222 1228 wlock = self.wlock()
1223 1229 try:
1224 1230 wctx = self[None]
1225 1231 merge = len(wctx.parents()) > 1
1226 1232
1227 1233 if (not force and merge and match and
1228 1234 (match.files() or match.anypats())):
1229 1235 raise util.Abort(_('cannot partially commit a merge '
1230 1236 '(do not specify files or patterns)'))
1231 1237
1232 1238 changes = self.status(match=match, clean=force)
1233 1239 if force:
1234 1240 changes[0].extend(changes[6]) # mq may commit unchanged files
1235 1241
1236 1242 # check subrepos
1237 1243 subs = []
1238 1244 commitsubs = set()
1239 1245 newstate = wctx.substate.copy()
1240 1246 # only manage subrepos and .hgsubstate if .hgsub is present
1241 1247 if '.hgsub' in wctx:
1242 1248 # we'll decide whether to track this ourselves, thanks
1243 1249 for c in changes[:3]:
1244 1250 if '.hgsubstate' in c:
1245 1251 c.remove('.hgsubstate')
1246 1252
1247 1253 # compare current state to last committed state
1248 1254 # build new substate based on last committed state
1249 1255 oldstate = wctx.p1().substate
1250 1256 for s in sorted(newstate.keys()):
1251 1257 if not match(s):
1252 1258 # ignore working copy, use old state if present
1253 1259 if s in oldstate:
1254 1260 newstate[s] = oldstate[s]
1255 1261 continue
1256 1262 if not force:
1257 1263 raise util.Abort(
1258 1264 _("commit with new subrepo %s excluded") % s)
1259 1265 if wctx.sub(s).dirty(True):
1260 1266 if not self.ui.configbool('ui', 'commitsubrepos'):
1261 1267 raise util.Abort(
1262 1268 _("uncommitted changes in subrepo %s") % s,
1263 1269 hint=_("use --subrepos for recursive commit"))
1264 1270 subs.append(s)
1265 1271 commitsubs.add(s)
1266 1272 else:
1267 1273 bs = wctx.sub(s).basestate()
1268 1274 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1269 1275 if oldstate.get(s, (None, None, None))[1] != bs:
1270 1276 subs.append(s)
1271 1277
1272 1278 # check for removed subrepos
1273 1279 for p in wctx.parents():
1274 1280 r = [s for s in p.substate if s not in newstate]
1275 1281 subs += [s for s in r if match(s)]
1276 1282 if subs:
1277 1283 if (not match('.hgsub') and
1278 1284 '.hgsub' in (wctx.modified() + wctx.added())):
1279 1285 raise util.Abort(
1280 1286 _("can't commit subrepos without .hgsub"))
1281 1287 changes[0].insert(0, '.hgsubstate')
1282 1288
1283 1289 elif '.hgsub' in changes[2]:
1284 1290 # clean up .hgsubstate when .hgsub is removed
1285 1291 if ('.hgsubstate' in wctx and
1286 1292 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1287 1293 changes[2].insert(0, '.hgsubstate')
1288 1294
1289 1295 # make sure all explicit patterns are matched
1290 1296 if not force and match.files():
1291 1297 matched = set(changes[0] + changes[1] + changes[2])
1292 1298
1293 1299 for f in match.files():
1294 1300 f = self.dirstate.normalize(f)
1295 1301 if f == '.' or f in matched or f in wctx.substate:
1296 1302 continue
1297 1303 if f in changes[3]: # missing
1298 1304 fail(f, _('file not found!'))
1299 1305 if f in vdirs: # visited directory
1300 1306 d = f + '/'
1301 1307 for mf in matched:
1302 1308 if mf.startswith(d):
1303 1309 break
1304 1310 else:
1305 1311 fail(f, _("no match under directory!"))
1306 1312 elif f not in self.dirstate:
1307 1313 fail(f, _("file not tracked!"))
1308 1314
1309 1315 cctx = context.workingctx(self, text, user, date, extra, changes)
1310 1316
1311 1317 if (not force and not extra.get("close") and not merge
1312 1318 and not cctx.files()
1313 1319 and wctx.branch() == wctx.p1().branch()):
1314 1320 return None
1315 1321
1316 1322 if merge and cctx.deleted():
1317 1323 raise util.Abort(_("cannot commit merge with missing files"))
1318 1324
1319 1325 ms = mergemod.mergestate(self)
1320 1326 for f in changes[0]:
1321 1327 if f in ms and ms[f] == 'u':
1322 1328 raise util.Abort(_("unresolved merge conflicts "
1323 1329 "(see hg help resolve)"))
1324 1330
1325 1331 if editor:
1326 1332 cctx._text = editor(self, cctx, subs)
1327 1333 edited = (text != cctx._text)
1328 1334
1329 1335 # Save commit message in case this transaction gets rolled back
1330 1336 # (e.g. by a pretxncommit hook). Leave the content alone on
1331 1337 # the assumption that the user will use the same editor again.
1332 1338 msgfn = self.savecommitmessage(cctx._text)
1333 1339
1334 1340 # commit subs and write new state
1335 1341 if subs:
1336 1342 for s in sorted(commitsubs):
1337 1343 sub = wctx.sub(s)
1338 1344 self.ui.status(_('committing subrepository %s\n') %
1339 1345 subrepo.subrelpath(sub))
1340 1346 sr = sub.commit(cctx._text, user, date)
1341 1347 newstate[s] = (newstate[s][0], sr)
1342 1348 subrepo.writestate(self, newstate)
1343 1349
1344 1350 p1, p2 = self.dirstate.parents()
1345 1351 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1346 1352 try:
1347 1353 self.hook("precommit", throw=True, parent1=hookp1,
1348 1354 parent2=hookp2)
1349 1355 ret = self.commitctx(cctx, True)
1350 1356 except: # re-raises
1351 1357 if edited:
1352 1358 self.ui.write(
1353 1359 _('note: commit message saved in %s\n') % msgfn)
1354 1360 raise
1355 1361
1356 1362 # update bookmarks, dirstate and mergestate
1357 1363 bookmarks.update(self, [p1, p2], ret)
1358 1364 cctx.markcommitted(ret)
1359 1365 ms.reset()
1360 1366 finally:
1361 1367 wlock.release()
1362 1368
1363 1369 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1364 1370 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1365 1371 self._afterlock(commithook)
1366 1372 return ret
1367 1373
1368 1374 @unfilteredmethod
1369 1375 def commitctx(self, ctx, error=False):
1370 1376 """Add a new revision to current repository.
1371 1377 Revision information is passed via the context argument.
1372 1378 """
1373 1379
1374 1380 tr = lock = None
1375 1381 removed = list(ctx.removed())
1376 1382 p1, p2 = ctx.p1(), ctx.p2()
1377 1383 user = ctx.user()
1378 1384
1379 1385 lock = self.lock()
1380 1386 try:
1381 1387 tr = self.transaction("commit")
1382 1388 trp = weakref.proxy(tr)
1383 1389
1384 1390 if ctx.files():
1385 1391 m1 = p1.manifest().copy()
1386 1392 m2 = p2.manifest()
1387 1393
1388 1394 # check in files
1389 1395 new = {}
1390 1396 changed = []
1391 1397 linkrev = len(self)
1392 1398 for f in sorted(ctx.modified() + ctx.added()):
1393 1399 self.ui.note(f + "\n")
1394 1400 try:
1395 1401 fctx = ctx[f]
1396 1402 if fctx is None:
1397 1403 removed.append(f)
1398 1404 else:
1399 1405 new[f] = self._filecommit(fctx, m1, m2, linkrev,
1400 1406 trp, changed)
1401 1407 m1.set(f, fctx.flags())
1402 1408 except OSError, inst:
1403 1409 self.ui.warn(_("trouble committing %s!\n") % f)
1404 1410 raise
1405 1411 except IOError, inst:
1406 1412 errcode = getattr(inst, 'errno', errno.ENOENT)
1407 1413 if error or errcode and errcode != errno.ENOENT:
1408 1414 self.ui.warn(_("trouble committing %s!\n") % f)
1409 1415 raise
1410 1416
1411 1417 # update manifest
1412 1418 m1.update(new)
1413 1419 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1414 1420 drop = [f for f in removed if f in m1]
1415 1421 for f in drop:
1416 1422 del m1[f]
1417 1423 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1418 1424 p2.manifestnode(), new, drop)
1419 1425 files = changed + removed
1420 1426 else:
1421 1427 mn = p1.manifestnode()
1422 1428 files = []
1423 1429
1424 1430 # update changelog
1425 1431 self.changelog.delayupdate()
1426 1432 n = self.changelog.add(mn, files, ctx.description(),
1427 1433 trp, p1.node(), p2.node(),
1428 1434 user, ctx.date(), ctx.extra().copy())
1429 1435 p = lambda: self.changelog.writepending() and self.root or ""
1430 1436 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1431 1437 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1432 1438 parent2=xp2, pending=p)
1433 1439 self.changelog.finalize(trp)
1434 1440 # set the new commit in its proper phase
1435 1441 targetphase = subrepo.newcommitphase(self.ui, ctx)
1436 1442 if targetphase:
1437 1443 # retracting the boundary does not alter parent changesets.
1438 1444 # if a parent has a higher phase, the resulting phase will
1439 1445 # be compliant anyway
1440 1446 #
1441 1447 # if minimal phase was 0 we don't need to retract anything
1442 1448 phases.retractboundary(self, tr, targetphase, [n])
1443 1449 tr.close()
1444 1450 branchmap.updatecache(self.filtered('served'))
1445 1451 return n
1446 1452 finally:
1447 1453 if tr:
1448 1454 tr.release()
1449 1455 lock.release()
1450 1456
1451 1457 @unfilteredmethod
1452 1458 def destroying(self):
1453 1459 '''Inform the repository that nodes are about to be destroyed.
1454 1460 Intended for use by strip and rollback, so there's a common
1455 1461 place for anything that has to be done before destroying history.
1456 1462
1457 1463 This is mostly useful for saving state that is in memory and waiting
1458 1464 to be flushed when the current lock is released. Because a call to
1459 1465 destroyed is imminent, the repo will be invalidated causing those
1460 1466 changes to stay in memory (waiting for the next unlock), or vanish
1461 1467 completely.
1462 1468 '''
1463 1469 # When using the same lock to commit and strip, the phasecache is left
1464 1470 # dirty after committing. Then when we strip, the repo is invalidated,
1465 1471 # causing those changes to disappear.
1466 1472 if '_phasecache' in vars(self):
1467 1473 self._phasecache.write()
1468 1474
1469 1475 @unfilteredmethod
1470 1476 def destroyed(self):
1471 1477 '''Inform the repository that nodes have been destroyed.
1472 1478 Intended for use by strip and rollback, so there's a common
1473 1479 place for anything that has to be done after destroying history.
1474 1480 '''
1475 1481 # When one tries to:
1476 1482 # 1) destroy nodes thus calling this method (e.g. strip)
1477 1483 # 2) use phasecache somewhere (e.g. commit)
1478 1484 #
1479 1485 # then 2) will fail because the phasecache contains nodes that were
1480 1486 # removed. We can either remove phasecache from the filecache,
1481 1487 # causing it to reload next time it is accessed, or simply filter
1482 1488 # the removed nodes now and write the updated cache.
1483 1489 self._phasecache.filterunknown(self)
1484 1490 self._phasecache.write()
1485 1491
1486 1492 # update the 'served' branch cache to help read only server process
1487 1493 # Thanks to branchcache collaboration this is done from the nearest
1488 1494 # filtered subset and it is expected to be fast.
1489 1495 branchmap.updatecache(self.filtered('served'))
1490 1496
1491 1497 # Ensure the persistent tag cache is updated. Doing it now
1492 1498 # means that the tag cache only has to worry about destroyed
1493 1499 # heads immediately after a strip/rollback. That in turn
1494 1500 # guarantees that "cachetip == currenttip" (comparing both rev
1495 1501 # and node) always means no nodes have been added or destroyed.
1496 1502
1497 1503 # XXX this is suboptimal when qrefresh'ing: we strip the current
1498 1504 # head, refresh the tag cache, then immediately add a new head.
1499 1505 # But I think doing it this way is necessary for the "instant
1500 1506 # tag cache retrieval" case to work.
1501 1507 self.invalidate()
1502 1508
1503 1509 def walk(self, match, node=None):
1504 1510 '''
1505 1511 walk recursively through the directory tree or a given
1506 1512 changeset, finding all files matched by the match
1507 1513 function
1508 1514 '''
1509 1515 return self[node].walk(match)
1510 1516
1511 1517 def status(self, node1='.', node2=None, match=None,
1512 1518 ignored=False, clean=False, unknown=False,
1513 1519 listsubrepos=False):
1514 1520 '''a convenience method that calls node1.status(node2)'''
1515 1521 return self[node1].status(node2, match, ignored, clean, unknown,
1516 1522 listsubrepos)
1517 1523
1518 1524 def heads(self, start=None):
1519 1525 heads = self.changelog.heads(start)
1520 1526 # sort the output in rev descending order
1521 1527 return sorted(heads, key=self.changelog.rev, reverse=True)
1522 1528
1523 1529 def branchheads(self, branch=None, start=None, closed=False):
1524 1530 '''return a (possibly filtered) list of heads for the given branch
1525 1531
1526 1532 Heads are returned in topological order, from newest to oldest.
1527 1533 If branch is None, use the dirstate branch.
1528 1534 If start is not None, return only heads reachable from start.
1529 1535 If closed is True, return heads that are marked as closed as well.
1530 1536 '''
1531 1537 if branch is None:
1532 1538 branch = self[None].branch()
1533 1539 branches = self.branchmap()
1534 1540 if branch not in branches:
1535 1541 return []
1536 1542 # the cache returns heads ordered lowest to highest
1537 1543 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1538 1544 if start is not None:
1539 1545 # filter out the heads that cannot be reached from startrev
1540 1546 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1541 1547 bheads = [h for h in bheads if h in fbheads]
1542 1548 return bheads
1543 1549
1544 1550 def branches(self, nodes):
1545 1551 if not nodes:
1546 1552 nodes = [self.changelog.tip()]
1547 1553 b = []
1548 1554 for n in nodes:
1549 1555 t = n
1550 1556 while True:
1551 1557 p = self.changelog.parents(n)
1552 1558 if p[1] != nullid or p[0] == nullid:
1553 1559 b.append((t, n, p[0], p[1]))
1554 1560 break
1555 1561 n = p[0]
1556 1562 return b
1557 1563
1558 1564 def between(self, pairs):
1559 1565 r = []
1560 1566
1561 1567 for top, bottom in pairs:
1562 1568 n, l, i = top, [], 0
1563 1569 f = 1
1564 1570
1565 1571 while n != bottom and n != nullid:
1566 1572 p = self.changelog.parents(n)[0]
1567 1573 if i == f:
1568 1574 l.append(n)
1569 1575 f = f * 2
1570 1576 n = p
1571 1577 i += 1
1572 1578
1573 1579 r.append(l)
1574 1580
1575 1581 return r
1576 1582
1577 1583 def checkpush(self, pushop):
1578 1584 """Extensions can override this function if additional checks have
1579 1585 to be performed before pushing, or call it if they override push
1580 1586 command.
1581 1587 """
1582 1588 pass
1583 1589
1584 1590 @unfilteredpropertycache
1585 1591 def prepushoutgoinghooks(self):
1586 1592 """Return util.hooks consists of "(repo, remote, outgoing)"
1587 1593 functions, which are called before pushing changesets.
1588 1594 """
1589 1595 return util.hooks()
1590 1596
1591 1597 def stream_in(self, remote, requirements):
1592 1598 lock = self.lock()
1593 1599 try:
1594 1600 # Save remote branchmap. We will use it later
1595 1601 # to speed up branchcache creation
1596 1602 rbranchmap = None
1597 1603 if remote.capable("branchmap"):
1598 1604 rbranchmap = remote.branchmap()
1599 1605
1600 1606 fp = remote.stream_out()
1601 1607 l = fp.readline()
1602 1608 try:
1603 1609 resp = int(l)
1604 1610 except ValueError:
1605 1611 raise error.ResponseError(
1606 1612 _('unexpected response from remote server:'), l)
1607 1613 if resp == 1:
1608 1614 raise util.Abort(_('operation forbidden by server'))
1609 1615 elif resp == 2:
1610 1616 raise util.Abort(_('locking the remote repository failed'))
1611 1617 elif resp != 0:
1612 1618 raise util.Abort(_('the server sent an unknown error code'))
1613 1619 self.ui.status(_('streaming all changes\n'))
1614 1620 l = fp.readline()
1615 1621 try:
1616 1622 total_files, total_bytes = map(int, l.split(' ', 1))
1617 1623 except (ValueError, TypeError):
1618 1624 raise error.ResponseError(
1619 1625 _('unexpected response from remote server:'), l)
1620 1626 self.ui.status(_('%d files to transfer, %s of data\n') %
1621 1627 (total_files, util.bytecount(total_bytes)))
1622 1628 handled_bytes = 0
1623 1629 self.ui.progress(_('clone'), 0, total=total_bytes)
1624 1630 start = time.time()
1625 1631
1626 1632 tr = self.transaction(_('clone'))
1627 1633 try:
1628 1634 for i in xrange(total_files):
1629 1635 # XXX doesn't support '\n' or '\r' in filenames
1630 1636 l = fp.readline()
1631 1637 try:
1632 1638 name, size = l.split('\0', 1)
1633 1639 size = int(size)
1634 1640 except (ValueError, TypeError):
1635 1641 raise error.ResponseError(
1636 1642 _('unexpected response from remote server:'), l)
1637 1643 if self.ui.debugflag:
1638 1644 self.ui.debug('adding %s (%s)\n' %
1639 1645 (name, util.bytecount(size)))
1640 1646 # for backwards compat, name was partially encoded
1641 1647 ofp = self.sopener(store.decodedir(name), 'w')
1642 1648 for chunk in util.filechunkiter(fp, limit=size):
1643 1649 handled_bytes += len(chunk)
1644 1650 self.ui.progress(_('clone'), handled_bytes,
1645 1651 total=total_bytes)
1646 1652 ofp.write(chunk)
1647 1653 ofp.close()
1648 1654 tr.close()
1649 1655 finally:
1650 1656 tr.release()
1651 1657
1652 1658 # Writing straight to files circumvented the in-memory caches
1653 1659 self.invalidate()
1654 1660
1655 1661 elapsed = time.time() - start
1656 1662 if elapsed <= 0:
1657 1663 elapsed = 0.001
1658 1664 self.ui.progress(_('clone'), None)
1659 1665 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1660 1666 (util.bytecount(total_bytes), elapsed,
1661 1667 util.bytecount(total_bytes / elapsed)))
1662 1668
1663 1669 # new requirements = old non-format requirements +
1664 1670 # new format-related
1665 1671 # requirements from the streamed-in repository
1666 1672 requirements.update(set(self.requirements) - self.supportedformats)
1667 1673 self._applyrequirements(requirements)
1668 1674 self._writerequirements()
1669 1675
1670 1676 if rbranchmap:
1671 1677 rbheads = []
1672 1678 for bheads in rbranchmap.itervalues():
1673 1679 rbheads.extend(bheads)
1674 1680
1675 1681 if rbheads:
1676 1682 rtiprev = max((int(self.changelog.rev(node))
1677 1683 for node in rbheads))
1678 1684 cache = branchmap.branchcache(rbranchmap,
1679 1685 self[rtiprev].node(),
1680 1686 rtiprev)
1681 1687 # Try to stick it as low as possible
1682 1688 # filters above 'served' are unlikely to be fetched from a clone
1683 1689 for candidate in ('base', 'immutable', 'served'):
1684 1690 rview = self.filtered(candidate)
1685 1691 if cache.validfor(rview):
1686 1692 self._branchcaches[candidate] = cache
1687 1693 cache.write(rview)
1688 1694 break
1689 1695 self.invalidate()
1690 1696 return len(self.heads()) + 1
1691 1697 finally:
1692 1698 lock.release()
1693 1699
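For reference, a minimal sketch of the uncompressed-stream wire format that the stream_in loop above parses; the store paths and sizes are made-up illustrations, not output from a real server:

    # status line: '0' = OK, '1' = forbidden, '2' = remote locking failed
    # summary line: '<total_files> <total_bytes>'
    # per file: '<store path>\0<size>\n' followed by exactly <size> raw bytes
    example_stream = (
        '0\n'
        '2 37\n'
        'data/foo.i\x0030\n' + '\x00' * 30 +
        'data/bar.i\x007\n' + '\x00' * 7
    )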
1694 1700 def clone(self, remote, heads=[], stream=False):
1695 1701 '''clone remote repository.
1696 1702
1697 1703 keyword arguments:
1698 1704 heads: list of revs to clone (forces use of pull)
1699 1705 stream: use streaming clone if possible'''
1700 1706
1701 1707 # now, all clients that can request uncompressed clones can
1702 1708 # read repo formats supported by all servers that can serve
1703 1709 # them.
1704 1710
1705 1711 # if revlog format changes, client will have to check version
1706 1712 # and format flags on "stream" capability, and use
1707 1713 # uncompressed only if compatible.
1708 1714
1709 1715 if not stream:
1710 1716 # if the server explicitly prefers to stream (for fast LANs)
1711 1717 stream = remote.capable('stream-preferred')
1712 1718
1713 1719 if stream and not heads:
1714 1720 # 'stream' means remote revlog format is revlogv1 only
1715 1721 if remote.capable('stream'):
1716 1722 return self.stream_in(remote, set(('revlogv1',)))
1717 1723 # otherwise, 'streamreqs' contains the remote revlog format
1718 1724 streamreqs = remote.capable('streamreqs')
1719 1725 if streamreqs:
1720 1726 streamreqs = set(streamreqs.split(','))
1721 1727 # if we support it, stream in and adjust our requirements
1722 1728 if not streamreqs - self.supportedformats:
1723 1729 return self.stream_in(remote, streamreqs)
1724 1730
1725 1731 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1726 1732 try:
1727 1733 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1728 1734 ret = exchange.pull(self, remote, heads).cgresult
1729 1735 finally:
1730 1736 self.ui.restoreconfig(quiet)
1731 1737 return ret
1732 1738
1733 1739 def pushkey(self, namespace, key, old, new):
1734 1740 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1735 1741 old=old, new=new)
1736 1742 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1737 1743 ret = pushkey.push(self, namespace, key, old, new)
1738 1744 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1739 1745 ret=ret)
1740 1746 return ret
1741 1747
1742 1748 def listkeys(self, namespace):
1743 1749 self.hook('prelistkeys', throw=True, namespace=namespace)
1744 1750 self.ui.debug('listing keys for "%s"\n' % namespace)
1745 1751 values = pushkey.list(self, namespace)
1746 1752 self.hook('listkeys', namespace=namespace, values=values)
1747 1753 return values
1748 1754
1749 1755 def debugwireargs(self, one, two, three=None, four=None, five=None):
1750 1756 '''used to test argument passing over the wire'''
1751 1757 return "%s %s %s %s %s" % (one, two, three, four, five)
1752 1758
1753 1759 def savecommitmessage(self, text):
1754 1760 fp = self.opener('last-message.txt', 'wb')
1755 1761 try:
1756 1762 fp.write(text)
1757 1763 finally:
1758 1764 fp.close()
1759 1765 return self.pathto(fp.name[len(self.root) + 1:])
1760 1766
1761 1767 # used to avoid circular references so destructors work
1762 1768 def aftertrans(files):
1763 1769 renamefiles = [tuple(t) for t in files]
1764 1770 def a():
1765 1771 for vfs, src, dest in renamefiles:
1766 1772 try:
1767 1773 vfs.rename(src, dest)
1768 1774 except OSError: # journal file does not yet exist
1769 1775 pass
1770 1776 return a
1771 1777
1772 1778 def undoname(fn):
1773 1779 base, name = os.path.split(fn)
1774 1780 assert name.startswith('journal')
1775 1781 return os.path.join(base, name.replace('journal', 'undo', 1))
1776 1782
1777 1783 def instance(ui, path, create):
1778 1784 return localrepository(ui, util.urllocalpath(path), create)
1779 1785
1780 1786 def islocal(path):
1781 1787 return True
@@ -1,1146 +1,1146
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the version. See
67 67 the comment associated with each format for details.
68 68
69 69 """
70 70 import struct
71 71 import util, base85, node
72 72 import phases
73 73 from i18n import _
74 74
75 75 _pack = struct.pack
76 76 _unpack = struct.unpack
77 77
78 78 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
79 79
80 80 # the obsolete feature is not mature enough to be enabled by default.
81 81 # you have to rely on a third-party extension to enable this.
82 82 _enabled = False
83 83
84 84 ### obsolescence marker flag
85 85
86 86 ## bumpedfix flag
87 87 #
88 88 # When a changeset A' succeeds a changeset A which became public, we call A'
89 89 # "bumped" because it's a successor of a public changeset
90 90 #
91 91 # o A' (bumped)
92 92 # |`:
93 93 # | o A
94 94 # |/
95 95 # o Z
96 96 #
97 97 # The way to solve this situation is to create a new changeset Ad as a child
98 98 # of A. This changeset has the same content as A'. So the diff from A to A'
99 99 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
100 100 #
101 101 # o Ad
102 102 # |`:
103 103 # | x A'
104 104 # |'|
105 105 # o | A
106 106 # |/
107 107 # o Z
108 108 #
109 109 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
110 110 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
111 111 # This flag means that the successors express the changes between the public and
112 112 # bumped version and fix the situation, breaking the transitivity of
113 113 # "bumped" here.
114 114 bumpedfix = 1
115 115 usingsha256 = 2
116 116
117 117 ## Parsing and writing of version "0"
118 118 #
119 119 # The header is followed by the markers. Each marker is made of:
120 120 #
121 121 # - 1 uint8 : number of new changesets "N", can be zero.
122 122 #
123 123 # - 1 uint32: metadata size "M" in bytes.
124 124 #
125 125 # - 1 byte: a bit field. It is reserved for flags used in common
126 126 # obsolete marker operations, to avoid repeated decoding of metadata
127 127 # entries.
128 128 #
129 129 # - 20 bytes: obsoleted changeset identifier.
130 130 #
131 131 # - N*20 bytes: new changesets identifiers.
132 132 #
133 133 # - M bytes: metadata as a sequence of nul-terminated strings. Each
134 134 # string contains a key and a value, separated by a colon ':', without
135 135 # additional encoding. Keys cannot contain '\0' or ':' and values
136 136 # cannot contain '\0'.
137 137 _fm0version = 0
138 138 _fm0fixed = '>BIB20s'
139 139 _fm0node = '20s'
140 140 _fm0fsize = struct.calcsize(_fm0fixed)
141 141 _fm0fnodesize = struct.calcsize(_fm0node)
142 142
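As a small sanity-check of the v0 fixed-part layout documented above, the header can be packed by hand; the node value is a made-up placeholder:

    import struct

    fakeprec = b'\x11' * 20               # placeholder 20-byte node id
    # a "prune"-style fixed part: 0 successors, 0 metadata bytes, no flags
    fixedpart = struct.pack('>BIB20s', 0, 0, 0, fakeprec)
    assert len(fixedpart) == struct.calcsize('>BIB20s') == 26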
143 143 def _fm0readmarkers(data, off=0):
144 144 # Loop on markers
145 145 l = len(data)
146 146 while off + _fm0fsize <= l:
147 147 # read fixed part
148 148 cur = data[off:off + _fm0fsize]
149 149 off += _fm0fsize
150 150 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
151 151 # read replacement
152 152 sucs = ()
153 153 if numsuc:
154 154 s = (_fm0fnodesize * numsuc)
155 155 cur = data[off:off + s]
156 156 sucs = _unpack(_fm0node * numsuc, cur)
157 157 off += s
158 158 # read metadata
159 159 # (metadata will be decoded on demand)
160 160 metadata = data[off:off + mdsize]
161 161 if len(metadata) != mdsize:
162 162 raise util.Abort(_('parsing obsolete marker: metadata is too '
163 163 'short, %d bytes expected, got %d')
164 164 % (mdsize, len(metadata)))
165 165 off += mdsize
166 166 metadata = _fm0decodemeta(metadata)
167 167 try:
168 168 when, offset = metadata.pop('date', '0 0').split(' ')
169 169 date = float(when), int(offset)
170 170 except ValueError:
171 171 date = (0., 0)
172 172 parents = None
173 173 if 'p2' in metadata:
174 174 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
175 175 elif 'p1' in metadata:
176 176 parents = (metadata.pop('p1', None),)
177 177 elif 'p0' in metadata:
178 178 parents = ()
179 179 if parents is not None:
180 180 try:
181 181 parents = tuple(node.bin(p) for p in parents)
182 182 # if parent content is not a nodeid, drop the data
183 183 for p in parents:
184 184 if len(p) != 20:
185 185 parents = None
186 186 break
187 187 except TypeError:
188 188 # if content cannot be translated to nodeid drop the data.
189 189 parents = None
190 190
191 191 metadata = tuple(sorted(metadata.iteritems()))
192 192
193 193 yield (pre, sucs, flags, metadata, date, parents)
194 194
195 195 def _fm0encodeonemarker(marker):
196 196 pre, sucs, flags, metadata, date, parents = marker
197 197 if flags & usingsha256:
198 198 raise util.Abort(_('cannot handle sha256 with old obsstore format'))
199 199 metadata = dict(metadata)
200 200 metadata['date'] = '%d %i' % date
201 201 if parents is not None:
202 202 if not parents:
203 203 # mark that we explicitly recorded no parents
204 204 metadata['p0'] = ''
205 205 for i, p in enumerate(parents):
206 206 metadata['p%i' % (i + 1)] = node.hex(p)
207 207 metadata = _fm0encodemeta(metadata)
208 208 numsuc = len(sucs)
209 209 format = _fm0fixed + (_fm0node * numsuc)
210 210 data = [numsuc, len(metadata), flags, pre]
211 211 data.extend(sucs)
212 212 return _pack(format, *data) + metadata
213 213
214 214 def _fm0encodemeta(meta):
215 215 """Return encoded metadata string to string mapping.
216 216
217 217 Assumes no ':' in keys and no '\0' in either keys or values."""
218 218 for key, value in meta.iteritems():
219 219 if ':' in key or '\0' in key:
220 220 raise ValueError("':' and '\0' are forbidden in metadata keys")
221 221 if '\0' in value:
222 222 raise ValueError("'\0' is forbidden in metadata values")
223 223 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
224 224
225 225 def _fm0decodemeta(data):
226 226 """Return string to string dictionary from encoded version."""
227 227 d = {}
228 228 for l in data.split('\0'):
229 229 if l:
230 230 key, value = l.split(':')
231 231 d[key] = value
232 232 return d
233 233
234 234 ## Parsing and writing of version "1"
235 235 #
236 236 # The header is followed by the markers. Each marker is made of:
237 237 #
238 238 # - uint32: total size of the marker (including this field)
239 239 #
240 240 # - float64: date in seconds since epoch
241 241 #
242 242 # - int16: timezone offset in minutes
243 243 #
244 244 # - uint16: a bit field. It is reserved for flags used in common
245 245 # obsolete marker operations, to avoid repeated decoding of metadata
246 246 # entries.
247 247 #
248 248 # - uint8: number of successors "N", can be zero.
249 249 #
250 250 # - uint8: number of parents "P", can be zero.
251 251 #
252 252 # 0: parents data stored but no parent,
253 253 # 1: one parent stored,
254 254 # 2: two parents stored,
255 255 # 3: no parent data stored
256 256 #
257 257 # - uint8: number of metadata entries M
258 258 #
259 259 # - 20 or 32 bytes: precursor changeset identifier.
260 260 #
261 261 # - N*(20 or 32) bytes: successors changesets identifiers.
262 262 #
263 263 # - P*(20 or 32) bytes: parents of the precursors changesets.
264 264 #
265 265 # - M*(uint8, uint8): size of all metadata entries (key and value)
266 266 #
267 267 # - remaining bytes: the metadata, each (key, value) pair after the other.
268 268 _fm1version = 1
269 269 _fm1fixed = '>IdhHBBB20s'
270 270 _fm1nodesha1 = '20s'
271 271 _fm1nodesha256 = '32s'
272 272 _fm1fsize = struct.calcsize(_fm1fixed)
273 273 _fm1parentnone = 3
274 274 _fm1parentshift = 14
275 275 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
276 276 _fm1metapair = 'BB'
277 277 _fm1metapairsize = struct.calcsize('BB')
278 278
279 279 def _fm1readmarkers(data, off=0):
280 280 # Loop on markers
281 281 l = len(data)
282 282 while off + _fm1fsize <= l:
283 283 # read fixed part
284 284 cur = data[off:off + _fm1fsize]
285 285 off += _fm1fsize
286 286 fixeddata = _unpack(_fm1fixed, cur)
287 287 ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata
288 288 # extract the number of parents information
289 289 if numpar == _fm1parentnone:
290 290 numpar = None
291 291 # build the date tuple (upgrade tz minutes to seconds)
292 292 date = (seconds, tz * 60)
293 293 _fm1node = _fm1nodesha1
294 294 if flags & usingsha256:
295 295 _fm1node = _fm1nodesha256
296 296 fnodesize = struct.calcsize(_fm1node)
297 297 # read replacement
298 298 sucs = ()
299 299 if numsuc:
300 300 s = (fnodesize * numsuc)
301 301 cur = data[off:off + s]
302 302 sucs = _unpack(_fm1node * numsuc, cur)
303 303 off += s
304 304 # read parents
305 305 if numpar is None:
306 306 parents = None
307 307 elif numpar == 0:
308 308 parents = ()
309 309 elif numpar: # neither None nor zero
310 310 s = (fnodesize * numpar)
311 311 cur = data[off:off + s]
312 312 parents = _unpack(_fm1node * numpar, cur)
313 313 off += s
314 314 # read metadata
315 315 metaformat = '>' + (_fm1metapair * nummeta)
316 316 s = _fm1metapairsize * nummeta
317 317 metapairsize = _unpack(metaformat, data[off:off + s])
318 318 off += s
319 319 metadata = []
320 320 for idx in xrange(0, len(metapairsize), 2):
321 321 sk = metapairsize[idx]
322 322 sv = metapairsize[idx + 1]
323 323 key = data[off:off + sk]
324 324 value = data[off + sk:off + sk + sv]
325 325 assert len(key) == sk
326 326 assert len(value) == sv
327 327 metadata.append((key, value))
328 328 off += sk + sv
329 329 metadata = tuple(metadata)
330 330
331 331 yield (prec, sucs, flags, metadata, date, parents)
332 332
333 333 def _fm1encodeonemarker(marker):
334 334 pre, sucs, flags, metadata, date, parents = marker
335 335 # determine node size
336 336 _fm1node = _fm1nodesha1
337 337 if flags & usingsha256:
338 338 _fm1node = _fm1nodesha256
339 339 numsuc = len(sucs)
340 340 numextranodes = numsuc
341 341 if parents is None:
342 342 numpar = _fm1parentnone
343 343 else:
344 344 numpar = len(parents)
345 345 numextranodes += numpar
346 346 formatnodes = _fm1node * numextranodes
347 347 formatmeta = _fm1metapair * len(metadata)
348 348 format = _fm1fixed + formatnodes + formatmeta
349 349 # tz is stored in minutes so we divide by 60
350 350 tz = date[1]//60
351 351 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
352 352 data.extend(sucs)
353 353 if parents is not None:
354 354 data.extend(parents)
355 355 totalsize = struct.calcsize(format)
356 356 for key, value in metadata:
357 357 lk = len(key)
358 358 lv = len(value)
359 359 data.append(lk)
360 360 data.append(lv)
361 361 totalsize += lk + lv
362 362 data[0] = totalsize
363 363 data = [_pack(format, *data)]
364 364 for key, value in metadata:
365 365 data.append(key)
366 366 data.append(value)
367 367 return ''.join(data)
368 368
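A round-trip sketch using the v1 encoder and parser defined above; the node ids and metadata are made-up illustrations, and the snippet assumes it runs where these module-private helpers are defined:

    prec = b'\x01' * 20
    succ = b'\x02' * 20
    mark = (prec, (succ,), 0, (('user', 'test'),), (0.0, 0), None)
    data = _fm1encodeonemarker(mark)
    decoded = list(_fm1readmarkers(data))
    assert decoded == [mark]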
369 369 # mapping to read/write various marker formats
370 370 # <version> -> (decoder, encoder)
371 371 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
372 372 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
373 373
374 374 def _readmarkers(data):
375 375 """Read and enumerate markers from raw data"""
376 376 off = 0
377 377 diskversion = _unpack('>B', data[off:off + 1])[0]
378 378 off += 1
379 379 if diskversion not in formats:
380 380 raise util.Abort(_('parsing obsolete marker: unknown version %r')
381 381 % diskversion)
382 382 return diskversion, formats[diskversion][0](data, off)
383 383
384 384 def encodemarkers(markers, addheader=False, version=_fm0version):
385 385 # Kept separate from flushmarkers(), it will be reused for
386 386 # markers exchange.
387 387 encodeone = formats[version][1]
388 388 if addheader:
389 389 yield _pack('>B', version)
390 390 for marker in markers:
391 391 yield encodeone(marker)
392 392
393 393
394 394 class marker(object):
395 395 """Wrap obsolete marker raw data"""
396 396
397 397 def __init__(self, repo, data):
398 398 # the repo argument will be used to create changectx in later version
399 399 self._repo = repo
400 400 self._data = data
401 401 self._decodedmeta = None
402 402
403 403 def __hash__(self):
404 404 return hash(self._data)
405 405
406 406 def __eq__(self, other):
407 407 if type(other) != type(self):
408 408 return False
409 409 return self._data == other._data
410 410
411 411 def precnode(self):
412 412 """Precursor changeset node identifier"""
413 413 return self._data[0]
414 414
415 415 def succnodes(self):
416 416 """List of successor changesets node identifiers"""
417 417 return self._data[1]
418 418
419 419 def parentnodes(self):
420 420 """Parents of the precursors (None if not recorded)"""
421 421 return self._data[5]
422 422
423 423 def metadata(self):
424 424 """Decoded metadata dictionary"""
425 425 return dict(self._data[3])
426 426
427 427 def date(self):
428 428 """Creation date as (unixtime, offset)"""
429 429 return self._data[4]
430 430
431 431 def flags(self):
432 432 """The flags field of the marker"""
433 433 return self._data[2]
434 434
435 435 class obsstore(object):
436 436 """Store obsolete markers
437 437
438 438 Markers can be accessed with three mappings:
439 439 - precursors[x] -> set(markers on precursors edges of x)
440 440 - successors[x] -> set(markers on successors edges of x)
441 441 - children[x] -> set(markers on precursors edges of children(x))
442 442 """
443 443
444 444 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
445 445 # prec: nodeid, precursor changesets
446 446 # succs: tuple of nodeid, successor changesets (0-N length)
447 447 # flag: integer, flag field carrying modifier for the markers (see doc)
448 448 # meta: binary blob, encoded metadata dictionary
449 449 # date: (float, int) tuple, date of marker creation
450 450 # parents: (tuple of nodeid) or None, parents of precursors
451 451 # None is used when no data has been recorded
452 452
453 def __init__(self, sopener):
453 def __init__(self, sopener, defaultformat=_fm0version):
454 454 # caches for various obsolescence related cache
455 455 self.caches = {}
456 456 self._all = []
457 457 self.precursors = {}
458 458 self.successors = {}
459 459 self.children = {}
460 460 self.sopener = sopener
461 461 data = sopener.tryread('obsstore')
462 self._version = _fm0version
462 self._version = defaultformat
463 463 if data:
464 464 self._version, markers = _readmarkers(data)
465 465 self._load(markers)
466 466
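A sketch of how a caller could wire the commit's "format.obsstore-version" option into the new defaultformat argument; the exact hook-up in localrepo.py is not part of this hunk, so the surrounding names (ui, repo) and the warning text are assumptions:

    version = ui.configint('format', 'obsstore-version', None)
    kwargs = {}
    if version is not None:
        if version not in formats:
            # fall back rather than writing an unreadable store
            ui.warn('unknown obsstore version %r, using default\n' % version)
        else:
            kwargs['defaultformat'] = version
    store = obsstore(repo.sopener, **kwargs)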
467 467 def __iter__(self):
468 468 return iter(self._all)
469 469
470 470 def __len__(self):
471 471 return len(self._all)
472 472
473 473 def __nonzero__(self):
474 474 return bool(self._all)
475 475
476 476 def create(self, transaction, prec, succs=(), flag=0, parents=None,
477 477 date=None, metadata=None):
478 478 """obsolete: add a new obsolete marker
479 479
480 480 * ensure it is hashable
481 481 * check mandatory metadata
482 482 * encode metadata
483 483
484 484 If you are a human writing code that creates markers, you want to use the
485 485 `createmarkers` function in this module instead.
486 486
487 487 Return True if a new marker has been added, False if the marker
488 488 already existed (no-op).
489 489 """
490 490 if metadata is None:
491 491 metadata = {}
492 492 if date is None:
493 493 if 'date' in metadata:
494 494 # as a courtesy for out-of-tree extensions
495 495 date = util.parsedate(metadata.pop('date'))
496 496 else:
497 497 date = util.makedate()
498 498 if len(prec) != 20:
499 499 raise ValueError(prec)
500 500 for succ in succs:
501 501 if len(succ) != 20:
502 502 raise ValueError(succ)
503 503 if prec in succs:
504 504 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
505 505
506 506 metadata = tuple(sorted(metadata.iteritems()))
507 507
508 508 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
509 509 return bool(self.add(transaction, [marker]))
510 510
511 511 def add(self, transaction, markers):
512 512 """Add new markers to the store
513 513
514 514 Take care of filtering out duplicates.
515 515 Return the number of new markers."""
516 516 if not _enabled:
517 517 raise util.Abort('obsolete feature is not enabled on this repo')
518 518 known = set(self._all)
519 519 new = []
520 520 for m in markers:
521 521 if m not in known:
522 522 known.add(m)
523 523 new.append(m)
524 524 if new:
525 525 f = self.sopener('obsstore', 'ab')
526 526 try:
527 527 # Whether the file's current position is at the beginning or at
528 528 # the end after opening a file for appending is implementation
529 529 # defined. So we must seek to the end before calling tell(),
530 530 # or we may get a zero offset for non-zero sized files on
531 531 # some platforms (issue3543).
532 532 f.seek(0, _SEEK_END)
533 533 offset = f.tell()
534 534 transaction.add('obsstore', offset)
535 535 # offset == 0: new file - add the version header
536 536 for bytes in encodemarkers(new, offset == 0, self._version):
537 537 f.write(bytes)
538 538 finally:
539 539 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
540 540 # call 'filecacheentry.refresh()' here
541 541 f.close()
542 542 self._load(new)
543 543 # new markers *may* have changed several sets. invalidate the cache.
544 544 self.caches.clear()
545 545 # records the number of new markers for the transaction hooks
546 546 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
547 547 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
548 548 return len(new)
549 549
550 550 def mergemarkers(self, transaction, data):
551 551 """merge a binary stream of markers inside the obsstore
552 552
553 553 Returns the number of new markers added."""
554 554 version, markers = _readmarkers(data)
555 555 return self.add(transaction, markers)
556 556
557 557 def _load(self, markers):
558 558 for mark in markers:
559 559 self._all.append(mark)
560 560 pre, sucs = mark[:2]
561 561 self.successors.setdefault(pre, set()).add(mark)
562 562 for suc in sucs:
563 563 self.precursors.setdefault(suc, set()).add(mark)
564 564 parents = mark[5]
565 565 if parents is not None:
566 566 for p in parents:
567 567 self.children.setdefault(p, set()).add(mark)
568 568 if node.nullid in self.precursors:
569 569 raise util.Abort(_('bad obsolescence marker detected: '
570 570 'invalid successors nullid'))
571 571 def relevantmarkers(self, nodes):
572 572 """return a set of all obsolescence markers relevant to a set of nodes.
573 573
574 574 "relevant" to a set of nodes mean:
575 575
576 576 - marker that use this changeset as successor
577 577 - prune marker of direct children on this changeset
578 578 - recursive application of the two rules on precursors of these markers
579 579
580 580 It is a set so you cannot rely on order."""
581 581
582 582 pendingnodes = set(nodes)
583 583 seenmarkers = set()
584 584 seennodes = set(pendingnodes)
585 585 precursorsmarkers = self.precursors
586 586 children = self.children
587 587 while pendingnodes:
588 588 direct = set()
589 589 for current in pendingnodes:
590 590 direct.update(precursorsmarkers.get(current, ()))
591 591 pruned = [m for m in children.get(current, ()) if not m[1]]
592 592 direct.update(pruned)
593 593 direct -= seenmarkers
594 594 pendingnodes = set([m[0] for m in direct])
595 595 seenmarkers |= direct
596 596 pendingnodes -= seennodes
597 597 seennodes |= pendingnodes
598 598 return seenmarkers
599 599
600 600 def commonversion(versions):
601 601 """Return the newest version listed in both versions and our local formats.
602 602
603 603 Returns None if no common version exists.
604 604 """
605 605 versions.sort(reverse=True)
606 606 # search for highest version known on both side
607 607 for v in versions:
608 608 if v in formats:
609 609 return v
610 610 return None
611 611
612 612 # arbitrarily picked to fit into the 8K limit from the HTTP server
613 613 # you have to take into account:
614 614 # - the version header
615 615 # - the base85 encoding
616 616 _maxpayload = 5300
617 617
618 618 def _pushkeyescape(markers):
619 619 """encode markers into a dict suitable for pushkey exchange
620 620
621 621 - binary data is base85 encoded
622 622 - split in chunks smaller than 5300 bytes"""
623 623 keys = {}
624 624 parts = []
625 625 currentlen = _maxpayload * 2 # ensure we create a new part
626 626 for marker in markers:
627 627 nextdata = _fm0encodeonemarker(marker)
628 628 if (len(nextdata) + currentlen > _maxpayload):
629 629 currentpart = []
630 630 currentlen = 0
631 631 parts.append(currentpart)
632 632 currentpart.append(nextdata)
633 633 currentlen += len(nextdata)
634 634 for idx, part in enumerate(reversed(parts)):
635 635 data = ''.join([_pack('>B', _fm0version)] + part)
636 636 keys['dump%i' % idx] = base85.b85encode(data)
637 637 return keys
638 638
639 639 def listmarkers(repo):
640 640 """List markers over pushkey"""
641 641 if not repo.obsstore:
642 642 return {}
643 643 return _pushkeyescape(repo.obsstore)
644 644
645 645 def pushmarker(repo, key, old, new):
646 646 """Push markers over pushkey"""
647 647 if not key.startswith('dump'):
648 648 repo.ui.warn(_('unknown key: %r') % key)
649 649 return 0
650 650 if old:
651 651 repo.ui.warn(_('unexpected old value for %r') % key)
652 652 return 0
653 653 data = base85.b85decode(new)
654 654 lock = repo.lock()
655 655 try:
656 656 tr = repo.transaction('pushkey: obsolete markers')
657 657 try:
658 658 repo.obsstore.mergemarkers(tr, data)
659 659 tr.close()
660 660 return 1
661 661 finally:
662 662 tr.release()
663 663 finally:
664 664 lock.release()
665 665
666 666 def getmarkers(repo, nodes=None):
667 667 """returns markers known in a repository
668 668
669 669 If <nodes> is specified, only markers "relevant" to those nodes are
670 670 returned"""
671 671 if nodes is None:
672 672 rawmarkers = repo.obsstore
673 673 else:
674 674 rawmarkers = repo.obsstore.relevantmarkers(nodes)
675 675
676 676 for markerdata in rawmarkers:
677 677 yield marker(repo, markerdata)
678 678
679 679 def relevantmarkers(repo, node):
680 680 """all obsolete markers relevant to some revision"""
681 681 for markerdata in repo.obsstore.relevantmarkers(node):
682 682 yield marker(repo, markerdata)
683 683
684 684
685 685 def precursormarkers(ctx):
686 686 """obsolete marker marking this changeset as a successors"""
687 687 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
688 688 yield marker(ctx._repo, data)
689 689
690 690 def successormarkers(ctx):
691 691 """obsolete marker making this changeset obsolete"""
692 692 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
693 693 yield marker(ctx._repo, data)
694 694
695 695 def allsuccessors(obsstore, nodes, ignoreflags=0):
696 696 """Yield node for every successor of <nodes>.
697 697
698 698 Some successors may be unknown locally.
699 699
700 700 This is a linear yield unsuited to detecting split changesets. It includes
701 701 initial nodes too."""
702 702 remaining = set(nodes)
703 703 seen = set(remaining)
704 704 while remaining:
705 705 current = remaining.pop()
706 706 yield current
707 707 for mark in obsstore.successors.get(current, ()):
708 708 # ignore marker flagged with specified flag
709 709 if mark[2] & ignoreflags:
710 710 continue
711 711 for suc in mark[1]:
712 712 if suc not in seen:
713 713 seen.add(suc)
714 714 remaining.add(suc)
715 715
716 716 def allprecursors(obsstore, nodes, ignoreflags=0):
717 717 """Yield node for every precursors of <nodes>.
718 718
719 719 Some precursors may be unknown locally.
720 720
721 721 This is a linear yield unsuited to detecting folded changesets. It includes
722 722 initial nodes too."""
723 723
724 724 remaining = set(nodes)
725 725 seen = set(remaining)
726 726 while remaining:
727 727 current = remaining.pop()
728 728 yield current
729 729 for mark in obsstore.precursors.get(current, ()):
730 730 # ignore marker flagged with specified flag
731 731 if mark[2] & ignoreflags:
732 732 continue
733 733 suc = mark[0]
734 734 if suc not in seen:
735 735 seen.add(suc)
736 736 remaining.add(suc)
737 737
738 738 def foreground(repo, nodes):
739 739 """return all nodes in the "foreground" of other node
740 740
741 741 The foreground of a revision is anything reachable using parent -> children
742 742 or precursor -> successor relation. It is very similar to "descendant" but
743 743 augmented with obsolescence information.
744 744
745 745 Beware that obsolescence cycles may arise in complex situations.
746 746 """
747 747 repo = repo.unfiltered()
748 748 foreground = set(repo.set('%ln::', nodes))
749 749 if repo.obsstore:
750 750 # We only need this complicated logic if there is obsolescence
751 751 # XXX will probably deserve an optimised revset.
752 752 nm = repo.changelog.nodemap
753 753 plen = -1
754 754 # compute the whole set of successors or descendants
755 755 while len(foreground) != plen:
756 756 plen = len(foreground)
757 757 succs = set(c.node() for c in foreground)
758 758 mutable = [c.node() for c in foreground if c.mutable()]
759 759 succs.update(allsuccessors(repo.obsstore, mutable))
760 760 known = (n for n in succs if n in nm)
761 761 foreground = set(repo.set('%ln::', known))
762 762 return set(c.node() for c in foreground)
763 763
764 764
765 765 def successorssets(repo, initialnode, cache=None):
766 766 """Return all set of successors of initial nodes
767 767
768 768 The successors set of a changeset A is a group of revisions that succeed
769 769 A as a consistent whole, each revision being only a partial
770 770 replacement. The successors set contains non-obsolete changesets only.
771 771
772 772 This function returns the full list of successor sets which is why it
773 773 returns a list of tuples and not just a single tuple. Each tuple is a valid
774 774 successors set. Note that (A,) may be a valid successors set for changeset A
775 775 (see below).
776 776
777 777 In most cases, a changeset A will have a single element (e.g. the changeset
778 778 A is replaced by A') in its successors set. Though, it is also common for a
779 779 changeset A to have no elements in its successor set (e.g. the changeset
780 780 has been pruned). Therefore, the returned list of successors sets will be
781 781 [(A',)] or [], respectively.
782 782
783 783 When a changeset A is split into A' and B', however, it will result in a
784 784 successors set containing more than a single element, i.e. [(A',B')].
785 785 Divergent changesets will result in multiple successors sets, i.e. [(A',),
786 786 (A'')].
787 787
788 788 If a changeset A is not obsolete, then it will conceptually have no
789 789 successors set. To distinguish this from a pruned changeset, the successor
790 790 set will only contain itself, i.e. [(A,)].
791 791
792 792 Finally, successors unknown locally are considered to be pruned (obsoleted
793 793 without any successors).
794 794
795 795 The optional `cache` parameter is a dictionary that may contain precomputed
796 796 successors sets. It is meant to reuse the computation of a previous call to
797 797 `successorssets` when multiple calls are made at the same time. The cache
798 798 dictionary is updated in place. The caller is responsible for its life
799 799 span. Code that makes multiple calls to `successorssets` *must* use this
800 800 cache mechanism or suffer terrible performance.
801 801
802 802 """
803 803
804 804 succmarkers = repo.obsstore.successors
805 805
806 806 # Stack of nodes we search successors sets for
807 807 toproceed = [initialnode]
808 808 # set version of above list for fast loop detection
809 809 # element added to "toproceed" must be added here
810 810 stackedset = set(toproceed)
811 811 if cache is None:
812 812 cache = {}
813 813
814 814 # This while loop is the flattened version of a recursive search for
815 815 # successors sets
816 816 #
817 817 # def successorssets(x):
818 818 # successors = directsuccessors(x)
819 819 # ss = [[]]
820 820 # for succ in directsuccessors(x):
821 821 # # product as in itertools cartesian product
822 822 # ss = product(ss, successorssets(succ))
823 823 # return ss
824 824 #
825 825 # But we can not use plain recursive calls here:
826 826 # - that would blow the python call stack
827 827 # - obsolescence markers may have cycles, we need to handle them.
828 828 #
829 829 # The `toproceed` list acts as our call stack. Every node we search
830 830 # successors sets for is stacked there.
831 831 #
832 832 # The `stackedset` is the set version of this stack, used to check if a node is
833 833 # already stacked. This check is used to detect cycles and prevent infinite
834 834 # loops.
835 835 #
836 836 # successors sets of all nodes are stored in the `cache` dictionary.
837 837 #
838 838 # After this while loop ends we use the cache to return the successors sets
839 839 # for the node requested by the caller.
840 840 while toproceed:
841 841 # Every iteration tries to compute the successors sets of the topmost
842 842 # node of the stack: CURRENT.
843 843 #
844 844 # There are four possible outcomes:
845 845 #
846 846 # 1) We already know the successors sets of CURRENT:
847 847 # -> mission accomplished, pop it from the stack.
848 848 # 2) Node is not obsolete:
849 849 # -> the node is its own successors sets. Add it to the cache.
850 850 # 3) We do not know successors set of direct successors of CURRENT:
851 851 # -> We add those successors to the stack.
852 852 # 4) We know successors sets of all direct successors of CURRENT:
853 853 # -> We can compute CURRENT successors set and add it to the
854 854 # cache.
855 855 #
856 856 current = toproceed[-1]
857 857 if current in cache:
858 858 # case (1): We already know the successors sets
859 859 stackedset.remove(toproceed.pop())
860 860 elif current not in succmarkers:
861 861 # case (2): The node is not obsolete.
862 862 if current in repo:
863 863 # We have a valid last successors.
864 864 cache[current] = [(current,)]
865 865 else:
866 866 # Final obsolete version is unknown locally.
867 867 # Do not count that as a valid successors
868 868 cache[current] = []
869 869 else:
870 870 # cases (3) and (4)
871 871 #
872 872 # We proceed in two phases. Phase 1 aims to distinguish case (3)
873 873 # from case (4):
874 874 #
875 875 # For each direct successors of CURRENT, we check whether its
876 876 # successors sets are known. If they are not, we stack the
877 877 # unknown node and proceed to the next iteration of the while
878 878 # loop. (case 3)
879 879 #
880 880 # During this step, we may detect obsolescence cycles: a node
881 881 # with unknown successors sets but already in the call stack.
882 882 # In such a situation, we arbitrarily set the successors sets of
883 883 # the node to nothing (node pruned) to break the cycle.
884 884 #
885 885 # If no break was encountered we proceed to phase 2.
886 886 #
887 887 # Phase 2 computes successors sets of CURRENT (case 4); see details
888 888 # in phase 2 itself.
889 889 #
890 890 # Note the two levels of iteration in each phase.
891 891 # - The first one handles obsolescence markers using CURRENT as
892 892 # precursor (successors markers of CURRENT).
893 893 #
894 894 # Having multiple entry here means divergence.
895 895 #
896 896 # - The second one handles successors defined in each marker.
897 897 #
898 898 # Having none means pruned node, multiple successors means split,
899 899 # single successors are standard replacement.
900 900 #
901 901 for mark in sorted(succmarkers[current]):
902 902 for suc in mark[1]:
903 903 if suc not in cache:
904 904 if suc in stackedset:
905 905 # cycle breaking
906 906 cache[suc] = []
907 907 else:
908 908 # case (3) If we have not computed successors sets
909 909 # of one of those successors we add it to the
910 910 # `toproceed` stack and stop all work for this
911 911 # iteration.
912 912 toproceed.append(suc)
913 913 stackedset.add(suc)
914 914 break
915 915 else:
916 916 continue
917 917 break
918 918 else:
919 919 # case (4): we know all successors sets of all direct
920 920 # successors
921 921 #
922 922 # Successors set contributed by each marker depends on the
923 923 # successors sets of all its "successors" node.
924 924 #
925 925 # Each different marker is a divergence in the obsolescence
926 926 # history. It contributes successors sets distinct from other
927 927 # markers.
928 928 #
929 929 # Within a marker, a successor may have divergent successors
930 930 # sets. In such a case, the marker will contribute multiple
931 931 # divergent successors sets. If multiple successors have
932 932 # divergent successors sets, a Cartesian product is used.
933 933 #
934 934 # At the end we post-process successors sets to remove
935 935 # duplicated entry and successors set that are strict subset of
936 936 # another one.
937 937 succssets = []
938 938 for mark in sorted(succmarkers[current]):
939 939 # successors sets contributed by this marker
940 940 markss = [[]]
941 941 for suc in mark[1]:
942 942 # cardinal product with previous successors
943 943 productresult = []
944 944 for prefix in markss:
945 945 for suffix in cache[suc]:
946 946 newss = list(prefix)
947 947 for part in suffix:
948 948 # do not duplicate entries in a successors set;
949 949 # first entry wins.
950 950 if part not in newss:
951 951 newss.append(part)
952 952 productresult.append(newss)
953 953 markss = productresult
954 954 succssets.extend(markss)
955 955 # remove duplicates and subsets
956 956 seen = []
957 957 final = []
958 958 candidate = sorted(((set(s), s) for s in succssets if s),
959 959 key=lambda x: len(x[1]), reverse=True)
960 960 for setversion, listversion in candidate:
961 961 for seenset in seen:
962 962 if setversion.issubset(seenset):
963 963 break
964 964 else:
965 965 final.append(listversion)
966 966 seen.append(setversion)
967 967 final.reverse() # put small successors set first
968 968 cache[current] = final
969 969 return cache[initialnode]
970 970
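A usage sketch showing the cache sharing that the successorssets() docstring insists on; `repo` and the revset are assumed context:

    cache = {}
    for ctx in repo.set('draft()'):
        ssets = successorssets(repo, ctx.node(), cache)
        # [] -> pruned, [(ctx.node(),)] -> not obsolete,
        # [(a, b)] -> split, [(a,), (b,)] -> divergent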
971 971 def _knownrevs(repo, nodes):
972 972 """yield revision numbers of known nodes passed in parameters
973 973
974 974 Unknown revisions are silently ignored."""
975 975 torev = repo.changelog.nodemap.get
976 976 for n in nodes:
977 977 rev = torev(n)
978 978 if rev is not None:
979 979 yield rev
980 980
981 981 # mapping of 'set-name' -> <function to compute this set>
982 982 cachefuncs = {}
983 983 def cachefor(name):
984 984 """Decorator to register a function as computing the cache for a set"""
985 985 def decorator(func):
986 986 assert name not in cachefuncs
987 987 cachefuncs[name] = func
988 988 return func
989 989 return decorator
990 990
991 991 def getrevs(repo, name):
992 992 """Return the set of revision that belong to the <name> set
993 993
994 994 Such access may compute the set and cache it for future use"""
995 995 repo = repo.unfiltered()
996 996 if not repo.obsstore:
997 997 return frozenset()
998 998 if name not in repo.obsstore.caches:
999 999 repo.obsstore.caches[name] = cachefuncs[name](repo)
1000 1000 return repo.obsstore.caches[name]
1001 1001
1002 1002 # To keep it simple we need to invalidate the obsolescence cache when:
1003 1003 #
1004 1004 # - a new changeset is added
1005 1005 # - the public phase is changed
1006 1006 # - obsolescence markers are added
1007 1007 # - strip is used on a repo
1008 1008 def clearobscaches(repo):
1009 1009 """Remove all obsolescence related cache from a repo
1010 1010
1011 1011 This removes all caches in the obsstore if the obsstore already exists on the
1012 1012 repo.
1013 1013
1014 1014 (We could be smarter here given the exact event that trigger the cache
1015 1015 clearing)"""
1016 1016 # only clear caches if there is obsstore data in this repo
1017 1017 if 'obsstore' in repo._filecache:
1018 1018 repo.obsstore.caches.clear()
1019 1019
1020 1020 @cachefor('obsolete')
1021 1021 def _computeobsoleteset(repo):
1022 1022 """the set of obsolete revisions"""
1023 1023 obs = set()
1024 1024 getrev = repo.changelog.nodemap.get
1025 1025 getphase = repo._phasecache.phase
1026 1026 for n in repo.obsstore.successors:
1027 1027 rev = getrev(n)
1028 1028 if rev is not None and getphase(repo, rev):
1029 1029 obs.add(rev)
1030 1030 return obs
1031 1031
1032 1032 @cachefor('unstable')
1033 1033 def _computeunstableset(repo):
1034 1034 """the set of non obsolete revisions with obsolete parents"""
1035 1035 # revset is not efficient enough here
1036 1036 # we do (obsolete()::) - obsolete() by hand
1037 1037 obs = getrevs(repo, 'obsolete')
1038 1038 if not obs:
1039 1039 return set()
1040 1040 cl = repo.changelog
1041 1041 return set(r for r in cl.descendants(obs) if r not in obs)
1042 1042
1043 1043 @cachefor('suspended')
1044 1044 def _computesuspendedset(repo):
1045 1045 """the set of obsolete parents with non obsolete descendants"""
1046 1046 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1047 1047 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1048 1048
1049 1049 @cachefor('extinct')
1050 1050 def _computeextinctset(repo):
1051 1051 """the set of obsolete parents without non obsolete descendants"""
1052 1052 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1053 1053
1054 1054
1055 1055 @cachefor('bumped')
1056 1056 def _computebumpedset(repo):
1057 1057 """the set of revs trying to obsolete public revisions"""
1058 1058 bumped = set()
1059 1059 # util function (avoid attribute lookup in the loop)
1060 1060 phase = repo._phasecache.phase # would be faster to grab the full list
1061 1061 public = phases.public
1062 1062 cl = repo.changelog
1063 1063 torev = cl.nodemap.get
1064 1064 obs = getrevs(repo, 'obsolete')
1065 1065 for rev in repo:
1066 1066 # We only evaluate mutable, non-obsolete revision
1067 1067 if (public < phase(repo, rev)) and (rev not in obs):
1068 1068 node = cl.node(rev)
1069 1069 # (future) A cache of precursors may be worthwhile if splits are very common
1070 1070 for pnode in allprecursors(repo.obsstore, [node],
1071 1071 ignoreflags=bumpedfix):
1072 1072 prev = torev(pnode) # unfiltered! but so is phasecache
1073 1073 if (prev is not None) and (phase(repo, prev) <= public):
1074 1074 # we have a public precursor
1075 1075 bumped.add(rev)
1076 1076 break # Next draft!
1077 1077 return bumped
1078 1078
1079 1079 @cachefor('divergent')
1080 1080 def _computedivergentset(repo):
1081 1081 """the set of rev that compete to be the final successors of some revision.
1082 1082 """
1083 1083 divergent = set()
1084 1084 obsstore = repo.obsstore
1085 1085 newermap = {}
1086 1086 for ctx in repo.set('(not public()) - obsolete()'):
1087 1087 mark = obsstore.precursors.get(ctx.node(), ())
1088 1088 toprocess = set(mark)
1089 1089 while toprocess:
1090 1090 prec = toprocess.pop()[0]
1091 1091 if prec not in newermap:
1092 1092 successorssets(repo, prec, newermap)
1093 1093 newer = [n for n in newermap[prec] if n]
1094 1094 if len(newer) > 1:
1095 1095 divergent.add(ctx.rev())
1096 1096 break
1097 1097 toprocess.update(obsstore.precursors.get(prec, ()))
1098 1098 return divergent
1099 1099
1100 1100
1101 1101 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
1102 1102 """Add obsolete markers between changesets in a repo
1103 1103
1104 1104 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1105 1105 tuples. `old` and `new` are changectxs. metadata is an optional dictionary
1106 1106 containing metadata for this marker only. It is merged with the global
1107 1107 metadata specified through the `metadata` argument of this function.
1108 1108
1109 1109 Trying to obsolete a public changeset will raise an exception.
1110 1110
1111 1111 Current user and date are used except if specified otherwise in the
1112 1112 metadata attribute.
1113 1113
1114 1114 This function operates within a transaction of its own, but does
1115 1115 not take any lock on the repo.
1116 1116 """
1117 1117 # prepare metadata
1118 1118 if metadata is None:
1119 1119 metadata = {}
1120 1120 if 'user' not in metadata:
1121 1121 metadata['user'] = repo.ui.username()
1122 1122 tr = repo.transaction('add-obsolescence-marker')
1123 1123 try:
1124 1124 for rel in relations:
1125 1125 prec = rel[0]
1126 1126 sucs = rel[1]
1127 1127 localmetadata = metadata.copy()
1128 1128 if 2 < len(rel):
1129 1129 localmetadata.update(rel[2])
1130 1130
1131 1131 if not prec.mutable():
1132 1132 raise util.Abort("cannot obsolete immutable changeset: %s"
1133 1133 % prec)
1134 1134 nprec = prec.node()
1135 1135 nsucs = tuple(s.node() for s in sucs)
1136 1136 npare = None
1137 1137 if not nsucs:
1138 1138 npare = tuple(p.node() for p in prec.parents())
1139 1139 if nprec in nsucs:
1140 1140 raise util.Abort("changeset %s cannot obsolete itself" % prec)
1141 1141 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1142 1142 date=date, metadata=localmetadata)
1143 1143 repo.filteredrevcache.clear()
1144 1144 tr.close()
1145 1145 finally:
1146 1146 tr.release()
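A usage sketch of the relations format accepted by createmarkers() above; the revisions and metadata values are illustrative (and assumed to be mutable), and a caller would normally hold the repo lock:

    old, new = repo['10'], repo['11']          # hypothetical draft changectxs
    createmarkers(repo, [(old, (new,), {'note': 'rewritten'})])
    # pruning: an empty tuple of successors
    createmarkers(repo, [(repo['12'], ())])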