manifest: add manifestlog.add...
Durham Goode
r29962:6b5a9a01 default
@@ -1,1996 +1,1996 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 urlerr = util.urlerr
62 62 urlreq = util.urlreq
63 63
64 64 class repofilecache(scmutil.filecache):
65 65 """All filecache usage on repo are done for logic that should be unfiltered
66 66 """
67 67
68 68 def __get__(self, repo, type=None):
69 69 if repo is None:
70 70 return self
71 71 return super(repofilecache, self).__get__(repo.unfiltered(), type)
72 72 def __set__(self, repo, value):
73 73 return super(repofilecache, self).__set__(repo.unfiltered(), value)
74 74 def __delete__(self, repo):
75 75 return super(repofilecache, self).__delete__(repo.unfiltered())
76 76
77 77 class storecache(repofilecache):
78 78 """filecache for files in the store"""
79 79 def join(self, obj, fname):
80 80 return obj.sjoin(fname)
81 81
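# Illustrative sketch of how these cache decorators are used further down
# in this module (mirroring the real _bookmarks and changelog properties):
# the decorated function runs once and is re-evaluated only when the
# backing file changes on disk.
#
#     @repofilecache('bookmarks', 'bookmarks.current')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
#     @storecache('00changelog.i')
#     def changelog(self):
#         ...
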
82 82 class unfilteredpropertycache(util.propertycache):
83 83 """propertycache that apply to unfiltered repo only"""
84 84
85 85 def __get__(self, repo, type=None):
86 86 unfi = repo.unfiltered()
87 87 if unfi is repo:
88 88 return super(unfilteredpropertycache, self).__get__(unfi)
89 89 return getattr(unfi, self.name)
90 90
91 91 class filteredpropertycache(util.propertycache):
92 92 """propertycache that must take filtering in account"""
93 93
94 94 def cachevalue(self, obj, value):
95 95 object.__setattr__(obj, self.name, value)
96 96
97 97
98 98 def hasunfilteredcache(repo, name):
99 99 """check if a repo has an unfilteredpropertycache value for <name>"""
100 100 return name in vars(repo.unfiltered())
101 101
102 102 def unfilteredmethod(orig):
103 103 """decorate method that always need to be run on unfiltered version"""
104 104 def wrapper(repo, *args, **kwargs):
105 105 return orig(repo.unfiltered(), *args, **kwargs)
106 106 return wrapper
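
# Illustrative use of the decorator: any method wrapped this way sees the
# unfiltered repo even when invoked on a filtered repoview, as commit()
# does later in this file:
#
#     @unfilteredmethod
#     def commit(self, text="", user=None, date=None, ...):
#         ...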
107 107
108 108 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
109 109 'unbundle'))
110 110 legacycaps = moderncaps.union(set(['changegroupsubset']))
111 111
112 112 class localpeer(peer.peerrepository):
113 113 '''peer for a local repo; reflects only the most recent API'''
114 114
115 115 def __init__(self, repo, caps=moderncaps):
116 116 peer.peerrepository.__init__(self)
117 117 self._repo = repo.filtered('served')
118 118 self.ui = repo.ui
119 119 self._caps = repo._restrictcapabilities(caps)
120 120 self.requirements = repo.requirements
121 121 self.supportedformats = repo.supportedformats
122 122
123 123 def close(self):
124 124 self._repo.close()
125 125
126 126 def _capabilities(self):
127 127 return self._caps
128 128
129 129 def local(self):
130 130 return self._repo
131 131
132 132 def canpush(self):
133 133 return True
134 134
135 135 def url(self):
136 136 return self._repo.url()
137 137
138 138 def lookup(self, key):
139 139 return self._repo.lookup(key)
140 140
141 141 def branchmap(self):
142 142 return self._repo.branchmap()
143 143
144 144 def heads(self):
145 145 return self._repo.heads()
146 146
147 147 def known(self, nodes):
148 148 return self._repo.known(nodes)
149 149
150 150 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
151 151 **kwargs):
152 152 cg = exchange.getbundle(self._repo, source, heads=heads,
153 153 common=common, bundlecaps=bundlecaps, **kwargs)
154 154 if bundlecaps is not None and 'HG20' in bundlecaps:
155 155 # When requesting a bundle2, getbundle returns a stream to make
156 156 # the wire-level function happier. We need to build a proper
157 157 # object from it in the local peer.
158 158 cg = bundle2.getunbundler(self.ui, cg)
159 159 return cg
160 160
161 161 # TODO We might want to move the next two calls into legacypeer and add
162 162 # unbundle instead.
163 163
164 164 def unbundle(self, cg, heads, url):
165 165 """apply a bundle on a repo
166 166
167 167 This function handles the repo locking itself."""
168 168 try:
169 169 try:
170 170 cg = exchange.readbundle(self.ui, cg, None)
171 171 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
172 172 if util.safehasattr(ret, 'getchunks'):
173 173 # This is a bundle20 object, turn it into an unbundler.
174 174 # This little dance should be dropped eventually when the
175 175 # API is finally improved.
176 176 stream = util.chunkbuffer(ret.getchunks())
177 177 ret = bundle2.getunbundler(self.ui, stream)
178 178 return ret
179 179 except Exception as exc:
180 180 # If the exception contains output salvaged from a bundle2
181 181 # reply, we need to make sure it is printed before continuing
182 182 # to fail. So we build a bundle2 with such output and consume
183 183 # it directly.
184 184 #
185 185 # This is not very elegant but allows a "simple" solution for
186 186 # issue4594
187 187 output = getattr(exc, '_bundle2salvagedoutput', ())
188 188 if output:
189 189 bundler = bundle2.bundle20(self._repo.ui)
190 190 for out in output:
191 191 bundler.addpart(out)
192 192 stream = util.chunkbuffer(bundler.getchunks())
193 193 b = bundle2.getunbundler(self.ui, stream)
194 194 bundle2.processbundle(self._repo, b)
195 195 raise
196 196 except error.PushRaced as exc:
197 197 raise error.ResponseError(_('push failed:'), str(exc))
198 198
199 199 def lock(self):
200 200 return self._repo.lock()
201 201
202 202 def addchangegroup(self, cg, source, url):
203 203 return cg.apply(self._repo, source, url)
204 204
205 205 def pushkey(self, namespace, key, old, new):
206 206 return self._repo.pushkey(namespace, key, old, new)
207 207
208 208 def listkeys(self, namespace):
209 209 return self._repo.listkeys(namespace)
210 210
211 211 def debugwireargs(self, one, two, three=None, four=None, five=None):
212 212 '''used to test argument passing over the wire'''
213 213 return "%s %s %s %s %s" % (one, two, three, four, five)
214 214
215 215 class locallegacypeer(localpeer):
216 216 '''peer extension which implements legacy methods too; used for tests with
217 217 restricted capabilities'''
218 218
219 219 def __init__(self, repo):
220 220 localpeer.__init__(self, repo, caps=legacycaps)
221 221
222 222 def branches(self, nodes):
223 223 return self._repo.branches(nodes)
224 224
225 225 def between(self, pairs):
226 226 return self._repo.between(pairs)
227 227
228 228 def changegroup(self, basenodes, source):
229 229 return changegroup.changegroup(self._repo, basenodes, source)
230 230
231 231 def changegroupsubset(self, bases, heads, source):
232 232 return changegroup.changegroupsubset(self._repo, bases, heads, source)
233 233
234 234 class localrepository(object):
235 235
236 236 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
237 237 'manifestv2'))
238 238 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
239 239 'dotencode'))
240 240 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
241 241 filtername = None
242 242
243 243 # a list of (ui, featureset) functions.
244 244 # only functions defined in module of enabled extensions are invoked
245 245 featuresetupfuncs = set()
246 246
247 247 def __init__(self, baseui, path=None, create=False):
248 248 self.requirements = set()
249 249 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
250 250 self.wopener = self.wvfs
251 251 self.root = self.wvfs.base
252 252 self.path = self.wvfs.join(".hg")
253 253 self.origroot = path
254 254 self.auditor = pathutil.pathauditor(self.root, self._checknested)
255 255 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
256 256 realfs=False)
257 257 self.vfs = scmutil.vfs(self.path)
258 258 self.opener = self.vfs
259 259 self.baseui = baseui
260 260 self.ui = baseui.copy()
261 261 self.ui.copy = baseui.copy # prevent copying repo configuration
262 262 # A list of callbacks to shape the phases if no data were found.
263 263 # Callbacks are in the form: func(repo, roots) --> processed root.
264 264 # This list is to be filled by extensions during repo setup.
265 265 self._phasedefaults = []
266 266 try:
267 267 self.ui.readconfig(self.join("hgrc"), self.root)
268 268 extensions.loadall(self.ui)
269 269 except IOError:
270 270 pass
271 271
272 272 if self.featuresetupfuncs:
273 273 self.supported = set(self._basesupported) # use private copy
274 274 extmods = set(m.__name__ for n, m
275 275 in extensions.extensions(self.ui))
276 276 for setupfunc in self.featuresetupfuncs:
277 277 if setupfunc.__module__ in extmods:
278 278 setupfunc(self.ui, self.supported)
279 279 else:
280 280 self.supported = self._basesupported
281 281
282 282 if not self.vfs.isdir():
283 283 if create:
284 284 self.requirements = newreporequirements(self)
285 285
286 286 if not self.wvfs.exists():
287 287 self.wvfs.makedirs()
288 288 self.vfs.makedir(notindexed=True)
289 289
290 290 if 'store' in self.requirements:
291 291 self.vfs.mkdir("store")
292 292
293 293 # create an invalid changelog
294 294 self.vfs.append(
295 295 "00changelog.i",
296 296 '\0\0\0\2' # represents revlogv2
297 297 ' dummy changelog to prevent using the old repo layout'
298 298 )
299 299 else:
300 300 raise error.RepoError(_("repository %s not found") % path)
301 301 elif create:
302 302 raise error.RepoError(_("repository %s already exists") % path)
303 303 else:
304 304 try:
305 305 self.requirements = scmutil.readrequires(
306 306 self.vfs, self.supported)
307 307 except IOError as inst:
308 308 if inst.errno != errno.ENOENT:
309 309 raise
310 310
311 311 self.sharedpath = self.path
312 312 try:
313 313 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
314 314 realpath=True)
315 315 s = vfs.base
316 316 if not vfs.exists():
317 317 raise error.RepoError(
318 318 _('.hg/sharedpath points to nonexistent directory %s') % s)
319 319 self.sharedpath = s
320 320 except IOError as inst:
321 321 if inst.errno != errno.ENOENT:
322 322 raise
323 323
324 324 self.store = store.store(
325 325 self.requirements, self.sharedpath, scmutil.vfs)
326 326 self.spath = self.store.path
327 327 self.svfs = self.store.vfs
328 328 self.sjoin = self.store.join
329 329 self.vfs.createmode = self.store.createmode
330 330 self._applyopenerreqs()
331 331 if create:
332 332 self._writerequirements()
333 333
334 334 self._dirstatevalidatewarned = False
335 335
336 336 self._branchcaches = {}
337 337 self._revbranchcache = None
338 338 self.filterpats = {}
339 339 self._datafilters = {}
340 340 self._transref = self._lockref = self._wlockref = None
341 341
342 342 # A cache for various files under .hg/ that tracks file changes
343 343 # (used by the filecache decorator)
344 344 #
345 345 # Maps a property name to its util.filecacheentry
346 346 self._filecache = {}
347 347
348 348 # holds sets of revisions to be filtered
349 349 # should be cleared when something might have changed the filter value:
350 350 # - new changesets,
351 351 # - phase change,
352 352 # - new obsolescence marker,
353 353 # - working directory parent change,
354 354 # - bookmark changes
355 355 self.filteredrevcache = {}
356 356
357 357 # generic mapping between names and nodes
358 358 self.names = namespaces.namespaces()
359 359
360 360 def close(self):
361 361 self._writecaches()
362 362
363 363 def _writecaches(self):
364 364 if self._revbranchcache:
365 365 self._revbranchcache.write()
366 366
367 367 def _restrictcapabilities(self, caps):
368 368 if self.ui.configbool('experimental', 'bundle2-advertise', True):
369 369 caps = set(caps)
370 370 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
371 371 caps.add('bundle2=' + urlreq.quote(capsblob))
372 372 return caps
373 373
374 374 def _applyopenerreqs(self):
375 375 self.svfs.options = dict((r, 1) for r in self.requirements
376 376 if r in self.openerreqs)
377 377 # experimental config: format.chunkcachesize
378 378 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
379 379 if chunkcachesize is not None:
380 380 self.svfs.options['chunkcachesize'] = chunkcachesize
381 381 # experimental config: format.maxchainlen
382 382 maxchainlen = self.ui.configint('format', 'maxchainlen')
383 383 if maxchainlen is not None:
384 384 self.svfs.options['maxchainlen'] = maxchainlen
385 385 # experimental config: format.manifestcachesize
386 386 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
387 387 if manifestcachesize is not None:
388 388 self.svfs.options['manifestcachesize'] = manifestcachesize
389 389 # experimental config: format.aggressivemergedeltas
390 390 aggressivemergedeltas = self.ui.configbool('format',
391 391 'aggressivemergedeltas', False)
392 392 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
393 393 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
394 394
395 395 def _writerequirements(self):
396 396 scmutil.writerequires(self.vfs, self.requirements)
397 397
398 398 def _checknested(self, path):
399 399 """Determine if path is a legal nested repository."""
400 400 if not path.startswith(self.root):
401 401 return False
402 402 subpath = path[len(self.root) + 1:]
403 403 normsubpath = util.pconvert(subpath)
404 404
405 405 # XXX: Checking against the current working copy is wrong in
406 406 # the sense that it can reject things like
407 407 #
408 408 # $ hg cat -r 10 sub/x.txt
409 409 #
410 410 # if sub/ is no longer a subrepository in the working copy
411 411 # parent revision.
412 412 #
413 413 # However, it can of course also allow things that would have
414 414 # been rejected before, such as the above cat command if sub/
415 415 # is a subrepository now, but was a normal directory before.
416 416 # The old path auditor would have rejected by mistake since it
417 417 # panics when it sees sub/.hg/.
418 418 #
419 419 # All in all, checking against the working copy seems sensible
420 420 # since we want to prevent access to nested repositories on
421 421 # the filesystem *now*.
422 422 ctx = self[None]
423 423 parts = util.splitpath(subpath)
424 424 while parts:
425 425 prefix = '/'.join(parts)
426 426 if prefix in ctx.substate:
427 427 if prefix == normsubpath:
428 428 return True
429 429 else:
430 430 sub = ctx.sub(prefix)
431 431 return sub.checknested(subpath[len(prefix) + 1:])
432 432 else:
433 433 parts.pop()
434 434 return False
435 435
436 436 def peer(self):
437 437 return localpeer(self) # not cached to avoid reference cycle
438 438
439 439 def unfiltered(self):
440 440 """Return unfiltered version of the repository
441 441
442 442 Intended to be overridden by filtered repos."""
443 443 return self
444 444
445 445 def filtered(self, name):
446 446 """Return a filtered version of a repository"""
447 447 # build a new class with the mixin and the current class
448 448 # (possibly subclass of the repo)
449 449 class proxycls(repoview.repoview, self.unfiltered().__class__):
450 450 pass
451 451 return proxycls(self, name)
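
    # Illustrative sketch: 'visible' and 'served' are standard filter names
    # registered by the repoview module:
    #
    #     served = repo.filtered('served')    # view advertised to peers
    #     visible = repo.filtered('visible')  # hides hidden changesets
    #     assert visible.unfiltered() is repo.unfiltered()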
452 452
453 453 @repofilecache('bookmarks', 'bookmarks.current')
454 454 def _bookmarks(self):
455 455 return bookmarks.bmstore(self)
456 456
457 457 @property
458 458 def _activebookmark(self):
459 459 return self._bookmarks.active
460 460
461 461 def bookmarkheads(self, bookmark):
462 462 name = bookmark.split('@', 1)[0]
463 463 heads = []
464 464 for mark, n in self._bookmarks.iteritems():
465 465 if mark.split('@', 1)[0] == name:
466 466 heads.append(n)
467 467 return heads
468 468
469 469 # _phaserevs and _phasesets depend on changelog. what we need is to
470 470 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
471 471 # can't be easily expressed in filecache mechanism.
472 472 @storecache('phaseroots', '00changelog.i')
473 473 def _phasecache(self):
474 474 return phases.phasecache(self, self._phasedefaults)
475 475
476 476 @storecache('obsstore')
477 477 def obsstore(self):
478 478 # read default format for new obsstore.
479 479 # developer config: format.obsstore-version
480 480 defaultformat = self.ui.configint('format', 'obsstore-version', None)
481 481 # rely on obsstore class default when possible.
482 482 kwargs = {}
483 483 if defaultformat is not None:
484 484 kwargs['defaultformat'] = defaultformat
485 485 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
486 486 store = obsolete.obsstore(self.svfs, readonly=readonly,
487 487 **kwargs)
488 488 if store and readonly:
489 489 self.ui.warn(
490 490 _('obsolete feature not enabled but %i markers found!\n')
491 491 % len(list(store)))
492 492 return store
493 493
494 494 @storecache('00changelog.i')
495 495 def changelog(self):
496 496 c = changelog.changelog(self.svfs)
497 497 if 'HG_PENDING' in os.environ:
498 498 p = os.environ['HG_PENDING']
499 499 if p.startswith(self.root):
500 500 c.readpending('00changelog.i.a')
501 501 return c
502 502
503 503 @storecache('00manifest.i')
504 504 def manifest(self):
505 505 return manifest.manifest(self.svfs)
506 506
507 507 @property
508 508 def manifestlog(self):
509 509 return manifest.manifestlog(self.svfs, self)
510 510
511 511 @repofilecache('dirstate')
512 512 def dirstate(self):
513 513 return dirstate.dirstate(self.vfs, self.ui, self.root,
514 514 self._dirstatevalidate)
515 515
516 516 def _dirstatevalidate(self, node):
517 517 try:
518 518 self.changelog.rev(node)
519 519 return node
520 520 except error.LookupError:
521 521 if not self._dirstatevalidatewarned:
522 522 self._dirstatevalidatewarned = True
523 523 self.ui.warn(_("warning: ignoring unknown"
524 524 " working parent %s!\n") % short(node))
525 525 return nullid
526 526
527 527 def __getitem__(self, changeid):
528 528 if changeid is None or changeid == wdirrev:
529 529 return context.workingctx(self)
530 530 if isinstance(changeid, slice):
531 531 return [context.changectx(self, i)
532 532 for i in xrange(*changeid.indices(len(self)))
533 533 if i not in self.changelog.filteredrevs]
534 534 return context.changectx(self, changeid)
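
    # Illustrative lookups accepted by __getitem__, assuming a repo object:
    #
    #     repo[None]    # workingctx for the working directory
    #     repo['tip']   # changectx from a symbolic or hex identifier
    #     repo[5]       # changectx from a revision number
    #     repo[0:3]     # list of changectxs, skipping filtered revisions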
535 535
536 536 def __contains__(self, changeid):
537 537 try:
538 538 self[changeid]
539 539 return True
540 540 except error.RepoLookupError:
541 541 return False
542 542
543 543 def __nonzero__(self):
544 544 return True
545 545
546 546 def __len__(self):
547 547 return len(self.changelog)
548 548
549 549 def __iter__(self):
550 550 return iter(self.changelog)
551 551
552 552 def revs(self, expr, *args):
553 553 '''Find revisions matching a revset.
554 554
555 555 The revset is specified as a string ``expr`` that may contain
556 556 %-formatting to escape certain types. See ``revset.formatspec``.
557 557
558 558 Revset aliases from the configuration are not expanded. To expand
559 559 user aliases, consider calling ``scmutil.revrange()``.
560 560
561 561 Returns a revset.abstractsmartset, which is a list-like interface
562 562 that contains integer revisions.
563 563 '''
564 564 expr = revset.formatspec(expr, *args)
565 565 m = revset.match(None, expr)
566 566 return m(self)
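
    # Illustrative sketch: %-formatting escapes the arguments when building
    # the expression ('default' is just an example value):
    #
    #     for rev in repo.revs('branch(%s) and not merge()', 'default'):
    #         ...  # rev is an integer revision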
567 567
568 568 def set(self, expr, *args):
569 569 '''Find revisions matching a revset and emit changectx instances.
570 570
571 571 This is a convenience wrapper around ``revs()`` that iterates the
572 572 result and is a generator of changectx instances.
573 573
574 574 Revset aliases from the configuration are not expanded. To expand
575 575 user aliases, consider calling ``scmutil.revrange()``.
576 576 '''
577 577 for r in self.revs(expr, *args):
578 578 yield self[r]
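
    # Illustrative sketch, like revs() but yielding contexts (the file
    # pattern is an example value):
    #
    #     for ctx in repo.set('modifies(%s)', 'mercurial/localrepo.py'):
    #         print ctx.rev(), ctx.description()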
579 579
580 580 def url(self):
581 581 return 'file:' + self.root
582 582
583 583 def hook(self, name, throw=False, **args):
584 584 """Call a hook, passing this repo instance.
585 585
586 586 This is a convenience method to aid invoking hooks. Extensions likely
587 587 won't call this unless they have registered a custom hook or are
588 588 replacing code that is expected to call a hook.
589 589 """
590 590 return hook.hook(self.ui, self, name, throw, **args)
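
    # Illustrative sketch: keyword arguments are exposed to shell hooks as
    # HG_* environment variables (the hook name here is hypothetical):
    #
    #     repo.hook('myhook', throw=True, node=hex(node))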
591 591
592 592 @unfilteredmethod
593 593 def _tag(self, names, node, message, local, user, date, extra=None,
594 594 editor=False):
595 595 if isinstance(names, str):
596 596 names = (names,)
597 597
598 598 branches = self.branchmap()
599 599 for name in names:
600 600 self.hook('pretag', throw=True, node=hex(node), tag=name,
601 601 local=local)
602 602 if name in branches:
603 603 self.ui.warn(_("warning: tag %s conflicts with existing"
604 604 " branch name\n") % name)
605 605
606 606 def writetags(fp, names, munge, prevtags):
607 607 fp.seek(0, 2)
608 608 if prevtags and prevtags[-1] != '\n':
609 609 fp.write('\n')
610 610 for name in names:
611 611 if munge:
612 612 m = munge(name)
613 613 else:
614 614 m = name
615 615
616 616 if (self._tagscache.tagtypes and
617 617 name in self._tagscache.tagtypes):
618 618 old = self.tags().get(name, nullid)
619 619 fp.write('%s %s\n' % (hex(old), m))
620 620 fp.write('%s %s\n' % (hex(node), m))
621 621 fp.close()
622 622
623 623 prevtags = ''
624 624 if local:
625 625 try:
626 626 fp = self.vfs('localtags', 'r+')
627 627 except IOError:
628 628 fp = self.vfs('localtags', 'a')
629 629 else:
630 630 prevtags = fp.read()
631 631
632 632 # local tags are stored in the current charset
633 633 writetags(fp, names, None, prevtags)
634 634 for name in names:
635 635 self.hook('tag', node=hex(node), tag=name, local=local)
636 636 return
637 637
638 638 try:
639 639 fp = self.wfile('.hgtags', 'rb+')
640 640 except IOError as e:
641 641 if e.errno != errno.ENOENT:
642 642 raise
643 643 fp = self.wfile('.hgtags', 'ab')
644 644 else:
645 645 prevtags = fp.read()
646 646
647 647 # committed tags are stored in UTF-8
648 648 writetags(fp, names, encoding.fromlocal, prevtags)
649 649
650 650 fp.close()
651 651
652 652 self.invalidatecaches()
653 653
654 654 if '.hgtags' not in self.dirstate:
655 655 self[None].add(['.hgtags'])
656 656
657 657 m = matchmod.exact(self.root, '', ['.hgtags'])
658 658 tagnode = self.commit(message, user, date, extra=extra, match=m,
659 659 editor=editor)
660 660
661 661 for name in names:
662 662 self.hook('tag', node=hex(node), tag=name, local=local)
663 663
664 664 return tagnode
665 665
666 666 def tag(self, names, node, message, local, user, date, editor=False):
667 667 '''tag a revision with one or more symbolic names.
668 668
669 669 names is a list of strings or, when adding a single tag, names may be a
670 670 string.
671 671
672 672 if local is True, the tags are stored in a per-repository file.
673 673 otherwise, they are stored in the .hgtags file, and a new
674 674 changeset is committed with the change.
675 675
676 676 keyword arguments:
677 677
678 678 local: whether to store tags in non-version-controlled file
679 679 (default False)
680 680
681 681 message: commit message to use if committing
682 682
683 683 user: name of user to use if committing
684 684
685 685 date: date tuple to use if committing'''
686 686
687 687 if not local:
688 688 m = matchmod.exact(self.root, '', ['.hgtags'])
689 689 if any(self.status(match=m, unknown=True, ignored=True)):
690 690 raise error.Abort(_('working copy of .hgtags is changed'),
691 691 hint=_('please commit .hgtags manually'))
692 692
693 693 self.tags() # instantiate the cache
694 694 self._tag(names, node, message, local, user, date, editor=editor)
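
    # Illustrative sketch of tagging the current tip (all values are
    # example data):
    #
    #     repo.tag(['v1.0'], repo['tip'].node(), 'Added tag v1.0',
    #              local=False, user=None, date=None)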
695 695
696 696 @filteredpropertycache
697 697 def _tagscache(self):
698 698 '''Returns a tagscache object that contains various tag-related
699 699 caches.'''
700 700
701 701 # This simplifies its cache management by having one decorated
702 702 # function (this one) and the rest simply fetch things from it.
703 703 class tagscache(object):
704 704 def __init__(self):
705 705 # These two define the set of tags for this repository. tags
706 706 # maps tag name to node; tagtypes maps tag name to 'global' or
707 707 # 'local'. (Global tags are defined by .hgtags across all
708 708 # heads, and local tags are defined in .hg/localtags.)
709 709 # They constitute the in-memory cache of tags.
710 710 self.tags = self.tagtypes = None
711 711
712 712 self.nodetagscache = self.tagslist = None
713 713
714 714 cache = tagscache()
715 715 cache.tags, cache.tagtypes = self._findtags()
716 716
717 717 return cache
718 718
719 719 def tags(self):
720 720 '''return a mapping of tag to node'''
721 721 t = {}
722 722 if self.changelog.filteredrevs:
723 723 tags, tt = self._findtags()
724 724 else:
725 725 tags = self._tagscache.tags
726 726 for k, v in tags.iteritems():
727 727 try:
728 728 # ignore tags to unknown nodes
729 729 self.changelog.rev(v)
730 730 t[k] = v
731 731 except (error.LookupError, ValueError):
732 732 pass
733 733 return t
734 734
735 735 def _findtags(self):
736 736 '''Do the hard work of finding tags. Return a pair of dicts
737 737 (tags, tagtypes) where tags maps tag name to node, and tagtypes
738 738 maps tag name to a string like \'global\' or \'local\'.
739 739 Subclasses or extensions are free to add their own tags, but
740 740 should be aware that the returned dicts will be retained for the
741 741 duration of the localrepo object.'''
742 742
743 743 # XXX what tagtype should subclasses/extensions use? Currently
744 744 # mq and bookmarks add tags, but do not set the tagtype at all.
745 745 # Should each extension invent its own tag type? Should there
746 746 # be one tagtype for all such "virtual" tags? Or is the status
747 747 # quo fine?
748 748
749 749 alltags = {} # map tag name to (node, hist)
750 750 tagtypes = {}
751 751
752 752 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
753 753 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
754 754
755 755 # Build the return dicts. Have to re-encode tag names because
756 756 # the tags module always uses UTF-8 (in order not to lose info
757 757 # writing to the cache), but the rest of Mercurial wants them in
758 758 # local encoding.
759 759 tags = {}
760 760 for (name, (node, hist)) in alltags.iteritems():
761 761 if node != nullid:
762 762 tags[encoding.tolocal(name)] = node
763 763 tags['tip'] = self.changelog.tip()
764 764 tagtypes = dict([(encoding.tolocal(name), value)
765 765 for (name, value) in tagtypes.iteritems()])
766 766 return (tags, tagtypes)
767 767
768 768 def tagtype(self, tagname):
769 769 '''
770 770 return the type of the given tag. result can be:
771 771
772 772 'local' : a local tag
773 773 'global' : a global tag
774 774 None : tag does not exist
775 775 '''
776 776
777 777 return self._tagscache.tagtypes.get(tagname)
778 778
779 779 def tagslist(self):
780 780 '''return a list of tags ordered by revision'''
781 781 if not self._tagscache.tagslist:
782 782 l = []
783 783 for t, n in self.tags().iteritems():
784 784 l.append((self.changelog.rev(n), t, n))
785 785 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
786 786
787 787 return self._tagscache.tagslist
788 788
789 789 def nodetags(self, node):
790 790 '''return the tags associated with a node'''
791 791 if not self._tagscache.nodetagscache:
792 792 nodetagscache = {}
793 793 for t, n in self._tagscache.tags.iteritems():
794 794 nodetagscache.setdefault(n, []).append(t)
795 795 for tags in nodetagscache.itervalues():
796 796 tags.sort()
797 797 self._tagscache.nodetagscache = nodetagscache
798 798 return self._tagscache.nodetagscache.get(node, [])
799 799
800 800 def nodebookmarks(self, node):
801 801 """return the list of bookmarks pointing to the specified node"""
802 802 marks = []
803 803 for bookmark, n in self._bookmarks.iteritems():
804 804 if n == node:
805 805 marks.append(bookmark)
806 806 return sorted(marks)
807 807
808 808 def branchmap(self):
809 809 '''returns a dictionary {branch: [branchheads]} with branchheads
810 810 ordered by increasing revision number'''
811 811 branchmap.updatecache(self)
812 812 return self._branchcaches[self.filtername]
813 813
814 814 @unfilteredmethod
815 815 def revbranchcache(self):
816 816 if not self._revbranchcache:
817 817 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
818 818 return self._revbranchcache
819 819
820 820 def branchtip(self, branch, ignoremissing=False):
821 821 '''return the tip node for a given branch
822 822
823 823 If ignoremissing is True, then this method will not raise an error.
824 824 This is helpful for callers that only expect None for a missing branch
825 825 (e.g. namespace).
826 826
827 827 '''
828 828 try:
829 829 return self.branchmap().branchtip(branch)
830 830 except KeyError:
831 831 if not ignoremissing:
832 832 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
833 833 else:
834 834 pass
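
    # Illustrative sketch: with ignoremissing=True a missing branch yields
    # None instead of raising RepoLookupError:
    #
    #     tip = repo.branchtip('default')
    #     gone = repo.branchtip('no-such-branch', ignoremissing=True)  # None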
835 835
836 836 def lookup(self, key):
837 837 return self[key].node()
838 838
839 839 def lookupbranch(self, key, remote=None):
840 840 repo = remote or self
841 841 if key in repo.branchmap():
842 842 return key
843 843
844 844 repo = (remote and remote.local()) and remote or self
845 845 return repo[key].branch()
846 846
847 847 def known(self, nodes):
848 848 cl = self.changelog
849 849 nm = cl.nodemap
850 850 filtered = cl.filteredrevs
851 851 result = []
852 852 for n in nodes:
853 853 r = nm.get(n)
854 854 resp = not (r is None or r in filtered)
855 855 result.append(resp)
856 856 return result
857 857
858 858 def local(self):
859 859 return self
860 860
861 861 def publishing(self):
862 862 # it's safe (and desirable) to trust the publish flag unconditionally
863 863 # so that we don't finalize changes shared between users via ssh or nfs
864 864 return self.ui.configbool('phases', 'publish', True, untrusted=True)
865 865
866 866 def cancopy(self):
867 867 # so statichttprepo's override of local() works
868 868 if not self.local():
869 869 return False
870 870 if not self.publishing():
871 871 return True
872 872 # if publishing we can't copy if there is filtered content
873 873 return not self.filtered('visible').changelog.filteredrevs
874 874
875 875 def shared(self):
876 876 '''the type of shared repository (None if not shared)'''
877 877 if self.sharedpath != self.path:
878 878 return 'store'
879 879 return None
880 880
881 881 def join(self, f, *insidef):
882 882 return self.vfs.join(os.path.join(f, *insidef))
883 883
884 884 def wjoin(self, f, *insidef):
885 885 return self.vfs.reljoin(self.root, f, *insidef)
886 886
887 887 def file(self, f):
888 888 if f[0] == '/':
889 889 f = f[1:]
890 890 return filelog.filelog(self.svfs, f)
891 891
892 892 def changectx(self, changeid):
893 893 return self[changeid]
894 894
895 895 def setparents(self, p1, p2=nullid):
896 896 self.dirstate.beginparentchange()
897 897 copies = self.dirstate.setparents(p1, p2)
898 898 pctx = self[p1]
899 899 if copies:
900 900 # Adjust copy records, the dirstate cannot do it, it
901 901 # requires access to parents manifests. Preserve them
902 902 # only for entries added to first parent.
903 903 for f in copies:
904 904 if f not in pctx and copies[f] in pctx:
905 905 self.dirstate.copy(copies[f], f)
906 906 if p2 == nullid:
907 907 for f, s in sorted(self.dirstate.copies().items()):
908 908 if f not in pctx and s not in pctx:
909 909 self.dirstate.copy(None, f)
910 910 self.dirstate.endparentchange()
911 911
912 912 def filectx(self, path, changeid=None, fileid=None):
913 913 """changeid can be a changeset revision, node, or tag.
914 914 fileid can be a file revision or node."""
915 915 return context.filectx(self, path, changeid, fileid)
916 916
917 917 def getcwd(self):
918 918 return self.dirstate.getcwd()
919 919
920 920 def pathto(self, f, cwd=None):
921 921 return self.dirstate.pathto(f, cwd)
922 922
923 923 def wfile(self, f, mode='r'):
924 924 return self.wvfs(f, mode)
925 925
926 926 def _link(self, f):
927 927 return self.wvfs.islink(f)
928 928
929 929 def _loadfilter(self, filter):
930 930 if filter not in self.filterpats:
931 931 l = []
932 932 for pat, cmd in self.ui.configitems(filter):
933 933 if cmd == '!':
934 934 continue
935 935 mf = matchmod.match(self.root, '', [pat])
936 936 fn = None
937 937 params = cmd
938 938 for name, filterfn in self._datafilters.iteritems():
939 939 if cmd.startswith(name):
940 940 fn = filterfn
941 941 params = cmd[len(name):].lstrip()
942 942 break
943 943 if not fn:
944 944 fn = lambda s, c, **kwargs: util.filter(s, c)
945 945 # Wrap old filters not supporting keyword arguments
946 946 if not inspect.getargspec(fn)[2]:
947 947 oldfn = fn
948 948 fn = lambda s, c, **kwargs: oldfn(s, c)
949 949 l.append((mf, fn, params))
950 950 self.filterpats[filter] = l
951 951 return self.filterpats[filter]
952 952
953 953 def _filter(self, filterpats, filename, data):
954 954 for mf, fn, cmd in filterpats:
955 955 if mf(filename):
956 956 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
957 957 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
958 958 break
959 959
960 960 return data
961 961
962 962 @unfilteredpropertycache
963 963 def _encodefilterpats(self):
964 964 return self._loadfilter('encode')
965 965
966 966 @unfilteredpropertycache
967 967 def _decodefilterpats(self):
968 968 return self._loadfilter('decode')
969 969
970 970 def adddatafilter(self, name, filter):
971 971 self._datafilters[name] = filter
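
    # Illustrative sketch: a registered filter is matched by name against
    # the command configured in the [encode]/[decode] sections, and called
    # as in _filter() above ('upper:' is a hypothetical filter name):
    #
    #     def upperfilter(data, params, **kwargs):
    #         return data.upper()
    #     repo.adddatafilter('upper:', upperfilter)
    #
    #     # hgrc:  [encode]
    #     #        **.txt = upper: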
972 972
973 973 def wread(self, filename):
974 974 if self._link(filename):
975 975 data = self.wvfs.readlink(filename)
976 976 else:
977 977 data = self.wvfs.read(filename)
978 978 return self._filter(self._encodefilterpats, filename, data)
979 979
980 980 def wwrite(self, filename, data, flags, backgroundclose=False):
981 981 """write ``data`` into ``filename`` in the working directory
982 982
983 983 This returns the length of the written (possibly decoded) data.
984 984 """
985 985 data = self._filter(self._decodefilterpats, filename, data)
986 986 if 'l' in flags:
987 987 self.wvfs.symlink(data, filename)
988 988 else:
989 989 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
990 990 if 'x' in flags:
991 991 self.wvfs.setflags(filename, False, True)
992 992 return len(data)
993 993
994 994 def wwritedata(self, filename, data):
995 995 return self._filter(self._decodefilterpats, filename, data)
996 996
997 997 def currenttransaction(self):
998 998 """return the current transaction or None if non exists"""
999 999 if self._transref:
1000 1000 tr = self._transref()
1001 1001 else:
1002 1002 tr = None
1003 1003
1004 1004 if tr and tr.running():
1005 1005 return tr
1006 1006 return None
1007 1007
1008 1008 def transaction(self, desc, report=None):
1009 1009 if (self.ui.configbool('devel', 'all-warnings')
1010 1010 or self.ui.configbool('devel', 'check-locks')):
1011 1011 if self._currentlock(self._lockref) is None:
1012 1012 raise RuntimeError('programming error: transaction requires '
1013 1013 'locking')
1014 1014 tr = self.currenttransaction()
1015 1015 if tr is not None:
1016 1016 return tr.nest()
1017 1017
1018 1018 # abort here if the journal already exists
1019 1019 if self.svfs.exists("journal"):
1020 1020 raise error.RepoError(
1021 1021 _("abandoned transaction found"),
1022 1022 hint=_("run 'hg recover' to clean up transaction"))
1023 1023
1024 1024 idbase = "%.40f#%f" % (random.random(), time.time())
1025 1025 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1026 1026 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1027 1027
1028 1028 self._writejournal(desc)
1029 1029 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1030 1030 if report:
1031 1031 rp = report
1032 1032 else:
1033 1033 rp = self.ui.warn
1034 1034 vfsmap = {'plain': self.vfs} # root of .hg/
1035 1035 # we must avoid a cyclic reference between the repo and the transaction.
1036 1036 reporef = weakref.ref(self)
1037 1037 def validate(tr):
1038 1038 """will run pre-closing hooks"""
1039 1039 reporef().hook('pretxnclose', throw=True,
1040 1040 txnname=desc, **tr.hookargs)
1041 1041 def releasefn(tr, success):
1042 1042 repo = reporef()
1043 1043 if success:
1044 1044 # this should be explicitly invoked here, because
1045 1045 # in-memory changes aren't written out at transaction
1046 1046 # close if tr.addfilegenerator (via dirstate.write
1047 1047 # or so) wasn't invoked while the transaction was
1048 1048 # running
1049 1049 repo.dirstate.write(None)
1050 1050 else:
1051 1051 # discard all changes (including ones already written
1052 1052 # out) in this transaction
1053 1053 repo.dirstate.restorebackup(None, prefix='journal.')
1054 1054
1055 1055 repo.invalidate(clearfilecache=True)
1056 1056
1057 1057 tr = transaction.transaction(rp, self.svfs, vfsmap,
1058 1058 "journal",
1059 1059 "undo",
1060 1060 aftertrans(renames),
1061 1061 self.store.createmode,
1062 1062 validator=validate,
1063 1063 releasefn=releasefn)
1064 1064
1065 1065 tr.hookargs['txnid'] = txnid
1066 1066 # note: writing the fncache only during finalize means that the file is
1067 1067 # outdated when running hooks. As fncache is used for streaming clone,
1068 1068 # this is not expected to break anything that happens during the hooks.
1069 1069 tr.addfinalize('flush-fncache', self.store.write)
1070 1070 def txnclosehook(tr2):
1071 1071 """To be run if transaction is successful, will schedule a hook run
1072 1072 """
1073 1073 # Don't reference tr2 in hook() so we don't hold a reference.
1074 1074 # This reduces memory consumption when there are multiple
1075 1075 # transactions per lock. This can likely go away if issue5045
1076 1076 # fixes the function accumulation.
1077 1077 hookargs = tr2.hookargs
1078 1078
1079 1079 def hook():
1080 1080 reporef().hook('txnclose', throw=False, txnname=desc,
1081 1081 **hookargs)
1082 1082 reporef()._afterlock(hook)
1083 1083 tr.addfinalize('txnclose-hook', txnclosehook)
1084 1084 def txnaborthook(tr2):
1085 1085 """To be run if transaction is aborted
1086 1086 """
1087 1087 reporef().hook('txnabort', throw=False, txnname=desc,
1088 1088 **tr2.hookargs)
1089 1089 tr.addabort('txnabort-hook', txnaborthook)
1090 1090 # avoid eager cache invalidation. in-memory data should be identical
1091 1091 # to stored data if transaction has no error.
1092 1092 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1093 1093 self._transref = weakref.ref(tr)
1094 1094 return tr
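
    # Illustrative sketch of the canonical lifecycle, mirroring the
    # lock/transaction dance used by commit() later in this file:
    #
    #     wlock = lock = tr = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()      # store lock, required for transaction()
    #         tr = repo.transaction('example')
    #         # ... write to the store ...
    #         tr.close()              # commit the transaction
    #     finally:
    #         lockmod.release(tr, lock, wlock)  # aborts tr if never closed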
1095 1095
1096 1096 def _journalfiles(self):
1097 1097 return ((self.svfs, 'journal'),
1098 1098 (self.vfs, 'journal.dirstate'),
1099 1099 (self.vfs, 'journal.branch'),
1100 1100 (self.vfs, 'journal.desc'),
1101 1101 (self.vfs, 'journal.bookmarks'),
1102 1102 (self.svfs, 'journal.phaseroots'))
1103 1103
1104 1104 def undofiles(self):
1105 1105 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1106 1106
1107 1107 def _writejournal(self, desc):
1108 1108 self.dirstate.savebackup(None, prefix='journal.')
1109 1109 self.vfs.write("journal.branch",
1110 1110 encoding.fromlocal(self.dirstate.branch()))
1111 1111 self.vfs.write("journal.desc",
1112 1112 "%d\n%s\n" % (len(self), desc))
1113 1113 self.vfs.write("journal.bookmarks",
1114 1114 self.vfs.tryread("bookmarks"))
1115 1115 self.svfs.write("journal.phaseroots",
1116 1116 self.svfs.tryread("phaseroots"))
1117 1117
1118 1118 def recover(self):
1119 1119 with self.lock():
1120 1120 if self.svfs.exists("journal"):
1121 1121 self.ui.status(_("rolling back interrupted transaction\n"))
1122 1122 vfsmap = {'': self.svfs,
1123 1123 'plain': self.vfs,}
1124 1124 transaction.rollback(self.svfs, vfsmap, "journal",
1125 1125 self.ui.warn)
1126 1126 self.invalidate()
1127 1127 return True
1128 1128 else:
1129 1129 self.ui.warn(_("no interrupted transaction available\n"))
1130 1130 return False
1131 1131
1132 1132 def rollback(self, dryrun=False, force=False):
1133 1133 wlock = lock = dsguard = None
1134 1134 try:
1135 1135 wlock = self.wlock()
1136 1136 lock = self.lock()
1137 1137 if self.svfs.exists("undo"):
1138 1138 dsguard = cmdutil.dirstateguard(self, 'rollback')
1139 1139
1140 1140 return self._rollback(dryrun, force, dsguard)
1141 1141 else:
1142 1142 self.ui.warn(_("no rollback information available\n"))
1143 1143 return 1
1144 1144 finally:
1145 1145 release(dsguard, lock, wlock)
1146 1146
1147 1147 @unfilteredmethod # Until we get smarter cache management
1148 1148 def _rollback(self, dryrun, force, dsguard):
1149 1149 ui = self.ui
1150 1150 try:
1151 1151 args = self.vfs.read('undo.desc').splitlines()
1152 1152 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1153 1153 if len(args) >= 3:
1154 1154 detail = args[2]
1155 1155 oldtip = oldlen - 1
1156 1156
1157 1157 if detail and ui.verbose:
1158 1158 msg = (_('repository tip rolled back to revision %s'
1159 1159 ' (undo %s: %s)\n')
1160 1160 % (oldtip, desc, detail))
1161 1161 else:
1162 1162 msg = (_('repository tip rolled back to revision %s'
1163 1163 ' (undo %s)\n')
1164 1164 % (oldtip, desc))
1165 1165 except IOError:
1166 1166 msg = _('rolling back unknown transaction\n')
1167 1167 desc = None
1168 1168
1169 1169 if not force and self['.'] != self['tip'] and desc == 'commit':
1170 1170 raise error.Abort(
1171 1171 _('rollback of last commit while not checked out '
1172 1172 'may lose data'), hint=_('use -f to force'))
1173 1173
1174 1174 ui.status(msg)
1175 1175 if dryrun:
1176 1176 return 0
1177 1177
1178 1178 parents = self.dirstate.parents()
1179 1179 self.destroying()
1180 1180 vfsmap = {'plain': self.vfs, '': self.svfs}
1181 1181 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1182 1182 if self.vfs.exists('undo.bookmarks'):
1183 1183 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1184 1184 if self.svfs.exists('undo.phaseroots'):
1185 1185 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1186 1186 self.invalidate()
1187 1187
1188 1188 parentgone = (parents[0] not in self.changelog.nodemap or
1189 1189 parents[1] not in self.changelog.nodemap)
1190 1190 if parentgone:
1191 1191 # prevent dirstateguard from overwriting already restored one
1192 1192 dsguard.close()
1193 1193
1194 1194 self.dirstate.restorebackup(None, prefix='undo.')
1195 1195 try:
1196 1196 branch = self.vfs.read('undo.branch')
1197 1197 self.dirstate.setbranch(encoding.tolocal(branch))
1198 1198 except IOError:
1199 1199 ui.warn(_('named branch could not be reset: '
1200 1200 'current branch is still \'%s\'\n')
1201 1201 % self.dirstate.branch())
1202 1202
1203 1203 parents = tuple([p.rev() for p in self[None].parents()])
1204 1204 if len(parents) > 1:
1205 1205 ui.status(_('working directory now based on '
1206 1206 'revisions %d and %d\n') % parents)
1207 1207 else:
1208 1208 ui.status(_('working directory now based on '
1209 1209 'revision %d\n') % parents)
1210 1210 mergemod.mergestate.clean(self, self['.'].node())
1211 1211
1212 1212 # TODO: if we know which new heads may result from this rollback, pass
1213 1213 # them to destroy(), which will prevent the branchhead cache from being
1214 1214 # invalidated.
1215 1215 self.destroyed()
1216 1216 return 0
1217 1217
1218 1218 def invalidatecaches(self):
1219 1219
1220 1220 if '_tagscache' in vars(self):
1221 1221 # can't use delattr on proxy
1222 1222 del self.__dict__['_tagscache']
1223 1223
1224 1224 self.unfiltered()._branchcaches.clear()
1225 1225 self.invalidatevolatilesets()
1226 1226
1227 1227 def invalidatevolatilesets(self):
1228 1228 self.filteredrevcache.clear()
1229 1229 obsolete.clearobscaches(self)
1230 1230
1231 1231 def invalidatedirstate(self):
1232 1232 '''Invalidates the dirstate, causing the next call to dirstate
1233 1233 to check if it was modified since the last time it was read,
1234 1234 rereading it if it has.
1235 1235
1236 1236 This differs from dirstate.invalidate() in that it doesn't always
1237 1237 reread the dirstate. Use dirstate.invalidate() if you want to
1238 1238 explicitly read the dirstate again (i.e. restoring it to a previous
1239 1239 known good state).'''
1240 1240 if hasunfilteredcache(self, 'dirstate'):
1241 1241 for k in self.dirstate._filecache:
1242 1242 try:
1243 1243 delattr(self.dirstate, k)
1244 1244 except AttributeError:
1245 1245 pass
1246 1246 delattr(self.unfiltered(), 'dirstate')
1247 1247
1248 1248 def invalidate(self, clearfilecache=False):
1249 1249 '''Invalidates both store and non-store parts other than dirstate
1250 1250
1251 1251 If a transaction is running, invalidation of store is omitted,
1252 1252 because discarding in-memory changes might cause inconsistency
1253 1253 (e.g. incomplete fncache causes unintentional failure, but
1254 1254 redundant one doesn't).
1255 1255 '''
1256 1256 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1257 1257 for k in self._filecache.keys():
1258 1258 # dirstate is invalidated separately in invalidatedirstate()
1259 1259 if k == 'dirstate':
1260 1260 continue
1261 1261
1262 1262 if clearfilecache:
1263 1263 del self._filecache[k]
1264 1264 try:
1265 1265 delattr(unfiltered, k)
1266 1266 except AttributeError:
1267 1267 pass
1268 1268 self.invalidatecaches()
1269 1269 if not self.currenttransaction():
1270 1270 # TODO: Changing contents of store outside transaction
1271 1271 # causes inconsistency. We should make in-memory store
1272 1272 # changes detectable, and abort if changed.
1273 1273 self.store.invalidatecaches()
1274 1274
1275 1275 def invalidateall(self):
1276 1276 '''Fully invalidates both store and non-store parts, causing the
1277 1277 subsequent operation to reread any outside changes.'''
1278 1278 # extension should hook this to invalidate its caches
1279 1279 self.invalidate()
1280 1280 self.invalidatedirstate()
1281 1281
1282 1282 @unfilteredmethod
1283 1283 def _refreshfilecachestats(self, tr):
1284 1284 """Reload stats of cached files so that they are flagged as valid"""
1285 1285 for k, ce in self._filecache.items():
1286 1286 if k == 'dirstate' or k not in self.__dict__:
1287 1287 continue
1288 1288 ce.refresh()
1289 1289
1290 1290 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1291 1291 inheritchecker=None, parentenvvar=None):
1292 1292 parentlock = None
1293 1293 # the contents of parentenvvar are used by the underlying lock to
1294 1294 # determine whether it can be inherited
1295 1295 if parentenvvar is not None:
1296 1296 parentlock = os.environ.get(parentenvvar)
1297 1297 try:
1298 1298 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1299 1299 acquirefn=acquirefn, desc=desc,
1300 1300 inheritchecker=inheritchecker,
1301 1301 parentlock=parentlock)
1302 1302 except error.LockHeld as inst:
1303 1303 if not wait:
1304 1304 raise
1305 1305 # show more details for new-style locks
1306 1306 if ':' in inst.locker:
1307 1307 host, pid = inst.locker.split(":", 1)
1308 1308 self.ui.warn(
1309 1309 _("waiting for lock on %s held by process %r "
1310 1310 "on host %r\n") % (desc, pid, host))
1311 1311 else:
1312 1312 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1313 1313 (desc, inst.locker))
1314 1314 # default to 600 seconds timeout
1315 1315 l = lockmod.lock(vfs, lockname,
1316 1316 int(self.ui.config("ui", "timeout", "600")),
1317 1317 releasefn=releasefn, acquirefn=acquirefn,
1318 1318 desc=desc)
1319 1319 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1320 1320 return l
1321 1321
1322 1322 def _afterlock(self, callback):
1323 1323 """add a callback to be run when the repository is fully unlocked
1324 1324
1325 1325 The callback will be executed when the outermost lock is released
1326 1326 (with wlock being higher level than 'lock')."""
1327 1327 for ref in (self._wlockref, self._lockref):
1328 1328 l = ref and ref()
1329 1329 if l and l.held:
1330 1330 l.postrelease.append(callback)
1331 1331 break
1332 1332 else: # no lock has been found.
1333 1333 callback()
1334 1334
1335 1335 def lock(self, wait=True):
1336 1336 '''Lock the repository store (.hg/store) and return a weak reference
1337 1337 to the lock. Use this before modifying the store (e.g. committing or
1338 1338 stripping). If you are opening a transaction, get a lock as well.
1339 1339
1340 1340 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1341 1341 'wlock' first to avoid a dead-lock hazard.'''
1342 1342 l = self._currentlock(self._lockref)
1343 1343 if l is not None:
1344 1344 l.lock()
1345 1345 return l
1346 1346
1347 1347 l = self._lock(self.svfs, "lock", wait, None,
1348 1348 self.invalidate, _('repository %s') % self.origroot)
1349 1349 self._lockref = weakref.ref(l)
1350 1350 return l
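
    # Illustrative ordering per the docstring above; wlock() below emits a
    # devel warning when it is acquired after lock():
    #
    #     wlock = repo.wlock()   # working directory lock first
    #     lock = repo.lock()     # then the store lock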
1351 1351
1352 1352 def _wlockchecktransaction(self):
1353 1353 if self.currenttransaction() is not None:
1354 1354 raise error.LockInheritanceContractViolation(
1355 1355 'wlock cannot be inherited in the middle of a transaction')
1356 1356
1357 1357 def wlock(self, wait=True):
1358 1358 '''Lock the non-store parts of the repository (everything under
1359 1359 .hg except .hg/store) and return a weak reference to the lock.
1360 1360
1361 1361 Use this before modifying files in .hg.
1362 1362
1363 1363 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1364 1364 'wlock' first to avoid a dead-lock hazard.'''
1365 1365 l = self._wlockref and self._wlockref()
1366 1366 if l is not None and l.held:
1367 1367 l.lock()
1368 1368 return l
1369 1369
1370 1370 # We do not need to check for non-waiting lock acquisition. Such
1371 1371 # acquisition would not cause a dead-lock as it would just fail.
1372 1372 if wait and (self.ui.configbool('devel', 'all-warnings')
1373 1373 or self.ui.configbool('devel', 'check-locks')):
1374 1374 if self._currentlock(self._lockref) is not None:
1375 1375 self.ui.develwarn('"wlock" acquired after "lock"')
1376 1376
1377 1377 def unlock():
1378 1378 if self.dirstate.pendingparentchange():
1379 1379 self.dirstate.invalidate()
1380 1380 else:
1381 1381 self.dirstate.write(None)
1382 1382
1383 1383 self._filecache['dirstate'].refresh()
1384 1384
1385 1385 l = self._lock(self.vfs, "wlock", wait, unlock,
1386 1386 self.invalidatedirstate, _('working directory of %s') %
1387 1387 self.origroot,
1388 1388 inheritchecker=self._wlockchecktransaction,
1389 1389 parentenvvar='HG_WLOCK_LOCKER')
1390 1390 self._wlockref = weakref.ref(l)
1391 1391 return l
1392 1392
1393 1393 def _currentlock(self, lockref):
1394 1394 """Returns the lock if it's held, or None if it's not."""
1395 1395 if lockref is None:
1396 1396 return None
1397 1397 l = lockref()
1398 1398 if l is None or not l.held:
1399 1399 return None
1400 1400 return l
1401 1401
1402 1402 def currentwlock(self):
1403 1403 """Returns the wlock if it's held, or None if it's not."""
1404 1404 return self._currentlock(self._wlockref)
1405 1405
1406 1406 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1407 1407 """
1408 1408 commit an individual file as part of a larger transaction
1409 1409 """
1410 1410
1411 1411 fname = fctx.path()
1412 1412 fparent1 = manifest1.get(fname, nullid)
1413 1413 fparent2 = manifest2.get(fname, nullid)
1414 1414 if isinstance(fctx, context.filectx):
1415 1415 node = fctx.filenode()
1416 1416 if node in [fparent1, fparent2]:
1417 1417 self.ui.debug('reusing %s filelog entry\n' % fname)
1418 1418 if manifest1.flags(fname) != fctx.flags():
1419 1419 changelist.append(fname)
1420 1420 return node
1421 1421
1422 1422 flog = self.file(fname)
1423 1423 meta = {}
1424 1424 copy = fctx.renamed()
1425 1425 if copy and copy[0] != fname:
1426 1426 # Mark the new revision of this file as a copy of another
1427 1427 # file. This copy data will effectively act as a parent
1428 1428 # of this new revision. If this is a merge, the first
1429 1429 # parent will be the nullid (meaning "look up the copy data")
1430 1430 # and the second one will be the other parent. For example:
1431 1431 #
1432 1432 # 0 --- 1 --- 3 rev1 changes file foo
1433 1433 # \ / rev2 renames foo to bar and changes it
1434 1434 # \- 2 -/ rev3 should have bar with all changes and
1435 1435 # should record that bar descends from
1436 1436 # bar in rev2 and foo in rev1
1437 1437 #
1438 1438 # this allows this merge to succeed:
1439 1439 #
1440 1440 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1441 1441 # \ / merging rev3 and rev4 should use bar@rev2
1442 1442 # \- 2 --- 4 as the merge base
1443 1443 #
1444 1444
1445 1445 cfname = copy[0]
1446 1446 crev = manifest1.get(cfname)
1447 1447 newfparent = fparent2
1448 1448
1449 1449 if manifest2: # branch merge
1450 1450 if fparent2 == nullid or crev is None: # copied on remote side
1451 1451 if cfname in manifest2:
1452 1452 crev = manifest2[cfname]
1453 1453 newfparent = fparent1
1454 1454
1455 1455 # Here, we used to search backwards through history to try to find
1456 1456 # where the file copy came from if the source of a copy was not in
1457 1457 # the parent directory. However, this doesn't actually make sense to
1458 1458 # do (what does a copy from something not in your working copy even
1459 1459 # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
1460 1460 # the user that copy information was dropped, so if they didn't
1461 1461 # expect this outcome it can be fixed, but this is the correct
1462 1462 # behavior in this circumstance.
1463 1463
1464 1464 if crev:
1465 1465 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1466 1466 meta["copy"] = cfname
1467 1467 meta["copyrev"] = hex(crev)
1468 1468 fparent1, fparent2 = nullid, newfparent
1469 1469 else:
1470 1470 self.ui.warn(_("warning: can't find ancestor for '%s' "
1471 1471 "copied from '%s'!\n") % (fname, cfname))
1472 1472
1473 1473 elif fparent1 == nullid:
1474 1474 fparent1, fparent2 = fparent2, nullid
1475 1475 elif fparent2 != nullid:
1476 1476 # is one parent an ancestor of the other?
1477 1477 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1478 1478 if fparent1 in fparentancestors:
1479 1479 fparent1, fparent2 = fparent2, nullid
1480 1480 elif fparent2 in fparentancestors:
1481 1481 fparent2 = nullid
1482 1482
1483 1483 # is the file changed?
1484 1484 text = fctx.data()
1485 1485 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1486 1486 changelist.append(fname)
1487 1487 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1488 1488 # are just the flags changed during merge?
1489 1489 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1490 1490 changelist.append(fname)
1491 1491
1492 1492 return fparent1
1493 1493
1494 1494 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1495 1495 """check for commit arguments that aren't commitable"""
1496 1496 if match.isexact() or match.prefix():
1497 1497 matched = set(status.modified + status.added + status.removed)
1498 1498
1499 1499 for f in match.files():
1500 1500 f = self.dirstate.normalize(f)
1501 1501 if f == '.' or f in matched or f in wctx.substate:
1502 1502 continue
1503 1503 if f in status.deleted:
1504 1504 fail(f, _('file not found!'))
1505 1505 if f in vdirs: # visited directory
1506 1506 d = f + '/'
1507 1507 for mf in matched:
1508 1508 if mf.startswith(d):
1509 1509 break
1510 1510 else:
1511 1511 fail(f, _("no match under directory!"))
1512 1512 elif f not in self.dirstate:
1513 1513 fail(f, _("file not tracked!"))
1514 1514
1515 1515 @unfilteredmethod
1516 1516 def commit(self, text="", user=None, date=None, match=None, force=False,
1517 1517 editor=False, extra=None):
1518 1518 """Add a new revision to current repository.
1519 1519
1520 1520 Revision information is gathered from the working directory,
1521 1521 match can be used to filter the committed files. If editor is
1522 1522 supplied, it is called to get a commit message.
1523 1523 """
1524 1524 if extra is None:
1525 1525 extra = {}
1526 1526
1527 1527 def fail(f, msg):
1528 1528 raise error.Abort('%s: %s' % (f, msg))
1529 1529
1530 1530 if not match:
1531 1531 match = matchmod.always(self.root, '')
1532 1532
1533 1533 if not force:
1534 1534 vdirs = []
1535 1535 match.explicitdir = vdirs.append
1536 1536 match.bad = fail
1537 1537
1538 1538 wlock = lock = tr = None
1539 1539 try:
1540 1540 wlock = self.wlock()
1541 1541 lock = self.lock() # for recent changelog (see issue4368)
1542 1542
1543 1543 wctx = self[None]
1544 1544 merge = len(wctx.parents()) > 1
1545 1545
1546 1546 if not force and merge and match.ispartial():
1547 1547 raise error.Abort(_('cannot partially commit a merge '
1548 1548 '(do not specify files or patterns)'))
1549 1549
1550 1550 status = self.status(match=match, clean=force)
1551 1551 if force:
1552 1552 status.modified.extend(status.clean) # mq may commit clean files
1553 1553
1554 1554 # check subrepos
1555 1555 subs = []
1556 1556 commitsubs = set()
1557 1557 newstate = wctx.substate.copy()
1558 1558 # only manage subrepos and .hgsubstate if .hgsub is present
1559 1559 if '.hgsub' in wctx:
1560 1560 # we'll decide whether to track this ourselves, thanks
1561 1561 for c in status.modified, status.added, status.removed:
1562 1562 if '.hgsubstate' in c:
1563 1563 c.remove('.hgsubstate')
1564 1564
1565 1565 # compare current state to last committed state
1566 1566 # build new substate based on last committed state
1567 1567 oldstate = wctx.p1().substate
1568 1568 for s in sorted(newstate.keys()):
1569 1569 if not match(s):
1570 1570 # ignore working copy, use old state if present
1571 1571 if s in oldstate:
1572 1572 newstate[s] = oldstate[s]
1573 1573 continue
1574 1574 if not force:
1575 1575 raise error.Abort(
1576 1576 _("commit with new subrepo %s excluded") % s)
1577 1577 dirtyreason = wctx.sub(s).dirtyreason(True)
1578 1578 if dirtyreason:
1579 1579 if not self.ui.configbool('ui', 'commitsubrepos'):
1580 1580 raise error.Abort(dirtyreason,
1581 1581 hint=_("use --subrepos for recursive commit"))
1582 1582 subs.append(s)
1583 1583 commitsubs.add(s)
1584 1584 else:
1585 1585 bs = wctx.sub(s).basestate()
1586 1586 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1587 1587 if oldstate.get(s, (None, None, None))[1] != bs:
1588 1588 subs.append(s)
1589 1589
1590 1590 # check for removed subrepos
1591 1591 for p in wctx.parents():
1592 1592 r = [s for s in p.substate if s not in newstate]
1593 1593 subs += [s for s in r if match(s)]
1594 1594 if subs:
1595 1595 if (not match('.hgsub') and
1596 1596 '.hgsub' in (wctx.modified() + wctx.added())):
1597 1597 raise error.Abort(
1598 1598 _("can't commit subrepos without .hgsub"))
1599 1599 status.modified.insert(0, '.hgsubstate')
1600 1600
1601 1601 elif '.hgsub' in status.removed:
1602 1602 # clean up .hgsubstate when .hgsub is removed
1603 1603 if ('.hgsubstate' in wctx and
1604 1604 '.hgsubstate' not in (status.modified + status.added +
1605 1605 status.removed)):
1606 1606 status.removed.insert(0, '.hgsubstate')
1607 1607
1608 1608 # make sure all explicit patterns are matched
1609 1609 if not force:
1610 1610 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1611 1611
1612 1612 cctx = context.workingcommitctx(self, status,
1613 1613 text, user, date, extra)
1614 1614
1615 1615 # internal config: ui.allowemptycommit
1616 1616 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1617 1617 or extra.get('close') or merge or cctx.files()
1618 1618 or self.ui.configbool('ui', 'allowemptycommit'))
1619 1619 if not allowemptycommit:
1620 1620 return None
1621 1621
1622 1622 if merge and cctx.deleted():
1623 1623 raise error.Abort(_("cannot commit merge with missing files"))
1624 1624
1625 1625 ms = mergemod.mergestate.read(self)
1626 1626
1627 1627 if list(ms.unresolved()):
1628 1628 raise error.Abort(_('unresolved merge conflicts '
1629 1629 '(see "hg help resolve")'))
1630 1630 if ms.mdstate() != 's' or list(ms.driverresolved()):
1631 1631 raise error.Abort(_('driver-resolved merge conflicts'),
1632 1632 hint=_('run "hg resolve --all" to resolve'))
1633 1633
1634 1634 if editor:
1635 1635 cctx._text = editor(self, cctx, subs)
1636 1636 edited = (text != cctx._text)
1637 1637
1638 1638 # Save commit message in case this transaction gets rolled back
1639 1639 # (e.g. by a pretxncommit hook). Leave the content alone on
1640 1640 # the assumption that the user will use the same editor again.
1641 1641 msgfn = self.savecommitmessage(cctx._text)
1642 1642
1643 1643 # commit subs and write new state
1644 1644 if subs:
1645 1645 for s in sorted(commitsubs):
1646 1646 sub = wctx.sub(s)
1647 1647 self.ui.status(_('committing subrepository %s\n') %
1648 1648 subrepo.subrelpath(sub))
1649 1649 sr = sub.commit(cctx._text, user, date)
1650 1650 newstate[s] = (newstate[s][0], sr)
1651 1651 subrepo.writestate(self, newstate)
1652 1652
1653 1653 p1, p2 = self.dirstate.parents()
1654 1654 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1655 1655 try:
1656 1656 self.hook("precommit", throw=True, parent1=hookp1,
1657 1657 parent2=hookp2)
1658 1658 tr = self.transaction('commit')
1659 1659 ret = self.commitctx(cctx, True)
1660 1660 except: # re-raises
1661 1661 if edited:
1662 1662 self.ui.write(
1663 1663 _('note: commit message saved in %s\n') % msgfn)
1664 1664 raise
1665 1665 # update bookmarks, dirstate and mergestate
1666 1666 bookmarks.update(self, [p1, p2], ret)
1667 1667 cctx.markcommitted(ret)
1668 1668 ms.reset()
1669 1669 tr.close()
1670 1670
1671 1671 finally:
1672 1672 lockmod.release(tr, lock, wlock)
1673 1673
1674 1674 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1675 1675 # hack for commands that use a temporary commit (e.g. histedit):
1676 1676 # the temporary commit may have been stripped before the hook runs
1677 1677 if self.changelog.hasnode(ret):
1678 1678 self.hook("commit", node=node, parent1=parent1,
1679 1679 parent2=parent2)
1680 1680 self._afterlock(commithook)
1681 1681 return ret
1682 1682
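A minimal sketch of driving commit() from the API side, assuming an existing repository and the hg/ui/match modules of this same tree (the path, user, and message are hypothetical):

    from mercurial import hg, match as matchmod, ui as uimod

    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')      # hypothetical checkout
    m = matchmod.match(repo.root, '', ['foo.c'])  # restrict the commit to foo.c
    node = repo.commit(text='fix foo', user='alice', match=m)
    if node is None:
        u.status('nothing changed\n')             # empty commits return None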
1683 1683 @unfilteredmethod
1684 1684 def commitctx(self, ctx, error=False):
1685 1685 """Add a new revision to current repository.
1686 1686 Revision information is passed via the context argument.
1687 1687 """
1688 1688
1689 1689 tr = None
1690 1690 p1, p2 = ctx.p1(), ctx.p2()
1691 1691 user = ctx.user()
1692 1692
1693 1693 lock = self.lock()
1694 1694 try:
1695 1695 tr = self.transaction("commit")
1696 1696 trp = weakref.proxy(tr)
1697 1697
1698 1698 if ctx.files():
1699 1699 m1 = p1.manifest()
1700 1700 m2 = p2.manifest()
1701 1701 m = m1.copy()
1702 1702
1703 1703 # check in files
1704 1704 added = []
1705 1705 changed = []
1706 1706 removed = list(ctx.removed())
1707 1707 linkrev = len(self)
1708 1708 self.ui.note(_("committing files:\n"))
1709 1709 for f in sorted(ctx.modified() + ctx.added()):
1710 1710 self.ui.note(f + "\n")
1711 1711 try:
1712 1712 fctx = ctx[f]
1713 1713 if fctx is None:
1714 1714 removed.append(f)
1715 1715 else:
1716 1716 added.append(f)
1717 1717 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1718 1718 trp, changed)
1719 1719 m.setflag(f, fctx.flags())
1720 1720 except OSError as inst:
1721 1721 self.ui.warn(_("trouble committing %s!\n") % f)
1722 1722 raise
1723 1723 except IOError as inst:
1724 1724 errcode = getattr(inst, 'errno', errno.ENOENT)
1725 1725 if error or errcode and errcode != errno.ENOENT:
1726 1726 self.ui.warn(_("trouble committing %s!\n") % f)
1727 1727 raise
1728 1728
1729 1729 # update manifest
1730 1730 self.ui.note(_("committing manifest\n"))
1731 1731 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1732 1732 drop = [f for f in removed if f in m]
1733 1733 for f in drop:
1734 1734 del m[f]
1735 mn = self.manifest.add(m, trp, linkrev,
1736 p1.manifestnode(), p2.manifestnode(),
1737 added, drop)
1735 mn = self.manifestlog.add(m, trp, linkrev,
1736 p1.manifestnode(), p2.manifestnode(),
1737 added, drop)
1738 1738 files = changed + removed
1739 1739 else:
1740 1740 mn = p1.manifestnode()
1741 1741 files = []
1742 1742
1743 1743 # update changelog
1744 1744 self.ui.note(_("committing changelog\n"))
1745 1745 self.changelog.delayupdate(tr)
1746 1746 n = self.changelog.add(mn, files, ctx.description(),
1747 1747 trp, p1.node(), p2.node(),
1748 1748 user, ctx.date(), ctx.extra().copy())
1749 1749 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1750 1750 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1751 1751 parent2=xp2)
1752 1752 # set the new commit in its proper phase
1753 1753 targetphase = subrepo.newcommitphase(self.ui, ctx)
1754 1754 if targetphase:
1755 1755 # retracting the boundary does not alter parent changesets.
1756 1756 # if a parent has a higher phase, the resulting phase will
1757 1757 # be compliant anyway
1758 1758 #
1759 1759 # if the minimal phase was 0, we don't need to retract anything
1760 1760 phases.retractboundary(self, tr, targetphase, [n])
1761 1761 tr.close()
1762 1762 branchmap.updatecache(self.filtered('served'))
1763 1763 return n
1764 1764 finally:
1765 1765 if tr:
1766 1766 tr.release()
1767 1767 lock.release()
1768 1768
1769 1769 @unfilteredmethod
1770 1770 def destroying(self):
1771 1771 '''Inform the repository that nodes are about to be destroyed.
1772 1772 Intended for use by strip and rollback, so there's a common
1773 1773 place for anything that has to be done before destroying history.
1774 1774
1775 1775 This is mostly useful for saving state that is in memory and waiting
1776 1776 to be flushed when the current lock is released. Because a call to
1777 1777 destroyed is imminent, the repo will be invalidated causing those
1778 1778 changes to stay in memory (waiting for the next unlock), or vanish
1779 1779 completely.
1780 1780 '''
1781 1781 # When using the same lock to commit and strip, the phasecache is left
1782 1782 # dirty after committing. Then when we strip, the repo is invalidated,
1783 1783 # causing those changes to disappear.
1784 1784 if '_phasecache' in vars(self):
1785 1785 self._phasecache.write()
1786 1786
1787 1787 @unfilteredmethod
1788 1788 def destroyed(self):
1789 1789 '''Inform the repository that nodes have been destroyed.
1790 1790 Intended for use by strip and rollback, so there's a common
1791 1791 place for anything that has to be done after destroying history.
1792 1792 '''
1793 1793 # When one tries to:
1794 1794 # 1) destroy nodes thus calling this method (e.g. strip)
1795 1795 # 2) use phasecache somewhere (e.g. commit)
1796 1796 #
1797 1797 # then 2) will fail because the phasecache contains nodes that were
1798 1798 # removed. We can either remove phasecache from the filecache,
1799 1799 # causing it to reload next time it is accessed, or simply filter
1800 1800 # the removed nodes now and write the updated cache.
1801 1801 self._phasecache.filterunknown(self)
1802 1802 self._phasecache.write()
1803 1803
1804 1804 # update the 'served' branch cache to help read only server process
1805 1805 # Thanks to branchcache collaboration this is done from the nearest
1806 1806 # filtered subset and it is expected to be fast.
1807 1807 branchmap.updatecache(self.filtered('served'))
1808 1808
1809 1809 # Ensure the persistent tag cache is updated. Doing it now
1810 1810 # means that the tag cache only has to worry about destroyed
1811 1811 # heads immediately after a strip/rollback. That in turn
1812 1812 # guarantees that "cachetip == currenttip" (comparing both rev
1813 1813 # and node) always means no nodes have been added or destroyed.
1814 1814
1815 1815 # XXX this is suboptimal when qrefresh'ing: we strip the current
1816 1816 # head, refresh the tag cache, then immediately add a new head.
1817 1817 # But I think doing it this way is necessary for the "instant
1818 1818 # tag cache retrieval" case to work.
1819 1819 self.invalidate()
1820 1820
1821 1821 def walk(self, match, node=None):
1822 1822 '''
1823 1823 walk recursively through the directory tree or a given
1824 1824 changeset, finding all files matched by the match
1825 1825 function
1826 1826 '''
1827 1827 return self[node].walk(match)
1828 1828
1829 1829 def status(self, node1='.', node2=None, match=None,
1830 1830 ignored=False, clean=False, unknown=False,
1831 1831 listsubrepos=False):
1832 1832 '''a convenience method that calls node1.status(node2)'''
1833 1833 return self[node1].status(node2, match, ignored, clean, unknown,
1834 1834 listsubrepos)
1835 1835
1836 1836 def heads(self, start=None):
1837 1837 heads = self.changelog.heads(start)
1838 1838 # sort the output in rev descending order
1839 1839 return sorted(heads, key=self.changelog.rev, reverse=True)
1840 1840
1841 1841 def branchheads(self, branch=None, start=None, closed=False):
1842 1842 '''return a (possibly filtered) list of heads for the given branch
1843 1843
1844 1844 Heads are returned in topological order, from newest to oldest.
1845 1845 If branch is None, use the dirstate branch.
1846 1846 If start is not None, return only heads reachable from start.
1847 1847 If closed is True, return heads that are marked as closed as well.
1848 1848 '''
1849 1849 if branch is None:
1850 1850 branch = self[None].branch()
1851 1851 branches = self.branchmap()
1852 1852 if branch not in branches:
1853 1853 return []
1854 1854 # the cache returns heads ordered lowest to highest
1855 1855 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1856 1856 if start is not None:
1857 1857 # filter out the heads that cannot be reached from startrev
1858 1858 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1859 1859 bheads = [h for h in bheads if h in fbheads]
1860 1860 return bheads
1861 1861
1862 1862 def branches(self, nodes):
1863 1863 if not nodes:
1864 1864 nodes = [self.changelog.tip()]
1865 1865 b = []
1866 1866 for n in nodes:
1867 1867 t = n
1868 1868 while True:
1869 1869 p = self.changelog.parents(n)
1870 1870 if p[1] != nullid or p[0] == nullid:
1871 1871 b.append((t, n, p[0], p[1]))
1872 1872 break
1873 1873 n = p[0]
1874 1874 return b
1875 1875
1876 1876 def between(self, pairs):
1877 1877 r = []
1878 1878
1879 1879 for top, bottom in pairs:
1880 1880 n, l, i = top, [], 0
1881 1881 f = 1
1882 1882
1883 1883 while n != bottom and n != nullid:
1884 1884 p = self.changelog.parents(n)[0]
1885 1885 if i == f:
1886 1886 l.append(n)
1887 1887 f = f * 2
1888 1888 n = p
1889 1889 i += 1
1890 1890
1891 1891 r.append(l)
1892 1892
1893 1893 return r
1894 1894
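The sampling in between() is easiest to see on a linear history; a standalone sketch with integers standing in for nodes and parent(n) == n - 1:

    def between_one(top, bottom):
        n, l, i, f = top, [], 0, 1
        while n != bottom and n > 0:    # 'n > 0' stands in for 'n != nullid'
            if i == f:                  # record nodes 1, 2, 4, 8, ... steps away
                l.append(n)
                f *= 2
            n -= 1                      # step to the first parent
            i += 1
        return l

    print(between_one(100, 80))         # [99, 98, 96, 92, 84]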
1895 1895 def checkpush(self, pushop):
1896 1896 """Extensions can override this function if additional checks have
1897 1897 to be performed before pushing, or call it if they override the
1898 1898 push command.
1899 1899 """
1900 1900 pass
1901 1901
1902 1902 @unfilteredpropertycache
1903 1903 def prepushoutgoinghooks(self):
1904 1904 """Return util.hooks consists of a pushop with repo, remote, outgoing
1905 1905 methods, which are called before pushing changesets.
1906 1906 """
1907 1907 return util.hooks()
1908 1908
1909 1909 def pushkey(self, namespace, key, old, new):
1910 1910 try:
1911 1911 tr = self.currenttransaction()
1912 1912 hookargs = {}
1913 1913 if tr is not None:
1914 1914 hookargs.update(tr.hookargs)
1915 1915 hookargs['namespace'] = namespace
1916 1916 hookargs['key'] = key
1917 1917 hookargs['old'] = old
1918 1918 hookargs['new'] = new
1919 1919 self.hook('prepushkey', throw=True, **hookargs)
1920 1920 except error.HookAbort as exc:
1921 1921 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1922 1922 if exc.hint:
1923 1923 self.ui.write_err(_("(%s)\n") % exc.hint)
1924 1924 return False
1925 1925 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1926 1926 ret = pushkey.push(self, namespace, key, old, new)
1927 1927 def runhook():
1928 1928 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1929 1929 ret=ret)
1930 1930 self._afterlock(runhook)
1931 1931 return ret
1932 1932
1933 1933 def listkeys(self, namespace):
1934 1934 self.hook('prelistkeys', throw=True, namespace=namespace)
1935 1935 self.ui.debug('listing keys for "%s"\n' % namespace)
1936 1936 values = pushkey.list(self, namespace)
1937 1937 self.hook('listkeys', namespace=namespace, values=values)
1938 1938 return values
1939 1939
1940 1940 def debugwireargs(self, one, two, three=None, four=None, five=None):
1941 1941 '''used to test argument passing over the wire'''
1942 1942 return "%s %s %s %s %s" % (one, two, three, four, five)
1943 1943
1944 1944 def savecommitmessage(self, text):
1945 1945 fp = self.vfs('last-message.txt', 'wb')
1946 1946 try:
1947 1947 fp.write(text)
1948 1948 finally:
1949 1949 fp.close()
1950 1950 return self.pathto(fp.name[len(self.root) + 1:])
1951 1951
1952 1952 # used to avoid circular references so destructors work
1953 1953 def aftertrans(files):
1954 1954 renamefiles = [tuple(t) for t in files]
1955 1955 def a():
1956 1956 for vfs, src, dest in renamefiles:
1957 1957 try:
1958 1958 vfs.rename(src, dest)
1959 1959 except OSError: # journal file does not yet exist
1960 1960 pass
1961 1961 return a
1962 1962
1963 1963 def undoname(fn):
1964 1964 base, name = os.path.split(fn)
1965 1965 assert name.startswith('journal')
1966 1966 return os.path.join(base, name.replace('journal', 'undo', 1))
1967 1967
1968 1968 def instance(ui, path, create):
1969 1969 return localrepository(ui, util.urllocalpath(path), create)
1970 1970
1971 1971 def islocal(path):
1972 1972 return True
1973 1973
1974 1974 def newreporequirements(repo):
1975 1975 """Determine the set of requirements for a new local repository.
1976 1976
1977 1977 Extensions can wrap this function to specify custom requirements for
1978 1978 new repositories.
1979 1979 """
1980 1980 ui = repo.ui
1981 1981 requirements = set(['revlogv1'])
1982 1982 if ui.configbool('format', 'usestore', True):
1983 1983 requirements.add('store')
1984 1984 if ui.configbool('format', 'usefncache', True):
1985 1985 requirements.add('fncache')
1986 1986 if ui.configbool('format', 'dotencode', True):
1987 1987 requirements.add('dotencode')
1988 1988
1989 1989 if scmutil.gdinitconfig(ui):
1990 1990 requirements.add('generaldelta')
1991 1991 if ui.configbool('experimental', 'treemanifest', False):
1992 1992 requirements.add('treemanifest')
1993 1993 if ui.configbool('experimental', 'manifestv2', False):
1994 1994 requirements.add('manifestv2')
1995 1995
1996 1996 return requirements
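A standalone sketch of the same decisions with a plain dict standing in for the ui config; gdinitconfig is approximated by a single boolean, and the defaults mirror the configbool calls above:

    def sketch_requirements(cfg):
        req = set(['revlogv1'])
        if cfg.get('format.usestore', True):
            req.add('store')
        if cfg.get('format.usefncache', True):
            req.add('fncache')
        if cfg.get('format.dotencode', True):
            req.add('dotencode')
        if cfg.get('format.generaldelta', True):  # stand-in for gdinitconfig
            req.add('generaldelta')
        if cfg.get('experimental.treemanifest', False):
            req.add('treemanifest')
        if cfg.get('experimental.manifestv2', False):
            req.add('manifestv2')
        return req

    sketch_requirements({'experimental.treemanifest': True})
    # -> set(['revlogv1', 'store', 'fncache', 'dotencode',
    #         'generaldelta', 'treemanifest'])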
@@ -1,1295 +1,1298 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import array
11 11 import heapq
12 12 import os
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from . import (
17 17 error,
18 18 mdiff,
19 19 parsers,
20 20 revlog,
21 21 util,
22 22 )
23 23
24 24 propertycache = util.propertycache
25 25
26 26 def _parsev1(data):
27 27 # This method does a little bit of excessive-looking
28 28 # precondition checking. This is so that the behavior of this
29 29 # class exactly matches its C counterpart to try and help
30 30 # prevent surprise breakage for anyone that develops against
31 31 # the pure version.
32 32 if data and data[-1] != '\n':
33 33 raise ValueError('Manifest did not end in a newline.')
34 34 prev = None
35 35 for l in data.splitlines():
36 36 if prev is not None and prev > l:
37 37 raise ValueError('Manifest lines not in sorted order.')
38 38 prev = l
39 39 f, n = l.split('\0')
40 40 if len(n) > 40:
41 41 yield f, revlog.bin(n[:40]), n[40:]
42 42 else:
43 43 yield f, revlog.bin(n), ''
44 44
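For reference, a concrete v1 manifest text as consumed above: one "path\0<40 hex chars><flags>\n" entry per file, sorted by path (the hashes are made up):

    data = ("bar/baz.py\0" + "b" * 40 + "\n"    # no flag
            "foo.py\0" + "a" * 40 + "x\n")      # 'x' flag: executable
    # _parsev1(data) yields, in order:
    #   ('bar/baz.py', revlog.bin('b' * 40), '')
    #   ('foo.py',     revlog.bin('a' * 40), 'x')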
45 45 def _parsev2(data):
46 46 metadataend = data.find('\n')
47 47 # Just ignore metadata for now
48 48 pos = metadataend + 1
49 49 prevf = ''
50 50 while pos < len(data):
51 51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
52 52 if end == -1:
53 53 raise ValueError('Manifest ended with incomplete file entry.')
54 54 stemlen = ord(data[pos])
55 55 items = data[pos + 1:end].split('\0')
56 56 f = prevf[:stemlen] + items[0]
57 57 if prevf > f:
58 58 raise ValueError('Manifest entries not in sorted order.')
59 59 fl = items[1]
60 60 # Just ignore metadata (items[2:] for now)
61 61 n = data[end + 1:end + 21]
62 62 yield f, n, fl
63 63 pos = end + 22
64 64 prevf = f
65 65
66 66 def _parse(data):
67 67 """Generates (path, node, flags) tuples from a manifest text"""
68 68 if data.startswith('\0'):
69 69 return iter(_parsev2(data))
70 70 else:
71 71 return iter(_parsev1(data))
72 72
73 73 def _text(it, usemanifestv2):
74 74 """Given an iterator over (path, node, flags) tuples, returns a manifest
75 75 text"""
76 76 if usemanifestv2:
77 77 return _textv2(it)
78 78 else:
79 79 return _textv1(it)
80 80
81 81 def _textv1(it):
82 82 files = []
83 83 lines = []
84 84 _hex = revlog.hex
85 85 for f, n, fl in it:
86 86 files.append(f)
87 87 # if this is changed to support newlines in filenames,
88 88 # be sure to check the templates/ dir again (especially *-raw.tmpl)
89 89 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
90 90
91 91 _checkforbidden(files)
92 92 return ''.join(lines)
93 93
94 94 def _textv2(it):
95 95 files = []
96 96 lines = ['\0\n']
97 97 prevf = ''
98 98 for f, n, fl in it:
99 99 files.append(f)
100 100 stem = os.path.commonprefix([prevf, f])
101 101 stemlen = min(len(stem), 255)
102 102 lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
103 103 prevf = f
104 104 _checkforbidden(files)
105 105 return ''.join(lines)
106 106
107 107 class _lazymanifest(dict):
108 108 """This is the pure implementation of lazymanifest.
109 109
110 110 It has not been optimized *at all* and is not lazy.
111 111 """
112 112
113 113 def __init__(self, data):
114 114 dict.__init__(self)
115 115 for f, n, fl in _parse(data):
116 116 self[f] = n, fl
117 117
118 118 def __setitem__(self, k, v):
119 119 node, flag = v
120 120 assert node is not None
121 121 if len(node) > 21:
122 122 node = node[:21] # match c implementation behavior
123 123 dict.__setitem__(self, k, (node, flag))
124 124
125 125 def __iter__(self):
126 126 return iter(sorted(dict.keys(self)))
127 127
128 128 def iterkeys(self):
129 129 return iter(sorted(dict.keys(self)))
130 130
131 131 def iterentries(self):
132 132 return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
133 133
134 134 def copy(self):
135 135 c = _lazymanifest('')
136 136 c.update(self)
137 137 return c
138 138
139 139 def diff(self, m2, clean=False):
140 140 '''Finds changes between the current manifest and m2.'''
141 141 diff = {}
142 142
143 143 for fn, e1 in self.iteritems():
144 144 if fn not in m2:
145 145 diff[fn] = e1, (None, '')
146 146 else:
147 147 e2 = m2[fn]
148 148 if e1 != e2:
149 149 diff[fn] = e1, e2
150 150 elif clean:
151 151 diff[fn] = None
152 152
153 153 for fn, e2 in m2.iteritems():
154 154 if fn not in self:
155 155 diff[fn] = (None, ''), e2
156 156
157 157 return diff
158 158
159 159 def filtercopy(self, filterfn):
160 160 c = _lazymanifest('')
161 161 for f, n, fl in self.iterentries():
162 162 if filterfn(f):
163 163 c[f] = n, fl
164 164 return c
165 165
166 166 def text(self):
167 167 """Get the full data of this manifest as a bytestring."""
168 168 return _textv1(self.iterentries())
169 169
170 170 try:
171 171 _lazymanifest = parsers.lazymanifest
172 172 except AttributeError:
173 173 pass
174 174
175 175 class manifestdict(object):
176 176 def __init__(self, data=''):
177 177 if data.startswith('\0'):
178 178 # _lazymanifest cannot parse v2
179 179 self._lm = _lazymanifest('')
180 180 for f, n, fl in _parsev2(data):
181 181 self._lm[f] = n, fl
182 182 else:
183 183 self._lm = _lazymanifest(data)
184 184
185 185 def __getitem__(self, key):
186 186 return self._lm[key][0]
187 187
188 188 def find(self, key):
189 189 return self._lm[key]
190 190
191 191 def __len__(self):
192 192 return len(self._lm)
193 193
194 194 def __setitem__(self, key, node):
195 195 self._lm[key] = node, self.flags(key, '')
196 196
197 197 def __contains__(self, key):
198 198 return key in self._lm
199 199
200 200 def __delitem__(self, key):
201 201 del self._lm[key]
202 202
203 203 def __iter__(self):
204 204 return self._lm.__iter__()
205 205
206 206 def iterkeys(self):
207 207 return self._lm.iterkeys()
208 208
209 209 def keys(self):
210 210 return list(self.iterkeys())
211 211
212 212 def filesnotin(self, m2):
213 213 '''Set of files in this manifest that are not in the other'''
214 214 diff = self.diff(m2)
215 215 files = set(filepath
216 216 for filepath, hashflags in diff.iteritems()
217 217 if hashflags[1][0] is None)
218 218 return files
219 219
220 220 @propertycache
221 221 def _dirs(self):
222 222 return util.dirs(self)
223 223
224 224 def dirs(self):
225 225 return self._dirs
226 226
227 227 def hasdir(self, dir):
228 228 return dir in self._dirs
229 229
230 230 def _filesfastpath(self, match):
231 231 '''Checks whether we can correctly and quickly iterate over matcher
232 232 files instead of over manifest files.'''
233 233 files = match.files()
234 234 return (len(files) < 100 and (match.isexact() or
235 235 (match.prefix() and all(fn in self for fn in files))))
236 236
237 237 def walk(self, match):
238 238 '''Generates matching file names.
239 239
240 240 Equivalent to manifest.matches(match).iterkeys(), but without creating
241 241 an entirely new manifest.
242 242
243 243 It also reports nonexistent files by marking them bad with match.bad().
244 244 '''
245 245 if match.always():
246 246 for f in iter(self):
247 247 yield f
248 248 return
249 249
250 250 fset = set(match.files())
251 251
252 252 # avoid the entire walk if we're only looking for specific files
253 253 if self._filesfastpath(match):
254 254 for fn in sorted(fset):
255 255 yield fn
256 256 return
257 257
258 258 for fn in self:
259 259 if fn in fset:
260 260 # specified pattern is the exact name
261 261 fset.remove(fn)
262 262 if match(fn):
263 263 yield fn
264 264
265 265 # for dirstate.walk, files=['.'] means "walk the whole tree".
266 266 # follow that here, too
267 267 fset.discard('.')
268 268
269 269 for fn in sorted(fset):
270 270 if not self.hasdir(fn):
271 271 match.bad(fn, None)
272 272
273 273 def matches(self, match):
274 274 '''generate a new manifest filtered by the match argument'''
275 275 if match.always():
276 276 return self.copy()
277 277
278 278 if self._filesfastpath(match):
279 279 m = manifestdict()
280 280 lm = self._lm
281 281 for fn in match.files():
282 282 if fn in lm:
283 283 m._lm[fn] = lm[fn]
284 284 return m
285 285
286 286 m = manifestdict()
287 287 m._lm = self._lm.filtercopy(match)
288 288 return m
289 289
290 290 def diff(self, m2, clean=False):
291 291 '''Finds changes between the current manifest and m2.
292 292
293 293 Args:
294 294 m2: the manifest to which this manifest should be compared.
295 295 clean: if true, include files unchanged between these manifests
296 296 with a None value in the returned dictionary.
297 297
298 298 The result is returned as a dict with filename as key and
299 299 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
300 300 nodeid in the current/other manifest and fl1/fl2 is the flag
301 301 in the current/other manifest. Where the file does not exist,
302 302 the nodeid will be None and the flags will be the empty
303 303 string.
304 304 '''
305 305 return self._lm.diff(m2._lm, clean)
306 306
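A runnable sketch of the result shape with 20-byte stand-in nodes (arbitrary bytes, assuming the pure-Python _lazymanifest):

    n1, n2 = '\x11' * 20, '\x22' * 20
    m1, m2 = manifestdict(), manifestdict()
    m1['a'] = n1; m1['b'] = n1
    m2['a'] = n2; m2['c'] = n2; m2.setflag('c', 'l')
    assert m1.diff(m2) == {
        'a': ((n1, ''), (n2, '')),     # changed in m2
        'b': ((n1, ''), (None, '')),   # removed in m2
        'c': ((None, ''), (n2, 'l')),  # added in m2 as a symlink
    }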
307 307 def setflag(self, key, flag):
308 308 self._lm[key] = self[key], flag
309 309
310 310 def get(self, key, default=None):
311 311 try:
312 312 return self._lm[key][0]
313 313 except KeyError:
314 314 return default
315 315
316 316 def flags(self, key, default=''):
317 317 try:
318 318 return self._lm[key][1]
319 319 except KeyError:
320 320 return default
321 321
322 322 def copy(self):
323 323 c = manifestdict()
324 324 c._lm = self._lm.copy()
325 325 return c
326 326
327 327 def iteritems(self):
328 328 return (x[:2] for x in self._lm.iterentries())
329 329
330 330 def iterentries(self):
331 331 return self._lm.iterentries()
332 332
333 333 def text(self, usemanifestv2=False):
334 334 if usemanifestv2:
335 335 return _textv2(self._lm.iterentries())
336 336 else:
337 337 # use (probably) native version for v1
338 338 return self._lm.text()
339 339
340 340 def fastdelta(self, base, changes):
341 341 """Given a base manifest text as an array.array and a list of changes
342 342 relative to that text, compute a delta that can be used by revlog.
343 343 """
344 344 delta = []
345 345 dstart = None
346 346 dend = None
347 347 dline = [""]
348 348 start = 0
349 349 # zero copy representation of base as a buffer
350 350 addbuf = util.buffer(base)
351 351
352 352 changes = list(changes)
353 353 if len(changes) < 1000:
354 354 # start with a readonly loop that finds the offset of
355 355 # each line and creates the deltas
356 356 for f, todelete in changes:
357 357 # bs will either be the index of the item or the insert point
358 358 start, end = _msearch(addbuf, f, start)
359 359 if not todelete:
360 360 h, fl = self._lm[f]
361 361 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
362 362 else:
363 363 if start == end:
364 364 # item we want to delete was not found, error out
365 365 raise AssertionError(
366 366 _("failed to remove %s from manifest") % f)
367 367 l = ""
368 368 if dstart is not None and dstart <= start and dend >= start:
369 369 if dend < end:
370 370 dend = end
371 371 if l:
372 372 dline.append(l)
373 373 else:
374 374 if dstart is not None:
375 375 delta.append([dstart, dend, "".join(dline)])
376 376 dstart = start
377 377 dend = end
378 378 dline = [l]
379 379
380 380 if dstart is not None:
381 381 delta.append([dstart, dend, "".join(dline)])
382 382 # apply the delta to the base, and get a delta for addrevision
383 383 deltatext, arraytext = _addlistdelta(base, delta)
384 384 else:
385 385 # For large changes, it's much cheaper to just build the text and
386 386 # diff it.
387 387 arraytext = array.array('c', self.text())
388 388 deltatext = mdiff.textdiff(base, arraytext)
389 389
390 390 return arraytext, deltatext
391 391
392 392 def _msearch(m, s, lo=0, hi=None):
393 393 '''return a tuple (start, end) that says where to find s within m.
394 394
395 395 If the string is found, m[start:end] is the line containing
396 396 that string. If start == end, the string was not found and
397 397 they indicate the proper sorted insertion point.
398 398
399 399 m should be a buffer or a string
400 400 s is a string'''
401 401 def advance(i, c):
402 402 while i < lenm and m[i] != c:
403 403 i += 1
404 404 return i
405 405 if not s:
406 406 return (lo, lo)
407 407 lenm = len(m)
408 408 if not hi:
409 409 hi = lenm
410 410 while lo < hi:
411 411 mid = (lo + hi) // 2
412 412 start = mid
413 413 while start > 0 and m[start - 1] != '\n':
414 414 start -= 1
415 415 end = advance(start, '\0')
416 416 if m[start:end] < s:
417 417 # we know that after the null there are 40 bytes of sha1
418 418 # this translates to the bisect lo = mid + 1
419 419 lo = advance(end + 40, '\n') + 1
420 420 else:
421 421 # this translates to the bisect hi = mid
422 422 hi = start
423 423 end = advance(lo, '\0')
424 424 found = m[lo:end]
425 425 if s == found:
426 426 # we know that after the null there are 40 bytes of sha1
427 427 end = advance(end + 40, '\n')
428 428 return (lo, end + 1)
429 429 else:
430 430 return (lo, lo)
431 431
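A worked example against the two-entry manifest text used earlier; the first line occupies bytes 0..51, so a miss that sorts between the two entries reports offset 52:

    m = "bar/baz.py\0" + "b" * 40 + "\n" + "foo.py\0" + "a" * 40 + "x\n"
    start, end = _msearch(m, 'foo.py')
    assert m[start:end] == "foo.py\0" + "a" * 40 + "x\n"
    lo, hi = _msearch(m, 'cat.py')   # absent: lo == hi == 52, the sorted
    assert lo == hi == 52            # insertion point after the first line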
432 432 def _checkforbidden(l):
433 433 """Check filenames for illegal characters."""
434 434 for f in l:
435 435 if '\n' in f or '\r' in f:
436 436 raise error.RevlogError(
437 437 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
438 438
439 439
440 440 # apply the changes collected during the bisect loop to our addlist
441 441 # return a delta suitable for addrevision
442 442 def _addlistdelta(addlist, x):
443 443 # for large addlist arrays, building a new array is cheaper
444 444 # than repeatedly modifying the existing one
445 445 currentposition = 0
446 446 newaddlist = array.array('c')
447 447
448 448 for start, end, content in x:
449 449 newaddlist += addlist[currentposition:start]
450 450 if content:
451 451 newaddlist += array.array('c', content)
452 452
453 453 currentposition = end
454 454
455 455 newaddlist += addlist[currentposition:]
456 456
457 457 deltatext = "".join(struct.pack(">lll", start, end, len(content))
458 458 + content for start, end, content in x)
459 459 return deltatext, newaddlist
460 460
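Each delta element becomes a 12-byte big-endian (start, end, length) header followed by the replacement bytes; a minimal sketch of one chunk:

    import struct
    content = "foo.py\0" + "a" * 40 + "x\n"
    chunk = struct.pack(">lll", 52, 52, len(content)) + content
    # header reads: replace base[52:52] (i.e. insert at 52) with 49 bytes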
461 461 def _splittopdir(f):
462 462 if '/' in f:
463 463 dir, subpath = f.split('/', 1)
464 464 return dir + '/', subpath
465 465 else:
466 466 return '', f
467 467
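_splittopdir() peels off exactly one directory level, keeping the trailing '/':

    assert _splittopdir('a/b/c') == ('a/', 'b/c')
    assert _splittopdir('top') == ('', 'top')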
468 468 _noop = lambda s: None
469 469
470 470 class treemanifest(object):
471 471 def __init__(self, dir='', text=''):
472 472 self._dir = dir
473 473 self._node = revlog.nullid
474 474 self._loadfunc = _noop
475 475 self._copyfunc = _noop
476 476 self._dirty = False
477 477 self._dirs = {}
478 478 # Using _lazymanifest here is a little slower than plain old dicts
479 479 self._files = {}
480 480 self._flags = {}
481 481 if text:
482 482 def readsubtree(subdir, subm):
483 483 raise AssertionError('treemanifest constructor only accepts '
484 484 'flat manifests')
485 485 self.parse(text, readsubtree)
486 486 self._dirty = True # Mark flat manifest dirty after parsing
487 487
488 488 def _subpath(self, path):
489 489 return self._dir + path
490 490
491 491 def __len__(self):
492 492 self._load()
493 493 size = len(self._files)
494 494 for m in self._dirs.values():
495 495 size += m.__len__()
496 496 return size
497 497
498 498 def _isempty(self):
499 499 self._load() # for consistency; already loaded by all callers
500 500 return (not self._files and (not self._dirs or
501 501 all(m._isempty() for m in self._dirs.values())))
502 502
503 503 def __repr__(self):
504 504 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
505 505 (self._dir, revlog.hex(self._node),
506 506 bool(self._loadfunc is _noop),
507 507 self._dirty, id(self)))
508 508
509 509 def dir(self):
510 510 '''The directory that this tree manifest represents, including a
511 511 trailing '/'. Empty string for the repo root directory.'''
512 512 return self._dir
513 513
514 514 def node(self):
515 515 '''The node of this instance. nullid for unsaved instances. Should
516 516 be updated when the instance is read from or written to a revlog.
517 517 '''
518 518 assert not self._dirty
519 519 return self._node
520 520
521 521 def setnode(self, node):
522 522 self._node = node
523 523 self._dirty = False
524 524
525 525 def iterentries(self):
526 526 self._load()
527 527 for p, n in sorted(self._dirs.items() + self._files.items()):
528 528 if p in self._files:
529 529 yield self._subpath(p), n, self._flags.get(p, '')
530 530 else:
531 531 for x in n.iterentries():
532 532 yield x
533 533
534 534 def iteritems(self):
535 535 self._load()
536 536 for p, n in sorted(self._dirs.items() + self._files.items()):
537 537 if p in self._files:
538 538 yield self._subpath(p), n
539 539 else:
540 540 for f, sn in n.iteritems():
541 541 yield f, sn
542 542
543 543 def iterkeys(self):
544 544 self._load()
545 545 for p in sorted(self._dirs.keys() + self._files.keys()):
546 546 if p in self._files:
547 547 yield self._subpath(p)
548 548 else:
549 549 for f in self._dirs[p].iterkeys():
550 550 yield f
551 551
552 552 def keys(self):
553 553 return list(self.iterkeys())
554 554
555 555 def __iter__(self):
556 556 return self.iterkeys()
557 557
558 558 def __contains__(self, f):
559 559 if f is None:
560 560 return False
561 561 self._load()
562 562 dir, subpath = _splittopdir(f)
563 563 if dir:
564 564 if dir not in self._dirs:
565 565 return False
566 566 return self._dirs[dir].__contains__(subpath)
567 567 else:
568 568 return f in self._files
569 569
570 570 def get(self, f, default=None):
571 571 self._load()
572 572 dir, subpath = _splittopdir(f)
573 573 if dir:
574 574 if dir not in self._dirs:
575 575 return default
576 576 return self._dirs[dir].get(subpath, default)
577 577 else:
578 578 return self._files.get(f, default)
579 579
580 580 def __getitem__(self, f):
581 581 self._load()
582 582 dir, subpath = _splittopdir(f)
583 583 if dir:
584 584 return self._dirs[dir].__getitem__(subpath)
585 585 else:
586 586 return self._files[f]
587 587
588 588 def flags(self, f):
589 589 self._load()
590 590 dir, subpath = _splittopdir(f)
591 591 if dir:
592 592 if dir not in self._dirs:
593 593 return ''
594 594 return self._dirs[dir].flags(subpath)
595 595 else:
596 596 if f in self._dirs:
597 597 return ''
598 598 return self._flags.get(f, '')
599 599
600 600 def find(self, f):
601 601 self._load()
602 602 dir, subpath = _splittopdir(f)
603 603 if dir:
604 604 return self._dirs[dir].find(subpath)
605 605 else:
606 606 return self._files[f], self._flags.get(f, '')
607 607
608 608 def __delitem__(self, f):
609 609 self._load()
610 610 dir, subpath = _splittopdir(f)
611 611 if dir:
612 612 self._dirs[dir].__delitem__(subpath)
613 613 # If the directory is now empty, remove it
614 614 if self._dirs[dir]._isempty():
615 615 del self._dirs[dir]
616 616 else:
617 617 del self._files[f]
618 618 if f in self._flags:
619 619 del self._flags[f]
620 620 self._dirty = True
621 621
622 622 def __setitem__(self, f, n):
623 623 assert n is not None
624 624 self._load()
625 625 dir, subpath = _splittopdir(f)
626 626 if dir:
627 627 if dir not in self._dirs:
628 628 self._dirs[dir] = treemanifest(self._subpath(dir))
629 629 self._dirs[dir].__setitem__(subpath, n)
630 630 else:
631 631 self._files[f] = n[:21] # to match manifestdict's behavior
632 632 self._dirty = True
633 633
634 634 def _load(self):
635 635 if self._loadfunc is not _noop:
636 636 lf, self._loadfunc = self._loadfunc, _noop
637 637 lf(self)
638 638 elif self._copyfunc is not _noop:
639 639 cf, self._copyfunc = self._copyfunc, _noop
640 640 cf(self)
641 641
642 642 def setflag(self, f, flags):
643 643 """Set the flags (symlink, executable) for path f."""
644 644 self._load()
645 645 dir, subpath = _splittopdir(f)
646 646 if dir:
647 647 if dir not in self._dirs:
648 648 self._dirs[dir] = treemanifest(self._subpath(dir))
649 649 self._dirs[dir].setflag(subpath, flags)
650 650 else:
651 651 self._flags[f] = flags
652 652 self._dirty = True
653 653
654 654 def copy(self):
655 655 copy = treemanifest(self._dir)
656 656 copy._node = self._node
657 657 copy._dirty = self._dirty
658 658 if self._copyfunc is _noop:
659 659 def _copyfunc(s):
660 660 self._load()
661 661 for d in self._dirs:
662 662 s._dirs[d] = self._dirs[d].copy()
663 663 s._files = dict.copy(self._files)
664 664 s._flags = dict.copy(self._flags)
665 665 if self._loadfunc is _noop:
666 666 _copyfunc(copy)
667 667 else:
668 668 copy._copyfunc = _copyfunc
669 669 else:
670 670 copy._copyfunc = self._copyfunc
671 671 return copy
672 672
673 673 def filesnotin(self, m2):
674 674 '''Set of files in this manifest that are not in the other'''
675 675 files = set()
676 676 def _filesnotin(t1, t2):
677 677 if t1._node == t2._node and not t1._dirty and not t2._dirty:
678 678 return
679 679 t1._load()
680 680 t2._load()
681 681 for d, m1 in t1._dirs.iteritems():
682 682 if d in t2._dirs:
683 683 m2 = t2._dirs[d]
684 684 _filesnotin(m1, m2)
685 685 else:
686 686 files.update(m1.iterkeys())
687 687
688 688 for fn in t1._files.iterkeys():
689 689 if fn not in t2._files:
690 690 files.add(t1._subpath(fn))
691 691
692 692 _filesnotin(self, m2)
693 693 return files
694 694
695 695 @propertycache
696 696 def _alldirs(self):
697 697 return util.dirs(self)
698 698
699 699 def dirs(self):
700 700 return self._alldirs
701 701
702 702 def hasdir(self, dir):
703 703 self._load()
704 704 topdir, subdir = _splittopdir(dir)
705 705 if topdir:
706 706 if topdir in self._dirs:
707 707 return self._dirs[topdir].hasdir(subdir)
708 708 return False
709 709 return (dir + '/') in self._dirs
710 710
711 711 def walk(self, match):
712 712 '''Generates matching file names.
713 713
714 714 Equivalent to manifest.matches(match).iterkeys(), but without creating
715 715 an entirely new manifest.
716 716
717 717 It also reports nonexistent files by marking them bad with match.bad().
718 718 '''
719 719 if match.always():
720 720 for f in iter(self):
721 721 yield f
722 722 return
723 723
724 724 fset = set(match.files())
725 725
726 726 for fn in self._walk(match):
727 727 if fn in fset:
728 728 # specified pattern is the exact name
729 729 fset.remove(fn)
730 730 yield fn
731 731
732 732 # for dirstate.walk, files=['.'] means "walk the whole tree".
733 733 # follow that here, too
734 734 fset.discard('.')
735 735
736 736 for fn in sorted(fset):
737 737 if not self.hasdir(fn):
738 738 match.bad(fn, None)
739 739
740 740 def _walk(self, match):
741 741 '''Recursively generates matching file names for walk().'''
742 742 if not match.visitdir(self._dir[:-1] or '.'):
743 743 return
744 744
745 745 # yield this dir's files and walk its submanifests
746 746 self._load()
747 747 for p in sorted(self._dirs.keys() + self._files.keys()):
748 748 if p in self._files:
749 749 fullp = self._subpath(p)
750 750 if match(fullp):
751 751 yield fullp
752 752 else:
753 753 for f in self._dirs[p]._walk(match):
754 754 yield f
755 755
756 756 def matches(self, match):
757 757 '''generate a new manifest filtered by the match argument'''
758 758 if match.always():
759 759 return self.copy()
760 760
761 761 return self._matches(match)
762 762
763 763 def _matches(self, match):
764 764 '''recursively generate a new manifest filtered by the match argument.
765 765 '''
766 766
767 767 visit = match.visitdir(self._dir[:-1] or '.')
768 768 if visit == 'all':
769 769 return self.copy()
770 770 ret = treemanifest(self._dir)
771 771 if not visit:
772 772 return ret
773 773
774 774 self._load()
775 775 for fn in self._files:
776 776 fullp = self._subpath(fn)
777 777 if not match(fullp):
778 778 continue
779 779 ret._files[fn] = self._files[fn]
780 780 if fn in self._flags:
781 781 ret._flags[fn] = self._flags[fn]
782 782
783 783 for dir, subm in self._dirs.iteritems():
784 784 m = subm._matches(match)
785 785 if not m._isempty():
786 786 ret._dirs[dir] = m
787 787
788 788 if not ret._isempty():
789 789 ret._dirty = True
790 790 return ret
791 791
792 792 def diff(self, m2, clean=False):
793 793 '''Finds changes between the current manifest and m2.
794 794
795 795 Args:
796 796 m2: the manifest to which this manifest should be compared.
797 797 clean: if true, include files unchanged between these manifests
798 798 with a None value in the returned dictionary.
799 799
800 800 The result is returned as a dict with filename as key and
801 801 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
802 802 nodeid in the current/other manifest and fl1/fl2 is the flag
803 803 in the current/other manifest. Where the file does not exist,
804 804 the nodeid will be None and the flags will be the empty
805 805 string.
806 806 '''
807 807 result = {}
808 808 emptytree = treemanifest()
809 809 def _diff(t1, t2):
810 810 if t1._node == t2._node and not t1._dirty and not t2._dirty:
811 811 return
812 812 t1._load()
813 813 t2._load()
814 814 for d, m1 in t1._dirs.iteritems():
815 815 m2 = t2._dirs.get(d, emptytree)
816 816 _diff(m1, m2)
817 817
818 818 for d, m2 in t2._dirs.iteritems():
819 819 if d not in t1._dirs:
820 820 _diff(emptytree, m2)
821 821
822 822 for fn, n1 in t1._files.iteritems():
823 823 fl1 = t1._flags.get(fn, '')
824 824 n2 = t2._files.get(fn, None)
825 825 fl2 = t2._flags.get(fn, '')
826 826 if n1 != n2 or fl1 != fl2:
827 827 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
828 828 elif clean:
829 829 result[t1._subpath(fn)] = None
830 830
831 831 for fn, n2 in t2._files.iteritems():
832 832 if fn not in t1._files:
833 833 fl2 = t2._flags.get(fn, '')
834 834 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
835 835
836 836 _diff(self, m2)
837 837 return result
838 838
839 839 def unmodifiedsince(self, m2):
840 840 return not self._dirty and not m2._dirty and self._node == m2._node
841 841
842 842 def parse(self, text, readsubtree):
843 843 for f, n, fl in _parse(text):
844 844 if fl == 't':
845 845 f = f + '/'
846 846 self._dirs[f] = readsubtree(self._subpath(f), n)
847 847 elif '/' in f:
848 848 # This is a flat manifest, so use __setitem__ and setflag rather
849 849 # than assigning directly to _files and _flags, so we can
850 850 # assign a path in a subdirectory and mark the manifest dirty
851 851 # (compared to nullid).
852 852 self[f] = n
853 853 if fl:
854 854 self.setflag(f, fl)
855 855 else:
856 856 # Assigning to _files and _flags avoids marking as dirty,
857 857 # and should be a little faster.
858 858 self._files[f] = n
859 859 if fl:
860 860 self._flags[f] = fl
861 861
862 862 def text(self, usemanifestv2=False):
863 863 """Get the full data of this manifest as a bytestring."""
864 864 self._load()
865 865 return _text(self.iterentries(), usemanifestv2)
866 866
867 867 def dirtext(self, usemanifestv2=False):
868 868 """Get the full data of this directory as a bytestring. Make sure that
869 869 any submanifests have been written first, so their nodeids are correct.
870 870 """
871 871 self._load()
872 872 flags = self.flags
873 873 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
874 874 files = [(f, self._files[f], flags(f)) for f in self._files]
875 875 return _text(sorted(dirs + files), usemanifestv2)
876 876
877 877 def read(self, gettext, readsubtree):
878 878 def _load_for_read(s):
879 879 s.parse(gettext(), readsubtree)
880 880 s._dirty = False
881 881 self._loadfunc = _load_for_read
882 882
883 883 def writesubtrees(self, m1, m2, writesubtree):
884 884 self._load() # for consistency; should never have any effect here
885 885 m1._load()
886 886 m2._load()
887 887 emptytree = treemanifest()
888 888 for d, subm in self._dirs.iteritems():
889 889 subp1 = m1._dirs.get(d, emptytree)._node
890 890 subp2 = m2._dirs.get(d, emptytree)._node
891 891 if subp1 == revlog.nullid:
892 892 subp1, subp2 = subp2, subp1
893 893 writesubtree(subm, subp1, subp2)
894 894
895 895 class manifestrevlog(revlog.revlog):
896 896 '''A revlog that stores manifest texts. This is responsible for caching the
897 897 full-text manifest contents.
898 898 '''
899 899 def __init__(self, opener, dir='', dirlogcache=None):
900 900 # During normal operations, we expect to deal with not more than four
901 901 # revs at a time (such as during commit --amend). When rebasing large
902 902 # stacks of commits, the number can go up, hence the config knob below.
903 903 cachesize = 4
904 904 usetreemanifest = False
905 905 usemanifestv2 = False
906 906 opts = getattr(opener, 'options', None)
907 907 if opts is not None:
908 908 cachesize = opts.get('manifestcachesize', cachesize)
909 909 usetreemanifest = opts.get('treemanifest', usetreemanifest)
910 910 usemanifestv2 = opts.get('manifestv2', usemanifestv2)
911 911
912 912 self._treeondisk = usetreemanifest
913 913 self._usemanifestv2 = usemanifestv2
914 914
915 915 self._fulltextcache = util.lrucachedict(cachesize)
916 916
917 917 indexfile = "00manifest.i"
918 918 if dir:
919 919 assert self._treeondisk, 'opts is %r' % opts
920 920 if not dir.endswith('/'):
921 921 dir = dir + '/'
922 922 indexfile = "meta/" + dir + "00manifest.i"
923 923 self._dir = dir
924 924 # The dirlogcache is kept on the root manifest log
925 925 if dir:
926 926 self._dirlogcache = dirlogcache
927 927 else:
928 928 self._dirlogcache = {'': self}
929 929
930 930 super(manifestrevlog, self).__init__(opener, indexfile)
931 931
932 932 @property
933 933 def fulltextcache(self):
934 934 return self._fulltextcache
935 935
936 936 def clearcaches(self):
937 937 super(manifestrevlog, self).clearcaches()
938 938 self._fulltextcache.clear()
939 939 self._dirlogcache = {'': self}
940 940
941 941 def dirlog(self, dir):
942 942 if dir:
943 943 assert self._treeondisk
944 944 if dir not in self._dirlogcache:
945 945 self._dirlogcache[dir] = manifestrevlog(self.opener, dir,
946 946 self._dirlogcache)
947 947 return self._dirlogcache[dir]
948 948
949 949 def add(self, m, transaction, link, p1, p2, added, removed):
950 950 if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
951 951 and not self._usemanifestv2):
952 952 # If our first parent is in the manifest cache, we can
953 953 # compute a delta here using properties we know about the
954 954 # manifest up-front, which may save time later for the
955 955 # revlog layer.
956 956
957 957 _checkforbidden(added)
958 958 # combine the changed lists into one sorted iterator
959 959 work = heapq.merge([(x, False) for x in added],
960 960 [(x, True) for x in removed])
961 961
962 962 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
963 963 cachedelta = self.rev(p1), deltatext
964 964 text = util.buffer(arraytext)
965 965 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
966 966 else:
967 967 # The first parent manifest isn't already loaded, so we'll
968 968 # just encode a fulltext of the manifest and pass that
969 969 # through to the revlog layer, and let it handle the delta
970 970 # process.
971 971 if self._treeondisk:
972 972 m1 = self.read(p1)
973 973 m2 = self.read(p2)
974 974 n = self._addtree(m, transaction, link, m1, m2)
975 975 arraytext = None
976 976 else:
977 977 text = m.text(self._usemanifestv2)
978 978 n = self.addrevision(text, transaction, link, p1, p2)
979 979 arraytext = array.array('c', text)
980 980
981 981 self.fulltextcache[n] = arraytext
982 982
983 983 return n
984 984
985 985 def _addtree(self, m, transaction, link, m1, m2):
986 986 # If the manifest is unchanged compared to one parent,
987 987 # don't write a new revision
988 988 if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
989 989 return m.node()
990 990 def writesubtree(subm, subp1, subp2):
991 991 sublog = self.dirlog(subm.dir())
992 992 sublog.add(subm, transaction, link, subp1, subp2, None, None)
993 993 m.writesubtrees(m1, m2, writesubtree)
994 994 text = m.dirtext(self._usemanifestv2)
995 995 # Double-check whether contents are unchanged relative to one parent
996 996 if text == m1.dirtext(self._usemanifestv2):
997 997 n = m1.node()
998 998 elif text == m2.dirtext(self._usemanifestv2):
999 999 n = m2.node()
1000 1000 else:
1001 1001 n = self.addrevision(text, transaction, link, m1.node(), m2.node())
1002 1002 # Save nodeid so parent manifest can calculate its nodeid
1003 1003 m.setnode(n)
1004 1004 return n
1005 1005
1006 1006 class manifestlog(object):
1007 1007 """A collection class representing the collection of manifest snapshots
1008 1008 referenced by commits in the repository.
1009 1009
1010 1010 In this situation, 'manifest' refers to the abstract concept of a snapshot
1011 1011 of the list of files in the given commit. Consumers of the output of this
1012 1012 class do not care about the implementation details of the actual manifests
1013 1013 they receive (i.e. tree or flat or lazily loaded, etc)."""
1014 1014 def __init__(self, opener, repo):
1015 1015 self._repo = repo
1016 1016
1017 1017 usetreemanifest = False
1018 1018
1019 1019 opts = getattr(opener, 'options', None)
1020 1020 if opts is not None:
1021 1021 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1022 1022 self._treeinmem = usetreemanifest
1023 1023
1024 1024 # We'll separate this into its own cache once oldmanifest is no longer
1025 1025 # used
1026 1026 self._mancache = repo.manifest._mancache
1027 1027
1028 1028 @property
1029 1029 def _revlog(self):
1030 1030 return self._repo.manifest
1031 1031
1032 1032 def __getitem__(self, node):
1033 1033 """Retrieves the manifest instance for the given node. Throws a KeyError
1034 1034 if not found.
1035 1035 """
1036 1036 if node in self._mancache:
1037 1037 cachemf = self._mancache[node]
1038 1038 # The old manifest may put non-ctx manifests in the cache, so skip
1039 1039 # those since they don't implement the full api.
1040 1040 if (isinstance(cachemf, manifestctx) or
1041 1041 isinstance(cachemf, treemanifestctx)):
1042 1042 return cachemf
1043 1043
1044 1044 if self._treeinmem:
1045 1045 m = treemanifestctx(self._revlog, '', node)
1046 1046 else:
1047 1047 m = manifestctx(self._revlog, node)
1048 1048 if node != revlog.nullid:
1049 1049 self._mancache[node] = m
1050 1050 return m
1051 1051
1052 def add(self, m, transaction, link, p1, p2, added, removed):
1053 return self._revlog.add(m, transaction, link, p1, p2, added, removed)
1054
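With the forwarding method just added, callers can stay on the manifestlog API for both reads and writes; a sketch of the round trip (repo, tr, linkrev, fnode and the parent contexts are assumed to exist in the calling code):

    mfl = repo.manifestlog
    m = mfl[p1.manifestnode()].read().copy()   # start from the p1 manifest
    m['foo.py'] = fnode                        # record the new file node
    mn = mfl.add(m, tr, linkrev, p1.manifestnode(), p2.manifestnode(),
                 ['foo.py'], [])               # added=['foo.py'], removed=[]
    # mn is the manifest node the new changelog entry will reference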
1052 1055 class manifestctx(object):
1053 1056 """A class representing a single revision of a manifest, including its
1054 1057 contents, its parent revs, and its linkrev.
1055 1058 """
1056 1059 def __init__(self, revlog, node):
1057 1060 self._revlog = revlog
1058 1061 self._data = None
1059 1062
1060 1063 self._node = node
1061 1064
1062 1065 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1063 1066 # but let's add it later when something needs it and we can load it
1064 1067 # lazily.
1065 1068 #self.p1, self.p2 = revlog.parents(node)
1066 1069 #rev = revlog.rev(node)
1067 1070 #self.linkrev = revlog.linkrev(rev)
1068 1071
1069 1072 def node(self):
1070 1073 return self._node
1071 1074
1072 1075 def read(self):
1073 1076 if not self._data:
1074 1077 if self._node == revlog.nullid:
1075 1078 self._data = manifestdict()
1076 1079 else:
1077 1080 text = self._revlog.revision(self._node)
1078 1081 arraytext = array.array('c', text)
1079 1082 self._revlog._fulltextcache[self._node] = arraytext
1080 1083 self._data = manifestdict(text)
1081 1084 return self._data
1082 1085
1083 1086 def readfast(self):
1084 1087 rl = self._revlog
1085 1088 r = rl.rev(self._node)
1086 1089 deltaparent = rl.deltaparent(r)
1087 1090 if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
1088 1091 return self.readdelta()
1089 1092 return self.read()
1090 1093
1091 1094 def readdelta(self):
1092 1095 revlog = self._revlog
1093 1096 if revlog._usemanifestv2:
1094 1097 # Need to perform a slow delta
1095 1098 r0 = revlog.deltaparent(revlog.rev(self._node))
1096 1099 m0 = manifestctx(revlog, revlog.node(r0)).read()
1097 1100 m1 = self.read()
1098 1101 md = manifestdict()
1099 1102 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1100 1103 if n1:
1101 1104 md[f] = n1
1102 1105 if fl1:
1103 1106 md.setflag(f, fl1)
1104 1107 return md
1105 1108
1106 1109 r = revlog.rev(self._node)
1107 1110 d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
1108 1111 return manifestdict(d)
1109 1112
1110 1113 class treemanifestctx(object):
1111 1114 def __init__(self, revlog, dir, node):
1112 1115 revlog = revlog.dirlog(dir)
1113 1116 self._revlog = revlog
1114 1117 self._dir = dir
1115 1118 self._data = None
1116 1119
1117 1120 self._node = node
1118 1121
1119 1122 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
1120 1123 # we can instantiate treemanifestctx objects for directories we don't
1121 1124 # have on disk.
1122 1125 #self.p1, self.p2 = revlog.parents(node)
1123 1126 #rev = revlog.rev(node)
1124 1127 #self.linkrev = revlog.linkrev(rev)
1125 1128
1126 1129 def read(self):
1127 1130 if not self._data:
1128 1131 if self._node == revlog.nullid:
1129 1132 self._data = treemanifest()
1130 1133 elif self._revlog._treeondisk:
1131 1134 m = treemanifest(dir=self._dir)
1132 1135 def gettext():
1133 1136 return self._revlog.revision(self._node)
1134 1137 def readsubtree(dir, subm):
1135 1138 return treemanifestctx(self._revlog, dir, subm).read()
1136 1139 m.read(gettext, readsubtree)
1137 1140 m.setnode(self._node)
1138 1141 self._data = m
1139 1142 else:
1140 1143 text = self._revlog.revision(self._node)
1141 1144 arraytext = array.array('c', text)
1142 1145 self._revlog.fulltextcache[self._node] = arraytext
1143 1146 self._data = treemanifest(dir=self._dir, text=text)
1144 1147
1145 1148 return self._data
1146 1149
1147 1150 def node(self):
1148 1151 return self._node
1149 1152
1150 1153 def readdelta(self):
1151 1154 # Need to perform a slow delta
1152 1155 revlog = self._revlog
1153 1156 r0 = revlog.deltaparent(revlog.rev(self._node))
1154 1157 m0 = treemanifestctx(revlog, self._dir, revlog.node(r0)).read()
1155 1158 m1 = self.read()
1156 1159 md = treemanifest(dir=self._dir)
1157 1160 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1158 1161 if n1:
1159 1162 md[f] = n1
1160 1163 if fl1:
1161 1164 md.setflag(f, fl1)
1162 1165 return md
1163 1166
1164 1167 def readfast(self):
1165 1168 rl = self._revlog
1166 1169 r = rl.rev(self._node)
1167 1170 deltaparent = rl.deltaparent(r)
1168 1171 if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
1169 1172 return self.readdelta()
1170 1173 return self.read()
1171 1174
1172 1175 class manifest(manifestrevlog):
1173 1176 def __init__(self, opener, dir='', dirlogcache=None):
1174 1177 '''The 'dir' and 'dirlogcache' arguments are for internal use by
1175 1178 manifest.manifest only. External users should create a root manifest
1176 1179 log with manifest.manifest(opener) and call dirlog() on it.
1177 1180 '''
1178 1181 # During normal operations, we expect to deal with not more than four
1179 1182 # revs at a time (such as during commit --amend). When rebasing large
1180 1183 # stacks of commits, the number can go up, hence the config knob below.
1181 1184 cachesize = 4
1182 1185 usetreemanifest = False
1183 1186 opts = getattr(opener, 'options', None)
1184 1187 if opts is not None:
1185 1188 cachesize = opts.get('manifestcachesize', cachesize)
1186 1189 usetreemanifest = opts.get('treemanifest', usetreemanifest)
        self._mancache = util.lrucachedict(cachesize)
        self._treeinmem = usetreemanifest
        super(manifest, self).__init__(opener, dir=dir, dirlogcache=dirlogcache)

    def _newmanifest(self, data=''):
        if self._treeinmem:
            return treemanifest(self._dir, data)
        return manifestdict(data)

    def dirlog(self, dir):
        """This overrides the base revlog implementation to allow construction
        of 'manifest' types instead of manifestrevlog types. This is only
        needed until we migrate off the 'manifest' type."""
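        # Subdirectory revlogs only exist when tree manifests are stored on
        # disk; the shared _dirlogcache is keyed by directory path.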
        if dir:
            assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifest(self.opener, dir,
                                              self._dirlogcache)
        return self._dirlogcache[dir]

    def _slowreaddelta(self, node):
        r0 = self.deltaparent(self.rev(node))
        m0 = self.read(self.node(r0))
        m1 = self.read(node)
        md = self._newmanifest()
        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
            if n1:
                md[f] = n1
                if fl1:
                    md.setflag(f, fl1)
        return md

    def readdelta(self, node):
        if self._usemanifestv2 or self._treeondisk:
            return self._slowreaddelta(node)
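        # Flat v1 manifests store line-aligned bdiff deltas, so the fragments
        # the delta inserts parse directly as manifest entries. manifestv2
        # uses a different delta encoding and tree manifests need recursive
        # reads, so both take the slow path above.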
        r = self.rev(node)
        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
        return self._newmanifest(d)

    def readshallowdelta(self, node):
        '''For flat manifests, this is the same as readdelta(). For
        treemanifests, this will read the delta for this revlog's directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest,
        i.e. the subdirectory will be reported among files and distinguished
        only by its 't' flag.'''
        if not self._treeondisk:
            return self.readdelta(node)
        if self._usemanifestv2:
            raise error.Abort(
                _("readshallowdelta() not implemented for manifestv2"))
        r = self.rev(node)
        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
        return manifestdict(d)

    def readshallowfast(self, node):
        '''like readfast(), but calls readshallowdelta() instead of readdelta()
        '''
        r = self.rev(node)
        deltaparent = self.deltaparent(r)
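        # Same heuristic as readfast(): only use the delta when it is against
        # one of this revision's parents.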
        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
            return self.readshallowdelta(node)
        return self.readshallow(node)

    def read(self, node):
        if node == revlog.nullid:
            return self._newmanifest() # don't upset local cache
        if node in self._mancache:
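            # The cache may hold either a parsed manifest or a manifestctx/
            # treemanifestctx stored by manifestlog; unwrap the latter.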
            cached = self._mancache[node]
            if (isinstance(cached, manifestctx) or
                    isinstance(cached, treemanifestctx)):
                cached = cached.read()
            return cached
        if self._treeondisk:
            def gettext():
                return self.revision(node)
            def readsubtree(dir, subm):
                return self.dirlog(dir).read(subm)
            m = self._newmanifest()
            m.read(gettext, readsubtree)
            m.setnode(node)
            arraytext = None
        else:
            text = self.revision(node)
            m = self._newmanifest(text)
            arraytext = array.array('c', text)
        self._mancache[node] = m
        self.fulltextcache[node] = arraytext
        return m

    def readshallow(self, node):
        '''Reads the manifest in this directory. When using flat manifests,
        this manifest will generally have files in subdirectories in it. Does
        not cache the manifest as the callers generally do not read the same
        version twice.'''
        return manifestdict(self.revision(node))

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        m = self.read(node)
        try:
            return m.find(f)
        except KeyError:
            return None, None

    def clearcaches(self):
        super(manifest, self).clearcaches()
        self._mancache.clear()